prompt (string, lengths 19-879k) | completion (string, lengths 3-53.8k) | api (string, lengths 8-59) |
---|---|---|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import warnings
warnings.filterwarnings("ignore")
import sys
#sys.argv=['SAGP.py','test.txt','test.info','test.out']
#import matplotlib
#matplotlib.use('Agg')
###We checked and print the arguments
print('Number of arguments:', str(len(sys.argv)), 'arguments.')
if len(sys.argv)<3:
print("""
### usage: python SAGP.py [datafile] [mcmc] [burn] [L] [m] [newlocations]
### example: python SAGP.py test.txt 100 100 2 5 newloc.txt
###[datafile] The dataset you want to analyze. The columns of the txt file are dimensions/variables; the rows of the txt file are samples.
####The first column of the datafile is considered Y (response) and the remaining columns are considered X (inputs).
###[mcmc] [burn] The number of MCMC steps and burn-in steps.
###[L] The number of layers in the SAGP
###[m] The number of pseudo-inputs in each component in the SAGP
###[newlocations] (optional) The dataset of locations for which we want to predict. If not provided, the predictions will be done at the original locations contained in [datafile].
###Functionality of this code.
###Read in the data file and output the SAGP fit with parameters in setup file.""")
sys.exit()
print('Argument List:', str(sys.argv),'\n')
datafile=sys.argv[1]
print('Data file:', datafile)
#setupfile=sys.argv[3]
#print('Setup file:', setupfile)
mcmc=sys.argv[2]
burn=sys.argv[3]
tree_L=sys.argv[4]
pinfo_m=sys.argv[5]
outputfile=datafile+'.out'
meanfile=outputfile+'.mean'
pdffile=outputfile+'.pdf'
print('Output file:', outputfile)
print('numpy output',pdffile)
import numpy as np
#stepl = 0.02
#x = np.arange(0, 1+stepl, stepl)
#y = np.arange(0, 1+stepl, stepl)
#xx, yy = np.meshgrid(x, y, sparse=False)
#tt = np.asarray(np.hstack((xx.reshape(-1,1),yy.reshape(-1,1))))
#np.shape(tt)
#print(tt)
# In[2]:
def generateTree(L=2,D=2):
VERBOSE=False
tinfo = [None]*L
for l in range(L):
loc_temp = np.linspace(start=0, stop=1, num=1+2**(l+1))[1:-1]
if VERBOSE:print(l,loc_temp)
loc_d = loc_temp[np.arange(start=0,stop=len(loc_temp),step=2)]
if VERBOSE:print(l,loc_d)
mesh_d = np.meshgrid(*[loc_d]*D)
if VERBOSE:print(l,np.asarray(mesh_d).reshape(-1,D).shape)
tmp_arr=np.asarray(mesh_d).T.reshape(-1,D)
if VERBOSE:print(tmp_arr)
tinfo[l]=np.vstack( ( tmp_arr, tmp_arr ) )
if VERBOSE:print(l,tinfo[l])
for k in range(2*tmp_arr.shape[0]):
if np.mod(k,2)==1:
tinfo[l][k,:]=[1/2**(l+1)]*D
else:
tinfo[l][k,:]=tmp_arr[int(k/2),:]
if VERBOSE:print(l,tinfo[l])
return(tinfo)
generateTree(2,2)
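# Hedged illustration (added, not part of the original): generateTree(L, D) returns a list
# of L arrays; layer l holds 2*(2**l)**D rows that alternate component centers and radii.
# For L=2, D=2 the first layer (l=0) is a single component centred at (0.5, 0.5) with radius
# 0.5 in each dimension, and the second layer (l=1) holds four components centred at
# (0.25, 0.25), (0.25, 0.75), (0.75, 0.25) and (0.75, 0.75), each with radius 0.25.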
# In[3]:
#Preparing toy dataset, for simplicity we set X to be in [0,1]^2 and avoid standardization of X.
dataset = np.loadtxt(datafile)
#print(dataset)
#Y = np.array([1, 2, 3, 4, 5, 2.5, 6.5, 2.141])
#X = np.array([[0.55, 0.55],
# [0.25, 0.30],
# [0.77, 0.25],
# [0.26, 0.76],
# [0.67, 0.33],
# [0.22, 0.89],
# [0.34, 0.56],
# [0.51, 0.77]])
X = dataset[:,1:np.shape(dataset)[1]]
Y = dataset[:,0]
#Y = np.random.uniform(low=0,high=0.5,size=441)+X[:,0]*0.5*np.sin(X[:,1])
#Plot the dataset to illustrate the data.
import matplotlib.pyplot as plt
#plt.figure(0)
#plt.scatter(X[:,0],X[:,1], c=Y, cmap='viridis',vmin=0,vmax=1)
#plt.xlim(xmax = 1.1, xmin = -0.1)
#plt.ylim(ymax = 1.1, ymin = -0.1)
#plt.colorbar()
#plt.title('observed dataset')
#Set the predictive locations
if len(sys.argv)>6:
Xnew = np.loadtxt(sys.argv[6])
else:
Xnew = X
#Xnew = np.array([[0.50, 0.50],
# [0.25, 0.80],
# [0.75, 0.25],
# [0.25, 0.75],
# [0.50, 0.95],
# [0.30, 0.80],
# [0.90, 0.50],
# [0.80, 0.25]])
#Xnew = tt
print('X shape',np.shape(X))
print('Xnew shape',np.shape(Xnew))
#plt.figure(1)
#plt.scatter(Xnew[:,0],Xnew[:,1],vmin=0,vmax=1)
#plt.xlim(xmax = 1.1, xmin = -0.1)
#plt.ylim(ymax = 1.1, ymin = -0.1)
#plt.colorbar()
#plt.title('predictive locations')
#Preparing model parameters and a fixed 2-layer binary tree on [0,1]^2 for illustration purpose.
pinfo = {'m': int(pinfo_m),
'a':100, 'b':1,
'logRhoFirstLayer':1,'logRhoLastLayer':50,
'r':0.1}
minfo = {'seed': 123,
'Nmcmc': int(mcmc), 'burnIn': int(burn),
'semiWidthRho':0.05,'semiWidthEta':0.05}
#tinfo = [np.array([[0.50,0.50],[0.50,0.50]]),
# np.array([[0.25,0.25],[0.25,0.25],
# [0.25,0.75],[0.25,0.25],
# [0.75,0.25],[0.25,0.25],
# [0.75,0.75],[0.25,0.25]]) ]
tinfo=generateTree(L=int(tree_L),D=X.shape[1])
######################
#IMPORTANT CONVENTION#
#In Python, arrays are indexed starting at 0, but we index the additive components starting from 1; mixing the two conventions can cause index-out-of-bounds errors.
# #
######################
#from matplotlib.patches import Rectangle
#Draw rectangles for illustrating partitions.
#plt.figure(1)
#currentAxis = plt.gca()
#for j in range(np.shape(tinfo)[0]):
# for k in range(np.shape(tinfo[j])[0]%2):
# someX= tinfo[j][k][0]
# someY= tinfo[j][k][1]
# someR_X= tinfo[j][k][0]
# someR_Y= tinfo[j][k][1]
# print(j,someX,someY,someR_X,someR_Y)
#currentAxis.add_patch(Rectangle((someX-someR_X, someY-someR_Y), someR_X, someR_Y, alpha=0.5))
#Seed, for reproducibility
#set.seed(minfo$seed)
np.random.seed(seed=minfo['seed'])
#Transform Y to have mean = 0 and sd = 1
#At the end of the code, samples of Y are transformed back to original scale
meanY = np.mean(Y, axis=0)
sdY = np.std(Y, axis=0)
Y = (Y-meanY)/sdY
#print('Standardized Y:',Y)
#Overall number of observations in the sample.
n = np.shape(X)[0]
print('Number of input locations:',n)
if(n!=np.shape(Y)[0]):
    print('sagpfun::sagp @ the length of X and Y must match.')
exit()
#Overall number of new locations to be predicted.
nNew = np.shape(Xnew)[0]
print('Number of predictive locations:',nNew)
#number of additive components - defined by the list with tree partition structure
#N=length(unlist(tinfo))/2
#number of pseudo inputs - if equal for all components
m = pinfo['m']
#number of layers
L = len(tinfo)
#dimension of X
D = np.shape(X)[1]
#number of samples to store and return in the result list
Nmcmc = minfo['Nmcmc']
#number of burn-in iterations
burnIn = minfo['burnIn']
#total number of iterations to run
Nsamples = Nmcmc + burnIn
print('Total number of steps:',Nsamples)
sampleRho=True
sampleEta=True
#******Verified 2020-08-10
# In[4]:
#loc=np.array([0.0,0.0])
#rad=np.array([0.5,0.5])
#flag=np.copy(X)#Make a copy of array, so flag operations would not affect X
#print(flag)
#for i in range(np.shape(X)[1]):
# d = np.power(X[:,i]-loc[i],2)
# #print(d<rad[i])
# flag[:,i]= d < rad[i]
#print(flag)
#flag=np.sum(flag,axis=1)
#return flag
#print(flag)
#Set up this function that determines whether each row in X is in the component of center and radius.
#Input: X: locations to be determined
# loc: list of location of partition components.
# rad: list of radius of partition components.
#Output: A boolean vector with the same length as np.shape(X)[0], saying whether a certain input location is in this specific component.
def ids_X_comp(X,loc,rad):
flag = np.copy(X)
#Make a copy of array, so flag operations would not affect X
for i in range(np.shape(X)[1]):
d = np.absolute(X[:,i]-loc[i])
#d = np.power(X[:,i]-loc[i],2)
#print(d)
flag[:,i] = d <= rad[i] #We used closed balls for now.
flag=np.sum(flag,axis=1)
#print(flag)
return flag>=np.shape(X)[1]
#print(ids_X_comp(X,loc,rad))
#print(ids_X_comp(Xnew,loc,rad))
#******Verified 2020-09-16
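# Hedged usage sketch (added, not part of the original): a point is flagged True when it lies
# inside the closed box [loc - rad, loc + rad] in every dimension, e.g.
# ids_X_comp(np.array([[0.2, 0.2], [0.9, 0.9]]), loc=np.array([0.25, 0.25]), rad=np.array([0.25, 0.25]))
# -> array([ True, False])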
# In[5]:
#from functools import reduce
#Identify the locations (X and Xnew) corresponding to each component, for each layer
# indexCompX is a list of L elements. For each layer l, indexCompX[[l]] is a vector with length n.
# For each element X[i] in X[1],...,X[n], indexCompX[[l]][i] is the index of the component in the l-th layer to which X[i] belongs.
# indexCompNew is a list of L elements. For each layer l, indexCompNew[[l]] is a vector with length nNew.
# For each element Xnew[i] in Xnew[1],...,Xnew[nNew], indexCompNew[[l]][i] is the index of the component in the l-th layer to which Xnew[i] belongs.
# layers is a data.frame that links layers and components
# e.g. layer=data.frame(comp=c(1,2,3),layer=c(1,2,2)) means that component 1 is in the 1st layer while components 2 and 3 are in the 2nd layer.
# It is used throughout the code to know which is the layer of each component: layers$layer[layers$comp==j] is the layer of component j.
indexCompX = [None] * L#vector("list",L)
indexCompNew = [None] * L#vector("list",L)
layers = []
# m_comp is a vector of length N=N_1+N_2+...+N_L recording how many pseudo-inputs (PIs) there should be in each component.
m_comp = []
#Initialize the layers dataframe. numCompTot is the iterator that records the loop;
numCompTot = 0
print('\n ===== Allocating layer...for L =',L,'=====')
for l in range(L):
print('Fitting layer:',l)
#The outer loop loops through 1,2,...,L layer.
#tinfo_layer is a data frame consisting of N_l*2 elements,
# the number of rows N_l means the number of components in the l-th layer
# the number of PIs in each component m are determined before.
# the first column is recording the center of each component.
# the second column is recording the radius of each component.
tinfo_layer = np.copy(tinfo[l])
#print(indexCompX[l])
indexCompX[l] = np.zeros((n,))* np.nan #<- rep(NA, n)
indexCompNew[l] = np.zeros((nNew,))* np.nan #<- rep(NA, nNew)
#N_l is the number of components in the l-th layer.
N_l = len(tinfo_layer)/2
N_l = int(N_l)
print('= There are ',int(N_l),' components in layer ',l,'.')
#print('= Handling each component:')
for j_l in range(N_l):
#print('Total components included: ',numCompTot)
#The inner loop loops through different components j_l=1,2,...,nrow(tinfo_layer) in each layer/ the l-th layer.
#loc_j_l is the 'center' of the j_l component domain
#rad_j_l is the 'radius' of the j_l component domain.
loc_j_l = tinfo_layer[2*j_l]
rad_j_l = tinfo_layer[2*j_l+1]
print('== The component ',numCompTot+1,' has center ',loc_j_l,', radius',rad_j_l,'.')
ids_j_l = ids_X_comp(X=X,loc=loc_j_l,rad=rad_j_l)
#print(ids_j_l)
print('== This component contains ',np.sum(ids_j_l),' input locations.')
#cat('Initialize...Layer',l,'Component',j_l,'/',N_l,'\n');message('loc=',loc_j_l,' rad=',rad_j_l,'\n')
#Check:are there points in this component that have not been already assigned to other components?
#Need to check that the points are not already assigned because points could be on the border between
#two components and could be assigned to both.
#ids1 = ids_j_l.reshape(5,)
#ids2 = indexCompX[l].reshape(5,)
#print(ids1)
#print(ids2[ids1])
#print('isnan',np.isnan(ids2[ids1]))
###idToReplace = ids_j_l[np.isnan(ids2[ids1])]#ids_j_l[is.na(indexCompX[[l]][ids_j_l])]
idToReplace = np.argwhere( ids_j_l & np.isnan(indexCompX[l]) )
#idToReplace = reduce(operator.add, idToReplace)
idToReplace = [item for items in idToReplace for item in items]
#print(idToReplace)
#This is the list of indices that are not assigned in this layer yet.
#If yes: include it. Otherwise: do not increase the counter and do not include the component.
if len(idToReplace) > 0:
numCompTot = numCompTot + 1
indexCompX[l][idToReplace] = numCompTot
ids_new_j_l = ids_X_comp(X=Xnew,loc=loc_j_l,rad=rad_j_l)
indexCompNew[l][ids_new_j_l] = numCompTot
layers.append([numCompTot,l])#rbind(layers, data.frame(layer=l,comp=numCompTot))
# Assign different m_comp (number of PIs) in this case we use the same number of PIs in each component.
#m_comp <- c(m_comp,m-4*floor(log(numCompTot,base=sqrt(2))))
# or set all m_comp to be the same
m_comp.append([numCompTot,m])
print('Assignments of input locations in layer ',l,':',indexCompX[l])
print('Assignments of predictive locations in layer ',l,':',indexCompNew[l])
print('Layer assignments [Comp,Layer]:',layers)
print('Pseudo-input assignments [Comp,# of pseudo-inputs]:',m_comp)
m_comp = np.asarray(m_comp)
m_max = np.max(m_comp[:,1])
m_compActual = np.copy(m_comp)
if(m_max<=0):
print('sagpfuns::sagp @ the maximal number of PIs in components must be greater than 0.')
exit()
#"Deactivate" components corresponding to areas where there are not enough data points.
# Loop from last component (lowest layer, local) to first (higher layer, global).
# For each component j, look how many components are nested in j and compute necessary number of PIs.
# If number of data points is not sufficient, deactivate the most local components nested in j (lowest layer)
# and check if there are enough data points. Iterate, removing one layer of components each time.
print('\n ===== Reverse pruning...for m =',m,'=====')
layers = np.asarray(layers)
for j in range(numCompTot,-1,-1):
if len(layers[layers[:,0]==j,1])<=0:
print(layers)
continue
layer_j = int(layers[layers[:,0]==j,1])
print('Pruning component ',j,' in layer',layer_j)
#Layer of component j
#print('Layer assignment ',indexCompX[layer_j])
#ids_layer_j = (indexCompX[layer_j] %in% j) #ids of points in comp j
ids_layer_j = indexCompX[layer_j]==j
#print(ids_layer_j)
nestedComp = [None]*numCompTot #list where store the nested components
layers_below_j = 0 #counter of nested layers
for l in range(layer_j,L,1):
#Which components are nested in j in layer l? stored in nestedComp_l
compOfNestedPoints = indexCompX[l][ids_layer_j]
compOfNestedPoints_noNA = compOfNestedPoints[~np.isnan(compOfNestedPoints)]
nestedComp_l = np.sort(np.unique(compOfNestedPoints_noNA))
#If there is one component at layer l: store it in list nestedComp.
if len(nestedComp_l)>0:
layers_below_j = layers_below_j + 1
nestedComp[layers_below_j] = nestedComp_l
else:
break
#print('LATS',compOfNestedPoints_noNA)
#print('LATS',nestedComp)
def joint(lst):
ret = np.array([])
for t in range(len(lst)):
if lst[t] is None:
continue
else:
ret = np.concatenate((ret,lst[t]))
return ret
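    # Hedged example (added for illustration): joint flattens a list of arrays and skips
    # None entries, e.g. joint([None, np.array([3., 5.]), np.array([7.])]) -> array([3., 5., 7.]).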
#Given all the components nested in j, how many PIs do we need?
req_m = len(joint(nestedComp))*m #len(nestedComp_un) * m
#print(joint(nestedComp))
#print(req_m)
#How many data points are available in component j?
numPointsComp = np.sum(ids_layer_j)
    print('= Component',j,'contains',numPointsComp,'input locations, and',req_m,'are required by it and its children.')
#If the component does not contain enough points for its PI and the PI of the nested components:
if numPointsComp<req_m:
        print('== Deactivating component',j,'!')
#Deactivate components from the layer at lowest level (most local) to the highest (more global).
for l_deact in range(layers_below_j,0,-1):
#Components to deactivate
compToDeact = nestedComp[l_deact]
#Loop to deactivate the components. Also, drop the component from the dataframe layers.
for j_deact in compToDeact:
#print('j_deact',j_deact)
layer_j_deact = layers[layers[:,0]==j_deact,1]
layer_j_deact = int(layer_j_deact)
#print(indexCompX[layer_j_deact])
indexCompX[layer_j_deact][indexCompX[layer_j_deact] == j_deact] = np.nan
indexCompNew[layer_j_deact][indexCompNew[layer_j_deact] == j_deact] = np.nan
layers = layers[layers[:,0]!=j_deact,]
#When a layer of components is removed, check: do we have enough data points now?
nestedComp = nestedComp[0:(l_deact-1)]
req_m = len(joint(nestedComp))*m #Required data points now
#If now there are enough data points: OK, move to check another component
if numPointsComp>=req_m:
break
#print(nestedComp)
#layers is a matrix showing all active components (column 0) and the layers they belong to (column 1).
#print(layers)
#Rename objects with correct number of components:
N = numCompTot#np.shape(layers)[0]
L = len(indexCompX)
#******Verified 2020-08-10
# In[6]:
#Define prior on rhos and eta using the correct number of components and layers
#Same priors for all the dimensions
pinfo['rho']=[None]*L # one rho prior per layer, list of values defined below
pinfo['eta']=[None]*L # one eta prior per layer, list of values defined below
#Fixed vector of rhos
fixedRhos = np.power(0.1, | np.linspace(pinfo['logRhoFirstLayer'], pinfo['logRhoLastLayer'], L) | numpy.linspace |
"""
Copyright (c) 2013 <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .Algorithm import *
from PIL import Image
import numpy as np
from math import exp, log10
import scipy.ndimage.filters as sf
#import matplotlib
#from matplotlib import pyplot as plt
import scipy.signal
class MFS (Algorithm):
"""
:version: 1.0
:author: <NAME>
"""
def __init__(self):
pass
def setDef(self,ind,f,ite):
# parameters: ind -> determines how many levels are used when computing the density
# choose 1 for using directly the image measurement im or
# >= 6 for computing the density of im (quite stable for >=5)
# f ----> determines the dimension of MFS vector
# ite ---> determines how many levels are used when computing MFS for each
self.ind_num = ind # number of pixels for averaging
self.f_num = f # window
self.ite_num = ite
def gauss_kern(self,size, sizey):
""" Returns a normalized 2D gauss kernel array for convolutions """
m = | np.float32(size) | numpy.float32 |
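# Hedged sketch (added; the original gauss_kern body is truncated above at the dataset row
# boundary): a normalized 2D Gaussian kernel of the kind the docstring describes, assuming
# `size` and `sizey` are the half-widths along each axis.
def gauss_kern_sketch(size, sizey=None):
    size = int(size)
    sizey = int(sizey) if sizey else size
    x, y = np.mgrid[-size:size + 1, -sizey:sizey + 1]
    g = np.exp(-(x ** 2 / float(size) + y ** 2 / float(sizey)))
    return g / g.sum()   # normalize so the kernel sums to 1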
from scipy import misc
import tensorflow as tf
import align.detect_face
import matplotlib.pyplot as plt
import numpy as np
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7]  # thresholds for the three stages
factor = 0.709 # scale factor
gpu_memory_fraction = 1.0
# function pick = nms(boxes,threshold,type)
# Non-maximum suppression (NMS): remove duplicate detection boxes
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
    # Coordinates of the boxes (mapped back to the original image scale)
    print("Entering NMS (non-maximum suppression)")
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
print(x1,y1,x2,y2)
    # Score values, i.e. the confidence that each box contains a face
s = boxes[:,4]
print(s)
area = (x2-x1+1) * (y2-y1+1)
print(area)
    # Sort the scores in ascending order; argsort returns the indices
I = np.argsort(s)
#print(I)
pick = np.zeros_like(s, dtype=np.int16)
#print(pick)
counter = 0
s = 0
while I.size>0:
i = I[-1]
s = s+1
        print("Entering while loop, iteration %d" % s)
print(i)
pick[counter] = i
counter += 1
idx = I[0:-1]
#print(idx)
#print(type(idx))
#x22= np.array([17.,18.,19.])
#print(x22[idx])
#print( x1[idx])
#print( y1[idx])
#print( x2[idx])
#print( y2[idx])
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
#print(xx1)
#print(yy1)
#print(xx2)
#print(yy2)
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
#print(inter)
#print(area[idx])
#print(area[i])
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
#print(o)
#print(threshold)
I = I[np.where(o<=threshold)]
#print(I)
pick = pick[0:counter]
print(pick)
print("_________________________")
return pick
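# Hedged usage sketch (added, not part of the original): three boxes in
# [x1, y1, x2, y2, score] format; with a 0.5 'Union' (IoU) threshold the two strongly
# overlapping boxes collapse to the higher-scoring one and the isolated box is kept.
# example_boxes = np.array([[10., 10., 50., 50., 0.9],
#                           [12., 12., 52., 52., 0.8],
#                           [100., 100., 140., 140., 0.7]])
# nms(example_boxes, 0.5, 'Union')   # expected to return indices [0, 2]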
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
    # Get the x1, y1, x2, y2 coordinates
    print("Entering generateBoundingBox")
#print(imap.shape)
imap = np.transpose(imap)
print(imap.shape)
#print(type(imap))
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
    print("Entering reg")
#print(reg[:, :, 0].shape)
print(dx1)
print(dy1)
print(dx2)
print(dy2)
    # Get the coordinates of the face boxes whose confidence exceeds the threshold
print(imap)
y, x = np.where(imap >= t)
print(y)
print(x)
#print(type(y))
#print(y.shape)
#print(y.shape[0])
    # Case where only one location qualifies
    if y.shape[0] == 1:
        #print("Entering the if branch")
        dx1 = np.flipud(dx1)  # flip the matrix
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
    # Select the boxes that satisfy the condition
print("_____________")
# a= imap[(y,x)]
# print(a)
score = imap[(y, x)]
print(score)
print("_____________")
#print(dx1[(y, x)].shape)
print([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])
print((np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])).shape)
print("_____________")
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
print(reg.shape)
if reg.size == 0:
        #print("Entering the if branch")
reg = np.empty((0, 3))
    # Map back to the original scale
print("_____________")
#print(np.vstack([y,x]))
bb = np.transpose(np.vstack([y, x]))
print(bb)
    print('Entering the computation part')
#print(stride * bb)
print(scale)
# #print((stride * bb + 1))
#print((stride * bb + 1) / scale)
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
print(q1)
print(q2)
# shape(None, 9)
#print(np.expand_dims(score, 0))
#print(np.expand_dims(score, 1))
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
#print(boundingbox)
return boundingbox, reg
# In the returned boxes, the first 4 values are the face-box coordinates mapped back to the original scale,
# the 5th value is the probability that the box contains a face, and the last 4 values are the un-rescaled box coordinates
# inter-scale nms
# Non-maximum suppression: remove duplicate detection boxes
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
image_path = 'C:\\Users\\rjx\\PycharmProjects\\untitled1\\facenet-master\\data\\test\\test4.jpg'
img = misc.imread(image_path)
#print(img.shape)
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0] # number of detected faces
#print('Number of faces found: {}'.format(nrof_faces))
print(_.shape)
print(bounding_boxes.shape)
#print(type(bounding_boxes))
print(bounding_boxes[:,:4])
det = bounding_boxes[:,0:4]
# Store all face boxes
det_arr = []
#print(type(det_arr))
# Size of the original image
img_size = np.asarray(img.shape)[0:2]
#print(img_size)
# for i in range(nrof_faces):
# #print(det[i])
# print(np.squeeze(det[i]))
# det_arr.append(np.squeeze(det[i]))
# print(det_arr)
# Even if there are multiple faces, keeping a single face is enough
# Get the size of each face box
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
print(bounding_box_size)
# Center coordinates of the original image
img_center = img_size / 2
#print(img_center)
# Compute the offset of each face-box center relative to the image center;
# the coordinates ((det[:,0]+det[:,2])/2, (det[:,1]+det[:,3])/2) are exactly the face-box centers
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print(offsets)
# Sum of the squared offsets from each face-box center to the image center
# e.g. if offsets=[[ 4.20016056 145.02849352 -134.53862838] [ -22.14250919 -26.74770141 -30.76835772]]
# then offset_dist_squared=[ 507.93206189 21748.70346425 19047.33436466]
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
#print(offset_dist_squared)
# Subtract twice the squared offset from the box area; the box with the largest result is selected,
# i.e. position and size are weighed together, preferring large boxes that are close to the image center
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
#print(bounding_box_size-offset_dist_squared*2.0)
#print(index)
det_arr.append(det[index,:])
print("______________________________")
#print(det_arr)
#print(enumerate(det_arr))
for i, det in enumerate(det_arr):
    # [4,] enlarge the bounding box by a margin and crop
det = np.squeeze(det)
#print(i)
#print(det)
bb = np.zeros(4, dtype=np.int32)
    # Cropping margin around the bounding box: the face crop is made slightly larger than the box returned by MTCNN;
    # how much larger is controlled by the margin parameter
# print(bb)
bb[0] = np.maximum(det[0] - 32 / 2, 0)
bb[1] = np.maximum(det[1] - 32 / 2, 0)
bb[2] = np.minimum(det[2] + 32 / 2, img_size[1])
bb[3] = np.minimum(det[3] + 32 / 2, img_size[0])
# print(np.max(det[0] - 32 / 2, 0))
# print(det[1] - 32 / 2)
# print(det[2] + 32 / 2)
# print(det[3] + 32 / 2)
#print(bb)
    # Crop the face box, then resize
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
#print(cropped)
    # Resize to the target size, then save the image and the bounding-box information
scaled = misc.imresize(cropped, (160, 160), interp='bilinear')
#nrof_successfully_aligned += 1
#filename_base, file_extension = os.path.splitext(output_filename)
#if args.detect_multiple_faces:
# output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
#else:
# output_filename_n = "{}{}".format(filename_base, file_extension)
    # Save the image
#misc.imsave(output_filename_n, scaled)
    # Write the information to the bounding_boxes_XXXXX.txt file
#text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
###########################################################################################
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
#print(type(total_boxes))
print(total_boxes)
print("Showing total_boxes")
#print(points)
#print(type(points))
# Get the width and height of the input image
h=img.shape[0]
w=img.shape[1]
print(h)
print(w)
# Take the smaller of width and height, e.g. 250*250
minl=np.amin([h, w])
#print(minl)
m=12.0/minsize#P Net 12*12 12/20=0.6
minl=minl*m#250*0.6=150
#print(minl)
# create the scale pyramid (the list of image scaling factors)
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
#print(minl)
factor_count += 1
#print(factor_count)
print(scales)
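# Hedged illustration (added, not part of the original): for a 250x250 input with
# minsize=20 and factor=0.709, the scales are 0.6, 0.6*0.709, 0.6*0.709**2, ...
# and the loop stops once the scaled shorter side would fall below 12 pixels
# (the P-Net input size).
_scales_check = []
_minl_check = 250 * (12.0 / 20)
while _minl_check >= 12:
    _scales_check.append((12.0 / 20) * np.power(factor, len(_scales_check)))
    _minl_check = _minl_check * factor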
# Display the image
plt.figure()
scale_img = img.copy()
# Step 1: resize the image to different scales to form an "image pyramid"
# Then pass each scaled image through the P-Net network
# first stage
i=0
for scale in scales:
    # Round the width and height to integers
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
print(hs)
print(ws)
    # Resize the image (imresample uses OpenCV)
im_data = align.detect_face.imresample(img, (hs, ws))
print(im_data.shape)
    print("im_data prepared")
#plt.imshow(scale_img)
#plt.show()
    # Visualize the effect of the "image pyramid"
    # -- added by Wei Fang
#plt.imshow(img)
#plt.show()
#plt.imshow(im_data)
#plt.show()
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = 0
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = im_data[0:im_data.shape[0], 0:im_data.shape[1]]
# plt.imshow(scale_img)
# plt.show()
# print('im_data.shape[0]', im_data.shape[0])
# print('im_data.shape[1]', im_data.shape[1])
    # # Normalize the image data to [-1, 1]
# #print(im_data.shape)
im_data = (im_data - 127.5) * 0.0078125
print("---------------------")
#print(im_data.shape)
    # Add a batch-size dimension; we process one image at a time, so the batch size is 1
img_x = np.expand_dims(im_data, 0)
#print(img_x.shape)
img_y = np.transpose(img_x, (0, 2, 1, 3))
#print(img_y.shape)
    # Feed into the P-Net network
    # Suppose img_y.shape = (1, 150, 150, 3)
    # P-Net applies three 3*3 convolution layers with stride 1*1 and one pooling layer with stride 2*2,
    # so the conv4-2 layer output has shape (1, 70, 70, 4).
    # The 70 comes from (150-3+1)/1 = 148, which becomes 148/2 = 74 after pooling,
    # then (74-3+1)/1 = 72 after another conv layer, and (72-3+1)/1 = 70 after another conv layer.
    # See this blog post for the computation: https://blog.csdn.net/rookie_wei/article/details/80146620
    # The prob1 layer output has shape (1, 70, 70, 2)
out = pnet(img_y)
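    # Hedged sketch (added, not part of the original): the size arithmetic described in the
    # comments above, written out as code. Three 3x3/stride-1 convolutions and one
    # 2x2/stride-2 pooling step map a 150-pixel side down to 70.
    _side_check = 150
    _side_check = (_side_check - 3 + 1) // 2   # 3x3 conv (stride 1), then 2x2 pooling (stride 2)
    _side_check = _side_check - 3 + 1          # second 3x3 conv
    _side_check = _side_check - 3 + 1          # third 3x3 conv
    # _side_check is now 70, matching the (1, 70, 70, *) output shapes above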
#print(type(out))
#print(out[0].shape)
#print(out[1].shape)
    # Transpose back
    # out0 has shape (1, 70, 70, 4)
    # It holds the coordinates (regression offsets) of the candidate face boxes
out0 = np.transpose(out[0], (0, 2, 1, 3))
    # out1 has shape (1, 70, 70, 2)
    # It holds the confidence for the boxes in out0; the 2nd value is the probability of being a face
out1 = np.transpose(out[1], (0, 2, 1, 3))
    print("Shapes of out")
print(out0.shape)
print(out1.shape)
print("-----------------")
#print(out0[:,:,:,:].shape)
print(out0[0,:,:,:].shape)
print("-----------------")
#print(out1[:,:,:,1].shape)
print(out1[0,:,:,1].shape)
    # out1[0,:,:,1]: the box confidence; one value suffices because the two values sum exactly to 1,
    #                so we only take the probability that the box "is" a face
    # out0[0,:,:,:]: the face boxes
    # scale: the image scaling ratio
    # threshold: the threshold, 0.6 here
    # In the returned boxes, the first 4 values are the rescaled face-box coordinates, the 5th is the probability that the box is a face, and the last 4 are the un-rescaled box coordinates
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
    # # Confidence corresponding to the face-box coordinates
    # print('Before processing:', out1[0, :, :, 1])
    # print('------------------')
    # s = boxes[:, 4]
    # print('After processing:', s)
    #
    # # # Display the face boxes
# print('------------------')
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# print(len(boxes))
# print('------------------')
# for i in range(len(boxes)):
# print(x1[i])
# print(y1[i])
# print(x2[i])
# print(y2[i])
# print('------------------')
# print(i)
# plt.gca().add_patch(plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w',facecolor='none'))
    # -- added by Wei Fang
# plt.imshow(scale_img)
# plt.show()
# exit()
    # inter-scale nms
    # Non-maximum suppression: remove duplicate detection boxes
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# for i in range(len(boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
    # -- added by Wei Fang
#plt.imshow(scale_img)
#plt.show()
#exit()
# After the image has been run through all scales we have, on the original image, all the bounding boxes from the different scales; run NMS on these boxes once more,
# this time with a higher threshold
numbox = total_boxes.shape[0]
if numbox > 0:
    # Run NMS again to filter out lower-confidence face boxes
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
    # Get the width and height of each face box
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
# x1 = total_boxes[:, 0]
# y1 = total_boxes[:, 1]
# x2 = total_boxes[:, 2]
# y2 = total_boxes[:, 3]
# for i in range(len(total_boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
    # Adjust the face-box coordinates (apply the regression offsets) so the boxes are tighter
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
# x1 = qq1
# y1 = qq2
# x2 = qq3
# y2 = qq4
# for i in range(len(total_boxes)):
# print('lll', x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='r', facecolor='none'))
    # -- added by Wei Fang
# plt.imshow(scale_img)
# plt.show()
# exit()
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(total_boxes.copy(), w, h)
# R-Net
numbox = total_boxes.shape[0]
if numbox > 0:
    # second stage R-Net: the bounding boxes produced by P-Net are resized to 24x24
tempimg = | np.zeros((24, 24, 3, numbox)) | numpy.zeros |
import pandas as pd
import os
import numpy as np
# threshold for motility: IoU at or above this value means dead (non-motile), below means motile
THRESHOLD = 0.8
MIN_FRAMES = 10
WORM_MULTIPLIER = 1.1
def calculate_motility(input_folder, output_file):
"""
    Calculate the motility for multiple videos from the "*links.csv" files and store the results as a csv.
    :param input_folder: folder containing the per-video "*links.csv" tracking files
    :param output_file: path of the csv file to which the per-video motility results are written
    :return: None
"""
all_files = sorted(os.scandir(input_folder), key=lambda e: e.name)
output = []
# process all files within the input folder
for entry in all_files:
# process only files ending with "links.csv"
if entry.name.endswith("links.csv") and entry.is_file():
# print('Processing file: {0}'.format(entry.name))
out = {}
df = pd.read_csv(os.path.join(input_folder, entry.name))
# get the min/max number for each frame
particle_max = df.groupby('frame').size().max()
particle_min = df.groupby('frame').size().min()
# remove ones where the particle didn't exist for the comparison
df = df.loc[df['IoU'] != -1,]
# count the number of observations
df['size'] = df.groupby('particle')['frame'].transform('size')
# group per particle and calculate mean
df_grouped = df.groupby('particle')[['IoU', 'size']].mean().reset_index()
# get the particles with the highest number of frames up to the max detected
df_grouped = df_grouped.nlargest(int(particle_max * WORM_MULTIPLIER), 'size')
# remove records with less than min_frames
# df_grouped = df_grouped.loc[df_grouped['size'] >= MIN_FRAMES,]
# mark as motile or non-motile based on threshold value
df_grouped['motile'] = | np.where(df_grouped['IoU'] >= THRESHOLD, 0, 1) | numpy.where |
import numpy as np
from scipy.sparse import csc_matrix
"""# Objective Functions
"""
#Basis generator.
#Generates a set of n-orthonormal vectors.
def rvs(dim=3):
random_state = np.random
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1) - 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
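# Hedged self-check (added, not part of the original): rvs returns an orthonormal basis,
# and the sign fix above is meant to make its determinant equal to 1.
_H_check = rvs(5)
_is_orthonormal = np.allclose(_H_check.dot(_H_check.T), np.eye(5))   # expected True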
#Generate a random PSD quadratic with eigenvalues between certain numbers.
def randomPSDGenerator(dim, Mu, L):
eigenval = np.zeros(dim)
eigenval[0] = Mu
eigenval[-1] = L
eigenval[1:-1] = np.random.uniform(Mu, L, dim - 2)
M = np.zeros((dim, dim))
A = rvs(dim)
for i in range(dim):
M += eigenval[i]*np.outer(A[i], A[i])
return M
#Random PSD matrix with a given sparsity.
def randomPSDGeneratorSparse(dim, sparsity):
mask = np.random.rand(dim,dim)> (1- sparsity)
mat = np.random.normal(size = (dim,dim))
Aux = np.multiply(mat, mask)
return np.dot(Aux.T, Aux) + np.identity(dim)
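# Hedged check (added): Aux.T @ Aux is positive semidefinite, so adding the identity makes
# every eigenvalue of the returned matrix at least 1 (up to floating point error), e.g.
# np.linalg.eigvalsh(randomPSDGeneratorSparse(50, 0.1)).min() >= 1 - 1e-8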
def calculateEigenvalues(M):
from scipy.linalg import eigvalsh
dim = len(M)
L = eigvalsh(M, eigvals = (dim - 1,dim - 1))[0]
Mu = eigvalsh(M, eigvals = (0,0))[0]
return L, Mu
#Takes a random PSD matrix generated by the functions above and uses it as a quadratic objective function.
class funcQuadratic:
import numpy as np
def __init__(self, size, matrix, vector, Mu, L):
self.len = size
self.M = matrix.copy()
self.b = vector.copy()
self.L = L
self.Mu = Mu
return
def dim(self):
return self.len
#Evaluate function.
def fEval(self, x):
return 0.5*np.dot(x, self.M.dot(x)) + np.dot(self.b, x)
#Evaluate gradient.
def fEvalGrad(self, x):
return self.M.dot(x) + self.b
#Line Search.
def lineSearch(self, grad, d):
return -np.dot(grad, d)/np.dot(d, self.M.dot(d))
#Return largest eigenvalue.
def largestEig(self):
return self.L
#Return smallest eigenvalue.
def smallestEig(self):
return self.Mu
    #Return the matrix M.
    def returnM(self):
        return self.M
    #Return the vector b.
    def returnb(self):
        return self.b
class funcQuadraticDiag:
import numpy as np
def __init__(self, size, xOpt, Mu = 1.0, L = 2.0):
self.len = size
self.matdim = int(np.sqrt(size))
self.eigenval = np.zeros(size)
self.eigenval[0] = Mu
self.eigenval[-1] = L
self.eigenval[1:-1] = np.random.uniform(Mu, L, size - 2)
self.L = L
self.Mu = Mu
self.xOpt = xOpt
self.b = - np.multiply(self.xOpt, self.eigenval)
return
def dim(self):
return self.len
#Evaluate function.
def fEval(self, x):
return 0.5*np.dot(x, np.multiply(self.eigenval,x)) + np.dot(self.b, x)
#Evaluate gradient.
def fEvalGrad(self, x):
return np.multiply(x, self.eigenval) + self.b
#Return largest eigenvalue.
def largestEig(self):
return self.L
#Return smallest eigenvalue.
def smallestEig(self):
return self.Mu
#Line Search.
def lineSearch(self, grad, d):
return -np.dot(grad, d)/np.dot(d, np.multiply(self.eigenval, d))
    #Return the vector of eigenvalues.
    def returnM(self):
        return self.eigenval
    #Return the vector b.
    def returnb(self):
        return self.b
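# Hedged usage sketch (added, not part of the original): build a small diagonal quadratic
# and check that the gradient vanishes at the chosen optimum xOpt.
# f_check = funcQuadraticDiag(10, xOpt=np.ones(10), Mu=1.0, L=2.0)
# np.allclose(f_check.fEvalGrad(np.ones(10)), 0.0)   # expected True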
#Function used in NAGD
class funcSimplexLambdaNormalizedEigen:
#Assemble the matrix from the active set.
def __init__(self, activeSet, z, A, L, Mu):
from scipy.sparse.linalg import eigsh
self.len = len(activeSet)
Mat = np.zeros((len(activeSet[0]), self.len))
self.c = Mu*A + L - Mu
self.b = np.zeros(len(activeSet))
for i in range(0, self.len):
Mat[:, i] = activeSet[i]
self.b[i] = -np.dot(z, activeSet[i])
self.b /= self.c
self.M = np.dot(np.transpose(Mat),Mat)
#Create a sparse matrix from the data.
self.M = csc_matrix(self.M)
if(self.M.shape == (1,1)):
self.L = 1.0
self.Mu = 1.0
else:
self.L = eigsh(self.M, 1, which='LM', return_eigenvectors = False)[0]
self.Mu = eigsh(self.M, 1, sigma=1.0e-10, which='LM', return_eigenvectors = False)[0]
return
def fEval(self, x):
return 0.5*np.dot(x.T, self.M.dot(x)) + | np.dot(self.b, x) | numpy.dot |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import os
import sys
import platform
import onnx
import ctypes
import struct
import numpy as np
sys.path.insert(0, os.getcwd())
from importlib import import_module
from code.common import logging, dict_get, BENCHMARKS
from code.common import get_system
from code.common.builder import BenchmarkBuilder
import pycuda.autoinit
RN50Calibrator = import_module("code.resnet50.tensorrt.calibrator").RN50Calibrator
AUTOSINIAN_CNN_PLUGIN_LIBRARY = "code/resnet50/tensorrt/libautosiniancnnplugin_ampere.so" if pycuda.autoinit.device.compute_capability()[0] > 7 else "code/resnet50/tensorrt/libautosiniancnnplugin_turing.so"
if not os.path.isfile(AUTOSINIAN_CNN_PLUGIN_LIBRARY):
raise IOError("{}\n".format(
"Failed to load library ({}).".format(AUTOSINIAN_CNN_PLUGIN_LIBRARY)
))
ctypes.CDLL(AUTOSINIAN_CNN_PLUGIN_LIBRARY)
class ResNet50(BenchmarkBuilder):
"""Resnet50 engine builder."""
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(1 << 30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.ResNet50, workspace_size=workspace_size)
# Model path
self.model_path = dict_get(args, "model_path", default="code/resnet50/tensorrt/ofa_autosinian_is176.onnx")
logging.info("Using AutoSinian optimized once-for-all network")
self.cache_file = None
self.need_calibration = False
if self.precision == "int8":
# Get calibrator variables
calib_batch_size = dict_get(self.args, "calib_batch_size", default=1)
calib_max_batches = dict_get(self.args, "calib_max_batches", default=500)
force_calibration = dict_get(self.args, "force_calibration", default=False)
cache_file = dict_get(self.args, "cache_file", default="code/resnet50/tensorrt/calibrator.cache")
preprocessed_data_dir = dict_get(self.args, "preprocessed_data_dir", default="build/preprocessed_data")
calib_data_map = dict_get(self.args, "calib_data_map", default="data_maps/imagenet/cal_map.txt")
calib_image_dir = os.path.join(preprocessed_data_dir, "imagenet/ResNet50/fp32")
# Set up calibrator
self.calibrator = RN50Calibrator(calib_batch_size=calib_batch_size, calib_max_batches=calib_max_batches,
force_calibration=force_calibration, cache_file=cache_file,
image_dir=calib_image_dir, calib_data_map=calib_data_map)
self.builder_config.int8_calibrator = self.calibrator
self.cache_file = cache_file
self.need_calibration = force_calibration or not os.path.exists(cache_file)
def initialize(self):
"""
Parse input ONNX file to a TRT network. Apply layer optimizations and fusion plugins on network.
"""
# Query system id for architecture
self.system = get_system()
self.gpu_arch = self.system.arch
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Parse from onnx file.
parser = trt.OnnxParser(self.network, self.logger)
with open(self.model_path, "rb") as f:
model = f.read()
success = parser.parse(model)
if not success:
            raise RuntimeError("ofa_autosinian onnx model processing failed! Error: {:}".format(parser.get_error(0).desc()))
# Set input dtype and format
input_tensor = self.network.get_input(0)
if self.input_dtype == "int8":
input_tensor.dtype = trt.int8
scale = struct.unpack('!f', bytes.fromhex('3caa5293'))[0]
input_tensor.dynamic_range = (-scale*127.0, scale*127.0)
if self.input_format == "linear":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
elif self.input_format == "chw4":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
# Get the layers we care about.
nb_layers = self.network.num_layers
while self.network.num_outputs > 0:
logging.info("Unmarking output: {:}".format(self.network.get_output(0).name))
self.network.unmark_output(self.network.get_output(0))
#add top-k
last_fc_layer = self.network.get_layer(nb_layers - 1)
topk_layer = self.network.add_topk(last_fc_layer.get_output(0), trt.TopKOperation.MAX, 1, 2)
topk_layer.name = "topk_layer"
topk_layer.get_output(0).name = "topk_layer_output_value"
topk_layer.get_output(1).name = "topk_layer_output_index"
self.network.mark_output(topk_layer.get_output(1))
if self.network.num_outputs != 1:
logging.warning("num outputs should be 1 after unmarking! Has {:}".format(self.network.num_outputs))
raise Exception
if self.precision == "int8" and self.batch_size > 1 and (not self.need_calibration):
self.autosinian_optimize()
self.initialized = True
def autosinian_optimize(self):
logging.info("Applying AutoSinian Optimization...")
optimize_points = [(10,15), (21,26), (27,32), (38,43), (44,49), (55,60), (61,66), (67,72), (78,83), (84,89), (90,95), (0,4), (5,9), (16,20), (33,37), (50,54), (73,77), (96,100)]
optimizer = AutoSinian_Optimizer(self.cache_file)
for point in optimize_points:
optimizer.optimize(self.network, point)
class AutoSinian_Optimizer:
'''AutoSinian optimizer, optimize the hardware implementation of the layers.'''
def __init__(self, cache_file = None):
self.plugin_registery = trt.get_plugin_registry()
foundPlugin = False
for plugin_creator in self.plugin_registery.plugin_creator_list:
if plugin_creator.name == self.name:
self.creator = self.plugin_registery.get_plugin_creator(self.name,'1','')
foundPlugin = True if self.creator else False
break
        assert(foundPlugin), "failed to find %s!" % self.name
self.scale_map = {}
with open(cache_file, "r") as f:
for line in f:
pair = line.rstrip().split(':')
if len(pair) == 2:
self.scale_map[pair[0]] = struct.unpack('!f', bytes.fromhex(pair[1]))[0]
self.count = 0
@property
def name(self):
return "AutoSinianCNN_TRT"
def optimize(self, network, point):
fields = trt.PluginFieldCollection()
saved = [] #values must be alive when creating the plugin.
inputs = [network.get_layer(point[0]).get_input(0)]
append_fields(network, point[0], fields, saved, self.scale_map)
append_fields(network, point[0]+2, fields, saved, self.scale_map)
append_fields(network, point[0]+4, fields, saved, self.scale_map)
plugin=self.creator.create_plugin(self.name, fields)
if plugin is None:
raise Exception("Plugin creation failed")
plugin_layer = network.add_plugin_v2(inputs, plugin)
plugin_layer.name = self.name + "_%d" % self.count
self.count += 1
origin_output = network.get_layer(point[1]).get_output(0)
plugin_output = plugin_layer.get_output(0)
assert(origin_output.name in self.scale_map), "%s not found!" % origin_output.name
dynamic_range=self.scale_map[origin_output.name]*127.0
plugin_output.set_dynamic_range(-dynamic_range, dynamic_range)
for j in range(network.num_layers):
layer = network.get_layer(j)
if layer.name==plugin_layer.name :
continue
for k in range(layer.num_inputs):
if layer.get_input(k) == origin_output:
layer.set_input(k, plugin_output)
def append_fields(network, index, fields, saved, scale_map):
layer = network.get_layer(index)
assert(isinstance(layer, trt.ILayer) and (layer.type == trt.LayerType.CONVOLUTION)), "must be a conv layer"
layer.__class__ = trt.IConvolutionLayer
output_layer = layer
npa1 = | np.array([layer.kernel_size.h], dtype=np.int32) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE
"""
import copy
import os
import sys
import unittest
from contextlib import contextmanager
import numpy as np
import rmsd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestRMSD(unittest.TestCase):
"""Test the DSSP parser methods."""
def setUp(self):
"""Initialize the framework for testing."""
abs_path = os.path.abspath(os.path.dirname(__file__))
self.xyzpath = abs_path + "/tests/"
self.centroid = rmsd.centroid
self.rmsd = rmsd.rmsd
self.get_coordinates = rmsd.get_coordinates
self.get_coordinates_pdb = rmsd.get_coordinates_pdb
self.get_coordinates_xyz = rmsd.get_coordinates_xyz
self.get_coordinates_ase = rmsd.get_coordinates_ase
self.parse_periodic_case = rmsd.parse_periodic_case
self.kabsch_rmsd = rmsd.kabsch_rmsd
self.kabsch_rotate = rmsd.kabsch_rotate
self.kabsch_algo = rmsd.kabsch
self.quaternion_rmsd = rmsd.quaternion_rmsd
self.quaternion_rotate = rmsd.quaternion_rotate
self.quaternion_transform = rmsd.quaternion_transform
self.makeQ = rmsd.makeQ
self.makeW = rmsd.makeW
self.print_coordinates = rmsd.print_coordinates
self.reorder_brute = rmsd.reorder_brute
self.reorder_hungarian = rmsd.reorder_hungarian
self.reorder_distance = rmsd.reorder_distance
self.check_reflections = rmsd.check_reflections
def tearDown(self):
"""Clear the testing framework."""
self.xyzpath = None
self.centroid = None
self.rmsd = None
self.get_coordinates = None
self.get_coordinates_pdb = None
self.get_coordinates_xyz = None
self.kabsch_rmsd = None
self.kabsch_rotate = None
self.kabsch_algo = None
self.quaternion_rmsd = None
self.quaternion_rotate = None
self.quaternion_transform = None
self.makeQ = None
self.makeW = None
self.print_coordinates = None
self.reorder_brute = None
self.reorder_hungarian = None
self.reorder_distance = None
self.check_reflections = None
def assertListAlmostEqual(self, list1, list2, places):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, places=places)
def test_get_coordinates_pdb(self):
infile = self.xyzpath + 'ci2_1.pdb'
coords = self.get_coordinates_pdb(infile)
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
def test_get_coordinates_xyz(self):
infile = self.xyzpath + 'ethane.xyz'
coords = self.get_coordinates_xyz(infile)
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_get_coordinates(self):
infile = self.xyzpath + 'ci2_1.pdb'
coords = self.get_coordinates(infile, 'pdb')
self.assertEqual('N', coords[0][0])
self.assertEqual([-7.173, -13.891, -6.266], coords[1][0].tolist())
infile = self.xyzpath + 'ethane.xyz'
coords = self.get_coordinates(infile, 'xyz')
self.assertEqual('C', coords[0][0])
self.assertEqual([-0.98353, 1.81095, -0.0314], coords[1][0].tolist())
def test_centroid(self):
a1 = np.array([-19.658, 17.18, 25.163], dtype=float)
a2 = | np.array([-20.573, 18.059, 25.88], dtype=float) | numpy.array |
import time
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
import cooler
import numpy as np
import copy
import os,sys
import model
from utils import operations
import tensorflow as tf
from tensorflow import keras
tf.keras.backend.set_floatx('float32')
# 'Dixon2012-H1hESC-HindIII-allreps-filtered.10kb.cool'
# data from ftp://cooler.csail.mit.edu/coolers/hg19/
def addAtPos(mat1, mat2, xypos = (0,0)):
pos_v, pos_h = xypos[0], xypos[1] # offset
v1 = slice(max(0, pos_v), max(min(pos_v + mat2.shape[0], mat1.shape[0]), 0))
h1 = slice(max(0, pos_h), max(min(pos_h + mat2.shape[1], mat1.shape[1]), 0))
v2 = slice(max(0, -pos_v), min(-pos_v + mat1.shape[0], mat2.shape[0]))
h2 = slice(max(0, -pos_h), min(-pos_h + mat1.shape[1], mat2.shape[1]))
mat1[v1, h1] += mat2[v2, h2]
return mat1
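# Hedged usage sketch (added, not part of the original): paste a 2x2 block of ones into a
# 4x4 zero matrix with its top-left corner at row 1, column 1; only rows 1-2 and
# columns 1-2 of the result are nonzero.
_added_check = addAtPos(np.zeros((4, 4)), np.ones((2, 2)), (1, 1))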
def extract_features(path='./data',
raw_path='raw',
raw_file='Rao2014-GM12878-DpnII-allreps-filtered.10kb.cool',
model_path=None,
sr_path='output',
chromosome='22',
scale=4,
len_size=200,
genomic_distance=2000000,
start=None, end=None):
sr_file = raw_file.split('-')[0] + '_' + raw_file.split('-')[1] + '_' + raw_file.split('-')[2] + '_' + raw_file.split('.')[1]
directory_sr = os.path.join(path, sr_path, sr_file, 'extract_features')
if not os.path.exists(directory_sr):
os.makedirs(directory_sr)
# get generator model
if model_path is None:
gan_model_weights_path = './our_model/saved_model/gen_model_' + \
str(len_size)+'/gen_weights'
else:
gan_model_weights_path = model_path
Generator = model.make_generator_model(len_high_size=len_size, scale=4)
Generator.load_weights(gan_model_weights_path)
print(Generator)
name = os.path.join(path, raw_path, raw_file)
c = cooler.Cooler(name)
resolution = c.binsize
mat = c.matrix(balance=True).fetch('chr'+chromosome)
[Mh, idx] = operations.remove_zeros(mat)
print('Shape HR: {}'.format(Mh.shape), end='\t')
if start is None:
start = 0
if end is None:
end = Mh.shape[0]
Mh = Mh[start:end, start:end]
print('MH: {}'.format(Mh.shape), end='\t')
Ml = operations.sampling_hic(Mh, scale**2, fix_seed=True)
print('ML: {}'.format(Ml.shape))
# Normalization
# the input should not be type of np.matrix!
Ml = | np.asarray(Ml) | numpy.asarray |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.spinoffs.oryx.distributions.distributions_extensions."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
from oryx import core
from oryx import distributions
from oryx.core import ppl
from oryx.internal import test_util
from tensorflow_probability.substrates import jax as tfp
del distributions # only needed registering distributions with transformations
tfd = tfp.distributions
# Use lambdas to defer construction of distributions
# pylint: disable=g-long-lambda
DISTRIBUTIONS = [
('normal_scalar_args', tfd.Normal, lambda:
(0., 1.), lambda: {}, 0., [0., 1.]),
('normal_scalar_kwargs', tfd.Normal, lambda: (), lambda: {
'loc': 0.,
'scale': 1.
}, 0., [0., 1.]),
('mvn_diag_args', tfd.MultivariateNormalDiag, lambda:
(np.zeros(5, dtype=np.float32), np.ones(5, dtype=np.float32)), lambda: {},
np.zeros(5, dtype=np.float32),
[np.zeros(5, dtype=np.float32),
np.ones(5, dtype=np.float32)]),
('mvn_diag_kwargs', tfd.MultivariateNormalDiag, lambda: (), lambda: {
'loc': np.zeros(5, dtype=np.float32),
'scale_diag': np.ones(5, dtype=np.float32)
}, np.zeros(5, dtype=np.float32),
[np.zeros(5, dtype=np.float32),
np.ones(5, dtype=np.float32)]),
('independent_normal_args', tfd.Independent, lambda:
(tfd.Normal( | np.zeros(5, dtype=np.float32) | numpy.zeros |
"""This program is a collection of utility functions for the data."""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import shutil
if sys.version_info.major == 2:
from itertools import izip as zip
def wrl_to_np(wrl_name):
"""Return a mesh as a np data format of vertices and faces."""
vertices = []
faces = []
colors = []
in_geometry_obj = False
scan_vertices = False
scan_faces = False
scan_colors = False
with open(wrl_name) as wrl:
for line in wrl:
words = line.split()
if "IndexedFaceSet" in words:
in_geometry_obj = True
elif 'point' in words:
scan_vertices = True
elif 'coordIndex' in words:
scan_faces = True
elif 'color' in words and '[' in words:
scan_colors = True
elif in_geometry_obj and scan_vertices:
if len(words) == 3:
string_verts = [words[0], words[1], words[2][:-1]]
vertix = [float(i) for i in string_verts]
vertices.append(vertix)
elif ']' in words:
scan_vertices = False
elif in_geometry_obj and scan_faces:
if len(words) == 4:
string_faces = [words[0], words[1], words[2]]
face = [int(i) for i in string_faces]
faces.append(face)
elif ']' in words:
scan_faces = False
elif in_geometry_obj and scan_colors:
if len(words) == 3:
string_color = [words[0], words[1], words[2][:-1]]
color = [float(i) for i in string_color]
colors.append(color)
elif ']' in words:
scan_colors = False
# NOTE we ignore the normal values for now
np_vertices = np.array(vertices)
np_faces = np.array(faces)
np_colors = np.array(colors)
print("%d vertices and %d faces" % (len(np_vertices), len(np_faces)))
return np_vertices, np_faces, np_colors
def get_shape_from_msms_output(name, norms=False):
"""Return a numpy representation of the surface from a mol name."""
verts_file = name + '.vert'
faces_file = name + '.face'
verts = []
vertsn = []
faces = []
with open(verts_file) as f:
for line in f:
words = line.split()
if len(words) == 9:
verts.append([float(x) for x in words[:3]])
if norms:
vertsn.append([float(x) for x in words[3:6]])
with open(faces_file) as f:
for line in f:
words = line.split()
if len(words) == 5:
faces.append([int(x) for x in words[:3]])
np_vertices = np.array(verts)
np_faces = np.array(faces) - 1 # correct for msms indexing
clean_verts, clean_faces = clean_duplicates(np_vertices, np_faces)
mesh = np.array([clean_verts, clean_faces])
np.save(name, mesh)
def save_wrl_to_np_file(wrl_file):
"""Save np array of wrl mesh to a file."""
mol_mesh = np.array(wrl_to_np(wrl_file))
np.save(wrl_file, mol_mesh)
def clean_duplicates(verts, faces):
"""MSMS sometimes creates duplicate vertices so these need to be cleaned"""
# set of coords
# find duplicates
coords = set([])
dup = []
for index, coordinate in enumerate(verts):
coordinate_tup = tuple(coordinate)
if coordinate_tup not in coords:
coords.update([coordinate_tup])
else:
# if the coordinate is already there then perturb it
dup.append(index)
print(coordinate)
print(dup)
dup_verts = list(set(np.where(verts == verts[dup])[0]))
faces_to_replace = []
faces_to_delete = []
faces_ix = []
vert_to_keep = dup_verts[0]
print(dup_verts)
for index, face in enumerate(faces):
num_dups = sum([1 if x in dup_verts else 0 for x in face])
if num_dups == 1:
faces_to_replace.append([vert_to_keep if x in
dup_verts else x for x in face])
faces_ix.append(index)
elif num_dups > 1:
# there will be two or 3 duplicated elements
# which is a line or point
# TODO ADD CHECK FOR LINE
faces_to_delete.append(index)
# Change faces
for ix, face in zip(faces_ix, faces_to_replace):
faces[ix] = face
# Find new duplicates
final_faces = []
for ix, face in zip(faces_ix, faces_to_replace):
if set(face) in [set(x) for x in final_faces]:
faces_to_delete.append(ix)
print(faces[faces_ix])
# delete lines and points
print('pre delete', len(faces))
faces = np.delete(faces, faces_to_delete, axis=0)
print('post delete', len(faces))
    # NOTE this leaves some redundant vertices in the array that are never used
# but this shouldn't be too much of a problem
return verts, faces
def get_shape_from_off(fname, debug=False):
"""Save numpy pickle of mesh"""
vertices = []
faces = []
in_verts = False
in_faces = False
with open(fname) as f:
for line in f:
words = line.split()
if not in_verts and len(words) == 3:
in_verts = True
elif in_verts and len(words) == 3:
vertix = [float(n) for n in words]
vertices.append(vertix)
elif in_verts and len(words) == 4:
in_verts = False
face = [int(n) for n in words]
faces.append(face[1:])
in_faces = True
elif in_faces and len(words) == 4:
face = [int(n) for n in words]
faces.append(face[1:])
np_vertices = np.array(vertices)
np_faces = np.array(faces)
if debug:
print("%d vertices and %d faces" % (len(np_vertices), len(np_faces)))
mesh = | np.array([np_vertices, np_faces]) | numpy.array |
import os
import numpy as np
import h5py
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import gc
import constants
def get_pixels_metadata(bg=False, n=-1, delta_R_min=float('-inf'), delta_R_max=float('inf'), same_file=False):
"""Return pixel data and metadata for either the octets or singlets.
Return:
pixels -- a (n, jet image width^2) numpy array of the pixel data.
metadata -- a (n, 4) pandas dataframe containing all other data, such as
mass, jet pull, and delta R.
Arguments:
    bg -- true (false) if the background (signal) data should be collected.
n -- the number of samples to collect. If n == -1, all samples will be collected.
delta_R_min -- the minimum delta R allowed for a sample to be included.
delta_R_max -- the maximum delta R allowed for a sample to be included.
The pixel data is a (n, jet image width^2) numpy array.
The metadata is a (n, 4) pandas array.
"""
print("[data] Getting pixel data ...")
if bg:
h5file = constants.BG_H5
else:
h5file = constants.SIG_H5
print("[data] Loading from {} ...".format(h5file))
data = h5py.File(h5file, 'r')
sig_cutoff = int(np.sum(data['meta_variables/signal'][()]))
size = data['meta_variables/pull1'][()].shape[0]
if n == -1:
metadata = np.zeros((size, 4))
metadata[:, 0] = np.array(data['meta_variables/pull1'][()])
metadata[:, 1] = np.array(data['meta_variables/pull2'][()])
metadata[:, 2] = np.array(data['meta_variables/jet_mass'][()])
metadata[:, 3] = np.array(data['meta_variables/jet_delta_R'][()])
pixels = data['images'][()]
else:
metadata = | np.zeros((n, 4)) | numpy.zeros |
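# Hypothetical usage sketch of get_pixels_metadata (not from the original project;
# the cut values below are placeholders): load every signal sample, keeping only
# jets whose delta R is at least 0.5.
def _example_load_signal_pixels():
    pixels, metadata = get_pixels_metadata(bg=False, n=-1, delta_R_min=0.5)
    print(pixels.shape, metadata.shape)
    return pixels, metadata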
import os
import pickle, glob, shutil
import numpy as np
import pandas as pd
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.RBFNN_module import rbf_model
from Fuzzy_clustering.ver_tf2.RBF_ols import rbf_ols_module
from Fuzzy_clustering.ver_tf2.CNN_module import cnn_model
from Fuzzy_clustering.ver_tf2.CNN_module_3d import cnn_3d_model
from Fuzzy_clustering.ver_tf2.LSTM_module_3d import lstm_3d_model
from Fuzzy_clustering.ver_tf2.Combine_module_train import combine_model
from Fuzzy_clustering.ver_tf2.Clusterer import clusterer
from Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from datetime import datetime
from Fuzzy_clustering.ver_tf2.imblearn.over_sampling import BorderlineSMOTE, SVMSMOTE, SMOTE,ADASYN
import time, logging, warnings, joblib
class global_train(object):
def __init__(self, static_data, x_scaler):
self.istrained = False
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
try:
self.load(self.cluster_dir)
except:
pass
self.static_data=static_data
self.model_type=static_data['type']
self.x_scaler = x_scaler
self.methods=static_data['project_methods']
self.combine_methods=static_data['combine_methods']
self.rated=static_data['rated']
self.n_jobs=static_data['njobs']
self.var_lin = static_data['clustering']['var_lin']
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
self.data_dir = os.path.join(self.cluster_dir, 'data')
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
logger = logging.getLogger('Glob_train_procedure' + '_' +self.model_type)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.cluster_dir, 'log_train_procedure.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger = logger
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def split_dataset(self, X, y, act, X_cnn=np.array([]), X_lstm=np.array([])):
if len(y.shape)>1:
y=y.ravel()
if len(act.shape)>1:
act=act.ravel()
self.N_tot, self.D = X.shape
X_train, X_test1, y_train, y_test1, mask_test1 = split_continuous(X, y, test_size=0.15, random_state=42, mask=False)
cvs = []
for _ in range(3):
X_train1 = | np.copy(X_train) | numpy.copy |
import argparse
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import load_model
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(description='My eigen-face batch tester')
arg_parser.add_argument('--model', dest='model_file', type=str, default='new.npy')
args = arg_parser.parse_args()
# Reload model
size, projected, components, mean, centered_data, labels = load_model(args.model_file)
test_photos = [f'./data/processed/{i}/{j}.png' for i in range(1, 42) for j in range(6, 11)]
dest_labels = np.array([i for i in range(1, 42) for _ in range(6, 11)])
fig = plt.figure()
res = []
for n_pc in tqdm(range(1, len(dest_labels) + 1)):
suc_count = 0
_components = components[:n_pc]
_projected = projected[:, :n_pc]
for test_photo, dest_label in zip(test_photos, dest_labels):
            test_data = cv2.equalizeHist(cv2.resize(cv2.imread(test_photo, cv2.IMREAD_GRAYSCALE), (size, size))).reshape(-1)
project_vector = (test_data - mean).dot(_components.T)
distances = np.sum((_projected - project_vector) ** 2, axis=1)
idx = | np.argmin(distances) | numpy.argmin |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import warnings
import numpy as np
from .widget import Widget
from ...util.np_backport import nanmean
class Grid(Widget):
"""
Widget that automatically sets the position and size of child Widgets to
proportionally divide its internal area into a grid.
Parameters
----------
spacing : int
Spacing between widgets.
**kwargs : dict
Keyword arguments to pass to `Widget`.
"""
def __init__(self, spacing=6, **kwargs):
from .viewbox import ViewBox
self._next_cell = [0, 0] # row, col
self._cells = {}
self._grid_widgets = {}
self.spacing = spacing
self._n_added = 0
self._default_class = ViewBox # what to add when __getitem__ is used
Widget.__init__(self, **kwargs)
def __getitem__(self, idxs):
"""Return an item or create it if the location is available"""
if not isinstance(idxs, tuple):
idxs = (idxs,)
if len(idxs) == 1:
idxs = idxs + (slice(0, 1, None),)
elif len(idxs) != 2:
raise ValueError('Incorrect index: %s' % (idxs,))
lims = np.empty((2, 2), int)
for ii, idx in enumerate(idxs):
if isinstance(idx, int):
idx = slice(idx, idx + 1, None)
if not isinstance(idx, slice):
raise ValueError('indices must be slices or integers, not %s'
% (type(idx),))
if idx.step is not None and idx.step != 1:
raise ValueError('step must be one or None, not %s' % idx.step)
start = 0 if idx.start is None else idx.start
end = self.grid_size[ii] if idx.stop is None else idx.stop
lims[ii] = [start, end]
layout = self.layout_array
existing = layout[lims[0, 0]:lims[0, 1], lims[1, 0]:lims[1, 1]] + 1
if existing.any():
existing = set(list(existing.ravel()))
ii = list(existing)[0] - 1
if len(existing) != 1 or ((layout == ii).sum() !=
np.prod(np.diff(lims))):
raise ValueError('Cannot add widget (collision)')
return self._grid_widgets[ii][-1]
spans = | np.diff(lims) | numpy.diff |
import time
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import cm
from matplotlib.colors import LightSource
ID = 0
R_EXP = 1
R_OBS = 2
P_EXP = 3
P_OBS = 5
class AngularRepresentation:
angle_id_ = 0
position_expected_ = np.array([], dtype=np.double)
position_observed_ = np.array([], dtype=np.double)
range_expected_ = np.array([], dtype=np.double)
range_observed_ = np.array([], dtype=np.double)
def __init__(self, angle_id, range_expected, range_observed, position_expected, position_observed):
self.angle_id_ = angle_id
self.range_expected_ = np.concatenate([self.range_expected_, np.array([range_expected], dtype=np.double)])
self.range_observed_ = np.concatenate([self.range_observed_, np.array([range_observed], dtype=np.double)])
self.position_expected_ = np.array([[position_expected[0]], [position_expected[1]]], dtype=np.double)
self.position_observed_ = np.array([[position_observed[0]], [position_observed[1]]], dtype=np.double)
def add(self, range_expected, range_observed, position_expected, position_observed):
# if hasattr(self, 'range_exp_cutoff'):
# if range_expected > self.range_exp_cutoff:
# return
# if hasattr(self, 'range_obs_cutoff'):
# if range_observed > self.range_obs_cutoff:
# return
assert (np.isnan(range_expected) or np.isnan(range_observed) or np.isnan(position_expected).any() or np.isnan(
position_observed).any()) == False, "read nan values"
self.range_expected_ = np.concatenate([self.range_expected_, np.array([range_expected], dtype=np.double)])
self.position_expected_ = np.concatenate([self.position_expected_, position_expected.reshape(2, 1)], axis=0)
self.range_observed_ = np.concatenate([self.range_observed_, np.array([range_observed], dtype=np.double)])
self.position_observed_ = np.concatenate([self.position_observed_, position_observed.reshape(2, 1)], axis=0)
def set_cutoff(self, range_exp_cutoff=np.finfo(np.double).max, range_obs_cutoff=np.finfo(np.double).max):
self.range_exp_cutoff = min(range_exp_cutoff, range_obs_cutoff)
self.range_obs_cutoff = min(range_exp_cutoff, range_obs_cutoff)
def normalize_observations(self):
'''
Calculates the distance between observations and expected measurements
:return:
'''
        if not hasattr(self, 'normalized_observations_'):
            self.normalized_observations_ = self.range_expected_ - self.range_observed_
def getIdxArray(self):
if not hasattr(self, 'idx_array_'):
            self.idx_array_ = np.array([self.angle_id_ for _ in range(len(self.range_observed_))], dtype=np.double)
return self.idx_array_
def getMaxExp(self):
if not hasattr(self, 'maxval_exp'):
self.maxval_exp = np.max(self.range_expected_)
return self.maxval_exp
def getMaxObs(self):
if not hasattr(self, 'maxval_obs'):
self.maxval_obs = np.max(self.range_observed_)
return self.maxval_obs
def getMinExp(self):
if not hasattr(self, 'minval_exp'):
self.minval_exp = np.min(self.range_expected_)
return self.minval_exp
def getMinObs(self):
if not hasattr(self, 'minval_obs'):
self.minval_obs = np.min(self.range_observed_)
return self.minval_obs
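# Hypothetical usage sketch (not in the original script): accumulate two scans for a
# single beam angle and inspect the expected-minus-observed residuals. All numbers
# are illustrative only.
def _example_angular_representation():
    rep = AngularRepresentation(angle_id=42,
                                range_expected=2.0, range_observed=1.9,
                                position_expected=np.array([2.0, 0.0]),
                                position_observed=np.array([1.9, 0.0]))
    rep.add(2.1, 2.0, np.array([2.1, 0.0]), np.array([2.0, 0.0]))
    rep.normalize_observations()
    return rep.normalized_observations_   # approximately [0.1, 0.1]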
class StatisticsEvaluator:
def __init__(self):
pass
def parse(file, max_lines=100000000):
lines = []
cntr = 0
for l in file.readlines():
if cntr >= max_lines:
return lines
lelems = l.split(", ")
if len(lelems) != 7:
print("WARNING: line {} inconsistent, 7 entries per line expected, therefore aborting parse operation " % (
len(lines) + 1))
return lines
# print('%s, %s, %s, %s, %s' % (lelems[0], lelems[R_EXP], lelems[R_OBS], lelems[P_EXP], lelems[P_OBS]))
lines.append(lelems)
cntr += 1
return lines
def agglomerate(lines):
'''
agglomerates for each angle the respective measurements to perform a statistic on how many measurements
lie within the boundaries of the expected measurements etc...
:param lines: the individual lines read from result.csv
:return: datastructure containing the agglomerated result for further processing
'''
angle2measurements = {}
for line in lines:
angle_id = np.double(line[ID])
range_exp = np.double(line[R_EXP])
range_obs = np.double(line[R_OBS])
position_exp = np.array([np.double(line[P_EXP]), np.double(line[P_EXP + 1])], dtype=np.double)
position_obs = np.array([np.double(line[P_OBS]), np.double(line[P_OBS + 1])], dtype=np.double)
val = angle2measurements.get(angle_id)
if val is None:
angle2measurements[angle_id] = AngularRepresentation(angle_id,
range_exp,
range_obs,
position_exp,
position_obs)
else:
val.add(range_exp,
range_obs,
position_exp,
position_obs)
return angle2measurements
def eval(angle2meas):
total_meas = len(angle2meas.items())
print("eval: ", total_meas)
cntr = 0
for k, v in angle2meas.items():
v.normalize_observations()
print("", cntr, total_meas)
cntr += 1
# print("%d %lf %lf" % (key, table_range[key][0], table_range[key][1]))
def lookup_table(table, x, y):
v = table.get((x, y))
if v is None:
return 0
return 1.0
def hist3d_thrun_dbg(angle2meas, binsize_xy):
fig = plt.figure()
idxs = np.array([], dtype=np.double)
max_exp_val = np.finfo(np.double).min
min_exp_val = np.finfo(np.double).max
for key, val in angle2meas.items():
max_exp_val = max(val.getMaxExp(), max_exp_val)
min_exp_val = min(val.getMinExp(), min_exp_val)
binrange_y = max_exp_val - min_exp_val
binshape_xy = (np.int(np.ceil(binrange_y / binsize_xy[0])), np.int(np.ceil(binrange_y / binsize_xy[1])))
grid = np.zeros(shape=binshape_xy, dtype=np.double)
X, Y = np.meshgrid(np.arange(0, binshape_xy[0], 1), np.arange(0, binshape_xy[1], 1))
print(max_exp_val)
print(min_exp_val)
print(binshape_xy)
for key, val in angle2meas.items():
y_exp_idx = ((val.range_expected_ - min_exp_val) / binsize_xy[1]).astype(np.int)
grid[y_exp_idx, y_exp_idx] += 1
ax = fig.add_subplot(111, projection='3d')
# Construct arrays for the anchor positions of the 16 bars.
# Note: np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten xpos,
# ypos in column-major order. For numpy >= 1.7, we could instead call meshgrid
# with indexing='ij'.
# Construct arrays with the dimensions for the 16 bars.
# ls = LightSource(270, 45)
# rgb = ls.shade(grid, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
ax.plot_surface(X, Y, grid, rstride=1, cstride=1, # facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
plt.show(block=True)
def scatter_plot(angle2meas):
fig = plt.figure()
scatter_xrange = np.array([], dtype=np.double)
scatter_yrange = np.array([], dtype=np.double)
for key, val in angle2meas.items():
scatter_xrange = np.concatenate([scatter_xrange, val.range_observed_])
scatter_yrange = np.concatenate([scatter_yrange, val.range_expected_])
axsc = fig.add_subplot(111)
axsc.scatter(scatter_xrange, scatter_yrange, marker='+')
def hist3d_thrun(angle2meas, binsize_xy, normalize_total=False, normalize_rows=True, medianize=True, switch_axes=False,
compute_ratio=True):
max_obs_val = np.finfo(np.double).min
min_obs_val = np.finfo(np.double).max
max_exp_val = np.finfo(np.double).min
min_exp_val = np.finfo(np.double).max
for key, val in angle2meas.items():
max_obs_val = max(val.getMaxObs(), max_obs_val)
min_obs_val = min(val.getMinObs(), min_obs_val)
max_exp_val = max(val.getMaxExp(), max_exp_val)
min_exp_val = min(val.getMinExp(), min_exp_val)
print("minmax exp")
print(min_exp_val)
print(max_exp_val)
print("-----------")
print("minmax obs")
print(min_obs_val)
print(max_obs_val)
print("-----------")
binrange_x = max_obs_val
binrange_y = max_exp_val
binshape_xy = (np.int(np.ceil(binrange_x / binsize_xy[0])), np.int(np.ceil(binrange_y / binsize_xy[1])))
binshape_xy = (max(binshape_xy[0], binshape_xy[1]), max(binshape_xy[0], binshape_xy[1]))
grid = np.zeros(shape=binshape_xy, dtype=np.double)
if not switch_axes:
X, Y = np.meshgrid(np.arange(0, binshape_xy[0], 1), np.arange(0, binshape_xy[1], 1))
else:
X, Y = np.meshgrid(np.arange(binshape_xy[0] - 1, -1, -1), np.arange(0, binshape_xy[1], 1))
X = X * binsize_xy[0]
Y = Y * binsize_xy[1]
print(binsize_xy)
print(binshape_xy)
print(grid.shape)
print(X.shape)
print(Y.shape)
print(max_obs_val)
print(min_obs_val)
print(max_exp_val)
print(min_exp_val)
print(binshape_xy)
total_measurements = 0
for key, val in angle2meas.items():
x_obs_idx = (val.range_observed_ / np.double(binsize_xy[0])).astype(np.int)
y_exp_idx = (val.range_expected_ / np.double(binsize_xy[1])).astype(np.int)
assert np.isnan(val.range_expected_).any() == False, "nan values present (obs)"
assert np.isnan(val.range_observed_).any() == False, "nan values present (exp)"
assert len(x_obs_idx) == len(y_exp_idx), "not same number of obs and exp"
# if switch_axes:
# for iii in range(len(x_obs_idx)):
# grid[grid.shape[0] - 1 - x_obs_idx[iii], y_exp_idx[iii]] += 1
# else:
for iii in range(len(x_obs_idx)):
grid[x_obs_idx[iii], y_exp_idx[iii]] += 1
total_measurements += len(x_obs_idx)
ratio_out_in = np.array([[], []], dtype=np.double)
if compute_ratio:
for binc in range(grid.shape[1]):
coutliers = np.sum(grid[:, binc]) - grid[binc, binc]
            ratio_out_in = np.concatenate([ratio_out_in, np.array([[coutliers], [grid[binc, binc]]])], axis=1)
if medianize:
for c in range(grid.shape[1]):
tmp_row = np.ndarray(shape=(grid.shape[0] - 2,), dtype=np.double)
for r in range(1, grid.shape[0] - 1):
tmp_row[r - 1] = np.median(grid[(r - 1):(r + 1), c])
grid[0, c] = np.median([grid[0, c], grid[1, c]])
grid[grid.shape[0] - 1, c] = np.median([grid[grid.shape[0] - 2, c], grid[grid.shape[0] - 1, c]])
grid[1:(grid.shape[0] - 1), c] = tmp_row
if normalize_total:
grid = grid / total_measurements
elif normalize_rows:
rowsum = np.sum(grid, axis=0)
for c in range(grid.shape[1]):
if rowsum[c] != 0:
grid[:, c] = grid[:, c] / rowsum[c]
assert abs(np.sum(grid[:, c]) - 1.00) < 0.0001, "Not normalized {}, {}".format(np.sum(grid[:, c]),
grid[:, c])
return X, Y, grid, ratio_out_in
def hist3d_plot(angle2meas):
fig = plt.figure()
idxs = np.array([], dtype=np.double)
normalized_vals = np.array([], dtype=np.double)
for key, val in angle2meas.items():
idxs = np.concatenate(
[idxs, np.array([key for _ in range(len(val.normalized_observations_))], dtype=np.double)])
normalized_vals = np.concatenate([normalized_vals, val.normalized_observations_])
ax = fig.add_subplot(111, projection='3d')
hist, xedges, yedges = np.histogram2d(idxs, normalized_vals, bins=[180, 10])
# Construct arrays for the anchor positions of the 16 bars.
# Note: np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten xpos,
# ypos in column-major order. For numpy >= 1.7, we could instead call meshgrid
# with indexing='ij'.
xpos, ypos = | np.meshgrid(xedges[:-1], yedges[:-1]) | numpy.meshgrid |
# PYTHON 3
#
# Author: <NAME>
# Created: 1 February 2013 IDL, Converted to Python 3 12th Jan 2021
# Last update: 12 January 2021
# Location: /home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# For selected variable, grid the data and uncertainties, including the gridbox sampling uncertainty
# Read in list of goods
# - IF RAW: Read in raw netCDF of abs, anoms, clims.
# - IF PHA/IDPHA/PHADPD: Read in homogenised netCDF of abs, anoms, err, adjE, obsE, clmE, clims and climsds.
# move from gridbox to gridbox starting with -177.5W, 87.5S
# if there is a station then begin
# find all stations in GB - store lat, lon, elev
# calc gridbox mean (abs, anoms, clims), standard deviation (sds of abs), uncertainties (combined assuming no correlation and unique values) - already 2 sigma when read in!!!
# For Tw extremes calculate the Quality Scores for each gridbox and output:
# HQ1: based on number of stations within gridbox
# - 0 = > 1 station (should this be higher?)
# - 1 = 1 station
# HQ2: based on the number of inhomogeneity/adjustment per station detected
# - 0 = 0 inhomogeneity/adjustment detected
# - 1 = 0-1 inhomogeneity/adjustment per station detected
# - 2 = 1 inhomogeneity/adjustment per station detected
# HQ3: based on number of very large (>= 2 degrees) adjustments per station detected
# - 0 = 0 very large adjustments per station
# - 1-9 = > 0 and < 1 very large adjustments per station, scaled
# - 10 = 1 very large adjustment per station
# HQ4: based on number of large (>= 1 and <2 degrees) adjustments per station detected
# - 0 = 0 large adjustments per station
# - 1-4 = > 0 and < 1 large adjustments per station, scaled
# - 5 = 1 large adjustment per station
# HQ5: based on number of moderate (>= 0.5 and <1 degrees) adjustments per station detected
# - 0 = 0 moderate adjustments per station
# - 1-2 = > 0 and < 1 moderate adjustments per station, scaled
# - 3 = 1 moderate adjustment per station
# HQ6: based on number of small (> 0 and <0.5 degrees) adjustments per station detected
# - 0 = 0 small adjustments per station
# - 0 = > 0 and < 1 small adjustments per station (an HQ will have been allocated by HQ 2)
# - 1 = 1 small adjustment per station
# HQ7: based on average actual adjustment over gridbox month (are adjustments in opposite directions averaging out?)
# - 0 = 0 adjustment over gridbox month
# - 1 = > 0 and < 0.5 degree abs(mean adjustment) over gridbox month
# - 2-3 = >= 0.5 and < 1 degree abs(mean adjustment) over gridbox month, scaled
# - 4-9 = >= 1 and < 2 degree abs(mean adjustment) over gridbox month, scaled
# - 10 = >= 2 degree abs(mean adjustment) over gridbox month
# HQ8: based on average absolute adjustment over gridbox month
# - Mean(absolute adjustments) over gridbox month
# Homogenisation quality score and flag: combines homogenisation quality statistics 1 to 7 using the following method:
# - >=10 = terrible
# - 5-9 = bad
# - 2-4 = iffy
# - 1 = ok
# - 0 = Good
# Call gridbox_sampling_uncertainty.py to compute gridbox sampling error due to missing data and incomplete spatial sampling.
# Not sure how this will work for the days of exceedance
# Write out to netCDF, ascii (abs, anoms, uncertainty) - all errors are 2 sigma errors!!!
# Write out gridding results min/max of each var
#
# -----------------------
# LIST OF MODULES
# -----------------------
#import numpy as np # used
#import numpy.ma as npm # used
#from datetime import datetime # used
#import matplotlib.pyplot as plt
#import sys, os, getopt # used
#import struct
#import glob # used
#import pdb # used
#import netCDF4 as nc4
#
## Kate's Functions
#import CalcHums
#from RandomsRanges import LetterRange
#import gridbox_sampling_uncertainty as gsu
#
# -----------------------
# DATA
# -----------------------
# Input station list of 'good stations':
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/'
# Posthomog<typee><var>_anoms'+CLMlab+'_<goods>HadISDH.'+versiondots+'.txt'
# Input homogenised netCDF files of data with station uncertainties to grid - IDPHA version and PHADPD:
# /scratch/hadkw/UPDATE<YYYY>/MONTHLIES/HOMOG/<typee>NETCDF/<VAR>DIR/' # this will then be PHANETCDF or IDPHANETCDF
# <station>'_anoms<climLAB>_homog.nc'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# > module load scitools/default-current
# > python F13_GridHadISDHFLAT --var <var> --typee <type>
#
## Which variable?
# var = 'dpd' #'dpd','td','t','tw','e','q','rh'
#
## Which homog type?
# typee = 'PHA' #'PHA' (for DPD only),'IDPHA' (for t, e, q, rh and tw),'PHADPD' (for Td)
#
#
# Or ./F13_submit_spice.sh
#
#
# -----------------------
# OUTPUT
# -----------------------
# The gridded netCDF file:
# /scratch/hadkw/UPDATE<YYYY>/STATISTICS/GRIDS/
# HadISDH.land<var>.'+version+'_FLATgrid<homogtype>PHA5by5_anoms8110.nc
# The summary min and max values for each variable within the netCDF file:
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/
# GriddingResults_<versiondots>_anoms8110.txt max/mins of all fields in nc file
#
# THESE ARE OUTPUT AS 2 SIGMA ERRORS!!!
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 7 (27 August 2021)
# ---------
#
# Enhancements
#
# Changes
# Now grids all Tw extremes variables
# Additionally - outputs homogenisation quality scores for the Tw extremes only (because theses are from unhomogenised data)
#
# Bug fixes
#
#
# Version 6 (12 January 2021)
# ---------
#
# Enhancements
# Double checked uncertainty calculations and they are quantitatively the same as for the marine code
# but expressed differently so I have changed the code to match that for the marine.
#
# Changes
# Now Python 3
# Using pythong gridbox_sampling_uncertainty.py rather than IDL code (as used for the marine data)
# gridbox_sampling_uncertainty.py uses HadCRUT.4.3.0.0.land_fraction.py to select land boxes
# gridbox_sampling_uncertainty.py sets rbar to 0.8 if there are missing values rather than the 0.1 previously which
# was far too low. 0.8 is about mid-range for rbar
# Sampling uncertainty is very slightly different order 0.001 in a few places
# We now use the mean number of stations contributing to the gridbox rather than the maximum - this is smaller so
# will result in slightly larger sampling uncertainty, especially in gridboxes with very few stations LARGER UNCERTAINTIES
# Combining uncertainty over gridbox now uses actual numer of stations for that month rather than total over time
# period for that gridbox so new gridbox uncs are LARGER than IDL ones where there are fewer
# stations contributing to the gridbox compared to the total. LARGER UNCERTAINTIES
#
# Bug fixes
# In 2019 I reduced the combined uncertainties because I had thought that *2 made them 4 sigma. I hadn;t noticed the /2 in the equation. So, while the original
# equation of sqrt((staterr/2)^2 + (samperr/2)^2)*2 was pointless it was right and 2019 would have had combined uncertainty that was too small - now corrected!!!
# LARGER UNCERTAINTIES - BY *2
#
#
# Version 5 (29 March 2018)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
# Wrong FILE_SEARCH string was finding multiple files and therefore sometimes reading in the wrong one (with sats/subzeros or duplicate!)
#
# Version 4 (13 February 2018)
# ---------
#
# Enhancements
#Now has param and homogtype called at run time
## Which variable? T first, RH, DPD, q, e, td, tw
#param = 'tw'
## Which homog type?
#homogtype = 'ID' #'ID','DPD' for Td, 'PHA' - req for DPD or PHA versions of all variables
#
# Now looks at Posthomog...lists to get station counts automatically rather than being hard coded
#
# Changes
#
# Bug fixes
# NetCDF err outputs had wrong long_names
#
# Version 3 (1 February 2017)
# ---------
#
# Enhancements
# General tidy up and improved headers
#
# Changes
#
# Bug fixes
#
#
# Version 2 (7 September 2017)
# ---------
#
# Enhancements
# General tidy up and reframe of tweakable variables to make the file/data batching easier for each variable/climatology choice etc.
# Can now work with different anomaly periods 7605 or 8110 which have to be created by create_homogNCDFall_stunc_JAN2015.pro
#
# Changes
#
# Bug fixes
# Fixed bug in sampling error which was only using first 29 years of the 30 year climatology period (missing +1)
# This fix is actually in the calc_samplingerrorJUL2012_nofill.pro.
#
# Version 1 (15 January 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
# climerr is difference for some gridboxes - larger for new compared to old - when old is small new is large???
# sampling error needs to be saved only where there are data - not for all land.
#**** THIS IS WHERE TO ADD UNCERTAINTY ALA BROHAN et al. 2006
# Station error:
# Tob - Tclim + errorCLIM + measurementerror + homogadj + adjuncertainty + reporting error
# Sampling error:
# SE^2 = GBstdev*avg.intersite correlation*(1-avg.intersite corr)
# --------------------------------------------------------
# 1 + ((num stations - 1) * avg.intersite correlation)
# Bias error:
# urbanisation? exposure change? irrigation?
# combine these by adding in quadrature.
# sampling error - after Jones et al. 1997
#Shat^2 = variance of gridbox(extended?) means over climatology period
#n = number of stations contributing to gridbox(extended?) over climatology period
#Xo = correlation decay distance (km) for that gridbox (where correlation = 1/e)
#X = diagonal from bottom left to top right of gridbox(extended?) (km) - use lats, longs and dist_calc
#rbar = (Xo/X)*(1-e(-X/Xo))
#sbar^2 = mean station variance within the gridbox
#sbar^2 = (Shat^2*n)/(1+((n-1)*rbar))
#INFILL empty gridboxes by interpolated Xo and then calculating rbar
#SE^2 = gridbox sampling error
#SE^2 = (sbar^2*rbar*(1-rbar))/(1+((n-1)*rbar))
#SE^2 (where n=0) = sbar^2*rbar (INFILL GB with Shat^2???)
#SEglob^2 = global average sampling error
#SEglob^2 = SEbar^2/Neff
#SEbar^2 = (SUM(SE^2*cos(lat)))/(SUM(cos(lat)))
#Neff = number of effectively independent points
#Neff = (2*R)/F
#R = radius of the earth (6371 km)
#F=(((e((-piR)/Xobar))/R)+(1/R))/((1/(Xobar^2))+(1/R^2))
#Xobar=(SUM(Xo*cos(lat)))/(SUM(cos(lat)))
#******************************************************
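# A hedged, stand-alone sketch of the Jones et al. (1997) gridbox sampling error
# summarised in the notes above. It is NOT the routine used by this build (that is
# gridbox_sampling_uncertainty.py); the function name and inputs are assumptions
# for illustration only.
def _gridbox_sampling_error_sketch(Shat_sq, n_stations, Xo, X):
    ''' Shat_sq: variance of gridbox means over the climatology period
        n_stations: number of stations contributing to the gridbox
        Xo: correlation decay distance (km); X: gridbox diagonal (km) '''
    import numpy as np
    rbar = (Xo / X) * (1. - np.exp(-X / Xo))                             # mean intersite correlation
    sbar_sq = (Shat_sq * n_stations) / (1. + (n_stations - 1.) * rbar)   # mean station variance
    SE_sq = (sbar_sq * rbar * (1. - rbar)) / (1. + (n_stations - 1.) * rbar)
    # (empty gridboxes would instead use SE^2 = sbar^2 * rbar with an interpolated Xo)
    return SE_sq
#******************************************************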
# Global variables and imports
# Inbuilt: (may not all be required actually)
import numpy as np # used
import numpy.ma as npm # used
from datetime import datetime # used
import matplotlib.pyplot as plt
import sys, os, getopt # used
import struct
import glob # used
import pdb # used
import netCDF4 as nc4
#from subprocess import call, check_output, run, PIPE # used
# Kate's Functions
import CalcHums
from RandomsRanges import LetterRange
import gridbox_sampling_uncertainty as gsu
# Start and end years if HardWire = 1
styear = 1973
edyear = 2019
# Which climatology?
MYclst = 1981 # 1976, 1981
MYcled = 2010 # 2005, 2010
CLMlab = str(MYclst)[2:4]+str(MYcled)[2:4]
# Dataset version if HardWire = 1
versiondots = '4.2.0.2019f'
version = 'v420_2019f'
hadisdversiondots = '3.1.0.2019f'
hadisdversion = 'v310_2019f'
# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0
if (HardWire == 0):
#' Read in the config file to get all of the info
with open('F1_HadISDHBuildConfig.txt') as f:
ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
versiondots = ConfigDict['VersionDots']
hadisdversiondots = ConfigDict['HadISDVersionDots']
styear = ConfigDict['StartYear']
edyear = ConfigDict['EndYear']
# AttribDict held in memory to provide global attribute text later
#' Read in the attribute file to get all of the info
with open('F1_HadISDHBuildAttributes.txt') as f:
AttribDict = dict(x.rstrip().split('=', 1) for x in f)
# NOT CODED THIS FUNCTIONALITY YET
## Are we working with homogenised actuals (True) or anomalies (False)?
#Actuals = True
# Set up directories locations
updateyy = str(edyear)[2:4]
updateyyyy = str(edyear)
workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
#workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+updateyyyy
# Set up filenames
INDIRLIST = workingdir+'/LISTS_DOCS/'
INDIRHOM = workingdir+'/MONTHLIES/HOMOG/' # this will then be PHAASCII or IDPHAASCII
#workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
OUTDIRLIST = workingdir+'/LISTS_DOCS/GriddingResults_'+versiondots+'_anoms'+CLMlab+'.txt'
OUTDIRDAT = workingdir+'/STATISTICS/GRIDS/'
# File for output stats but also for reading in missed adjustment uncertainties
OUTPUTLOG = workingdir+'/LISTS_DOCS/OutputLogFile'+versiondots+'.txt'
# Set up variables
MDI = -1e+30
INTMDI = -999.
LatBox = 5. # latitude gridbox size
LonBox = 5. # longitude gridbox size
# Dictionaries for param, units, homogdirprefix, STATION FILE PREFIX, standard name, long name, raw data suffix(only for test run)
ParamDict = dict([('q',['q','g/kg','IDPHA','Q','specific_humidity','monthly mean 2m specific humidity','qhum']),
('rh',['RH','%rh','IDPHA','RH','relative_humidity','monthly mean 2m relative humidity','rhum']),
('t',['T','deg C','IDPHA','T','drybulb_temperature','monthly mean 2m dry bulb temperature','temp']), # Note this needs to be changed to IDPHAMG later
('td',['Td','deg C','IDPHA','TD','dewpoint_temperature','monthly mean 2m dew point temperature','dewp']),
('tw',['Tw','deg C','IDPHA','TW','wetbulb_temperature','monthly mean 2m wetbulb temperature','twet']),
('e',['e','hPa','IDPHA','E','vapour_pressure','monthly mean 2m vapour pressure','evap']),
('dpd',['DPD','deg C','PHA','DPD','dewpoint depression','monthly mean 2m dew point depression','ddep']),
('tw_max',['TwX','deg C','IDPHA','TWMAX','wetbulb_temperature_maximum','monthly maximum 2m wetbulb temperature','twmx']),
('tw_max_95p',['TwX95p','1','IDPHA','TWMAX95','wetbulb_temperature_max95p','days per month maximum >= 95 percentile maximum 2m wetbulb temperature','twx95']),
('tw_mean_95p',['TwM95p','1','IDPHA','TWMEAN95','wetbulb_temperature_mean95p','days per month mean >= 95 percentile mean 2m wetbulb temperature','twm95']),
('tw_max_ex25',['Tw25','1','IDPHA','TW25','wetbulb_temperature_ex25','days per month >= 25 deg 2m wetbulb temperature','tw25']),
('tw_max_ex27',['Tw27','1','IDPHA','TW27','wetbulb_temperature_ex27','days per month >= 27 deg 2m wetbulb temperature','tw27']),
('tw_max_ex29',['Tw29','1','IDPHA','TW29','wetbulb_temperature_ex29','days per month >= 29 deg 2m wetbulb temperature','tw29']),
('tw_max_ex31',['Tw31','1','IDPHA','TW31','wetbulb_temperature_ex31','days per month >= 31 deg 2m wetbulb temperature','tw31']),
('tw_max_ex33',['Tw33','1','IDPHA','TW33','wetbulb_temperature_ex33','days per month >= 33 deg 2m wetbulb temperature','tw33']),
('tw_max_ex35',['Tw35','1','IDPHA','TW35','wetbulb_temperature_ex35','days per month >= 35 deg 2m wetbulb temperature','tw35'])])
# This is needed by WriteNetCDF and writing to ascii
MonthName = ['January ',
'February ',
'March ',
'April ',
'May ',
'June ',
'July ',
'August ',
'September ',
'October ',
'November ',
'December ']
#******************************************************
# SUBROUTINES #
#******************************************************
# READDATA
def ReadData(FileName,typee,delimee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee, delimiter=delimee, encoding='latin-1') # ReadData
# return np.genfromtxt(FileName, dtype=typee, delimiter=delimee) # ReadData
#****************************************************
# MakeDaysSince
def MakeDaysSince(TheStYr,TheStMon,TheEdYr,TheEdMon):
''' Take counts of months since styr, stmn (assume 15th day of month) '''
''' Work out counts of days since styr,stmn, January - incl leap days '''
''' Also work out time boundaries 1st and last day of month '''
''' This can cope with incomplete years or individual months '''
# set up arrays for month month bounds
BoundsArray = np.empty((((TheEdYr-TheStYr)+1)*((TheEdMon-TheStMon)+1),2))
# make a date object for each time point and subtract start date
StartDate = datetime(TheStYr,TheStMon,1,0,0,0) # January
DaysArray = list(np.array([[(datetime(j,i,1,0,0,0)-StartDate).days + 15 for i in np.arange(1,13)] for j in np.arange(TheStYr,TheEdYr+1)]).flat)
BoundsArray[:,0] = list(np.array([[(datetime(j,i,1,0,0,0)-StartDate).days for i in np.arange(1,13)] for j in np.arange(TheStYr,TheEdYr+1)]).flat)
BoundsArray[:,1] = np.append(BoundsArray[1:,0]-1,(datetime(TheEdYr,TheEdMon,31,23,59,59)-StartDate).days)
return DaysArray,BoundsArray
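#*************************************************************************************
# Hedged illustration, not part of the original build: for a single complete year the
# helper above returns twelve mid-month day counts plus the month boundaries.
def _example_make_days_since():
    days, bounds = MakeDaysSince(1973, 1, 1973, 12)
    # days[0] == 15 (the assumed mid-January point); bounds[0] covers January 1973
    print(len(days), days[0], bounds[0])
    return days, bounds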
#*************************************************************************************
# Convert to integers with INTMDI
def IntConvert(OldArr):
OldArr[OldArr.mask] = INTMDI
OldArr = npm.masked_equal(OldArr, INTMDI)
OldArr = OldArr.astype(int)
return OldArr
#**************************************************************************************
# HomogQualityStats
def HomogQualityStats(AdjGB):
''' Using the adjustments for each station for each month provide a range of quality scores:
HQ1: based on number of stations within gridbox
- 0 = > 1 station (should this be higher?)
- 1 = 1 station
HQ2: based on the number of inhomogeneity/adjustment per station detected
- 0 = 0 inhomogeneity/adjustment detected
- 1 = 0-1 inhomogeneity/adjustment per station detected
- 2 = 1 inhomogeneity/adjustment per station detected
HQ3: based on number of very large (>= 2 degrees) adjustments per station detected
- 0 = 0 very large adjustments per station
- 1-9 = > 0 and < 1 very large adjustments per station, scaled
- 10 = 1 very large adjustment per station
HQ4: based on number of large (>= 1 and <2 degrees) adjustments per station detected
- 0 = 0 large adjustments per station
- 1-4 = > 0 and < 1 large adjustments per station, scaled
- 5 = 1 large adjustment per station
HQ5: based on number of moderate (>= 0.5 and <1 degrees) adjustments per station detected
- 0 = 0 moderate adjustments per station
- 1-2 = > 0 and < 1 moderate adjustments per station, scaled
- 3 = 1 moderate adjustment per station
HQ6: based on number of small (> 0 and <0.5 degrees) adjustments per station detected
- 0 = 0 small adjustments per station
- 0 = > 0 and < 1 small adjustments per station (an HQ will have been allocated by HQ 2)
- 1 = 1 small adjustment per station
HQ7: based on average actual adjustment over gridbox month (are adjustments in opposite directions averaging out?)
- 0 = 0 adjustment over gridbox month
- 1 = > 0 and < 0.5 degree abs(mean adjustment) over gridbox month
- 2-3 = >= 0.5 and < 1 degree abs(mean adjustment) over gridbox month, scaled
- 4-9 = >= 1 and < 2 degree abs(mean adjustment) over gridbox month, scaled
- 10 = >= 2 degree abs(mean adjustment) over gridbox month
HQ8: based on average absolute adjustment over gridbox month
- Mean(absolute adjustments) over gridbox month
Homogenisation quality score and flag: combines homogenisation quality statistics 1 to 7 using the following method:
- >=10 = terrible
- 5-9 = bad
- 2-4 = iffy
- 1 = ok
- 0 = Good '''
# Set up arrays for results
HQ1box = npm.masked_equal(np.repeat(MDI,len(AdjGB[0,:])),MDI)
HQ2box = npm.copy(HQ1box)
HQ3box = npm.copy(HQ1box)
HQ4box = npm.copy(HQ1box)
HQ5box = npm.copy(HQ1box)
HQ6box = npm.copy(HQ1box)
HQ7box = npm.copy(HQ1box)
HQ8box = npm.copy(HQ1box)
HQscorebox = npm.copy(HQ1box)
# HQ1: based on number of stations within gridbox
# - 0 = > 1 station (should this be higher?)
# - 1 = 1 station
# For each month count the number of data points present and divide by the total number of station months that could be present
HQ1box[npm.where(npm.count(AdjGB,axis=0) > 1)] = 0.
HQ1box[npm.where(npm.count(AdjGB,axis=0) == 1)] = 1.
# else left as MDI as there are no data for this month
#print('Check HQ1')
#pdb.set_trace()
# HQ2: based on the number of inhomogeneity/adjustment per station detected
# - 0 = 0 inhomogeneity/adjustment detected
# - 1 = 0-1 inhomogeneity/adjustment per station detected
# - 2 = 1 inhomogeneity/adjustment per station detected
HQ2box[npm.where((np.count_nonzero(AdjGB != 0.,axis=0) / npm.count(AdjGB,axis=0)) < 1.)] = 1. # should not include totally missing months but DOES make 0.0 adjustments have a score of 1 - so need to overwrite
HQ2box[npm.where((np.count_nonzero(AdjGB != 0.,axis=0) / npm.count(AdjGB,axis=0)) == 1.)] = 2.
HQ2box[npm.where(npm.sum(AdjGB,axis=0) == 0.)] = 0.
# else left as MDI as there are no data for this month
# Could make this a greater penalty for 1 per station and much lower penalty for <0.5 per station?
#print('Check HQ2box results and masked value')
#pdb.set_trace()
# HQ3: based on number of very large (>= 2 degrees) adjustments per station detected
# - 0 = 0 very large adjustments per station
# - 1-9 = > 0 and < 1 very large adjustments per station, scaled
# - 10 = 1 very large adjustment per station
TmpHQ = np.count_nonzero(abs(AdjGB) >= 2., axis=0) / npm.count(AdjGB,axis=0) # should be a complete nmons array, masked with MDI for missing months
# Now map these fractions from >0 to < 1 to integers from 1-9
HQ3box[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] = npm.round((TmpHQ[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] * 8.) + 1) # does this only map to the right locs?
HQ3box[npm.where(TmpHQ == 1.)] = 10.
HQ3box[npm.where(TmpHQ == 0.)] = 0.
# else left as MDI as there are no data for this month
#print('Check HQ3')
#pdb.set_trace()
# HQ4: based on number of large (>= 1 and <2 degrees) adjustments per station detected
# - 0 = 0 large adjustments per station
# - 1-4 = > 0 and < 1 large adjustments per station, scaled
# - 5 = 1 large adjustment per station
TmpHQ = np.count_nonzero(((abs(AdjGB) >= 1.) & (abs(AdjGB) < 2.)), axis=0) / npm.count(AdjGB,axis=0) # should be a complete nmons array, masked with MDI for missing months
# Now map these fractions from >0 to < 1 to integers from 1-4
HQ4box[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] = npm.round((TmpHQ[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] * 3.) + 1) # does this only map to the right locs?
HQ4box[npm.where(TmpHQ == 1.)] = 5.
HQ4box[npm.where(TmpHQ == 0.)] = 0.
# else left as MDI as there are no data for this month
#print('Check HQ4')
#pdb.set_trace()
# HQ5: based on number of moderate (>= 0.5 and <1 degrees) adjustments per station detected
# - 0 = 0 moderate adjustments per station
# - 1-2 = > 0 and < 1 moderate adjustments per station, scaled
# - 3 = 1 moderate adjustment per station
TmpHQ = np.count_nonzero(((abs(AdjGB) >= 0.5) & (abs(AdjGB) < 1.)), axis=0) / npm.count(AdjGB,axis=0) # should be a complete nmons array, masked with MDI for missing months
# Now map these fractions from >0 to < 1 to integers from 1-2
HQ5box[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] = npm.round((TmpHQ[npm.where((TmpHQ > 0.) & (TmpHQ < 1.))] * 1.) + 1) # does this only map to the right locs?
HQ5box[npm.where(TmpHQ == 1.)] = 3.
HQ5box[npm.where(TmpHQ == 0.)] = 0.
# else left as MDI as there are no data for this month
#print('Check HQ5')
#pdb.set_trace()
# HQ6: based on number of small (> 0 and <0.5 degrees) adjustments per station detected
# - 0 = 0 small adjustments per station
# - 0 = > 0 and < 1 small adjustments per station (an HQ will have been allocated by HQ 2)
# - 1 = 1 small adjustment per station
TmpHQ = np.count_nonzero(((abs(AdjGB) > 0.) & (abs(AdjGB) < 0.5)), axis=0) / npm.count(AdjGB,axis=0) # should be a complete nmons array, masked with MDI for missing months
# Now map these fractions from >0 to < 1 to integers from 1-2
HQ6box[npm.where(TmpHQ < 1.)] = 0.
HQ6box[npm.where(TmpHQ == 1.)] = 1.
#HQ6box[npm.where(TmpHQ == 0.)] = 0.
# else left as MDI as there are no data for this month
#print('Check HQ6')
#pdb.set_trace()
# HQ7: based on average actual adjustment over gridbox month (are adjustments in opposite directions averaging out?)
# - 0 = 0 adjustment over gridbox month
# - 1 = > 0 and < 0.5 degree abs(mean adjustment) over gridbox month
# - 2-3 = >= 0.5 and < 1 degree abs(mean adjustment) over gridbox month, scaled
# - 4-9 = >= 1 and < 2 degree abs(mean adjustment) over gridbox month, scaled
# - 10 = >= 2 degree abs(mean adjustment) over gridbox month
TmpHQ = abs(npm.sum(AdjGB,axis=0) / npm.count(AdjGB,axis=0)) # should be a complete nmons array, masked with MDI for missing months
# Now map these numbers to scales
HQ7box[npm.where((TmpHQ >= 1.) & (TmpHQ < 2.))] = npm.round(((TmpHQ[npm.where((TmpHQ >= 1.) & (TmpHQ < 2.))] - 1.) * 5.) + 4) # does this only map to the right locs?
HQ7box[npm.where((TmpHQ >= 0.5) & (TmpHQ < 1.))] = npm.round((((TmpHQ[npm.where((TmpHQ >= 0.5) & (TmpHQ < 1.))] - 0.5) *2.) * 1.) + 2) # does this only map to the right locs?
HQ7box[npm.where((TmpHQ > 0.) & (TmpHQ < 0.5))] = 1.
HQ7box[npm.where(TmpHQ > 2.)] = 10.
HQ7box[npm.where(TmpHQ == 0.)] = 0.
# else left as MDI as there are no data for this month
#print('Check HQ7')
#pdb.set_trace()
# HQ8: based on average absolute adjustment over gridbox month
# - Mean(absolute adjustments) over gridbox month
HQ8box = npm.mean(abs(AdjGB),axis=0) # should be a complete nmons array, masked with MDI for missing months
# Masked array fill values get set to default after operation, even if I use a npm.where - very annoying
# Work around...
HQ8box[HQ8box.mask == True] = MDI
HQ8box = npm.masked_equal(HQ8box,MDI)
# else left as MDI as there are no data for this month
#print('Check HQ8')
#pdb.set_trace()
# Homogenisation quality score and flag: combines homogenisation quality statistics 1 to 7 using the following method:
# - >=10 = terrible
# - 5-9 = bad
# - 2-4 = iffy
# - 1 = ok
# - 0 = Good
HQscorebox = npm.sum((HQ1box, HQ2box, HQ3box, HQ4box, HQ5box, HQ6box, HQ7box),axis=0) # does this work across axis?
HQscorebox[HQscorebox.mask == True] = MDI
HQscorebox = npm.masked_equal(HQscorebox,MDI)
# else left as MDI as there are no data for this month
# print('Check HQscorebox')
# pdb.set_trace()
HQ1box = IntConvert(HQ1box)
#print('Check IntConvert')
#pdb.set_trace()
HQ2box = IntConvert(HQ2box)
HQ3box = IntConvert(HQ3box)
HQ4box = IntConvert(HQ4box)
HQ5box = IntConvert(HQ5box)
HQ6box = IntConvert(HQ6box)
HQ7box = IntConvert(HQ7box)
HQscorebox = IntConvert(HQscorebox)
return HQ1box, HQ2box, HQ3box, HQ4box, HQ5box, HQ6box, HQ7box, HQ8box, HQscorebox
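#**************************************************************************************
# Hedged illustration only: the build writes out the numeric HQscore, and this helper
# (its name and the flag strings are assumptions) simply restates the score-to-flag
# mapping listed in the HomogQualityStats docstring above.
def HQScoreToFlag(TheScore):
    if TheScore >= 10:
        return 'terrible'
    elif TheScore >= 5:
        return 'bad'
    elif TheScore >= 2:
        return 'iffy'
    elif TheScore == 1:
        return 'ok'
    else:
        return 'good'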
#**************************************************************************************
# WriteNetCDF
def WriteNetCDF(FileName,TheStYr,TheEdYr,TheClims,TheLats, TheLons, TheLatBounds, TheLonBounds, DataObject,DimObject,AttrObject,GlobAttrObject,TheMDI):
    ''' Writes NetCDF4 '''
''' Sort out the date/times to write out and time bounds '''
''' Convert variables using the obtained scale_factor and add_offset: stored_var=int((var-offset)/scale) '''
''' Write to file, set up given dimensions, looping through all potential variables and their attributes, and then the provided dictionary of global attributes '''
# # Attributes and things common to all vars
# add_offset = -100.0 # storedval=int((var-offset)/scale)
# scale_factor = 0.01
# Sort out date/times to write out
TimPoints,TimBounds = MakeDaysSince(int(TheStYr),1,int(TheEdYr),12)
nTims = len(TimPoints)
# Create a new netCDF file - have tried zlib=True,least_significant_digit=3 (and 1) - no difference
ncfw = nc4.Dataset(FileName,'w',format='NETCDF4_CLASSIC') # need to try NETCDF4 and also play with compression but test this first
    # Write out the global attributes supplied in the dictionary
    for attr_name in ('description', 'File_created', 'Title', 'Institution', 'History',
                      'Licence', 'Project', 'Processing_level', 'Acknowledgement',
                      'Source', 'Comment', 'References', 'Creator_name', 'Creator_email',
                      'Version', 'doi', 'Conventions', 'netcdf_type'):
        if (attr_name in GlobAttrObject):
            setattr(ncfw, attr_name, GlobAttrObject[attr_name])
# Loop through and set up the dimension names and quantities
for vv in range(len(DimObject[0])):
ncfw.createDimension(DimObject[0][vv],DimObject[1][vv])
# Go through each dimension and set up the variable and attributes for that dimension if needed
for vv in range(len(DimObject)-2): # ignore first two elements of the list but count all other dictionaries
# print(DimObject[vv+2]['var_name'])
# NOt 100% sure this works in a loop with overwriting
# initiate variable with name, type and dimensions
MyVar = ncfw.createVariable(DimObject[vv+2]['var_name'],DimObject[vv+2]['var_type'],DimObject[vv+2]['var_dims'])
# Apply any other attributes
if ('standard_name' in DimObject[vv+2]):
MyVar.standard_name = DimObject[vv+2]['standard_name']
if ('long_name' in DimObject[vv+2]):
MyVar.long_name = DimObject[vv+2]['long_name']
if ('units' in DimObject[vv+2]):
MyVar.units = DimObject[vv+2]['units']
if ('axis' in DimObject[vv+2]):
MyVar.axis = DimObject[vv+2]['axis']
if ('calendar' in DimObject[vv+2]):
MyVar.calendar = DimObject[vv+2]['calendar']
if ('start_year' in DimObject[vv+2]):
MyVar.start_year = DimObject[vv+2]['start_year']
if ('end_year' in DimObject[vv+2]):
MyVar.end_year = DimObject[vv+2]['end_year']
if ('start_month' in DimObject[vv+2]):
MyVar.start_month = DimObject[vv+2]['start_month']
if ('end_month' in DimObject[vv+2]):
MyVar.end_month = DimObject[vv+2]['end_month']
if ('bounds' in DimObject[vv+2]):
MyVar.bounds = DimObject[vv+2]['bounds']
# Provide the data to the variable
if (DimObject[vv+2]['var_name'] == 'time'):
MyVar[:] = TimPoints
if (DimObject[vv+2]['var_name'] == 'bounds_time'):
MyVar[:,:] = TimBounds
if (DimObject[vv+2]['var_name'] == 'month'):
# pdb.set_trace()
# MyVar[mm,:] = [nc4.stringtochar(np.array(MonthName[mm],dtype='S10')) for mm in np.arange(1,13)]
MyVar[:,:] = [[MonthName[mm][cc] for cc in range(10)] for mm in range(12)]
if (DimObject[vv+2]['var_name'] == 'latitude'):
MyVar[:] = TheLats
if (DimObject[vv+2]['var_name'] == 'bounds_lat'):
MyVar[:,:] = TheLatBounds
if (DimObject[vv+2]['var_name'] == 'longitude'):
MyVar[:] = TheLons
if (DimObject[vv+2]['var_name'] == 'bounds_lon'):
MyVar[:,:] = TheLonBounds
# Go through each variable and set up the variable attributes
for vv in range(len(AttrObject)): # ignore first two elements of the list but count all other dictionaries
print(AttrObject[vv]['var_name'])
# initiate variable with name, type and dimensions
if (AttrObject[vv]['var_type'] == 'f4'):
MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = TheMDI)
elif (AttrObject[vv]['var_type'] == 'i4'):
if (AttrObject[vv]['var_name'][0] == 'H') | (AttrObject[vv]['var_name'][0] == 't'):
MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = INTMDI)
else:
MyVar = ncfw.createVariable(AttrObject[vv]['var_name'],AttrObject[vv]['var_type'],AttrObject[vv]['var_dims'],fill_value = 0)
# Apply any other attributes
if ('long_name' in AttrObject[vv]):
MyVar.long_name = AttrObject[vv]['long_name']
if ('units' in AttrObject[vv]):
MyVar.units = AttrObject[vv]['units']
# MyVar.add_offset = add_offset
# MyVar.scale_factor = scale_factor
MyVar.reference_period = str(TheClims[0])+', '+str(TheClims[1])
# Provide the data to the variable - depending on howmany dimensions there are
## First change masked array to normal array filled with MDI
if (len(AttrObject[vv]['var_dims']) == 1):
MyVar[:] = DataObject[vv].filled()
if (len(AttrObject[vv]['var_dims']) == 2):
MyVar[:,:] = DataObject[vv].filled()
if (len(AttrObject[vv]['var_dims']) == 3):
MyVar[:,:,:] = DataObject[vv].filled()
ncfw.close()
return # WriteNCCF
#
#*******************************************************
# MAIN
#******************************************************
def main(argv):
# INPUT PARAMETERS AS STRINGS!!!!
var = 'q' # 'q','rh','e','td','tw','t','dpd'
typee = 'IDPHA' # 'PHA','IDPHA','PHADPD'
try:
opts, args = getopt.getopt(argv, "hi:",
["var=","typee="])
except getopt.GetoptError:
print('Usage (as strings) F13_GridHadISDHFLAT.py --var <q> --typee <IDPHA>')
sys.exit(2)
for opt, arg in opts:
if opt == "--var":
try:
var = arg
except:
sys.exit("Failed: var not a string")
elif opt == "--typee":
try:
typee = arg
except:
sys.exit("Failed: typee not a string")
# assert var != '' and typee != '', "Input values not specified."
print(var,typee)
#*******************************************************
# variable specific filepaths and directories
# homogenised data file suffix
DatSuffix = '_anoms'+CLMlab+'_homog.nc'
# DatSuffix = '_anoms'+CLMlab+'_homogJAN2020.nc'
# Set up files for read in and write out
# InList = INDIRLIST+'Posthomog'+typee+var+'_anoms'+CLMlab+'_goodsHadISDH.'+versiondots+'_JAN2020.txt'
InList = INDIRLIST+'Posthomog'+typee+var+'_anoms'+CLMlab+'_goodsHadISDH.'+versiondots+'.txt'
InHom = INDIRHOM+ParamDict[var][2]+'NETCDF/'+ParamDict[var][3]+'DIR/' #***
OutFile = OUTDIRDAT+'HadISDH.land'+ParamDict[var][0]+'.'+versiondots+'_FLATgridHOM5by5_anoms'+CLMlab # will be .nc and .dat
# Time related variables and arrays
clst = MYclst - int(styear)
cled = MYcled - int(styear)
nyrs = (int(edyear) + 1) - int(styear)
nmons = nyrs * 12
# Save netCDF file as days since 01-01-1973 DD-MM-YYYY
# Space related variables and arrays
StLat = -90. + (LatBox / 2.)
StLon = -180. + (LonBox / 2.)
nlats = int(180 / LatBox)
nlons = int(360 / LonBox)
nbox = nlats * nlons
    Lats = StLat + (np.arange(nlats) * 5.) # gridbox centres: -87.5 to 87.5
    Lons = StLon + (np.arange(nlons) * 5.) # gridbox centres: -177.5 to 177.5
# Sort out LatBounds and LonBounds
LatBounds = np.transpose(np.tile(Lats-(LatBox/2.),(2,1)))
LatBounds[:,1] = LatBounds[:,1] + LatBox
LonBounds = np.transpose(np.tile(Lons-(LonBox/2.),(2,1)))
LonBounds[:,1] = LonBounds[:,1] + LonBox
#print('Check Lat and Lon Bounds')
#pdb.set_trace()
# Masked Arrays for grids
GBanoms = npm.masked_equal(np.tile(MDI,(nmons,nlats,nlons)),MDI) # Anomalies NOT lons,lats,time as in IDL
GBabs = npm.copy(GBanoms) # Actuals
GBstaterr = npm.copy(GBanoms) # Station Uncertainty
GBobserr = npm.copy(GBanoms) # Measurement Uncertainty
GBclmerr = npm.copy(GBanoms) # Climatology Uncertainty
GBadjerr = npm.copy(GBanoms) # Adjustment Uncertainty
GBsamperr = npm.copy(GBanoms) # Sampling Uncertainty
GBrbar = npm.masked_equal(np.tile(MDI,(nlats,nlons)),MDI) # intersite correlation within gridbox
GBsbarSQ = npm.copy(GBrbar) # mean station variance within gridbox
GBcomberr = npm.copy(GBanoms) # Total Uncertainty
GBstddevs = npm.copy(GBanoms) # Standard Deviation of Montyhly Mean Anomalies contributing to Gridbox mean
GBclims = npm.masked_equal(np.tile(MDI,(12,nlats,nlons)),MDI) # Monthly mean climatology
GBclimstds = npm.copy(GBclims) # Monthly mean standard deviation of station climatologies within gridbox
GBcounts = npm.masked_equal(np.tile(0,(nlats,nlons)),0) # GB average count - so could be a float but CEIL to nearest integer?
GBstation_counts = npm.masked_equal(np.tile(0,(nmons,nlats,nlons)),0) # actual gridbox station counts over time
# Extra arrays for Tw extremes
if (var in ['tw_max', 'tw_max_95p', 'tw_mean_95p', 'tw_max_ex25', 'tw_max_ex27', 'tw_max_ex29', 'tw_max_ex31', 'tw_max_ex33', 'tw_max_ex35']):
HQ1 = npm.masked_equal(np.tile(INTMDI,(nmons,nlats,nlons)),INTMDI)
HQ2 = npm.copy(HQ1)
HQ3 = npm.copy(HQ1)
HQ4 = npm.copy(HQ1)
HQ5 = npm.copy(HQ1)
HQ6 = npm.copy(HQ1)
HQ7 = npm.copy(HQ1)
HQ8 = npm.copy(GBanoms) # not integer!!!
HQscore = npm.copy(HQ1)
#*****************************************************************************************
# Read in station list
#*****************************************************************************************
# Open and read in station list
MyTypes = ("|U11","float","float","float","|U1","|U2","|U1","|U29")
MyDelimiters = [11,9,10,7,1,2,1,29]
RawData = ReadData(InList,MyTypes,MyDelimiters)
StationListID = np.array(RawData['f0'])
StationListLat = np.array(RawData['f1'])
StationListLon = np.array(RawData['f2'])
StationListElev = np.array(RawData['f3'])
StationListCID = np.array(RawData['f5'])
StationListName = np.array(RawData['f7'])
nstations = len(StationListID)
#*******************************************************************************************
# Loop through each gridbox to create gridbox averages - find stations >= Southern and WEstern boundaries and < northern and eastern boundaries
#******************************************************************************************
# There are no stations at 90.0 North!!!
# Note that the RAW data may have a different pattern of abs and anoms
# This is because RAW anoms are calculated from hourly clim anoms whereas HOM anoms are calculated from abs-clim
# I would like to homogenise the anomalies so that I can bring in this more robust way of calculating the anomalies and then abs = clim+anoms
for lt, Lat in enumerate(Lats):
LatLow = LatBounds[lt,0] # Gridbox Southern most point
        LatHigh = LatBounds[lt,1] # Gridbox Northern most point
for ln, Lon in enumerate(Lons):
LonLow = LonBounds[ln,0] # Gridbox Western most point
            LonHigh = LonBounds[ln,1] # Gridbox Eastern most point
# Locate all stations within this gridbox
LocateStations = np.where((StationListLat >= LatLow) & (StationListLat < LatHigh) & (StationListLon >= LonLow) & (StationListLon < LonHigh))
# Read in any stations within this gridbox
if (len(LocateStations[0]) > 0):
#print('Check station search works')
#pdb.set_trace() # NOT CONVINCED THIS IS WORKING
for s,ss in enumerate(LocateStations[0]):
# read in a masked array of the monthly station data
ncf = nc4.Dataset(InHom+StationListID[ss]+DatSuffix,'r')
# For the first station in the gridbox initialise arrays
if (s == 0):
TMPanoms = npm.reshape(ncf.variables[var+'_anoms'][:],(1,nmons))
TMPabs = npm.reshape(ncf.variables[var+'_abs'][:],(1,nmons))
TMPstaterr = npm.reshape(ncf.variables[var+'_uncertainty'][:],(1,nmons))
TMPobserr = npm.reshape(ncf.variables[var+'_obserr'][:],(1,nmons))
TMPclmerr = npm.reshape(ncf.variables[var+'_climerr'][:],(1,nmons))
TMPadjerr = npm.reshape(ncf.variables[var+'_adjerr'][:],(1,nmons))
TMPadj = npm.reshape(ncf.variables[var+'_adjustments'][:],(1,nmons))
TMPclims = npm.reshape(ncf.variables[var+'_clims'][:],(1,12))
# For station 2+ append
else:
TMPanoms = npm.append(TMPanoms,npm.reshape(ncf.variables[var+'_anoms'][:],(1,nmons)),axis=0)
TMPabs = npm.append(TMPabs,npm.reshape(ncf.variables[var+'_abs'][:],(1,nmons)),axis=0)
TMPstaterr = npm.append(TMPstaterr,npm.reshape(ncf.variables[var+'_uncertainty'][:],(1,nmons)),axis=0)
TMPobserr = npm.append(TMPobserr,npm.reshape(ncf.variables[var+'_obserr'][:],(1,nmons)),axis=0)
TMPclmerr = npm.append(TMPclmerr,npm.reshape(ncf.variables[var+'_climerr'][:],(1,nmons)),axis=0)
TMPadjerr = npm.append(TMPadjerr,npm.reshape(ncf.variables[var+'_adjerr'][:],(1,nmons)),axis=0)
TMPadj = npm.append(TMPadj, | npm.reshape(ncf.variables[var+'_adjustments'][:],(1,nmons)) | numpy.ma.reshape |
#
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, run_module_suite)
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
def change_coefficient(lambda1, mu, nu, h2, k2):
x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
return x, y, z
def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
* ellip_harm(h2, k2, n, p, nu))
def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm_2(h2, k2, n, p, lambda1)
* ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
tol = 1e-8
sum1 = 0
for n in range(20):
xsum = 0
for p in range(1, 2*n+2):
xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
* solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
(ellip_normal(h2, k2, n, p)*(2*n + 1)))
if abs(xsum) < 0.1*tol*abs(sum1):
break
sum1 += xsum
return sum1, xsum
def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
return 1/res
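    # Added note: the check below appears to verify that the ellipsoidal-harmonic
    # expansion of 1/|r1 - r2| (the summation of ellip_harm * ellip_harm_2 terms
    # above, normalised by ellip_normal) reproduces the directly computed Coulomb
    # potential at a few pairs of ellipsoidal coordinates.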
pts = [
(120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
(120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=IntegrationWarning)
for p in pts:
err_msg = repr(p)
exact = potential(*p)
result, last_term = summation(*p)
| assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg) | numpy.testing.assert_allclose |
import numpy as np
def sample_dist(stateCounts,hyperparams,Kextra):
#function dist_struct = sample_dist(stateCounts,hyperparams,Kextra)
numObj = (stateCounts.Ns).shape[2]
Kz_prev = (stateCounts.Ns).shape[0]
Kz = Kz_prev + Kextra
Ks = (stateCounts.Ns).shape[1]
    # Concentration hyperparameters (in the MATLAB original, alpha0 and kappa0 are derived from alpha0+kappa0 and rho0):
alpha0 = hyperparams.alpha0
kappa0 = hyperparams.kappa0
sigma0 = hyperparams.sigma0
N = stateCounts.N # N(i,j) = # z_t = i to z_{t+1}=j transitions. N(Kz+1,i) = 1 for i=z_1.
Ns = stateCounts.Ns # Ns(i,j) = # s_t = j given z_t=i
    dist_struct = [{'pi_z': np.zeros((Kz,Kz)), 'pi_init': np.zeros((1,Kz)), 'pi_s': np.zeros((Kz,Ks))}
                   for _ in range(numObj)]
beta_vec = np.ones((1,Kz))
Ntemp = | np.zeros((Kz+1,Kz)) | numpy.zeros |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2021, MIT Interactive Robotics Group, PI <NAME>.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
All rights reserved.
'''
import casadi as cas
import numpy as np
from casadi import MX, mtimes, vertcat, sum2, sqrt, jacobian
from casadi import reshape as cas_reshape
from scipy import interpolate
import matplotlib.pyplot as plt
import hr_planning
from hr_planning.visualization.utils_visualization import print_FAIL
from hr_planning.utils_interp import waypts_2_pwsplines
use_human_ui_bound = True
# use_human_ui_bound = False
class HumanRefTracker:
def __init__(self, pH_view_pH_pR_min_sep_dist,
pH_min, pH_max, vH_min, vH_max, uH_min, uH_max,
w_ref, w_u, w_move_to_pR, w_avoid_pR, dt, mass):
"""
Class for an MPC to compute human trajectory in continuous state space.
n_pH = dimensionality of human position.
n_vH = dimensionality of human velocity.
n_uH = dimensionality of human control.
Parameters
----------
pH_view_pH_pR_min_sep_dist: float, from the human's view,
the min separation distance that the human
wants to keep from the robot.
The boundary conditions:
pH_max: (n_pH,) np vector = max human position.
pH_min: (n_pH,) np vector = min human position.
vH_max: (n_pH,) np vector = max human velocity.
vH_min: (n_pH,) np vector = min human velocity.
uH_max: (n_pH,) np vector = max human control.
uH_min: (n_pH,) np vector = min human control.
"""
self.pH_view_pH_pR_min_sep_dist = pH_view_pH_pR_min_sep_dist
self.mass = mass
assert mass > 1e-5
self.feas_tol = 1e-6
self.pH_min = pH_min
self.pH_max = pH_max
self.vH_min = vH_min
self.vH_max = vH_max
self.uH_min = uH_min
self.uH_max = uH_max
self.w_ref = w_ref
self.w_move_to_pR = w_move_to_pR
self.w_avoid_pR = w_avoid_pR
self.w_u = w_u
# self.dt = dt of MPC != dt_ref = dt of ref traj.
# We will use piecewise cubics to interlate the ref traj.
self.dt = dt
self.n_pH = self.pH_min.shape[0]
self.n_vH = self.vH_min.shape[0]
self.n_uH = self.uH_min.shape[0]
        # Expect flat (n,) arrays so that tolist() behaves as expected
assert self.n_pH == self.n_vH
assert self.pH_min.shape == (self.n_pH,)
assert self.pH_max.shape == (self.n_pH,)
assert self.vH_min.shape == (self.n_vH,)
assert self.vH_max.shape == (self.n_vH,)
assert self.uH_min.shape == (self.n_uH,)
assert self.uH_max.shape == (self.n_uH,)
assert self.w_ref.shape == (self.n_pH, self.n_pH)
assert self.w_move_to_pR.shape == (self.n_pH, self.n_pH)
assert self.w_avoid_pR.shape == (self.n_pH, self.n_pH)
assert self.w_u.shape == (self.n_uH, self.n_uH)
def check_shapes(self, pHs_1_T=None, vHs_1_T=None, uHs_1_T=None,
pH_0=None, vH_0=None, horizon=None):
"""Ensure all shapes are correct."""
if pHs_1_T is not None:
assert pHs_1_T.shape == (horizon, self.n_pH)
if vHs_1_T is not None:
assert vHs_1_T.shape == (horizon, self.n_vH)
if uHs_1_T is not None:
assert uHs_1_T.shape == (horizon, self.n_uH)
if pH_0 is not None:
assert pH_0.shape == (self.n_pH, 1)
if vH_0 is not None:
assert vH_0.shape == (self.n_vH, 1)
def solve_mpc(self, pH_mode, pHs_0_T_ref, dt_ref, vH_0,
pR_0=None, plot=False):
"""
Solve MPC to find a trajectory for the human.
Parameters
----------
pH_mode: str
pH_indep_pR: H-Indep-R condition in the paper.
pH_avoid_pR: H-Away-R condition in the paper.
pH_move_to_pR: H-To-R condition in the paper.
pHs_0_T_ref: n_time_stepsxn_pH np array, the reference trajectory.
In our work, this comes from a human MDP policy rollout.
dt_ref: the dt of pHs_0_T_ref.
vH_0: n_vHx1 np array = initial human velocity.
pR_0: n_pRx1 np array = initial robot position.
plot: bool, whether to plot or not (for debugging).
Returns
----------
pHs_1_T_opt: horizon x n_pH, human positions computed by the MPC.
vHs_1_T_opt: horizon x n_vH, human velocities computed by the MPC.
uHs_1_T_opt: horizon x n_uH, controls computed by the MPC.
"""
assert pH_mode in ["pH_indep_pR", "pH_avoid_pR", "pH_move_to_pR"]
if pH_mode in ["pH_avoid_pR", "pH_move_to_pR"]:
assert pR_0 is not None
# pR, pH have to be in the same space
pH_0 = pHs_0_T_ref[0, :]
pR_0_sqz = pR_0.squeeze()
assert pR_0_sqz.shape == pH_0.shape
pw_spliness, dts_pw = waypts_2_pwsplines(
wp_traj=pHs_0_T_ref, dt=dt_ref,
degree=3, plot=False)
pH_0 = np.reshape(pHs_0_T_ref[0, :], (self.n_pH, 1))
assert vH_0.shape == (self.n_vH, 1)
for i in range(self.n_pH):
tmp = interpolate.splev(x=[0], tck=pw_spliness[i], ext=2)
assert abs(tmp[0] - pH_0[i, 0]) < 1e-5
end_time_pwc = dts_pw[-1]
# Note: horizon + 1 = the length of traj including x0.
horizon = int(np.ceil(end_time_pwc / self.dt))
assert horizon * self.dt >= end_time_pwc
# Decision variables
pHs_1_T = MX.sym("pHs_1_T", (horizon, self.n_pH))
vHs_1_T = MX.sym("vHs_1_T", (horizon, self.n_vH))
uHs_1_T = MX.sym("uHs_1_T", (horizon, self.n_uH))
# Constraints
g = []
lbg = []
ubg = []
g_name = []
        # We encode the terminal goal in the objective rather than as a constraint.
terminal_goal_cst = False
g_bd, lbg_bd, ubg_bd, g_names_bd = self.generate_boundary_csts_bounds(
pHs_1_T, vHs_1_T, uHs_1_T, horizon, pw_spliness,
terminal_goal_cst=terminal_goal_cst)
g = vertcat(g, g_bd)
lbg += lbg_bd
ubg += ubg_bd
g_name += g_names_bd
# XXX: Collision avoidance with obstacles in the env
# is handled by the MDP policy.
g_dyn, lbg_dyn, ubg_dyn, g_names_dyn = self.generate_dyn_csts(
pHs_1_T, vHs_1_T, uHs_1_T, pH_0, vH_0, horizon)
g = vertcat(g, g_dyn)
lbg += lbg_dyn
ubg += ubg_dyn
g_name += g_names_dyn
assert g.shape[0] == len(lbg) == len(ubg) == len(g_name)
track_traj = True
pTarget = None
pAvoid = None
if pH_mode == "pH_indep_pR":
track_traj = True
pTarget = None
pAvoid = None
elif pH_mode == "pH_move_to_pR":
track_traj = True
pTarget = pR_0
pAvoid = None
elif pH_mode == "pH_avoid_pR":
track_traj = True
pTarget = None
pAvoid = pR_0
else:
raise ValueError()
cost = self.generate_cost_function(
pHs_1_T=pHs_1_T, uHs_1_T=uHs_1_T, horizon=horizon,
pw_spliness=pw_spliness,
track_traj=track_traj, pTarget=pTarget, pAvoid=pAvoid)
opt_vars = vertcat(
pHs_1_T.reshape((-1, 1)),
vHs_1_T.reshape((-1, 1)),
uHs_1_T.reshape((-1, 1)))
prob = {'f': cost, 'x': opt_vars, 'g': g}
if True:
opt = {'error_on_fail': False,
'ipopt': {
'print_level': 0,
# 'hessian_approximation': 'limited-memory',
"max_iter": 400,
"expect_infeasible_problem": "no",
"acceptable_tol": 1e-4,
"acceptable_constr_viol_tol": 1e-5,
"bound_frac": 0.5,
"start_with_resto": "no",
"required_infeasibility_reduction": 0.85,
"acceptable_iter": 8}} # ipopt
solver = cas.nlpsol('solver', 'ipopt', prob, opt)
else:
raise ValueError()
# --------------------
# Solve
end_time = dts_pw[-1]
dts = np.linspace(0, end_time, num=horizon+1, endpoint=True)
# (horizon + 1) x npH
pHs_0_T_init = np.zeros((dts.shape[0], self.n_pH))
for i in range(self.n_pH):
tmp = interpolate.splev(x=dts, tck=pw_spliness[i], ext=2)
assert pHs_0_T_init[:, i].shape == tmp.shape
pHs_0_T_init[:, i] = tmp
pHs_1_T_init = pHs_0_T_init[1:, :]
assert pHs_1_T_init.shape == pHs_1_T.shape
pHs_0_T_minus_1_init = pHs_0_T_init[:-1, :]
uHs_1_T_init = (pHs_1_T_init - pHs_0_T_minus_1_init) / self.dt
assert uHs_1_T_init.shape == uHs_1_T.shape
vHs_1_T_init = np.zeros((horizon, self.n_vH))
assert vHs_1_T_init.shape == vHs_1_T.shape
opt_vars_init = vertcat(
pHs_1_T_init.reshape((-1, 1)),
vHs_1_T_init.reshape((-1, 1)),
uHs_1_T_init.reshape((-1, 1)))
assert opt_vars.shape == opt_vars_init.shape
sol = solver(x0=opt_vars_init, lbg=lbg, ubg=ubg)
x_opt = sol['x']
f_opt = sol['f']
print('f_opt = ', f_opt)
g_res = | np.array(sol["g"]) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plots
plotrange, Btau, Ctau, ellipse, SUE
plotool:
set_clib, set_fig, set_ax,
reset_handles, append_handles, get_handles, set_legend,
plot, eplot, save, show, close
pplot(plotool):
add_plot, add_legend
"""
import warnings
from astropy import units as u
import numpy as np
from scipy import optimize
# import matplotlib as mpl
from matplotlib.ticker import (
NullFormatter, ScalarFormatter, LogFormatter,
LogFormatterExponent, LogFormatterSciNotation,
PercentFormatter
)
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
## Local
from utilities import InputError, merge_aliases
import utilities as UT  # assumed home of the UT.strike() calls below
from arrays import arrayize, ramp
# cmap = mpl.cm.viridis
# norm = mpl.colors.Normalize(vmin=0, vmax=1)
##------------------------------
## Automatic plot range setting
##------------------------------
def plotrange(x,y,xran=None,yran=None,xlog=False,ylog=False,mask=None, \
errx=None,erry=None,xisln=False,yisln=False):
'''
Automatically sets the x and y ranges for (X,Y) plots, based on the entered
data set.
Copyright: <NAME>
'''
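    # Hedged usage sketch (assuming the function returns the computed (xran, yran)):
    #   xran, yran = plotrange(x, y, xlog=True, errx=dx, erry=dy)
    #   ax.set_xlim(xran); ax.set_ylim(yran)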
    # Check the input
N = np.size(x)
if (np.size(y) != N):
UT.strike('plotrange','x and y should have the same size.')
xran = arrayize(xran,N=2)
yran = arrayize(yran,N=2)
# X error bar settings
if errx is not None:
if (np.isscalar(errx)): errx = np.array([errx])
sex = np.shape(errx)
if (len(sex) == 2):
if (sex != (2,N) ): UT.strike('plotrange','wrong size for errx.')
elif (len(sex) == 1):
if (sex != (N,) ): UT.strike('plotrange','wrong size for errx.')
errx = np.array([errx,errx])
else:
errx = np.zeros((2,N))
# Y error bar settings
if erry is not None:
if (np.isscalar(erry)): erry = np.array([erry])
sey = np.shape(erry)
if (len(sey) == 2):
if (sey != (2,N) ): UT.strike('plotrange','wrong size for erry.')
elif (len(sey) == 1):
if (sey != (N,) ): UT.strike('plotrange','wrong size for erry.')
erry = np.array([erry,erry])
else:
erry = np.zeros((2,N))
# Homogenize the arrays and account for errors
xlow = np.array(x,dtype=float).flatten() - errx[0,:]
xhigh = xlow + errx[1,:]
ylow = np.array(y,dtype=float).flatten() - erry[0,:]
yhigh = ylow + erry[1,:]
# Lin/Log
if (xisln): xlow, xhigh = np.exp(xlow), np.exp(xhigh)
if (yisln): ylow, yhigh = | np.exp(ylow) | numpy.exp |
import torch
import torch.autograd
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
import numpy as np
import scipy as sp
import scipy.linalg
from qpsolvers import solve_qp
from core.MADDPG import MADDPGagent
from core.ConstraintNetwork import ConstraintNetwork
import ipdb
class SafeMADDPGagent(MADDPGagent):
def __init__(self, N_agents, state_dim, act_dim,
constraint_networks_dir, constraint_dim,critic_state_mask = [0,1,2,3,-1,-2], col_margin=0.33,
actor_learning_rate=1e-4,
critic_learning_rate=1e-3, gamma=0.99, tau=1e-2, max_memory_size=30000,
hidden_size_critic = [500, 500], hidden_size_actor = [100, 100],
batch_size = 128, soften = True):
# Call MADDPGagent's constructor
super().__init__(N_agents = N_agents, state_dim = state_dim,
act_dim = act_dim, critic_state_mask = critic_state_mask,
actor_learning_rate = actor_learning_rate,
critic_learning_rate = critic_learning_rate, gamma = gamma,
tau = tau, max_memory_size = max_memory_size,
hidden_size_critic = hidden_size_critic, hidden_size_actor = hidden_size_actor,
batch_size = batch_size)
# Extra Params
self.col_margin = col_margin
self.constraint_dim = constraint_dim
self.total_state_dim = self.state_dim * self.N_agents
self.total_constraint_dim = self.constraint_dim * self.N_agents
self.total_action_dim = self.act_dim * self.N_agents
self.constraint_nets = self.total_constraint_dim*[None]
# Initialize constraint networks
for i in range(self.total_constraint_dim):
self.constraint_nets[i] = ConstraintNetwork(self.total_state_dim, self.total_action_dim).double()
self.constraint_nets[i].load_state_dict(torch.load(constraint_networks_dir
+ "constraint_net_" + str(i) + ".pkl"))
        # Define Solver Globally
self.solver_interventions = 0
self.solver_infeasible = 0
# Choose Solver
if soften:
self.correct_actions = self.correct_actions_soften
else:
self.correct_actions = self.correct_actions_hard
self.soften = soften
def reset_metrics(self):
self.solver_interventions = 0
self.solver_infeasible = 0
def get_interventions(self):
return self.solver_interventions
def get_infeasible(self):
return self.solver_infeasible
@torch.no_grad()
def get_action(self, state, constraint):
# Original MADDPG
actions = []
for i in range(self.N_agents):
s = torch.tensor(state[i], dtype=torch.float64)
action = self.actors[i](s).detach()
actions.append(action)
# merge action and state vectors of all agents
action_total = torch.cat(actions).numpy()
return actions
@torch.no_grad()
def correct_actions_hard(self, state, actions, constraint):
actions = np.concatenate(actions)
state = torch.tensor(np.concatenate(state))
# (1) Problem Variables
# Problem specific constants
I = np.eye(self.total_action_dim)
ones = np.ones(self.total_action_dim)
C = np.concatenate(constraint)
# Formulate the constraints using neural networks
G = np.zeros([self.total_action_dim, self.total_action_dim])
for i, net in enumerate(self.constraint_nets):
G[i, :] = net(state).numpy()
# (2) Problem Variables in QP form
# Cost Function
q = -actions
P = np.eye(self.total_action_dim)
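        # Added note: with P = I and q = -actions, the QP objective
        # 0.5 * a^T P a + q^T a equals 0.5 * ||a - actions||^2 up to a constant,
        # i.e. the safety layer projects the unconstrained MADDPG action onto the
        # feasible set defined by the constraint networks and action bounds below.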
# Constraints
A = | np.concatenate([-G, I, -I]) | numpy.concatenate |
#=============================================================================
# This script makes some figures of the Temp12k GMST data, as well as a
# comparison between different Holocene composites.
# author: <NAME>
# date : 6/8/2020
#=============================================================================
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import copy
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
from mpl_toolkits.basemap import Basemap
save_instead_of_plot = True
reference_period = [1800,1899]
### LOAD DATA
data_dir = '/home/mpe32/analysis/14_Holocene_proxies/GMST_paper/data/'
# Load 12k data
handle = xr.open_dataset(data_dir+'final_data/temp12k_alldata.nc',decode_times=False)
gmst_12k_ages = handle['age'].values
gmst_scc_ensemble = handle['scc_globalmean'].values
gmst_dcc_ensemble = handle['dcc_globalmean'].values
gmst_cps_ensemble = handle['cps_globalmean'].values
gmst_pai_ensemble = handle['pai_globalmean'].values
gmst_gam_ensemble = handle['gam_globalmean'].values
handle.close()
# Load 2k GMST data
data_dir_2k = '/projects/pd_lab/data/paleoclimate_reconstructions/GMST_2k_reconstructions/final_recon_results/recons/'
filenames_2k = ['BHM','CPS','DA','M08','OIE','PAI','PCR']
years_2k = np.genfromtxt(data_dir_2k+'BHM.txt',skip_header=1)[:,0]
ages_2k = 1950-years_2k
nfiles = len(filenames_2k)
ntime = 2000
niter = 1000
data_gmst_all = np.zeros((nfiles,ntime,niter)); data_gmst_all[:] = np.nan
for i,filename in enumerate(filenames_2k):
print(filename)
data_gmst_all[i,:,:] = np.genfromtxt(data_dir_2k+filename+'.txt',skip_header=1)[:,1:]
# Load the ERA-20C reanalysis data
data_dir_era20c = '/projects/pd_lab/data/modern_datasets/ERA20C/'
handle_era20c = xr.open_dataset(data_dir_era20c+'t2m_annual_1900_to_2010_era20c.nc',decode_times=False)
tas_era20c = handle_era20c['t2m'].values
lon_era20c = handle_era20c['lon'].values
lat_era20c = handle_era20c['lat'].values
years_era20c = handle_era20c['years'].values
handle_era20c.close()
ages_era20c = 1950-years_era20c
# Load other Holocene reconstructions
data_dir_recons = '/projects/pd_lab/data/paleoclimate_reconstructions/Holocene_reconstructions/'
data_shakun = pd.ExcelFile(data_dir_recons+'Shakun_etal_2012/41586_2012_BFnature10915_MOESM60_ESM.xls').parse('TEMPERATURE STACKS').values
ages_shakun = (data_shakun[:,0]*1000).astype(np.float)
globalmean_shakun = data_shakun[:,1].astype(np.float)
onesigma_shakun = data_shakun[:,2].astype(np.float)
data_marcott = pd.ExcelFile(data_dir_recons+'Marcott_etal_2013/Marcott.SM.database.S1.xlsx').parse('TEMPERATURE STACKS').values
ages_marcott = data_marcott[5:,2].astype(np.float)
globalmean_marcott = data_marcott[5:,3].astype(np.float)
onesigma_marcott = data_marcott[5:,4].astype(np.float)
# Load the proxy locations
# Load the Temp12k proxy metadata
data_dir_proxies_temp12k = '/projects/pd_lab/data/proxies/Holocene/database_v1/'
metadata_temp12k = pd.ExcelFile(data_dir_proxies_temp12k+'Temp12k_v1_record_list.xlsx').parse('Suppl_Table_2').values
sitenames_all_temp12k = metadata_temp12k[2:798,0]
lat_all_temp12k = metadata_temp12k[2:798,1].astype(np.float)
lon_all_temp12k = metadata_temp12k[2:798,2].astype(np.float)
# Get the proxy lat and lon metadata for the other Holocene reconstructions
data_dir_proxies_recons = '/projects/pd_lab/data/paleoclimate_reconstructions/Holocene_reconstructions/'
metadata_shakun = pd.ExcelFile(data_dir_proxies_recons+'Shakun_etal_2012/41586_2012_BFnature10915_MOESM60_ESM.xls').parse('METADATA').values
sitenames_all_shakun = metadata_shakun[:,1]
lat_all_shakun = metadata_shakun[:,4].astype(np.float)
lon_all_shakun = metadata_shakun[:,5].astype(np.float)
metadata_marcott = pd.ExcelFile(data_dir_proxies_recons+'Marcott_etal_2013/Marcott.SM.database.S1.xlsx').parse('METADATA').values
sitenames_all_marcott = metadata_marcott[2:78,1]
lat_all_marcott = metadata_marcott[2:78,4]
lon_all_marcott = metadata_marcott[2:78,5]
# One of Marcott's proxies uses two locations. Plot both locations
index_proxy = np.where(lat_all_marcott == '71.3/ 81.0')[0]
lat_all_marcott[index_proxy] = 71.3
lon_all_marcott[index_proxy] = 26.7
lat_all_marcott = np.insert(lat_all_marcott,index_proxy+1,81.0)
lon_all_marcott = np.insert(lon_all_marcott,index_proxy+1,-71)
sitenames_all_marcott = np.insert(sitenames_all_marcott,index_proxy+1,'Agassiz & Renland, other location')
lat_all_marcott = lat_all_marcott.astype(np.float)
lon_all_marcott = lon_all_marcott.astype(np.float)
# A function to remove multiple proxy entries at the same location
def proxy_locations(sitenames_all,lat_all,lon_all):
#
n_rows = len(sitenames_all)
sitenames_chosen = []
for i in range(n_rows):
if (sitenames_all[i] not in sitenames_chosen) or (sitenames_all[i] == '-'):
sitenames_chosen.append(sitenames_all[i])
else:
lat_all[i] = np.nan
lon_all[i] = np.nan
#
# Get rid of nans
valid_data = np.isfinite(lat_all) & np.isfinite(lon_all)
sitenames_all = sitenames_all[valid_data]
lat_all = lat_all[valid_data]
lon_all = lon_all[valid_data]
#
return sitenames_all,lat_all,lon_all
# Remove multiple proxy entries at the same location
sitenames_all_temp12k,lat_all_temp12k,lon_all_temp12k = proxy_locations(sitenames_all_temp12k,lat_all_temp12k,lon_all_temp12k)
sitenames_all_shakun, lat_all_shakun, lon_all_shakun = proxy_locations(sitenames_all_shakun, lat_all_shakun, lon_all_shakun)
sitenames_all_marcott,lat_all_marcott,lon_all_marcott = proxy_locations(sitenames_all_marcott,lat_all_marcott,lon_all_marcott)
### CALCULATIONS
# Remove a reference period from each 2k method
indices_ref_2k = np.where((years_2k >= reference_period[0]) & (years_2k <= reference_period[1]))[0]
for i in range(data_gmst_all.shape[0]):
value_ref = np.mean(np.median(data_gmst_all[i,indices_ref_2k,:],axis=1),axis=0)
data_gmst_all[i,:,:] = data_gmst_all[i,:,:]-value_ref
# Put all methods on the same axis
data_gmst_all_swap = np.swapaxes(data_gmst_all,0,1)
data_gmst_all_2d = np.reshape(data_gmst_all_swap,(ntime,nfiles*niter))
# This function takes a time-lat-lon variable and computes the global-mean.
def global_mean(variable,lats):
variable_zonal = np.nanmean(variable,axis=2)
lat_weights = np.cos(np.radians(lats))
variable_global = np.average(variable_zonal,axis=1,weights=lat_weights)
return variable_global
# Compute the annual-mean of the ERA-20C data
tas_era20c_globalmean = global_mean(tas_era20c,lat_era20c)
# Find the difference in means between two time series during their period of overlap
def mean_of_overlap(ts1,ages1,ts2,ages2):
overlap_age_min = np.max([np.min(ages1),np.min(ages2)])
overlap_age_max = np.min([np.max(ages1),np.max(ages2)])
ts1_mean_overlap_period = np.mean(ts1[np.where((ages1 >= overlap_age_min) & (ages1 <= overlap_age_max))[0]])
ts2_mean_overlap_period = np.mean(ts2[np.where((ages2 >= overlap_age_min) & (ages2 <= overlap_age_max))[0]])
difference_in_means = ts1_mean_overlap_period - ts2_mean_overlap_period
return difference_in_means
# Scale the mean of the ERA20C value to the 2k composite using their overlapping segments
difference_in_means_era20c = mean_of_overlap(tas_era20c_globalmean,ages_era20c,np.median(data_gmst_all_2d,axis=1),ages_2k)
tas_era20c_globalmean = tas_era20c_globalmean - difference_in_means_era20c
# Combine all 12k methods into one array
gmst_all_ensemble = np.concatenate((gmst_scc_ensemble,gmst_dcc_ensemble,gmst_cps_ensemble,gmst_pai_ensemble,gmst_gam_ensemble),axis=1)
gmst_all_except_cps = np.concatenate((gmst_scc_ensemble,gmst_dcc_ensemble,gmst_pai_ensemble,gmst_gam_ensemble),axis=1)
# Test for NaNs
nantest_ensemble = copy.deepcopy(gmst_all_ensemble)
nantest_ensemble[~np.isnan(nantest_ensemble)] = 0
nantest_ensemble[np.isnan(nantest_ensemble)] = 1
nan_counts = np.sum(nantest_ensemble,axis=0)
print('Extra NaNs at indices:',str(np.where(nan_counts > 2)[0])) # GAM has NaN for 2 values. This looks for other NaNs.
#plt.pcolormesh(nantest_ensemble)
# Compute the values at 6ka vs 1800-1900
indices_6k = np.where((gmst_12k_ages >= 5500) & (gmst_12k_ages <= 6500))[0] # 11 values
indices_1800_1900 = np.where((gmst_12k_ages >= 50) & (gmst_12k_ages <= 150))[0] # 1 value
values_6ka = np.mean(gmst_all_ensemble[indices_6k,:], axis=0) - np.mean(gmst_all_ensemble[indices_1800_1900,:], axis=0)
values_6ka_scc = np.mean(gmst_scc_ensemble[indices_6k,:], axis=0) - np.mean(gmst_scc_ensemble[indices_1800_1900,:], axis=0)
values_6ka_dcc = np.mean(gmst_dcc_ensemble[indices_6k,:], axis=0) - np.mean(gmst_dcc_ensemble[indices_1800_1900,:], axis=0)
values_6ka_cps = np.mean(gmst_cps_ensemble[indices_6k,:], axis=0) - np.mean(gmst_cps_ensemble[indices_1800_1900,:], axis=0)
values_6ka_pai = np.mean(gmst_pai_ensemble[indices_6k,:], axis=0) - np.mean(gmst_pai_ensemble[indices_1800_1900,:], axis=0)
values_6ka_gam = np.mean(gmst_gam_ensemble[indices_6k,:], axis=0) - np.mean(gmst_gam_ensemble[indices_1800_1900,:], axis=0)
values_6ka_no_cps = np.mean(gmst_all_except_cps[indices_6k,:],axis=0) - np.mean(gmst_all_except_cps[indices_1800_1900,:],axis=0)
# List the median and 90% ranges
print('6k - present, median and 90% ranges')
print('SCC, median: '+str('{:.2f}'.format(np.median(values_6ka_scc)))+', range: '+str('{:.2f}'.format(np.percentile(values_6ka_scc,5)))+' - '+str('{:.2f}'.format(np.percentile(values_6ka_scc,95))))
print('DCC, median: '+str('{:.2f}'.format(np.median(values_6ka_dcc)))+', range: '+str('{:.2f}'.format(np.percentile(values_6ka_dcc,5)))+' - '+str('{:.2f}'.format(np.percentile(values_6ka_dcc,95))))
print('GAM, median: '+str('{:.2f}'.format(np.median(values_6ka_gam)))+', range: '+str('{:.2f}'.format(np.percentile(values_6ka_gam,5)))+' - '+str('{:.2f}'.format(np.percentile(values_6ka_gam,95))))
print('CPS, median: '+str('{:.2f}'.format(np.median(values_6ka_cps)))+', range: '+str('{:.2f}'.format(np.percentile(values_6ka_cps,5)))+' - '+str('{:.2f}'.format(np.percentile(values_6ka_cps,95))))
print('PAI, median: '+str('{:.2f}'.format( | np.median(values_6ka_pai) | numpy.median |
import kinematics as k
import numpy as np
def test_construct_and_sample_from_cov():
N = 2
ra_err, dec_err, plx_err = [np.ones(N)*.1 for i in range(3)]
pmra_err, pmdec_err = [np.ones(N)*.1 for i in range(2)]
ra_dec_corr, ra_plx_corr = np.ones(N)*.2, np.ones(N)*.3
ra_pmra_corr, ra_pmdec_corr = np.ones(N)*.4, np.ones(N)*.5
dec_plx_corr, dec_pmra_corr = np.ones(N)*.6, np.ones(N)*.7
dec_pmdec_corr = np.ones(N)*.8
plx_pmra_corr, plx_pmdec_corr = np.ones(N)*.9, np.ones(N)*.15
pmra_pmdec_corr = np.ones(N)*.25
cov_list = [ra_err, dec_err, plx_err, pmra_err, pmdec_err, ra_dec_corr,
ra_plx_corr, ra_pmra_corr, ra_pmdec_corr, dec_plx_corr,
dec_pmra_corr, dec_pmdec_corr, plx_pmra_corr, plx_pmdec_corr,
pmra_pmdec_corr]
cov1 = k.construct_cov(cov_list, 5)
assert | np.shape(cov1) | numpy.shape |
import datetime
import os
import argopy
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from argopy import DataFetcher as ArgoDataFetcher
from argopy import IndexFetcher as ArgoIndexFetcher
from dmelon.ocean.argo import build_dl, launch_shell
from dmelon.utils import check_folder, findPointsInPolys
from scipy import interpolate
# ARGOpy global options
argopy.set_options(src="localftp", local_ftp="/data/datos/ARGO/gdac")
argopy.set_options(mode="expert")
# ARGO data fetcher
argo_loader = ArgoDataFetcher(parallel="process", progress=True)
index_loader = ArgoIndexFetcher()
# Data loading parameters
argo_region = [-90, -70, -20, -2.5]
today = datetime.datetime.today()
idate = today - datetime.timedelta(days=20)
endate = today + datetime.timedelta(days=15)
date_range = [
f"{idate:%Y-%m-%d}",
f"{endate:%Y-%m-%d}",
]
argo_df = (
index_loader.region(argo_region + date_range).to_dataframe().sort_values("date")
)
# Read and mask with 200nm shapefile
mask200 = gpd.read_file("/data/users/grivera/Shapes/NEW_MASK/200mn_full.shp")
pointInPolys = findPointsInPolys(argo_df, mask200)
# on-demand update of floats to be used
launch_shell(build_dl(pointInPolys))
# ARGO DATA LOADING
argo_codes = (
pointInPolys.sort_values("date")
.groupby("wmo")
.nth(-1)
.sort_values("latitude", ascending=False)
.index.tolist()
)
ds = argo_loader.float(argo_codes).to_xarray()
ds_profile = ds.argo.point2profile().sortby("TIME")
# Apply QC to variables of interest
ds_profile_qc = ds_profile.copy()
ds_profile_qc["PSAL"] = ds_profile.PSAL.where(ds_profile.PSAL_QC == 1)
ds_profile_qc["TEMP"] = ds_profile.TEMP.where(ds_profile.TEMP_QC == 1)
ds_profile_qc["PRES"] = ds_profile.PRES.where(ds_profile.PRES_QC == 1)
# Climatology loading
godas_clim_leap = xr.open_dataset(
"/data/users/grivera/GODAS/clim/godas_clim_leap.nc"
).pottmp
godas_clim_normal = xr.open_dataset(
"/data/users/grivera/GODAS/clim/godas_clim_normal.nc"
).pottmp
soda_clim_leap = xr.open_dataset("/data/users/grivera/SODA/clim/soda_clim_leap.nc").temp
soda_clim_normal = xr.open_dataset(
"/data/users/grivera/SODA/clim/soda_clim_normal.nc"
).temp
imarpe_clim_leap = xr.open_dataset(
"/data/users/grivera/IMARPE/clim/imarpe_clim_leap.nc"
).temperature
imarpe_clim_normal = xr.open_dataset(
"/data/users/grivera/IMARPE/clim/imarpe_clim_normal.nc"
).temperature
CLIM = {
"leap": {
"godas": godas_clim_leap,
"imarpe": imarpe_clim_leap,
"soda": soda_clim_leap,
},
"normal": {
"godas": godas_clim_normal,
"imarpe": imarpe_clim_normal,
"soda": soda_clim_normal,
},
}
# Plot parameters
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import gsw
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from cartopy.io.shapereader import Reader
from dmelon.plotting import format_latlon
from matplotlib.collections import LineCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from shapely.geometry import LineString, Point
peru_line = gpd.read_file(
"/data/users/grivera/Shapes/ESRI/baseline_diss_lines.shp", crs="EPSG:4326"
)
def redistribute_vertices(geom, distance, xarr):
if geom.geom_type == "LineString":
num_vert = int(round(geom.length / distance))
if num_vert == 0:
num_vert = 1
points = [
geom.interpolate(float(n) / num_vert, normalized=True)
for n in range(num_vert + 1)
]
points = [
Point(point.x - xarr.sel(lat=point.y, method="nearest").data, point.y)
for point in points
]
return LineString(points)
elif geom.geom_type == "MultiLineString":
parts = [redistribute_vertices(part, distance, xarr) for part in geom]
return type(geom)([p for p in parts if not p.is_empty])
else:
raise ValueError("unhandled geometry %s", (geom.geom_type,))
lamb = (
3
* np.abs(
gsw.geostrophy.f(
xr.DataArray(np.nan, coords=[("lat", np.arange(0, -20, -0.1))]).lat
)
)
** -1
)
lamb = lamb / 111e3
ross = peru_line.geometry.apply(
redistribute_vertices, distance=0.1, xarr=lamb.where(np.abs(lamb.lat) > 5)
)
HQ_SA = cfeature.NaturalEarthFeature(
category="cultural",
name="admin_0_countries",
scale="10m",
facecolor="white", # cfeature.COLORS['land'],
edgecolor="black",
linewidth=1.5,
)
c50 = "#C8C8C8"
c100 = "#DCDCDC"
c200 = "#F0F0F0"
shape50 = cfeature.ShapelyFeature(
Reader("/data/users/grivera/Shapes/NEW_MASK/50mn_full.shp").geometries(),
ccrs.PlateCarree(),
facecolor=c50,
)
shape100 = cfeature.ShapelyFeature(
Reader("/data/users/grivera/Shapes/NEW_MASK/100mn_full.shp").geometries(),
ccrs.PlateCarree(),
facecolor=c100,
)
shape200 = cfeature.ShapelyFeature(
Reader("/data/users/grivera/Shapes/NEW_MASK/200mn_full.shp").geometries(),
ccrs.PlateCarree(),
facecolor=c200,
)
patch50 = Patch(facecolor=c50, label="0-50nm", edgecolor="gray")
patch100 = Patch(facecolor=c100, label="50-100nm", edgecolor="gray")
patch200 = Patch(facecolor=c200, label="100-200nm", edgecolor="gray")
ross_line = Line2D(
[], [], color="k", ls="--", label="Rossby radius\nof deformation", lw=0.8
)
lpos = Line2D(
[0],
[0],
marker=r"$\bigodot$",
color="none",
label="Latest position",
markerfacecolor="none",
markeredgecolor="k",
markersize=10,
)
rgbset2 = {
0: "#FFFFFF",
21: "#FFFAAA",
22: "#FFE878",
23: "#FFC03C",
24: "#FFA000",
25: "#FF6000",
26: "#FF3200",
27: "#E11400",
28: "#C00000",
29: "#A50000",
41: "#E1FFFF",
42: "#B4F0FA",
43: "#96D2FA",
44: "#78B9FA",
45: "#50A5F5",
46: "#3C96F5",
47: "#2882F0",
48: "#1E6EEB",
49: "#1464D2",
}
ccols = [49, 48, 47, 46, 44, 42, 41, 0, 21, 22, 24, 26, 27, 28, 29]
cmap = mpl.colors.ListedColormap([rgbset2[cnum] for cnum in ccols[1:-1]]).with_extremes(
over=rgbset2[ccols[-1]], under=rgbset2[ccols[0]]
)
clevs = [-6, -5, -4, -3, -2, -1, -0.5, 0.5, 1, 2, 3, 4, 5, 6]
norm = mpl.colors.BoundaryNorm(clevs, cmap.N)
month_locator = mdates.MonthLocator()
month_formatter = mdates.DateFormatter("%^b")
year_locator = mdates.YearLocator()
year_formatter = mdates.DateFormatter("%Y")
depth = 500
SORTED_DIR = os.path.join("/data/users/service/ARGO/FLOATS/output/floats", "sorted")
check_folder(SORTED_DIR)
for order, argo_code in enumerate(argo_codes):
print(f"Processing ARGO float #{argo_code}")
OUT_DIR = os.path.join(
"/data/users/service/ARGO/FLOATS/output/floats", str(argo_code)
)
check_folder(OUT_DIR)
_argo_profile = (
ds_profile_qc.copy()
.where(ds_profile.PLATFORM_NUMBER == argo_code)
.dropna(dim="N_PROF", how="all")
)
level = np.arange(0, 2100, 1)
_cont = []
for _prof_num in _argo_profile.N_PROF.data:
_sel_prof = _argo_profile.sel(N_PROF=_prof_num)
_non_nan_index = _sel_prof.PRES.dropna("N_LEVELS").N_LEVELS
_PRES = _sel_prof.PRES.sel(N_LEVELS=_non_nan_index).data
_TEMP = _sel_prof.TEMP.sel(N_LEVELS=_non_nan_index).data
_PSAL = _sel_prof.PSAL.sel(N_LEVELS=_non_nan_index).data
if _PRES[0] < 10:
_PRES = np.concatenate(([0], _PRES))
_TEMP = np.concatenate((_TEMP[[0]], _TEMP))
_PSAL = np.concatenate((_PSAL[[0]], _PSAL))
_LEVEL = gsw.z_from_p(_PRES, np.full_like(_PRES, _sel_prof.LATITUDE.data)) * -1
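        # Added note: gsw.z_from_p returns height (negative below the sea surface),
        # so the -1 factor converts it to a positive depth in metres before
        # interpolating the profile onto the regular 'level' grid.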
TEMP_func = interpolate.interp1d(_LEVEL, _TEMP, bounds_error=False)
PSAL_func = interpolate.interp1d(_LEVEL, _PSAL, bounds_error=False)
TEMP_interp = TEMP_func(level)
PSAL_interp = PSAL_func(level)
_interp = xr.Dataset(
data_vars=dict(
TEMP=(["level"], TEMP_interp),
PSAL=(["level"], PSAL_interp),
),
coords=dict(
level=(["level"], level),
TIME=_sel_prof.TIME,
),
)
if _sel_prof.TIME.dt.is_leap_year.data:
_CLIM = CLIM["leap"]
else:
_CLIM = CLIM["normal"]
for source, _clim in _CLIM.items():
_clim_sel = (
_clim.sel(
time=f"{_clim.time.dt.year[0].data}-{pd.to_datetime(_sel_prof.TIME.data):%m-%d}",
)
.ffill(dim="lat")
.sel(
lat=_sel_prof.LATITUDE,
lon=np.where(
_sel_prof.LONGITUDE < 0,
_sel_prof.LONGITUDE + 360,
_sel_prof.LONGITUDE,
),
method="nearest",
)
.load()
)
if _clim_sel.level[0].data != 0:
_clim_sel = xr.DataArray(
np.concatenate((_clim_sel[[0]], _clim_sel)),
coords=[("level", np.concatenate(([0], _clim_sel.level)))],
)
TEMP_anom = _interp.TEMP - _clim_sel.interp_like(_interp.TEMP)
_interp[f"TEMP_anom_{source}"] = TEMP_anom
_cont.append(_interp)
argo_float_interp = xr.concat(_cont, dim="TIME").interpolate_na(dim="level").load()
argo_float_interp.to_netcdf(
f"/data/users/service/ARGO/FLOATS/ncstore/{argo_code}.nc"
)
sdate = pd.to_datetime(argo_float_interp.TIME.max().data) - pd.Timedelta("365D")
_data = (
argo_float_interp.where(argo_float_interp.TIME > sdate)
.dropna(dim="TIME", how="all")
.rolling(TIME=3, center=True, min_periods=1)
.mean()
)
lat_center = (_data.LATITUDE.min().data + _data.LATITUDE.max().data) / 2
lon_center = (_data.LONGITUDE.min().data + _data.LONGITUDE.max().data) / 2
lat_delta = (_data.LATITUDE.max().data - _data.LATITUDE.min().data) / 2
lon_delta = (_data.LONGITUDE.max().data - _data.LONGITUDE.min().data) / 2
if lat_delta > lon_delta:
lat_delta += 1
lon_delta = lat_delta / 1.35
else:
lon_delta += 1
lat_delta = lon_delta * 1.35
bnds = [
lon_center - lon_delta,
lon_center + lon_delta,
lat_center - lat_delta,
lat_center + lat_delta,
]
plt.rcParams["font.family"] = "monospace"
fig = plt.figure(constrained_layout=False, figsize=(11, 8.5), dpi=300)
gs = fig.add_gridspec(
nrows=3, ncols=2, wspace=0.12, hspace=0.21, width_ratios=[1, 1]
)
## TOP FIGURE ##############################################################
ax0 = fig.add_subplot(gs[0, 0])
cont = _data.TEMP_anom_godas.plot.contourf(
ax=ax0,
y="level",
yincrease=False,
cmap=cmap,
norm=norm,
extend="both",
add_colorbar=False,
)
_cont0 = _data.TEMP_anom_godas.plot.contour(
ax=ax0,
y="level",
yincrease=False,
norm=norm,
add_colorbar=False,
colors="k",
linewidths=0.5,
)
ax0.set_ylim(depth, 0)
ax0.set_title("Sea Temperature profile anomalies (°C)")
ax0.set_ylabel("Depth (m)")
ax0.set_xlabel("")
## MID FIGURE ##############################################################
ax1 = fig.add_subplot(gs[1, 0], sharey=ax0, sharex=ax0)
_data.TEMP_anom_soda.plot.contourf(
ax=ax1,
y="level",
yincrease=False,
cmap=cmap,
norm=norm,
extend="both",
add_colorbar=False,
)
_cont1 = _data.TEMP_anom_soda.plot.contour(
ax=ax1,
y="level",
yincrease=False,
norm=norm,
add_colorbar=False,
colors="k",
linewidths=0.5,
)
ax1.set_title("")
ax1.set_ylabel("Depth (m)")
ax1.set_xlabel("")
## BOTTOM FIGURE ##############################################################
ax2 = fig.add_subplot(gs[2, 0], sharey=ax0, sharex=ax0)
_data.TEMP_anom_imarpe.plot.contourf(
ax=ax2,
y="level",
yincrease=False,
cmap=cmap,
norm=norm,
extend="both",
add_colorbar=False,
)
_cont2 = _data.TEMP_anom_imarpe.plot.contour(
ax=ax2,
y="level",
yincrease=False,
norm=norm,
add_colorbar=False,
colors="k",
linewidths=0.5,
)
ax2.tick_params(axis="x", labelrotation=0) # , which="major", pad=10)
ax2.set_title("")
ax2.set_ylabel("Depth (m)")
ax2.set_xlabel("")
## MAP ##############################################################
ax3 = fig.add_subplot(gs[:, 1], projection=ccrs.PlateCarree())
points = np.array([_data.LONGITUDE.data, _data.LATITUDE.data]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
vals = | np.linspace(0, 100, _data.LONGITUDE.size) | numpy.linspace |
import numpy as np
import sys
pp = "/Users/andres.perez/source/parametric_spatial_audio_processing"
sys.path.append(pp)
import parametric_spatial_audio_processing as psa
import matplotlib.pyplot as plt
import scipy.stats
from utils import *
from file_utils import build_result_dict_from_metadata_array, build_metadata_result_array_from_event_dict
from seld_dcase2019_master.metrics.evaluation_metrics import distance_between_spherical_coordinates_rad
def preprocess(data, sr, params):
"""
Assert first order ambisonics and dimensionality order.
Compute Stft.
:param data: np.array (num_frames, num_channels)
:param sr: sampling rate
:param params: params dict
:return: psa.Stft instance
"""
num_frames = np.shape(data)[0]
num_channels = np.shape(data)[1]
assert num_channels == 4
start_frame = 0
if params['quick_test']:
end_frame = int(np.ceil(sr * params['quick_test_file_duration']))
else:
end_frame = num_frames
window_size = params['window_size']
window_overlap = params['window_overlap']
nfft = params['nfft']
x = psa.Signal(data[start_frame:end_frame].T, sr, 'acn', 'n3d')
X = psa.Stft.fromSignal(x,
window_size=window_size,
window_overlap=window_overlap,
nfft=nfft
).limit_bands(params['fmin'], params['fmax'])
if params['plot']:
psa.plot_magnitude_spectrogram(X)
return X
def estimate_doa(data, sr, params):
"""
Given an input audio, get the most significant tf bins per frame
:param data: np.array (num_frames, num_channels)
:param sr: sampling rate
:param params: params dict
:return: an array in the form :
[frame, [class_id, azi, ele],[class_id, azi, ele]... ]
without repeated frame instances, quantized at hop_size,
containing all valid tf bins doas.
"""
### Preprocess data
X = preprocess(data, sr, params)
N = X.get_num_time_bins()
K = X.get_num_frequency_bins()
r = params['r']
### Diffuseness mask
doa = psa.compute_DOA(X)
directivity = X.compute_ita_re(r=r)
directivity_mask = directivity.compute_mask(th=params['directivity_th'])
### Energy density mask
e = psa.compute_energy_density(X)
block_size = params['energy_density_local_th_size']
tl = e.compute_threshold_local(block_size=block_size)
e_mask = e.compute_mask(tl)
### DOA Variance mask (computed on azimuth variance)
vicinity_radius = params['doa_std_vicinity_radius']
if np.size(vicinity_radius) == 1:
# Square!
r_k = vicinity_radius
r_n = vicinity_radius
elif np.size(vicinity_radius) == 2:
# Rectangle! [k, n]
r_k = vicinity_radius[0]
r_n = vicinity_radius[1]
else:
        raise ValueError('doa_std_vicinity_radius must have 1 or 2 elements')
# TODO: optimize the for loop
std = np.zeros((K, N))
doa0_k_array = []
for r in range(-r_n,r_n+1):
doa0_k_array.append(np.roll(doa.data[0,:,:],r))
doa0_k = np.stack(doa0_k_array, axis=0)
for k in range(r_k, K - r_k):
std[k, :] = scipy.stats.circstd(doa0_k[:, k - r_k:k + r_k + 1, :], high=np.pi, low=-np.pi, axis=(0, 1))
# not optimized version...
# for k in range(r_k, K-r_k):
# for n in range(r_n, N-r_n):
# # azi
# std[k, n] = scipy.stats.circstd(doa.data[0, k-r_k:k+r_k+1, n-r_n:n+r_n+1], high=np.pi, low=-np.pi)
# # ele
# # std[k, n] = np.std(doa.data[1, k-r_k:k+r_k+1, n-r_n:n+r_n+1])
# Edges: largest value
std_max = np.max(std)
std[0:r_k, :] = std_max
std[K-r_k:K, :] = std_max
std[:, 0:r_n] = std_max
std[:, N - r_n:N] = std_max
# Scale values to min/max
std_scaled = std / std_max
# Invert values
std_scaled_inv = 1 - std_scaled
# Compute mask
doa_std = psa.Stft(doa.t, doa.f, std_scaled_inv, doa.sample_rate)
doa_std_mask = doa_std.compute_mask(th=params['doa_std_th'])
mask_all = doa_std_mask.apply_mask(directivity_mask).apply_mask(e_mask)
doa_th = doa.apply_mask(mask_all)
## Median average
median_averaged_doa = np.empty(doa.data.shape)
median_averaged_doa.fill(np.nan)
vicinity_size = (2*r_k-1) + (2*r_n-1)
doa_median_average_nan_th = params['doa_median_average_nan_th']
vicinity_radius = params['median_filter_vicinity_radius']
if np.size(vicinity_radius) == 1:
# Square!
r_k = vicinity_radius
r_n = vicinity_radius
elif np.size(vicinity_radius) == 2:
# Rectangle! [k, n]
r_k = vicinity_radius[0]
r_n = vicinity_radius[1]
else:
        raise ValueError('median_filter_vicinity_radius must have 1 or 2 elements')
# TODO: optimize the for loop
for k in range(r_k, K - r_k):
for n in range(r_n, N - r_n):
azis = discard_nans(doa_th.data[0, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
if azis.size > vicinity_size * doa_median_average_nan_th:
median_averaged_doa[0, k, n] = circmedian(azis, 'rad')
eles = discard_nans(doa_th.data[1, k - r_k:k + r_k + 1, n - r_n:n + r_n + 1].flatten())
if eles.size > vicinity_size * doa_median_average_nan_th:
median_averaged_doa[1, k, n] = np.median(eles)
doa_th_median = psa.Stft(doa.t, doa.f, median_averaged_doa, doa.sample_rate)
## Plot stuff
if params['plot']:
psa.plot_doa(doa, title='doa')
psa.plot_doa(doa.apply_mask(e_mask), title='e mask')
psa.plot_doa(doa.apply_mask(directivity_mask), title='directivity mask')
psa.plot_doa(doa.apply_mask(doa_std_mask), title='doa std mask')
psa.plot_doa(doa_th, title='doa mask all')
psa.plot_doa(doa_th_median, title='doa circmedian')
plt.show()
## Fold values into a vector
# Get a list of bins with the position estimation according to the selected doa_method
# TODO: OPTIMIZE
active_windows = []
position = []
for n in range(N):
azi = discard_nans(doa_th_median.data[0, :, n])
ele = discard_nans(doa_th_median.data[1, :, n])
if np.size(azi) < params['num_min_valid_bins']:
# Empty! not enough suitable doa values in this analysis window
pass
else:
active_windows.append(n)
position.append([rad2deg(azi), rad2deg(ele)])
# result = [bin, class_id, azi, ele] with likely repeated bin instances
result = []
label = params['default_class_id']
for window_idx, window in enumerate(active_windows):
num_bins = np.shape(position[window_idx])[1]
for b in range(num_bins):
azi = position[window_idx][0][b]
ele = position[window_idx][1][b]
result.append([window, label, azi, ele])
# Perform the window transformation by averaging within frame
## TODO: assert our bins are smaller than required ones
current_window_hop = (params['window_size'] - params['window_overlap']) / float(sr)
window_factor = params['required_window_hop'] / current_window_hop
# Since frames are ordered (at least they should), we can optimise that a little bit
last_frame = -1
# result_quantized = [frame, [class_id, azi, ele],[class_id, azi, ele]... ] without repeated bin instances
result_quantized = []
for row in result:
frame = row[0]
new_frame = int(np.floor(frame / window_factor))
if new_frame == last_frame:
result_quantized[-1].append([row[1], row[2], row[3]])
else:
result_quantized.append([new_frame, [row[1], row[2], row[3]]])
last_frame = new_frame
return result_quantized
# Assumes overlapping, compute (1,2)-Kmeans on each segment
def group_events(result_quantized, params):
"""
Segmentate an array of doas into events
:param result_quantized: an array containing frames and doas
in the form [frame, [class_id, azi, ele],[class_id, azi, ele]... ]
without repeated frame instances, with ordered frames
:param params: params dict
:return: metadata_result_array, result_dict
metadata_result_array: array with one event per row, in the form
[sound_event_recording,start_time,end_time,ele,azi,dist]
result_dict: dict with one frame per key, in the form:
{frame: [class_id, azi, ele] or [[class_id1, azi1, ele1], [class_id2, azi2, ele2]]}
"""
## Generate result_averaged_dict: grouping doas per frame into 1 or 2 clusters
## result_averaged_dict = {frame: [label, azi, ele] or [[label, azi1, ele1],label, azi2, ele2]]}
result_averaged_dict = {}
frames = []
for row in result_quantized:
frames.append(row[0])
std_azis = []
std_eles = []
std_all = []
std_th = params['min_std_overlapping']
label = params['default_class_id']
for r_idx, row in enumerate(result_quantized):
# Get all doas
frame = row[0]
azis = []
eles = []
for v in row[1:]:
azis.append(v[1])
eles.append(v[2])
# Compute std of doas
std_azis.append(scipy.stats.circstd(azis, high=180, low=-180))
std_eles.append(np.std(eles))
std_all.append(std_azis[-1]/2 + std_eles[-1])
# If big std, we assume 2-overlap
if std_all[-1] >= std_th:
# 2 clusters:
x = deg2rad(np.asarray([azis, eles]).T)
try:
kmeans2 = HybridKMeans(n_init=params['num_init_kmeans']).fit(x)
except RuntimeWarning:
# All points in x are equal...
result_averaged_dict[frame] = [label, rad2deg(x[0,0]), rad2deg(x[0,1])]
continue
# Keep the centroids of this frame
result_averaged_dict[frame] = []
for c in kmeans2.cluster_centers_:
azi = rad2deg(c[0])
ele = rad2deg(c[1])
result_averaged_dict[frame].append([label, azi, ele])
else:
# 1 cluster: directly compute the median and keep it
azi = circmedian(np.asarray(azis), unit='deg')
ele = np.median(eles)
result_averaged_dict[frame] = [label, azi, ele]
if params['plot']:
plt.figure()
plt.suptitle('kmeans stds')
plt.scatter(frames,std_all,label='all')
plt.axhline(y=std_th)
plt.legend()
plt.grid()
plt.show()
## Group doas by distance and time proximity
    # Generate event_dict = { event_id: [ [frame, azi, ele], ... ] }
    # each individual event is a key, and the value is a list of [frame, azi, ele] entries
d_th = params['max_angular_distance_within_event']
frame_th = params['max_frame_distance_within_event']
event_idx = 0
event_dict = {}
# Ensure ascending order
    frames = sorted(result_averaged_dict.keys())
# TODO: write in a more modular way
for frame in frames:
value = result_averaged_dict[frame]
if len(value) == 3:
# One source
azi = value[1]
ele = value[2]
if not bool(event_dict):
# Empty: append
event_dict[event_idx] = [[frame, azi, ele]]
event_idx += 1
else:
# Compute distance with all previous frames
new_event = True # default
for idx in range(event_idx):
# Compute distance with median of all previous
azis = | np.asarray(event_dict[idx]) | numpy.asarray |
"""
Tests for matrix.py
"""
import pytest
import numpy as np
import sksurgerycore.transforms.matrix as mat
import sksurgerycore.utilities.validate_matrix as vm
def check_construct_rx_matrix(angle, is_in_radians, point):
""""
Check if the rotation matrix for rotating around the x axis is correct.
:param angle: the angle to rotate
:param is_in_radians: if angle is in radians or not
:param point: the point to be rotated
:returns: new_point -- the point after rotation
"""
rot_x = mat.construct_rx_matrix(angle, is_in_radians)
vm.validate_rotation_matrix(rot_x)
new_point = np.matmul(rot_x, point)
assert new_point[0] == point[0]
assert np.abs(np.linalg.norm(new_point) - np.linalg.norm(point)) <= 0.0001
return new_point
def check_construct_ry_matrix(angle, is_in_radians, point):
""""
Check if the rotation matrix for rotating around the y axis is correct.
:param angle: the angle to rotate
:param is_in_radians: if angle is in radians or not
:param point: the point to be rotated
:returns: new_point -- the point after rotation
"""
rot_y = mat.construct_ry_matrix(angle, is_in_radians)
vm.validate_rotation_matrix(rot_y)
new_point = np.matmul(rot_y, point)
assert new_point[1] == point[1]
assert np.abs(np.linalg.norm(new_point) - np.linalg.norm(point)) <= 0.0001
return new_point
def check_construct_rz_matrix(angle, is_in_radians, point):
""""
Check if the rotation matrix for rotating around the z axis is correct.
:param angle: the angle to rotate
:param is_in_radians: if angle is in radians or not
:param point: the point to be rotated
:returns: new_point -- the point after rotation
"""
rot_z = mat.construct_rz_matrix(angle, is_in_radians)
vm.validate_rotation_matrix(rot_z)
new_point = np.matmul(rot_z, point)
assert new_point[2] == point[2]
assert np.abs(np.linalg.norm(new_point) - np.linalg.norm(point)) <= 0.0001
return new_point
def test_construct_rx_matrix():
tiny = 0.0001
new_point = check_construct_rx_matrix(90., 0, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rx_matrix(np.pi/2, 1, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rx_matrix(-90., 0, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] + 1) < tiny
new_point = check_construct_rx_matrix(-np.pi/2, 1, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] + 1) < tiny
new_point = check_construct_rx_matrix(180., 0, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rx_matrix(np.pi, 1, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rx_matrix(-180., 0, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rx_matrix(-np.pi, 1, np.array([0, 1, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
#check for bad types
with pytest.raises(TypeError):
mat.construct_rx_matrix(str('10.'), False)
def test_construct_ry_matrix():
tiny = 0.0001
new_point = check_construct_ry_matrix(90., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] + 1) < tiny
new_point = check_construct_ry_matrix(np.pi/2, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] + 1) < tiny
new_point = check_construct_ry_matrix(-90., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_ry_matrix(-np.pi/2, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_ry_matrix(180., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) <= tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_ry_matrix(np.pi, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_ry_matrix(-180., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_ry_matrix(-np.pi, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
#check for bad types
with pytest.raises(TypeError):
mat.construct_ry_matrix(int(10.), True)
def test_construct_rz_matrix():
tiny = 0.0001
new_point = check_construct_rz_matrix(90., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(np.pi/2, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(-90., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(-np.pi/2, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0]) < tiny
assert np.abs(new_point[1] + 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(180., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(np.pi, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(-180., 0, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rz_matrix(-np.pi, 1, np.array([1, 0, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2]) < tiny
#check for bad types
with pytest.raises(TypeError):
mat.construct_rz_matrix(100, True)
def check_construct_rotm_from_euler(
angle_a, angle_b, angle_c,
sequence, is_in_radians,
point):
""""
Check if the rotation matrix for rotating around the x axis is correct.
:param angle_a: first Euler angle
:param angle_b: second Euler angle
:param angle_c: third Euler angle
:param is_in_radians: if angle is in radians or not
:param point: the point to be rotated
:returns: new_point -- the point after rotation
"""
rot_m = mat.construct_rotm_from_euler(
angle_a, angle_b, angle_c,
sequence, is_in_radians)
vm.validate_rotation_matrix(rot_m)
new_point = np.matmul(rot_m, point)
assert np.abs(np.linalg.norm(new_point) - np.linalg.norm(point)) <= 0.0001
return new_point
def test_construct_rotm_from_euler(recwarn):
tiny = 0.0001
new_point = check_construct_rotm_from_euler(
90, -90, 0,
'zxz', False,
np.array([1, 0, -1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
90., -90., 0.,
'zyz', 0,
np.array([0, -1, -1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
np.pi/2, -np.pi/2, 0.,
'zyz', 1,
np.array([0, -1, -1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
0., 45., 45.,
'xyx', 0,
np.array([0, 1, 1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rotm_from_euler(
0., -45., -45.,
'xzx', 0,
np.array([0, 1, 1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
45., 45., -90.,
'yxy', 0,
np.array([1, 1, 0]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rotm_from_euler(
45., 45., 180.,
'yzy', 0,
np.array([1, 1, 0]).T)
assert np.abs(new_point[0] + 1) < tiny
assert np.abs(new_point[1]) < tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rotm_from_euler(
0., 90., -90.,
'xyz', 0,
np.array([-1, 0, 1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
0., 90., -90.,
'zyx', 0,
np.array([0, -1, 1]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1] - 1) < tiny
assert np.abs(new_point[2]) < tiny
new_point = check_construct_rotm_from_euler(
0., -45., -45.,
'xyz', 0,
np.array([1, 1, 0]).T)
assert np.abs(new_point[0] - 1) < tiny
assert np.abs(new_point[1]) <= tiny
assert np.abs(new_point[2] - 1) < tiny
new_point = check_construct_rotm_from_euler(
0., -np.pi/4, -np.pi/4,
'xyz', 1,
| np.array([1, 1, 0]) | numpy.array |
import os, codecs
import pandas as pd
import numpy as np
PATH = '../input/'
# Shared-bike trajectory data
bike_track = pd.concat([
pd.read_csv(PATH + 'gxdc_gj20201221.csv'),
pd.read_csv(PATH + 'gxdc_gj20201222.csv'),
pd.read_csv(PATH + 'gxdc_gj20201223.csv'),
pd.read_csv(PATH + 'gxdc_gj20201224.csv'),
pd.read_csv(PATH + 'gxdc_gj20201225.csv')
])
# Sort by bike ID and locating time
bike_track = bike_track.sort_values(['BICYCLE_ID', 'LOCATING_TIME'])
import folium
m = folium.Map(location=[24.482426, 118.157606], zoom_start=12)
my_PolyLine=folium.PolyLine(locations=bike_track[bike_track['BICYCLE_ID'] == '000152773681a23a7f2d9af8e8902703'][['LATITUDE', 'LONGITUDE']].values,weight=5)
m.add_children(my_PolyLine)
def bike_fence_format(s):
s = s.replace('[', '').replace(']', '').split(',')
s = | np.array(s) | numpy.array |
#!/usr/bin/env python3
# manual
"""
This script allows you to manually control the simulator or Duckiebot
using the keyboard arrows.
"""
import sys
import argparse
import pyglet
from pyglet.window import key
import numpy as np
import gym
import gym_duckietown
from gym_duckietown.envs import DuckietownEnv
from gym_duckietown.wrappers import UndistortWrapper
####
from PIL import Image
import cv2
import math
from apriltag import Detector
import transformations as tf
####
# from experiments.utils import save_img
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='Duckietown')
parser.add_argument('--map-name', default='udem1')
parser.add_argument('--distortion', default=False, action='store_true')
parser.add_argument('--draw-curve', action='store_true', help='draw the lane following curve')
parser.add_argument('--draw-bbox', action='store_true', help='draw collision detection bounding boxes')
parser.add_argument('--domain-rand', action='store_true', help='enable domain randomization')
parser.add_argument('--frame-skip', default=1, type=int, help='number of frames to skip')
parser.add_argument('--seed', default=1, type=int, help='seed')
args = parser.parse_args()
if args.env_name and args.env_name.find('Duckietown') != -1:
env = DuckietownEnv(
seed = args.seed,
map_name = args.map_name,
draw_curve = args.draw_curve,
draw_bbox = args.draw_bbox,
domain_rand = args.domain_rand,
frame_skip = args.frame_skip,
distortion = args.distortion,
)
else:
env = gym.make(args.env_name)
#env.reset()
env.render()
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
"""
This handler processes keyboard commands that
control the simulation
"""
if symbol == key.BACKSPACE or symbol == key.SLASH:
print('RESET')
env.reset()
env.render()
elif symbol == key.PAGEUP:
env.unwrapped.cam_angle[0] = 0
elif symbol == key.ESCAPE:
env.close()
sys.exit(0)
# Take a screenshot
# UNCOMMENT IF NEEDED - Skimage dependency
# elif symbol == key.RETURN:
# print('saving screenshot')
# img = env.render('rgb_array')
# save_img('screenshot.png', img)
# Register a keyboard handler
key_handler = key.KeyStateHandler()
env.unwrapped.window.push_handlers(key_handler)
def _draw_pose(overlay, camera_params, tag_size, pose, z_sign=1):
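    # Descriptive note: projects the 8 corners of a box sitting on the tag
    # (side length = tag_size) into the image with the pinhole model
    # K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] and the tag pose (rvec, tvec),
    # then draws the 12 box edges onto the overlay.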
opoints = np.array([
-1, -1, 0,
1, -1, 0,
1, 1, 0,
-1, 1, 0,
-1, -1, -2*z_sign,
1, -1, -2*z_sign,
1, 1, -2*z_sign,
-1, 1, -2*z_sign,
]).reshape(-1, 1, 3) * 0.5*tag_size
edges = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
0, 4,
1, 5,
2, 6,
3, 7,
4, 5,
5, 6,
6, 7,
7, 4
]).reshape(-1, 2)
fx, fy, cx, cy = camera_params
    # homogeneous (intrinsic) camera matrix
    K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
    print("homogeneous camera matrix ", K)
rvec, _ = cv2.Rodrigues(pose[:3,:3])
tvec = pose[:3, 3]
dcoeffs = np.zeros(5)
ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
ipoints = np.round(ipoints).astype(int)
ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
for i, j in edges:
cv2.line(overlay, ipoints[i], ipoints[j], (0, 255, 0), 1, 16)
def global_pose(matrix,x_ob,y_ob,angle): # matrix is the apriltag pose; x_ob and y_ob are the apriltag's x and y
tag_size = 0.18
tile_size = 0.585
T_a = tf.translation_matrix([
        -x_ob, -tag_size*3/4, y_ob])  # these values already come multiplied by tile_size
R_a = tf.euler_matrix(0,angle,0)
T_m_a = tf.concatenate_matrices(T_a, R_a)
    # tag pose with respect to the robot
T_r_a = np.dot(matrix, tf.euler_matrix(0, np.pi, 0))
    # tag pose with respect to the map
T_a_r = | np.linalg.inv(T_r_a) | numpy.linalg.inv |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""" Stochastic Dynamic Programming library
Implements naive methods of Dynamic Programming (Value Iteration)
to solve *simple* Optimal Stochastic Control problems
classes : SysDescription, DPSolver
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import inspect
import itertools
from datetime import datetime
def _zero_cost(*x):
'''zero cost function g(x), used as default terminal cost'''
return 0.
def _enforce_sig_len(fun, args, with_params, shortname=None):
''' Enforces the signature length of `fun` to match `args`
Checks that function `fun` indeed accepts len(`args`) arguments,
    raises ValueError otherwise.
Also `shortname` is used, if provided, in the error message to
prepend fun.__name__
'''
fun_args = inspect.getargspec(fun).args
kw_args = inspect.getargspec(fun).keywords
err_msg = ''
if shortname is not None:
err_msg += shortname
err_msg += "'{:s}' ".format(fun.__name__)
if not len(fun_args) == len(args):
# Build an error message of the kind
# "dynamics function 'dyn_sto' should accept 3 args (x1, u1, w1), not 4."
err_msg += 'should accept {:d} args ({:s}), not {:d}'.format(
len(args), ', '.join(args), len(fun_args))
raise ValueError(err_msg)
if with_params and kw_args is None:
err_msg += 'should accept extra keyword arguments'
raise ValueError(err_msg)
if not with_params and kw_args is not None:
err_msg += 'should not accept extra keyword arguments'
raise ValueError(err_msg)
else:
return True
# end _enforce_sig_len
class SysDescription(object):
def __init__(self, dims, stationnary=True, name='', params=None):
'''Description of a Dynamical System in the view of optimal (stochastic)
control, using Dynamic Programming approach.
Each system basically has
* a dynamics function x_{k+1} = f_k(x_k, u_k, w_k)
* an instant cost function g_k(x_k, u_k, w_k)
The sum over instants of g_k is the total cost J which is to be minimized
by choosing the control policy
'''
self.name = name
self.stationnary = bool(stationnary)
if params is not None:
self.params = params
else:
self.params = {}
if len(dims) == 3:
dim_state, dim_control, dim_perturb = dims
elif len(dims) == 2:
dim_state, dim_control = dims
dim_perturb = 0
else:
raise ValueError('dims tuple should be of len 2 or 3')
self.state = ['x{:d}'.format(i+1) for i in range(dim_state)]
self.control = ['u{:d}'.format(i+1) for i in range(dim_control)]
self.perturb = ['w{:d}'.format(i+1) for i in range(dim_perturb)]
# Expected signature length of dyn and cost functions:
self._dyn_args = self.state + self.control + self.perturb
if not self.stationnary:
# for unstationnary systems, instant `k` must be provided as 1st argument
self._dyn_args.insert(0, 'time_k')
# Dynamics and Cost functions (to be set separately)
self._dyn = None
self._cost = None
self._control_box = None
self._terminal_cost = _zero_cost
self._perturb_laws = None
@property
def stochastic(self):
'''is the system stochastic or deterministic ?'''
return len(self.perturb) > 0
@property
def dyn(self):
'''dynamics function x_{k+1} = f_k(x_k, u_k, w_k)'''
return self._dyn
@dyn.setter
def dyn(self, dyn):
'''sets the dynamics function'''
# Check the signature length:
with_params = bool(self.params)
if _enforce_sig_len(dyn, self._dyn_args, with_params, 'dynamics function'):
self._dyn = dyn
# Read the variable names from the signature of `dyn`
dyn_args = inspect.getargspec(dyn).args
# Rewrite the internally stored signature
self._dyn_args = dyn_args
# Split the signature between state, control and perturb:
if not self.stationnary:
# drop the first argument
dyn_args = dyn_args[1:]
self.state = dyn_args[0:len(self.state)]
dyn_args = dyn_args[len(self.state):] # drop state variables
self.control = dyn_args[0:len(self.control)]
dyn_args = dyn_args[len(self.control):] # drop control variables
self.perturb = dyn_args[0:len(self.perturb)]
@property
def control_box(self):
'''control description function U_k(x_k), expressed as a box (Hyperrectangle)
which means the admissible control set must be described as a
Cartesian product of intervals U = [u1_min, u1_max] x [u2_min, u2_max] x ...
'''
return self._control_box
@control_box.setter
def control_box(self, control_box):
'''sets the control description function'''
# Check the signature length:
args = list(self.state)
if not self.stationnary:
args.insert(0, 'time_k')
with_params = bool(self.params)
if _enforce_sig_len(control_box, args, with_params, 'control description function'):
self._control_box = control_box
@property
def cost(self):
'''cost function g_k(x_k, u_k, w_k)'''
return self._cost
@cost.setter
def cost(self, cost):
'''sets the cost function'''
# Check the signature length:
with_params = bool(self.params)
if _enforce_sig_len(cost, self._dyn_args, with_params, 'cost function'):
self._cost = cost
@property
def terminal_cost(self):
'''terminal cost function g(x_K)'''
return self._terminal_cost
@terminal_cost.setter
def terminal_cost(self, cost):
'''sets the terminal cost function'''
# Check the signature length:
cost_args = inspect.getargspec(cost).args
if not len(cost_args) == len(self.state):
raise ValueError('cost function should accept '
'{:d} args instead of {:d}'.format(
len(self.state), len(cost_args)))
self._terminal_cost = cost
@property
def perturb_laws(self):
'''distribution laws of perturbations `w_k`'''
return self._perturb_laws
@perturb_laws.setter
def perturb_laws(self, laws):
'''distribution laws of perturbations'''
# Check the number of laws
if not len(laws) == len(self.perturb):
raise ValueError('{:d} perturbation laws should be provided'
.format(len(self.perturb)))
self._perturb_laws = laws
# Check the type of perturbations (continuous vs. discrete)
self.perturb_types = []
for l in laws:
t = None
try:
l.pdf(0) # probability *density* -> continuous
t = 'continuous'
except AttributeError:
try:
l.pmf(0) # probability *mass* -> discrete
t = 'discrete'
except AttributeError:
raise ValueError('perturbation law {:s} should either have a pdf or a pmf method'.format(repr(l)))
self.perturb_types.append(t)
def print_summary(self):
'''summary information about the dynamical system'''
print('Dynamical system "{}" description'.format(self.name))
### 1) general properties
station = 'stationnary' if self.stationnary else 'time dependent'
stoch = 'stochastic' if self.stochastic else 'deterministic'
print('* behavioral properties: {}, {}'.format(station, stoch))
### 2) info about functions:
print('* functions:')
funclist = [('dynamics', self.dyn),
('cost', self.cost),
('control box', self.control_box)]
maxlen = max([len(name) for name, _ in funclist])
for name, fun in funclist:
if fun is not None:
fname = '{0.__module__}.{0.__name__}'.format(fun)
else:
fname = 'None (to be defined)'
print(' - {0:{width}}: {1}'.format(name, fname, width=maxlen+1))
# end for each function
### 1) information about variables
print('* variables')
vectlist = [('state', self.state),
('control', self.control)]
if self.stochastic:
vectlist.append(('perturbation', self.perturb))
maxlen = max([len(name) for name, _ in vectlist])
for name, vect in vectlist:
print(' - {0:{width}}: {1} (dim {2:d})'.format(
name, ', '.join(vect), len(vect), width=maxlen+1 ))
# end for each vector
# end print_summary()
def __repr__(self):
return '<SysDescription "{:s}" at 0x{:x}>'.format(self.name, id(self))
# end __repr__()
# end SysDescription class
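# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original library): how a SysDescription
# could be filled in for a toy 1D system x_{k+1} = x_k + u_k + w_k with a
# quadratic cost. All names below (dyn_toy, cost_toy, ...) are made up for
# the example.
#
#   import scipy.stats as stats
#
#   toy = SysDescription((1, 1, 1), name='toy storage')
#
#   def dyn_toy(x1, u1, w1):
#       return (x1 + u1 + w1,)
#
#   def cost_toy(x1, u1, w1):
#       return x1**2 + 0.1*u1**2
#
#   def control_box_toy(x1):
#       # admissible controls: the interval [-1, 1] (a 1D "box")
#       return [(-1., 1.)]
#
#   toy.dyn = dyn_toy
#   toy.cost = cost_toy
#   toy.control_box = control_box_toy
#   toy.perturb_laws = [stats.norm(loc=0., scale=0.1)]
#   toy.print_summary()
# --------------------------------------------------------------------------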
################################################################################
# Interpolation class
# TODO : use a nicer n-dim method (like multilinear interpolation)
from scipy.interpolate import RectBivariateSpline
from stodynprog.dolointerpolation.multilinear_cython import multilinear_interpolation
class MlinInterpolator:
'''Multilinear interpolation class
wrapping Pablo Winant's Cython interpolation routine
Note : API of this class is different from Pablo Winant's MultilinInterpolator
'''
def __init__(self, *x_grid):
self.ndim = len(x_grid)
self._xmin = np.array([x[0] for x in x_grid])
self._xmax = np.array([x[-1] for x in x_grid])
        self._xshape = np.array([len(x) for x in x_grid], dtype=int)
self.values = None
def set_values(self,values):
assert values.ndim == self.ndim
assert values.shape == tuple(self._xshape)
self.values = np.ascontiguousarray(np.atleast_2d(values.ravel()))
def __call__(self, *x_interp):
'''evaluate the interpolated function at coordinates `x_interp`
output shape is the shape of broadcasted coordinate inputs.
'''
assert len(x_interp) == self.ndim
# Prepare the interpolated coordinates array
x_mesh = np.broadcast_arrays(*x_interp)
shape = x_mesh[0].shape
x_stack = np.row_stack([x.astype(float).ravel() for x in x_mesh])
#
a = multilinear_interpolation(self._xmin, self._xmax, self._xshape,
self.values, x_stack)
a = a.reshape(shape)
return a
# end __call__()
# end MlinInterpolator
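# Illustrative usage sketch (assumes the compiled Cython routine is available):
#
#   x_grid = np.linspace(0., 1., 11)
#   y_grid = np.linspace(0., 2., 21)
#   f_interp = MlinInterpolator(x_grid, y_grid)
#   f_interp.set_values(np.outer(x_grid, y_grid))   # values on the 11x21 grid
#   f_interp(0.25, 1.3)            # point evaluation
#   f_interp(x_grid, y_grid[5])    # coordinates are broadcast together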
class RectBivariateSplineBc(RectBivariateSpline):
'''extended RectBivariateSpline class,
    where spline evaluation uses input broadcasting
and returns an output with a coherent shape.
'''
#@profile
def __call__(self, x, y):
'''extended `ev` method, which supports array broadcasting
'''
if x.shape != y.shape:
x,y = np.broadcast_arrays(x,y) # costs about 30µs/call
# flatten the inputs after saving their shape:
shape = x.shape
x = np.ravel(x)
y = np.ravel(y)
# Evaluate the spline and reconstruct the dimension:
z = self.ev(x,y)
z = z.reshape(shape)
return z
# end __call__()
# end RectBivariateSplineBc class
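# Illustrative sketch: thanks to the broadcasting in __call__, the spline can
# be evaluated directly on a full grid, e.g.
#   spl = RectBivariateSplineBc(x1_grid, x2_grid, A, kx=1, ky=1)
#   Z = spl(x1_grid[:, None], x2_grid[None, :])   # shape (len(x1_grid), len(x2_grid))
# (x1_grid, x2_grid and A are placeholders for the caller's data.)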
################################################################################
# Stochastic Dynamic Programming class
class DPSolver(object):
def __init__(self, sys):
'''Dynamic Programming solver for stochastic dynamic control of `sys`
The dynamical system `sys` should be a `SysDescription` object.
DPSolver implements Value Iteration and Policy Iteration.
For the latter, policy evaluation is done by repeated value iterations.
'''
self.sys = sys
# Initialization of discrete grids:
self.state_grid = [[0.] for s in self.sys.state]
self.perturb_grid = [[0.] for p in self.sys.perturb]
self.perturb_proba = [[1.] for p in self.sys.perturb]
# steps for control discretization
self.control_steps = (1.,)*len(self.sys.control)
# end __init__()
def discretize_perturb(self, *linspace_args):
'''create a regular discrete grid for each perturbation variable
grids are stored in `self.perturb_grid` and can also be set manually
corresponding probability weights are in `self.perturb_proba`
'''
assert len(linspace_args) == len(self.sys.perturb)*3
self.perturb_grid = []
self.perturb_proba = []
for i in range(len(self.sys.perturb)):
# discrete grid for perturbation `i`
grid_wi = np.linspace(*linspace_args[i*3:i*3+3])
if self.sys.perturb_types[i] == 'continuous':
pdf_wi = self.sys.perturb_laws[i].pdf
proba_wi = pdf_wi(grid_wi)
proba_wi /= proba_wi.sum()
else: # discrete perturbation
pmf_wi = self.sys.perturb_laws[i].pmf
proba_wi = pmf_wi(grid_wi)
assert np.allclose(proba_wi.sum(), 1.)
#proba_wi /= proba_wi.sum()
self.perturb_grid.append(grid_wi)
self.perturb_proba.append(proba_wi)
return self.perturb_grid, self.perturb_proba
# end discretize_perturb()
def discretize_state(self, *linspace_args):
'''create a regular discrete grid for each state variable
grids are stored in `self.state_grid` and can also be set manually.
'''
assert len(linspace_args) == len(self.sys.state)*3
state_grid = []
for i in range(len(self.sys.state)):
# discrete grid for state `i`
grid_xi = np.linspace(*linspace_args[i*3:i*3+3])
state_grid.append(grid_xi)
self.state_grid = state_grid
### Store some additional data about the grid
# shape of the grid:
grid_shape = tuple(len(g) for g in self.state_grid)
self._state_grid_shape = grid_shape
# Reference indices (for relative DP algorithm)
# -> take the "middle" of the grid
ref_ind = tuple(nx//2 for nx in grid_shape)
self._state_ref_ind = ref_ind
self._state_ref = tuple(g[i] for g,i in zip(state_grid, ref_ind))
return self.state_grid
# end discretize_state()
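    # Illustrative sketch: for a 1-state / 1-perturbation system the grids
    # could be set up as (values are placeholders)
    #   dpsolv = DPSolver(sys)
    #   dpsolv.discretize_state(-2., 2., 81)        # x1: 81 points on [-2, 2]
    #   dpsolv.discretize_perturb(-0.3, 0.3, 7)     # w1: 7 points on [-0.3, 0.3]
    #   dpsolv.control_steps = (0.05,)              # discretization hint for u1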
@property
def state_grid_full(self):
'''broadcasted state grid
(compared to self.state_grid which is flat)
'''
state_dim = len(self.state_grid)
state_grid = []
for i, x_grid in enumerate(self.state_grid):
shape = [1]*state_dim
shape[i] = -1
state_grid.append(x_grid.reshape(shape))
return np.broadcast_arrays(*state_grid)
def interp_on_state(self, A):
'''returns an interpolating function of matrix A, assuming that A
is expressed on the state grid `self.state_grid`
the shape of A should be (len(g) for g in self.state_grid)
'''
# Check the dimension of A:
expect_shape = self._state_grid_shape
if A.shape != expect_shape:
raise ValueError('array `A` should be of shape {:s}, not {:s}'.format(
str(expect_shape), str(A.shape)) )
if len(expect_shape) <= 5:
A_interp = MlinInterpolator(*self.state_grid)
A_interp.set_values(A)
return A_interp
# if len(expect_shape) == 2:
# x1_grid = self.state_grid[0]
# x2_grid = self.state_grid[1]
# A_interp = RectBivariateSplineBc(x1_grid, x2_grid, A, kx=1, ky=1)
# return A_interp
else:
raise NotImplementedError('interpolation for state dimension >5'
' is not implemented.')
# end interp_on_state()
def control_grids(self, state_k, t_k=None):
'''returns u1_range, u2_range which is a grid on the box
of admissible controls using self.control_steps as hints
'''
# 1) Evaluate the admissible box:
if t_k is not None:
state_k = (t_k,) + state_k
sys_params = self.sys.params
intervals = self.sys.control_box(*state_k, **sys_params)
# 2) Build the dicretization grid for each control:
control_grids = []
control_dims = []
for (u_min, u_max), step in zip(intervals, self.control_steps):
width = u_max - u_min
n_interv = width / step # gives the number of intervals (float)
if n_interv < 0.1:
                # the admissible width is much (10x or more) thinner than the step hint,
                # so only keep one control point at the interval center:
npts = 1
u_grid = np.array([(u_min+u_max)/2])
else:
# ensure we take enough points so that the actual discretization step
                # is smaller than or equal to the `step` hint
npts = int( | np.ceil(n_interv) | numpy.ceil |
"""Module containing the integration tests for the `tVGP` class."""
import gpflow
import numpy as np
import pytest
import tensorflow as tf
from gpflow.likelihoods import Bernoulli, Gaussian
from gpflow.optimizers import NaturalGradient
from src.models.tvgp import t_VGP
LENGTH_SCALE = 2.0
VARIANCE = 2.25
NUM_DATA = 8
NOISE_VARIANCE = 0.3
rng = | np.random.RandomState(123) | numpy.random.RandomState |
import os
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Reshape
from tensorflow.keras.models import Model
from mnistData import MNIST
PATH = os.path.abspath("C:/Users/Jan/Dropbox/_Coding/UdemyGAN")
IMAGES_PATH = os.path.join(PATH, "Chapter7_Autoencoder/images")
mnist_data = MNIST()
x_train, _ = mnist_data.get_train_set()
x_test, _ = mnist_data.get_test_set()
x_train_noise = x_train + 0.1 * np.random.normal(size=x_train.shape)
x_test_noise = x_test + 0.1 * | np.random.normal(size=x_test.shape) | numpy.random.normal |
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the expval method of the :mod:`pennylane_lightning.LightningQubit` device.
"""
import pytest
import numpy as np
import pennylane as qml
from conftest import U, U2, A
np.random.seed(42)
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
@pytest.mark.parametrize("theta, phi", list(zip(THETA, PHI)))
class TestExpval:
"""Test expectation values"""
def test_identity_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that identity expectation value (i.e. the trace) is 1"""
dev = qubit_device_3_wires
O1 = qml.Identity(wires=[0])
O2 = qml.Identity(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([1, 1]), tol)
def test_pauliz_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that PauliZ expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliZ(wires=[0])
O2 = qml.PauliZ(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([np.cos(theta), np.cos(theta) * np.cos(phi)]), tol)
@pytest.mark.parametrize("C", [np.complex64, np.complex128])
def test_paulix_expectation(self, theta, phi, qubit_device_3_wires, tol, C):
"""Test that PauliX expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliX(wires=[0])
O2 = qml.PauliX(wires=[1])
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)], dtype=C)
assert np.allclose(res, np.array([np.sin(theta) * np.sin(phi), np.sin(phi)], dtype=C))
def test_pauliy_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that PauliY expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.PauliY(wires=[0])
O2 = qml.PauliY(wires=[1])
dev.apply(
[qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
assert np.allclose(res, np.array([0, -np.cos(theta) * np.sin(phi)]), tol)
def test_hadamard_expectation(self, theta, phi, qubit_device_3_wires, tol):
"""Test that Hadamard expectation value is correct"""
dev = qubit_device_3_wires
O1 = qml.Hadamard(wires=[0])
O2 = qml.Hadamard(wires=[1])
dev.apply(
[qml.RY(theta, wires=[0]), qml.RY(phi, wires=[1]), qml.CNOT(wires=[0, 1])],
rotations=[*O1.diagonalizing_gates(), *O2.diagonalizing_gates()],
)
res = np.array([dev.expval(O1), dev.expval(O2)])
expected = np.array(
[np.sin(theta) * np.sin(phi) + np.cos(theta), np.cos(theta) * np.cos(phi) + np.sin(phi)]
) / np.sqrt(2)
assert np.allclose(res, expected, tol)
@pytest.mark.parametrize("theta,phi,varphi", list(zip(THETA, PHI, VARPHI)))
class TestTensorExpval:
"""Test tensor expectation values"""
def test_paulix_pauliy(self, theta, phi, varphi, qubit_device_3_wires, tol):
"""Test that a tensor product involving PauliX and PauliY works
correctly"""
dev = qubit_device_3_wires
obs = qml.PauliX(0) @ qml.PauliY(2)
dev.apply(
[
qml.RX(theta, wires=[0]),
qml.RX(phi, wires=[1]),
qml.RX(varphi, wires=[2]),
qml.CNOT(wires=[0, 1]),
qml.CNOT(wires=[1, 2]),
],
rotations=obs.diagonalizing_gates(),
)
res = dev.expval(obs)
expected = | np.sin(theta) | numpy.sin |
# -*- encoding: utf-8 -*-
# Functions performing various data conversions for the ChaLearn AutoML
# challenge
from six.moves import range
import numpy as np
__all__ = [
'predict_RAM_usage',
'convert_to_num',
'convert_to_bin'
]
def binarization(array):
# Takes a binary-class datafile and turn the max value (positive class)
# into 1 and the min into 0
array = np.array(array, dtype=float) # conversion needed to use np.inf
if len(np.unique(array)) > 2:
raise ValueError('The argument must be a binary-class datafile. '
'{} classes detected'.format(len(np.unique(array))))
# manipulation which aims at avoid error in data
# with for example classes '1' and '2'.
array[array == np.amax(array)] = np.inf
array[array == np.amin(array)] = 0
array[array == np.inf] = 1
return np.array(array, dtype=int)
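# Example (illustrative): binarization(np.array([2., 1., 2., 1.])) -> array([1, 0, 1, 0])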
def multilabel_to_multiclass(array):
array = binarization(array)
return np.array([np.nonzero(array[i, :])[0][0] for i in range(len(array))])
def convert_to_num(Ybin):
"""
Convert binary targets to numeric vector
typically classification target values
:param Ybin:
:return:
"""
result = np.array(Ybin)
if len(Ybin.shape) != 1:
result = np.dot(Ybin, range(Ybin.shape[1]))
return result
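# Example (illustrative): convert_to_num(np.array([[0, 1, 0], [1, 0, 0]])) -> array([1, 0])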
def convert_to_bin(Ycont, nval, verbose=True):
# Convert numeric vector to binary (typically classification target values)
if verbose:
pass
Ybin = [[0] * nval for x in range(len(Ycont))]
for i in range(len(Ybin)):
line = Ybin[i]
line[ | np.int(Ycont[i]) | numpy.int |
import random
import pubchem as pc
import numpy as np
import pandas as pd
import sklearn as sk
import utility
import db.db as db
from config import config as cc
import sys
from sets import Set
import data
RD = cc.exp['params']['data']
RP = cc.exp['params']['rnn']
# not strictly without replacement: the same element can appear twice in one partition,
# since we concatenate the tail of one permutation with the head of a freshly shuffled one
class PermutationPartitioner:
def __init__(self, samplesCount, partitionSize):
self.samplesCount = samplesCount
self.partitionSize = partitionSize
self.permutation = np.random.permutation(samplesCount)
self.idx = 0
def get(self):
part = np.copy(self.permutation[self.idx:self.idx+self.partitionSize])
if len(part) < self.partitionSize:
np.random.shuffle(self.permutation)
self.idx = self.partitionSize - len(part)
part = np.concatenate((part,self.permutation[:self.idx]))
else:
self.idx += self.partitionSize
return part
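# Illustrative sketch of how the partitioner cycles through the data, e.g.
#   partitioner = PermutationPartitioner(samplesCount=10, partitionSize=4)
#   partitioner.get()   # 4 shuffled indices
#   partitioner.get()   # the next 4 indices
#   partitioner.get()   # the remaining 2 plus 2 from a fresh shuffle (repeats possible)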
def computeR2(pred, truth):
return np.corrcoef([pred,truth])[0][1]**2
def computeMSE(pred, truth):
return ((pred - truth)**2).mean()
def computeMAE(pred, truth):
return (np.absolute(pred - truth)).mean()
def predict(model, input, labels, meta):
if RP['edge_prediction']:
partitioner = PermutationPartitioner(len(input[0]), len(input[0]) / RP['num_partitions'])
else:
partitioner = PermutationPartitioner(len(input), len(input) / RP['num_partitions'])
iterations = RP['num_partitions']**2
metrics = {
'r2': np.zeros((labels.shape[1], iterations)),
'mse': np.zeros((labels.shape[1], iterations)),
'mae': np.zeros((labels.shape[1], iterations)),
}
# first denormalize labels, so we do it only once
labels = data.denormalize(labels, meta)
for iteration in range(iterations):
print('\titer:\t{}/{}'.format(iteration+1, iterations))
part = partitioner.get()
if RP['edge_prediction']:
partIn = [input[0][part],input[1][part]]
else:
partIn = input[part]
partLabelsT = labels[part].T
partPredT = model.predict(partIn, batch_size = RP['batch']).T
for i in range(labels.shape[1]):
metrics['r2'][i][iteration] = computeR2(partPredT[i], partLabelsT[i])
metrics['mse'][i][iteration] = computeMSE(partPredT[i], partLabelsT[i])
metrics['mae'][i][iteration] = computeMAE(partPredT[i], partLabelsT[i])
del partIn
del partLabelsT
del partPredT
metricsPerLabel = {
'r2_avg': np.nanmean(metrics['r2'], axis = 1),
'r2_std': np.nanstd(metrics['r2'], axis = 1),
'mse_avg': np.nanmean(metrics['mse'], axis = 1),
'mse_std': np.nanstd(metrics['mse'], axis = 1),
'mae_avg': np.nanmean(metrics['mae'], axis = 1),
'mae_std': np.nanstd(metrics['mae'], axis = 1),
}
metricsOverall = {
'r2_avg': np.nanmean(metrics['r2']),
'r2_std': np.nanstd(metrics['r2']),
'mse_avg': np.nanmean(metrics['mse']),
'mse_std': np.nanstd(metrics['mse']),
'mae_avg': np.nanmean(metrics['mae']),
'mae_std': np.nanstd(metrics['mae']),
}
for i,labelName in enumerate(RD['labels']):
print('{}/{} - {}:'.format(i+1, len(RD['labels']),labelName))
print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['r2_avg'][i],metricsPerLabel['r2_std'][i]))
print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mse_avg'][i],metricsPerLabel['mse_std'][i]))
print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mae_avg'][i],metricsPerLabel['mae_std'][i]))
print('Overall metrics:')
print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['r2_avg'],metricsOverall['r2_std']))
print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mse_avg'],metricsOverall['mse_std']))
print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mae_avg'],metricsOverall['mae_std']))
return metricsOverall
def computeConfusion(pred, truth):
# pred_pos pred_neg
# true_pos TP FN
# true_neg FP TN
confusion = | np.zeros((2,2)) | numpy.zeros |
#!/usr/bin/env python
# coding: utf-8
"""
Train process with rendered videos (standardized)
Author: Ruochenliu
Date: June 2019
"""
import numpy as np
import pandas as pd
import pickle
import cv2
import matplotlib.pyplot as plt
from os import listdir
from imblearn.over_sampling import RandomOverSampler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout, SpatialDropout3D, MaxPooling3D, Conv3D, LeakyReLU, ReLU, BatchNormalization
from tensorflow.keras.optimizers import Adam, SGD, Adagrad
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.models import load_model
from tensorflow.keras.regularizers import l2
tf.logging.set_verbosity(tf.logging.ERROR)
def stand(img):
return (img - np.mean(img))/np.std(img)
def get_one_hot(targets, n_classes):
res = np.eye(n_classes)[np.array(targets).reshape(-1)]
return res.reshape(list(targets.shape)+[n_classes])
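# Example (illustrative): get_one_hot(np.array([0, 2, 1]), 3)
#   -> array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]]) with shape (3, 3)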
def get_train_data(s, std=False):
if std:
sd = "_std"
else:
sd = ""
if s == "xyz":
X = np.load("../data/X"+sd+".npy")
y = | np.load("../data/y.npy") | numpy.load |
import pickle as pkl
import numpy as np
import numpy.linalg as linalg
# import scipy.linalg as linalg
import scipy.stats as stats
import pandas as pd
import copy as cp
def getPeaksAndBWs(strf,dt=5,df=1/6, discard_thresh=0.05):
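    # Descriptive note: splits the STRF into its excitatory (positive) and
    # inhibitory (negative) parts and approximates each by the rank-1 term of
    # its SVD: the leading left singular vector gives the spectral profile and
    # the leading right singular vector gives the temporal profile. Peaks and
    # half-maximum bandwidths are then read off these profiles (df per
    # frequency bin, dt per time bin).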
original_strf= strf
strf=np.maximum(original_strf,0)
l2_norm_pos = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
pos_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(pos_peaks_ix)>1:
pos_first_peak_ix = pos_peaks_ix[-1]
else:
pos_first_peak_ix = pos_peaks_ix
f_pos_peak = (abs_max_f1_ix)*df
f_pos_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_pos_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_pos_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
#Inhibition:
strf=np.minimum(original_strf,0)
l2_norm_neg = np.sum(strf[:]**2)
[u,s,v] = linalg.svd(strf)
f1 = u[:,0]
t1 = v.T[:,0]
abs_max_f1_val = np.max(np.abs(f1))
abs_max_f1_ix = np.argmax(np.abs(f1))
abs_max_t1_val = np.max(np.abs(t1))
abs_max_t1_ix = np.argmax(np.abs(t1))
neg_peaks_ix = np.argwhere(np.abs(t1)>0.1*abs_max_t1_val)
if len(neg_peaks_ix)>1:
neg_first_peak_ix = neg_peaks_ix[-1]
else:
neg_first_peak_ix = neg_peaks_ix
f_neg_peak = (abs_max_f1_ix)*df
f_neg_bw = np.sum(np.abs(f1)>0.5*abs_max_f1_val)*df
t_neg_peak = (len(t1) - abs_max_t1_ix)*dt*-1
t_neg_bw = np.sum(np.abs(t1)>0.5*abs_max_t1_val)*dt
discard_pos = False
discard_neg = False
flip_pos_neg = False
if l2_norm_neg<discard_thresh*l2_norm_pos:
discard_neg = True
f_neg_bw = 0
t_neg_bw = 0
elif l2_norm_pos<discard_thresh*l2_norm_neg:
discard_pos = True
f_pos_bw = 0
t_pos_bw = 0
if (neg_first_peak_ix>pos_first_peak_ix and not discard_neg) or discard_pos:
# print('flip_pos_neg = True')
flip_pos_neg = True
discard_neg = discard_pos
f_peak = [f_neg_peak, f_pos_peak]
f_bw = [f_neg_bw, f_pos_bw]
t_peak = [t_neg_peak, t_pos_peak]
t_bw = [t_neg_bw, t_pos_bw]
else:
f_peak = [f_pos_peak,f_neg_peak]
f_bw = [f_pos_bw,f_neg_bw]
t_peak = [t_pos_peak,t_neg_peak]
t_bw = [t_pos_bw,t_neg_bw]
# flags = [flip_pos_neg, discard_neg]
return [f_peak,f_bw, t_peak,t_bw, flip_pos_neg, discard_neg]
def flip_neg_weights(weights,n_h = 40, dt = 5,dF = 1/6):
numweights = weights.shape[0]
mf_peak = np.empty([numweights,2])
mf_bw = np.empty([numweights,2])
mt_bw = np.empty([numweights,2])
mt_peak = np.empty([numweights,2])
m_pow = np.empty([numweights, n_h])
flip_pos_neg = np.empty([numweights])
discard_neg = np.empty([numweights])
for ii in np.arange(numweights):
#normalize weight so that all are in same range
this_weight = weights[ii,:,:]
this_weight_norm = this_weight/np.max(np.abs(this_weight[:]))
[mf_peak[ii,:],mf_bw[ii,:], mt_peak[ii,:],mt_bw[ii,:], flip_pos_neg[ii], discard_neg[ii]] = getPeaksAndBWs(this_weight_norm,dt,dF)
if flip_pos_neg[ii]:
this_weight = -this_weight
weights[ii,:,:] = this_weight
return weights
def quantify_strfs(weights,n_h = 40, dt = 5,dF = 1/6):
numweights = weights.shape[0]
mf_peak = np.empty([numweights,2])
mf_bw = | np.empty([numweights,2]) | numpy.empty |
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
"""Given values, return uid
Load from file, or make."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
self.filename = f'{p}mc/simluation_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.load()
else:
self.df = pd.DataFrame()
def load(self):
self.df = pd.read_csv(self.filename, index_col=0)
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]
def save(self):
self.df.to_csv(self.filename)
def append(self, df):
self.df = self.df.append(df, ignore_index=True)
def map_surveys(self, ix, names):
mapping = dict(zip(ix, names))
self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
def __init__(self, pop_size=1e2, load_csv=True):
self.survey_names = ['parkes-htru',
'chime-frb',
'askap-incoh',
'wsrt-apertif']
self.load_csv = load_csv
self.pop_size = pop_size
self.survey_ix = [i for i in range(len(self.survey_names))]
self.surveys = self.set_up_surveys()
self.so = SimulationOverview(load_csv=self.load_csv)
self.set_up_dirs()
def set_up_surveys(self):
"""Set up surveys."""
surveys = []
for name in self.survey_names:
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
def gen_par_set_1(self,
parallel=True,
lum_min=np.nan,
lum_max=np.nan,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=0):
alphas = np.linspace(-2.5, -1, 11)
sis = np.linspace(-2, 2, 11)
lis = np.linspace(-2, 0, 11)
# Put all options into a dataframe
if 'run' in self.so.df:
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
df['run'] = run
df['par_set'] = 1
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
def iter_alpha(i):
alpha = alphas[i]
pop = CosmicPopulation.complex(self.pop_size)
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
for si in sis:
pop.set_si(model='constant', value=si)
pop.gen_si()
for li in lis:
pop.set_lum(model='powerlaw',
low=1e40,
high=1e45, power=li)
if not np.isnan(lum_min):
pop.set_lum(model='powerlaw', low=lum_min,
high=lum_max, index=li)
pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 1)
mask &= (self.so.df.run == run)
mask &= (self.so.df.alpha == alpha)
mask &= (self.so.df.si == si)
mask &= (self.so.df.li == li)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
if parallel:
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
r = range(len(alphas))
Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
else:
[iter_alpha(i) for i in tqdm(range(len(alphas)))]
def gen_par_set_2(self,
parallel=True,
alpha=-1.5,
si=0,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
lis = np.linspace(-1.5, 0, 11)
lum_mins = 10**np.linspace(38, 46, 11)
lum_maxs = 10**np.linspace(38, 46, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
cols = ('li', 'lum_min', 'lum_max', 'survey')
df = pd.DataFrame(options, columns=cols)
df['par_set'] = 2
df['run'] = run
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
df = df[~(df.lum_max < df.lum_min)]
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
li, lum_min, lum_max = e
if lum_max < lum_min:
return
t_pop = deepcopy(pop)
t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
power=li)
t_pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 2)
mask &= (self.so.df.run == run)
mask &= (self.so.df.li == li)
mask &= (self.so.df.lum_min == lum_min)
mask &= (self.so.df.lum_max == lum_max)
mask &= (self.so.df.survey == survey.name)
uuid = self.so.df[mask].uuid.iloc[0]
surv_pop.name = f'mc/run_{run}/{uuid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(lis, lum_mins, lum_maxs)
loop = np.array(mg).T.reshape(-1, 3)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_3(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
w_means = 10**np.linspace(-3, 1, 11)
w_stds = np.linspace(0, 3, 11)
# Put all options into a dataframe
self.so.df = self.so.df[self.so.df.run != run]
opt = | np.meshgrid(w_means, w_stds, self.survey_ix) | numpy.meshgrid |
import os
import scipy.io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import joblib
print(tf.__version__)
from utils_data import *
from get_model import *
#
args = get_args(parse_args().task)
# Load data
eq_valid, d_valid, lens_v = load_data(args, train=0)
print('Data loaded.')
# Scale
scaler = joblib.load('./save/scaler.save')
print('Scaler loaded.')
# Create model
model = get_model(args)
model.load_weights('./save/checkpoint.ckpt')
print(model.summary())
# Convert Keras to onnx
import keras2onnx
import onnx
onnx_model = keras2onnx.convert_keras(model, model.name)
onnx.save_model(onnx_model, './save/model-best.onnx')
# Load onnx model
import onnx
import onnxruntime as rt
onnx_model = onnx.load('./save/model-best.onnx')
onnx.checker.check_model(onnx_model)
sess = rt.InferenceSession('./save/model-best.onnx')
#%%
import time
# from utils import lowpass
is_revised = 1
is_filted = 1
inp = args.past_history
step = args.local_sample_step
history_size = inp * step
nval = eq_valid.shape[0]
iteration = eq_valid.shape[1]
origin, body = scale_valset(eq_valid, d_valid, scaler)
head = np.zeros((nval, history_size, 4))
result = np.concatenate((head, body), axis=1)
tick1 = time.time()
for i in range(history_size, history_size+iteration):
indices = range(i-history_size, i, step)
seq = result[:, indices, :]
if is_revised == 0:
outputs = sess.run(None, {sess.get_inputs()[0].name: seq.astype(np.float32)})[0] # (batch_size, 3)
elif is_revised == 1:
seq = np.concatenate((seq, -seq), axis=0)
outputs = sess.run(None, {sess.get_inputs()[0].name: seq.astype(np.float32)})[0] # (batch_size, 3)
outputs = (outputs[:nval] - outputs[nval:])/2
result[:, i:i+1, 1:4] = np.reshape(outputs, (-1, 1, 3))
print ("\r processing: {} / {} iterations ({}%)".format(i-history_size+1, iteration, (i-history_size+1)*100//iteration), end="")
tick2 = time.time()
origin = origin.astype(np.float64)
result = result[:, history_size:, :]
print("\n", tick2 - tick1)
if is_filted == 1:
for index in range(nval):
for floor in range(3):
result[index,:,floor+1] = lowpass(result[index,:,floor+1], 8, 100)
IND = 0
floor = 1 # 1, 2, 3
window = range(500, 1000)
window = range(0, lens_v[IND, 0])
l1 = origin[IND, window, floor]
l2 = result[IND, window, floor]
plt.figure(figsize=(20,12))
line_0 = plt.plot(l1, alpha=0.5, label = 'original disp')[0]
line_0.set_color('red')
line_0.set_linewidth(2.0)
line_4 = plt.plot(l2, alpha=0.5, label = 'predicted disp{}-{}'.format(IND, floor))[0]
line_4.set_color('green')
line_4.set_linewidth(2.0)
plt.legend()
plt.show()
print(np.corrcoef(l1, l2)[1][0])
#%% evaluate
result_inv = result.copy()
origin_inv = origin.copy()
nval = origin.shape[0]
for i in range(nval):
result_inv[i, :, :] = scaler.inverse_transform(result_inv[i, :, :]).reshape(1, -1, 4)
origin_inv[i, :, :] = scaler.inverse_transform(origin_inv[i, :, :]).reshape(1, -1, 4)
# origin = origin/1e3
# result = result/1e3
nfloor = 3 # 1 5 9 / 1 3 6
evaluate_results = | np.zeros((nval, 7, nfloor)) | numpy.zeros |
# ============================================================================
# Chapter 4: Heating and Cooling Equipment
# Section 1: General
# Ver.06 (Energy Consumption Performance Calculation Program (Residential) Ver.0205-)
# ============================================================================
############### References to other modules ###############
import numpy as np
import pyhees.section3_1 as ld
from pyhees.section3_1_a import calc_etr_dash_t
from pyhees.section4_1_a import calc_heating_mode, get_default_heating_spec, get_default_heatsource
import pyhees.section9_3 as ass
# Settings used when no equipment is installed
from pyhees.section4_1_b import get_default_cooling_spec
# Ducted central air conditioner
import pyhees.section4_2 as dc
import pyhees.section4_2_a as dc_a
import pyhees.section4_2_b as dc_spec
# Room air conditioner
import pyhees.section4_3 as rac
import pyhees.section4_3_a as rac_spec
# FF (sealed, forced-flue) heater
import pyhees.section4_4 as ff
import pyhees.section4_4_a as ff_spec
# Electric-heater floor heating
import pyhees.section4_5 as eheater
import pyhees.section4_5_a as eheater_spec
# Electric thermal-storage heater
import pyhees.section4_6 as ets
import pyhees.section4_6_a as ets_spec
# Hot-water heating
import pyhees.section4_7 as hwh
# Panel radiator for hot-water heating
import pyhees.section4_7_j as rad_panel
# Fan convector for hot-water heating
import pyhees.section4_7_k as rad_fanc
# Floor heating for hot-water heating
import pyhees.section4_7_l as rad_floor
# Hot-water floor heating combined with a room air conditioner
import pyhees.section4_8 as racfh
import pyhees.section4_8_a as racfh_spec
###################################################
# 6. Primary energy consumption of the heating equipment, processed load and unprocessed load
###################################################
# ===================================================
# 6.1 Processed load and unprocessed load
# ===================================================
def calc_heating_load(region, sol_region, A_A, A_MR, A_OR, Q, mu_H, mu_C, NV_MR, NV_OR, TS, r_A_ufvnt, HEX, underfloor_insulation, mode_H, mode_C,
spec_MR, spec_OR, mode_MR, mode_OR, SHC):
"""暖房負荷の取得
Args:
region(int): 省エネルギー地域区分
sol_region(int): 年間の日射地域区分(1-5)
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
Q(float): 当該住戸の熱損失係数 (W/m2K)
mu_H(float): 当該住戸の暖房期の日射取得係数 ((W/m2)/(W/m2))
mu_C(float): 当該住戸の冷房期の日射取得係数 ((W/m2)/(W/m2))
NV_MR(float): 主たる居室における通風の利用における相当換気回数
NV_OR(float): その他の居室における通風の利用における相当換気回数
TS(bool): 蓄熱の利用
r_A_ufvnt(float): 当該住戸において、床下空間全体の面積に対する空気を供給する床下空間の面積の比 (-)
HEX(dict): 熱交換器型設備仕様辞書
underfloor_insulation(bool): 床下空間が断熱空間内である場合はTrue
mode_H(str): 暖房方式
mode_C(str): 冷房方式
spec_MR(dict): 主たる居室の暖房機器の仕様
spec_OR(dict): その他の居室の暖房機器の仕様
mode_MR(str): 主たる居室の運転方法 (連続運転|間歇運転)
mode_OR(str): その他の居室の運転方法 (連続運転|間歇運転)
SHC(dict): 集熱式太陽熱利用設備の仕様
Returns:
tuple(ndarray, ndarray): 暖房区画i=1-5それぞれの暖房負荷, 標準住戸の暖冷房区画iの負荷補正前の暖房負荷 (MJ/h))
"""
if region == 8:
return np.zeros((12, 24 * 365)), np.zeros((12, 24 * 365))
if mode_H == '住戸全体を連続的に暖房する方式' or \
mode_H == '居室のみを暖房する方式でかつ主たる居室とその他の居室ともに温水暖房を設置する場合に該当しない場合' or \
mode_H == '設置しない':
        # Heating load of each heating zone i=1-5
L_T_H_d_t_i, L_dash_H_R_d_t_i = calc_L_H_d_t(region, sol_region, A_A, A_MR, A_OR, mode_H, mode_C, spec_MR, spec_OR,
mode_MR, mode_OR, Q,
mu_H, mu_C, NV_MR, NV_OR, TS, r_A_ufvnt, HEX, SHC, underfloor_insulation)
return L_T_H_d_t_i, L_dash_H_R_d_t_i
elif mode_H is None:
return None, None
else:
raise ValueError(mode_H)
# ---------------------------------------------------
# 6.1.1 Scheme that heats the whole dwelling continuously
# ---------------------------------------------------
def calc_Q_UT_H_A_d_t(A_A, A_MR, A_OR, A_env, mu_H, mu_C, q_hs_rtd_H, q_hs_rtd_C, V_hs_dsgn_H, V_hs_dsgn_C, Q,
VAV, general_ventilation, duct_insulation, region, L_H_d_t_i, L_CS_d_t_i, L_CL_d_t_i):
"""住宅全体を連続的に暖房する方式おける暖房設備の未処理暖房負荷 (1)
Args:
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
A_env(float): 外皮の部位の面積の合計 (m2)
mu_H(float): 当該住戸の暖房期の日射取得係数 ((W/m2)/(W/m2))
mu_C(float): 当該住戸の冷房期の日射取得係数 ((W/m2)/(W/m2))
q_hs_rtd_H(float): 熱源機の暖房時の定格出力 (MJ/h)
q_hs_rtd_C(float): 熱源機の冷房時の定格出力 (MJ/h)
V_hs_dsgn_H(float): 暖房時の設計風量(m3/h)
V_hs_dsgn_C(float): 冷房時の設計風量(m3/h)
Q(float): 当該住戸の熱損失係数 (W/m2K)
VAV(bool): VAV有無
general_ventilation(bool): 全版換気の機能の有無
duct_insulation(str): ダクトが通過する空間
region(int): 省エネルギー地域区分
L_H_d_t_i(ndarray): 日付dの時刻tにおける暖冷房区画iの1時間当たりの暖房負荷(MJ/h)
L_CS_d_t_i(ndarray): 暖冷房区画iの 1 時間当たりの冷房顕熱負荷
L_CL_d_t_i(ndarray): 暖冷房区画iの 1 時間当たりの冷房潜熱負荷
Returns:
ndarray: 住戸全体を連続的に暖房する方式における1時間当たりの暖房設備の未処理暖房負荷(MJ/h)
"""
_, Q_UT_H_d_t_i, _, _, _, _, _, _, _, _, _ = dc.calc_Q_UT_A(A_A, A_MR, A_OR, A_env, mu_H, mu_C, q_hs_rtd_H, q_hs_rtd_C,
V_hs_dsgn_H, V_hs_dsgn_C, Q, VAV, general_ventilation,
duct_insulation, region, L_H_d_t_i, L_CS_d_t_i, L_CL_d_t_i)
Q_UT_H_A_d_t = np.sum(Q_UT_H_d_t_i, axis=0)
return Q_UT_H_A_d_t
# ---------------------------------------------------
# 6.1.2 Scheme that heats only the occupied rooms
# ---------------------------------------------------
# # Processed heating load of the heating equipment installed in the main living room (3a)
# def get_Q_T_H_MR_d_t():
# return get_Q_T_H_d_t_i(i=1)
#
#
# # Processed heating load of the heating equipment installed in the other rooms (4a)
# def get_Q_T_H_OR_d_t():
# return np.sum([get_Q_T_H_d_t_i(i) for i in range(2, 6)], axis=0)
def calc_Q_UT_H_MR_d_t(region, A_A, A_MR, A_OR, spec_MR, spec_OR, spec_HS, mode_MR, mode_OR, CG, L_T_H_d_t):
"""主たる居室に設置された暖房設備の未処理暖房負荷 (2b)
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
spec_MR(dict): 主たる居室の暖房機器の仕様
spec_OR(dict): その他の居室の暖房機器の仕様
spec_HS(dict): 温水暖房機の仕様
mode_MR(str): 主たる居室の運転方法 (連続運転|間歇運転)
mode_OR(str): その他の居室の運転方法 (連続運転|間歇運転)
CG(dict): コージェネレーションの機器
L_T_H_d_t(ndarray): 暖房区画の暖房負荷
Returns:
ndarray: 住戸全体を連続的に暖房する方式における1時間当たりの主たる居室の暖房設備の未処理暖房負荷(MJ/h)
"""
if spec_MR['type'] in ['温水暖房用パネルラジエーター', '温水暖房用床暖房', '温水暖房用ファンコンベクター']:
        # Determine the supply water temperature
Theta_SW_hs_op = hwh.get_Theta_SW_hs_op(spec_HS['type'], CG)
rad_list = hwh.get_rad_list(spec_MR, spec_OR)
p_hs_d_t = hwh.calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_d_t, A_A, A_MR, A_OR, region, mode_MR, mode_OR)
Theta_SW_d_t = hwh.get_Theta_SW_d_t(Theta_SW_hs_op, p_hs_d_t)
if spec_MR['type'] == '温水暖房用パネルラジエーター':
            # Floor area
A_HCZ = calc_A_HCZ_i(1, A_A, A_MR, A_OR)
R_type = '主たる居室'
            # Maximum capacity of the panel radiator
q_max_rad = rad_panel.calc_q_max_rad(region, mode_MR, A_HCZ, R_type)
            # Maximum heating output of the panel radiator
Q_max_H_rad = rad_panel.get_Q_max_H_rad(Theta_SW_d_t, q_max_rad)
elif spec_MR['type'] == '温水暖房用床暖房':
            # Floor area
A_HCZ = calc_A_HCZ_i(1, A_A, A_MR, A_OR)
r_Af = spec_MR.get('r_Af')
A_f = rad_floor.get_A_f(A_HCZ, r_Af)
            # Maximum upward heat output per unit area of the hot-water floor heating
Q_max_H_rad = rad_floor.get_Q_max_H_rad(Theta_SW_d_t, A_f)
elif spec_MR['type'] == '温水暖房用ファンコンベクター':
            # Floor area
A_HCZ = calc_A_HCZ_i(1, A_A, A_MR, A_OR)
R_type = '主たる居室'
q_max_FC = rad_fanc.calc_q_max_FC(region, mode_MR, A_HCZ, R_type)
Q_max_H_rad = rad_fanc.calc_Q_max_H_rad(Theta_SW_d_t, q_max_FC)
else:
raise ValueError(spec_MR['type'])
        # Processed load
Q_T_H_d_t_i = np.min([Q_max_H_rad, L_T_H_d_t[0]], axis=0)
        # Unprocessed load
Q_UT_H_d_t_i = L_T_H_d_t[0] - Q_T_H_d_t_i
print('{} Q_UT_H_d_t_1 = {} [MJ]'.format(spec_MR['type'], np.sum(Q_UT_H_d_t_i)))
return Q_UT_H_d_t_i
else:
return calc_Q_UT_H_d_t(1, spec_MR, A_A, A_MR, A_OR, region, mode_MR, L_T_H_d_t[0])
def calc_Q_UT_H_OR_d_t(region, A_A, A_MR, A_OR, spec_MR, spec_OR, spec_HS, mode_MR, mode_OR, CG, L_T_H_d_t):
"""その他の居室に設置された暖房設備の未処理暖房負荷 (3b)
Args:
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
spec_MR(dict): 主たる居室の暖房機器の仕様
spec_OR(dict): その他の居室の暖房機器の仕様
spec_HS(dict): 温水暖房機の仕様
mode_MR(str): 主たる居室の運転方法 (連続運転|間歇運転)
mode_OR(str): その他の居室の運転方法 (連続運転|間歇運転)
CG(dict): コージェネレーションの機器
L_T_H_d_t(ndarray): 暖房区画の暖房負荷
Returns:
ndarray: 住戸全体を連続的に暖房する方式における1時間当たりのその他の居室の暖房設備の未処理暖房負荷(MJ/h)
"""
    # If there are no other rooms
if A_OR == 0:
return np.zeros(24 * 365)
else:
if spec_OR['type'] in ['温水暖房用パネルラジエーター', '温水暖房用床暖房', '温水暖房用ファンコンベクター']:
            # Determine the supply water temperature
Theta_SW_hs_op = hwh.get_Theta_SW_hs_op(spec_HS['type'], CG)
rad_list = hwh.get_rad_list(spec_MR, spec_OR)
p_hs_d_t = hwh.calc_p_hs_d_t(Theta_SW_hs_op, rad_list, L_T_H_d_t, A_A, A_MR, A_OR, region, mode_MR,
mode_OR)
Theta_SW_d_t = hwh.get_Theta_SW_d_t(Theta_SW_hs_op, p_hs_d_t)
            # Unprocessed load
Q_UT_H_d_t_i = np.zeros((5, 24 * 365))
for i in range(2, 6):
if spec_OR['type'] == '温水暖房用パネルラジエーター':
                    # Floor area
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = 'その他の居室'
                    # Maximum capacity of the panel radiator
q_max_rad = rad_panel.calc_q_max_rad(region, mode_OR, A_HCZ, R_type)
                    # Maximum heating output of the panel radiator
Q_max_H_rad = rad_panel.get_Q_max_H_rad(Theta_SW_d_t, q_max_rad)
elif spec_OR['type'] == '温水暖房用床暖房':
                    # Floor area
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
r_Af = spec_OR.get('r_Af')
A_f = rad_floor.get_A_f(A_HCZ, r_Af)
                    # Maximum upward heat output per unit area of the hot-water floor heating
Q_max_H_rad = rad_floor.get_Q_max_H_rad(Theta_SW_d_t, A_f)
elif spec_OR['type'] == '温水暖房用ファンコンベクター':
                    # Floor area
A_HCZ = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
R_type = 'その他の居室'
q_max_FC = rad_fanc.calc_q_max_FC(region, mode_OR, A_HCZ, R_type)
Q_max_H_rad = rad_fanc.calc_Q_max_H_rad(Theta_SW_d_t, q_max_FC)
else:
raise ValueError(spec_OR['type'])
                # Processed load
Q_T_H_d_t_i = | np.min([Q_max_H_rad, L_T_H_d_t[i - 1]], axis=0) | numpy.min |
r"""
Fitting Data (:mod:`desicos.conecylDB.fit_data`)
==================================================
.. currentmodule:: desicos.conecylDB.fit_data
This module includes functions used to fit measured imperfection data.
"""
from random import sample
import os
import numpy as np
from numpy import sin, cos, pi, deg2rad
from desicos.logger import *
from desicos.constants import FLOAT
def best_fit_cylinder(path, H, R_expected=10., save=True, errorRtol=1.e-9,
maxNumIter=1000, sample_size=None):
r"""Fit a best cylinder for a given set of measured data
The coordinate transformation which must be performed in order to adjust
the raw data to the finite element coordinate system is illustrated below:
.. figure:: ../../../figures/modules/conecylDB/fit_data/coord_sys_trans.png
:width: 400
This transformation can be represented in matrix form as:
.. math::
[T] = \begin{bmatrix}
cos(\beta) & sin(\alpha)sin(\beta) & -cos(\alpha)sin(\beta) & \Delta x_0
\\
0 & cos(\alpha) & sin(\alpha) & \Delta y_0
\\
sin(\beta) & -sin(\alpha)cos(\beta) & cos(\alpha)cos(\beta) & \Delta z_0
\\
\end{bmatrix}
Note that **five** variables are unknowns:
- the rotation angles `\alpha` and `\beta`
- the three components of the translation `\Delta x_0`, `\Delta y_0` and
`\Delta z_0`
The five unknowns are calculated iteratively in a non-linear least-sqares
problem (solved with ``scipy.optimize.leastsq``), where the measured data
is transformed to the reference coordinate system and there compared with
a reference cylinder in order to compute the residual error using:
.. math::
\begin{Bmatrix} x_{ref} \\ y_{ref} \\ z_{ref} \end{Bmatrix} =
[T]
\begin{Bmatrix} x_m \\ y_m \\ z_m \\ 1 \end{Bmatrix}
\\
Error = \sqrt{(\Delta r)^2 + (\Delta z)^2}
where:
- `x_m`, `y_m` and `z_m` are the data coordinates in the data coordinate
system
- `x_{ref}` `x_{ref}` are the data coordinates in the :ref:`reference
coordinate system <figure_conecyl>`
- `\Delta r` and `\Delta z` are defined as:
.. math::
\Delta r = R - \sqrt{x_{ref}^2 + y_{ref}^2}
\\
\Delta z = \begin{cases}
-z_{ref}, & \text{if } z_{ref} < 0 \\
0, & \text{if } 0 <= z_{ref} <= H \\
z_{ref} - H, & \text{if } z_{ref} > H \\
\end{cases}
Since the measured data may have an unknown radius `R`, the solution of
these equations has to be performed iteratively with one additional
external loop in order to update `R`.
Parameters
----------
path : str or np.ndarray
The path of the file containing the data. Can be a full path using
``r"C:\Temp\inputfile.txt"``, for example.
The input file must have 3 columns "`x` `y` `z`" expressed
in Cartesian coordinates.
This input can also be a ``np.ndarray`` object, with `x`, `y`, `z`
in each corresponding column.
H : float
The nominal height of the cylinder.
R_expected : float, optional
The nominal radius of the cylinder, used as a first guess to find
the best-fit radius (``R_best_fit``). Note that if not specified more
iterations may be required.
save : bool, optional
Whether to save an ``"output_best_fit.txt"`` in the working directory.
errorRtol : float, optional
The error tolerance for the best-fit radius to stop the iterations.
maxNumIter : int, optional
The maximum number of iterations for the best-fit radius.
sample_size : int, optional
If the input file containing the measured data is too big it may
be convenient to use only a sample of it in order to calculate the
best fit.
Returns
-------
out : dict
A Python dictionary with the entries:
``out['R_best_fit']`` : float
The best-fit radius of the input sample.
``out['T']`` : np.ndarray
The transformation matrix as a `3 \times 4` 2-D array. This matrix
does the transformation: input_pts --> output_pts.
``out['Tinv']`` : np.ndarray
The inverse transformation matrix as a `3 \times 4` 2-D array.
This matrix does the transformation: output_pts --> input_pts.
``out['input_pts']`` : np.ndarray
The input points in a `3 \times N` 2-D array.
``out['output_pts']`` : np.ndarray
The transformed points in a `3 \times N` 2-D array.
Examples
--------
1) General usage
For a given cylinder with expected radius and height of ``R_expected`` and
``H``::
from desicos.conecylDB.fit_data import best_fit_cylinder
out = best_fit_cylinder(path, H=H, R_expected=R_expected)
R_best_fit = out['R_best_fit']
T = out['T']
Tinv = out['Tinv']
2) Using the transformation matrices ``T`` and ``Tinv``
For a given input data with `x, y, z` positions in each line::
x, y, z = np.loadtxt('input_file.txt', unpack=True)
the transformation could be obtained with::
xnew, ynew, znew = T.dot(np.vstack((x, y, z, np.ones_like(x))))
and the inverse transformation::
x, y, z = Tinv.dot(np.vstack((xnew, ynew, znew, np.ones_like(xnew))))
"""
from scipy.optimize import leastsq
if isinstance(path, np.ndarray):
input_pts = path.T
else:
input_pts = np.loadtxt(path, unpack=True)
if input_pts.shape[0] != 3:
raise ValueError('Input does not have the format: "x, y, z"')
if sample_size:
num = input_pts.shape[1]
if sample_size < num:
input_pts = input_pts[:, sample(range(num), int(sample_size))]
pts = np.vstack((input_pts, np.ones_like(input_pts[0, :])))
def fT(p):
a, b, x0, y0, z0 = p
a %= 2*np.pi
b %= 2*np.pi
# rotation in x, y
T = np.array([[cos(b), sin(a)*sin(b), -cos(a)*sin(b), x0],
[ 0, cos(a), sin(a), y0],
[sin(b), -sin(a)*cos(b), cos(a)*cos(b), z0]])
return T
i = 0
R = R_expected
while i <= maxNumIter:
i += 1
def calc_dist(p, pts):
T = fT(p)
xn, yn, zn = T.dot(pts)
dz = np.zeros_like(zn)
factor = 0.1
# point below the bottom edge
mask = zn < 0
dz[mask] = -zn[mask]*factor
# point inside the cylinder
pass
#dz[(zn >= 0) & (zn <= H)] *= 0
# point above the top edge
mask = zn > H
dz[mask] = (zn[mask] - H)*factor
dr = R - np.sqrt(xn**2 + yn**2)
dist = np.sqrt(dr**2 + dz**2)
return dist
# initial guess for the optimization variables
# the variables are alpha, beta, x0, y0, z0
x, y, z = input_pts
p = [0.5, 0.5, 2*x.mean(), 2*y.mean(), 2*z.mean()]
# performing the leastsq analysis
popt, pcov = leastsq(func=calc_dist, x0=p, args=(pts,),
ftol=1.e-12, xtol=1.e-12, maxfev=1000000)
T = fT(popt)
output_pts = T.dot(pts)
x, y, z = output_pts
mask = (z>=0) & (z<=H)
R_best_fit = np.sqrt(x[mask]**2 + y[mask]**2).mean()
errorR = abs(R_best_fit - R)/R_best_fit
log('Iteration: {0}, R_best_fit: {1}, errorR: {2}'.format(
i, R_best_fit, errorR), level=1)
if errorR < errorRtol:
break
else:
R = R_best_fit
else:
warn('The maximum number of iterations was achieved!')
alpha, beta = popt[:2]
alpha %= 2*np.pi
beta %= 2*np.pi
log('')
log('Transformation matrix:\n{0}'.format(T))
log('')
log('Z versor: {0}*i + {1}*j + {2}*k'.format(*T[-1,:-1]))
log('')
log('alpha: {0} rad; beta: {1} rad'.format(alpha, beta))
log('')
log('x0, y0, z0: {0}, {1}, {2}'.format(*T[:,-1]))
log('')
log('Best fit radius: {0}'.format(R_best_fit))
log(' errorR: {0}, numiter: {1}'.format(errorR, i))
log('')
if save:
np.savetxt('output_best_fit.txt', np.vstack((x, y, z)).T)
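    # build the inverse transformation: transpose the rotation block and
    # negate the translation column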
Tinv = np.zeros_like(T)
Tinv[:3, :3] = T[:3, :3].T
Tinv[:, 3] = -T[:, 3]
return dict(R_best_fit=R_best_fit,
input_pts=input_pts,
output_pts=output_pts,
T=T, Tinv=Tinv)
def best_fit_cone(path, H, alphadeg, R_expected=10., save=True,
errorRtol=1.e-9, maxNumIter=1000, sample_size=None):
r"""Fit a best cone for a given set of measured data
.. note:: NOT IMPLEMENTED YET
"""
raise NotImplementedError('Function not implemented yet!')
def calc_c0(path, m0=50, n0=50, funcnum=2, fem_meridian_bot2top=True,
rotatedeg=None, filter_m0=None, filter_n0=None, sample_size=None,
maxmem=8):
r"""Find the coefficients that best fit the `w_0` imperfection
The measured data will be fit using one of the following functions,
selected using the ``funcnum`` parameter:
1) Half-Sine Function
.. math::
w_0 = \sum_{i=1}^{m_0}{ \sum_{j=0}^{n_0}{
{c_0}_{ij}^a sin{b_z} sin{b_\theta}
+{c_0}_{ij}^b sin{b_z} cos{b_\theta} }}
2) Half-Cosine Function (default)
.. math::
w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
{c_0}_{ij}^a cos{b_z} sin{b_\theta}
+{c_0}_{ij}^b cos{b_z} cos{b_\theta} }}
3) Complete Fourier Series
.. math::
w_0 = \sum_{i=0}^{m_0}{ \sum_{j=0}^{n_0}{
{c_0}_{ij}^a sin{b_z} sin{b_\theta}
+{c_0}_{ij}^b sin{b_z} cos{b_\theta}
+{c_0}_{ij}^c cos{b_z} sin{b_\theta}
+{c_0}_{ij}^d cos{b_z} cos{b_\theta} }}
where:
.. math::
b_z = i \pi \frac z H_{points}
b_\theta = j \theta
where `H_{points}` represents the difference between the maximum and
the minimum `z` values in the imperfection file.
The approximation can be written in matrix form as:
.. math::
w_0 = [g] \{c_0\}
where `[g]` carries the base functions and `{c_0}` the respective
    amplitudes. The solution consists of finding the best `{c_0}` that
minimizes the least-square error between the measured imperfection pattern
and the `w_0` function.
Parameters
----------
path : str or np.ndarray
The path of the file containing the data. Can be a full path using
``r"C:\Temp\inputfile.txt"``, for example.
The input file must have 3 columns "`\theta` `z` `imp`" expressed
in Cartesian coordinates.
This input can also be a ``np.ndarray`` object, with
`\theta`, `z`, `imp` in each corresponding column.
m0 : int
Number of terms along the meridian (`z`).
n0 : int
Number of terms along the circumference (`\theta`).
funcnum : int, optional
As explained above, selects the base functions used for
the approximation.
fem_meridian_bot2top : bool, optional
A boolean indicating if the finite element has the `x` axis starting
at the bottom or at the top.
rotatedeg : float or None, optional
Rotation angle in degrees telling how much the imperfection pattern
should be rotated about the `X_3` (or `Z`) axis.
filter_m0 : list, optional
The values of ``m0`` that should be filtered (see :func:`.filter_c0`).
filter_n0 : list, optional
The values of ``n0`` that should be filtered (see :func:`.filter_c0`).
    sample_size : int or None, optional
        An int specifying how many points of the imperfection file should
        be used. If ``None``, all points in the file will be used in the
        computations.
    maxmem : int, optional
        Maximum RAM, in GB, allowed when computing the base functions.
        The input is sub-sampled so that ``scipy.linalg.lstsq`` does not go
        beyond this limit.
Returns
-------
out : np.ndarray
A 1-D array with the best-fit coefficients.
Notes
-----
If a similar imperfection pattern is expected along the meridian and along
the circumference, the analyst can use an optimized relation between
``m0`` and ``n0`` in order to achieve a higher accuracy for a given
computational cost, as proposed by Castro et al. (2014):
.. math::
n_0 = m_0 \frac{\pi(R_{bot}+R_{top})}{2H}
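    Examples
    --------
    A minimal usage sketch (the file name and the numbers of terms below are
    only illustrative)::

        from desicos.conecylDB.fit_data import calc_c0

        c0, residues = calc_c0(r"C:\Temp\inputfile.txt", m0=40, n0=40,
                               funcnum=2)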
"""
from scipy.linalg import lstsq
if isinstance(path, np.ndarray):
input_pts = path
        path = 'unnamed.txt'
else:
input_pts = np.loadtxt(path)
if input_pts.shape[1] != 3:
raise ValueError('Input does not have the format: "theta, z, imp"')
if (input_pts[:,0].min() < -2*np.pi or input_pts[:,0].max() > 2*np.pi):
raise ValueError(
'In the input: "theta, z, imp"; "theta" must be in radians!')
log('Finding c0 coefficients for {0}'.format(str(os.path.basename(path))))
log('using funcnum {0}'.format(funcnum), level=1)
if sample_size:
num = input_pts.shape[0]
if sample_size < num:
input_pts = input_pts[sample(range(num), int(sample_size))]
if funcnum==1:
size = 2
elif funcnum==2:
size = 2
elif funcnum==3:
size = 4
else:
raise ValueError('Valid values for "funcnum" are 1, 2 or 3')
    # the least-squares algorithm uses approximately double the memory used
    # by the coefficients matrix (the scaling is not exactly linear, though)
memfac = 2.2
maxnum = int(maxmem*1024*1024*1024*8/(64.*size*m0*n0)/memfac)
num = input_pts.shape[0]
if num >= maxnum:
input_pts = input_pts[sample(range(num), int(maxnum))]
warn('Using {0} measured points due to the "maxmem" specified'.
format(maxnum), level=1)
ts = input_pts[:, 0].copy()
if rotatedeg is not None:
ts += deg2rad(rotatedeg)
zs = input_pts[:, 1]
w0pts = input_pts[:, 2]
#NOTE using `H_measured` did not allow a good fitting result
#zs /= H_measured
zs = (zs - zs.min())/(zs.max() - zs.min())
if not fem_meridian_bot2top:
#TODO
zs *= -1
zs += 1
a = fa(m0, n0, zs, ts, funcnum)
log('Base functions calculated', level=1)
c0, residues, rank, s = lstsq(a, w0pts)
log('Finished scipy.linalg.lstsq', level=1)
if filter_m0 is not None or filter_n0 is not None:
c0 = filter_c0(m0, n0, c0, filter_m0, filter_n0, funcnum=funcnum)
return c0, residues
def filter_c0(m0, n0, c0, filter_m0, filter_n0, funcnum=2):
r"""Apply filter to the imperfection coefficients `\{c_0\}`
    A filter consists of removing some frequencies that are known to be
related to rigid body modes or spurious measurement noise. The frequencies
to be removed should be passed through inputs ``filter_m0`` and
``filter_n0``.
Parameters
----------
m0 : int
The number of terms along the meridian.
n0 : int
The number of terms along the circumference.
c0 : np.ndarray
The coefficients of the imperfection pattern.
filter_m0 : list
The values of ``m0`` that should be filtered.
filter_n0 : list
The values of ``n0`` that should be filtered.
funcnum : int, optional
The function used for the approximation (see function :func:`.calc_c0`)
Returns
-------
c0_filtered : np.ndarray
The filtered coefficients of the imperfection pattern.
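    Examples
    --------
    A minimal usage sketch (the numbers of terms and the filtered frequencies
    below are only illustrative)::

        c0_filtered = filter_c0(m0=40, n0=40, c0=c0, filter_m0=[1],
                                filter_n0=[0], funcnum=2)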
"""
log('Applying filter...')
log('using c0.shape={0}, funcnum={1}'.format(c0.shape, funcnum), level=1)
fm0 = filter_m0
fn0 = filter_n0
log('using filter_m0={0}'.format(fm0))
log('using filter_n0={0}'.format(fn0))
if funcnum==1:
if 0 in fm0:
raise ValueError('For funcnum==1 m0 starts at 1!')
pos = ([2*(m0*j + (i-1)) + 0 for j in range(n0) for i in fm0] +
[2*(m0*j + (i-1)) + 1 for j in range(n0) for i in fm0])
pos += ([2*(m0*j + (i-1)) + 0 for j in fn0 for i in range(1, m0+1)] +
[2*(m0*j + (i-1)) + 1 for j in fn0 for i in range(1, m0+1)])
elif funcnum==2:
pos = ([2*(m0*j + i) + 0 for j in range(n0) for i in fm0] +
[2*(m0*j + i) + 1 for j in range(n0) for i in fm0])
pos += ([2*(m0*j + i) + 0 for j in fn0 for i in range(m0)] +
[2*(m0*j + i) + 1 for j in fn0 for i in range(m0)])
elif funcnum==3:
pos = ([4*(m0*j + i) + 0 for j in range(n0) for i in fm0] +
[4*(m0*j + i) + 1 for j in range(n0) for i in fm0] +
[4*(m0*j + i) + 2 for j in range(n0) for i in fm0] +
[4*(m0*j + i) + 3 for j in range(n0) for i in fm0])
pos += ([4*(m0*j + i) + 0 for j in fn0 for i in range(m0)] +
[4*(m0*j + i) + 1 for j in fn0 for i in range(m0)] +
[4*(m0*j + i) + 2 for j in fn0 for i in range(m0)] +
[4*(m0*j + i) + 3 for j in fn0 for i in range(m0)])
c0_filtered = c0.copy()
c0_filtered[pos] = 0
log('Filter applied!')
return c0_filtered
def fa(m0, n0, zs_norm, thetas, funcnum=2):
"""Calculates the matrix with the base functions for `w_0`
The calculated matrix is directly used to calculate the `w_0` displacement
field, when the corresponding coefficients `c_0` are known, through::
a = fa(m0, n0, zs_norm, thetas, funcnum)
w0 = a.dot(c0)
Parameters
----------
m0 : int
The number of terms along the meridian.
n0 : int
The number of terms along the circumference.
zs_norm : np.ndarray
The normalized `z` coordinates (from 0. to 1.) used to compute
the base functions.
thetas : np.ndarray
The angles in radians representing the circumferential positions.
funcnum : int, optional
The function used for the approximation (see function :func:`.calc_c0`)
"""
try:
import _fit_data
return _fit_data.fa(m0, n0, zs_norm, thetas, funcnum)
except:
warn('_fit_data.pyx could not be imported, executing in Python/NumPy'
+ '\n\t\tThis mode is slower and needs more memory than the'
+ '\n\t\tPython/NumPy/Cython mode',
level=1)
zs = zs_norm.ravel()
ts = thetas.ravel()
n = zs.shape[0]
zsmin = zs.min()
zsmax = zs.max()
if zsmin < 0 or zsmax > 1:
log('zs.min()={0}'.format(zsmin))
log('zs.max()={0}'.format(zsmax))
raise ValueError('The zs array must be normalized!')
if funcnum==1:
a = np.array([[sin(i*pi*zs)*sin(j*ts), sin(i*pi*zs)*cos(j*ts)]
for j in range(n0) for i in range(1, m0+1)])
a = a.swapaxes(0,2).swapaxes(1,2).reshape(n,-1)
elif funcnum==2:
a = np.array([[cos(i*pi*zs)*sin(j*ts), cos(i*pi*zs)*cos(j*ts)]
for j in range(n0) for i in range(m0)])
a = a.swapaxes(0,2).swapaxes(1,2).reshape(n,-1)
elif funcnum==3:
a = np.array([[sin(i*pi*zs)*sin(j*ts), sin(i*pi*zs)*cos(j*ts),
cos(i*pi*zs)*sin(j*ts), cos(i*pi*zs)*cos(j*ts)]
for j in range(n0) for i in range(m0)])
a = a.swapaxes(0,2).swapaxes(1,2).reshape(n,-1)
return a
def fw0(m0, n0, c0, xs_norm, ts, funcnum=2):
r"""Calculates the imperfection field `w_0` for a given input
Parameters
----------
m0 : int
The number of terms along the meridian.
n0 : int
The number of terms along the circumference.
c0 : np.ndarray
The coefficients of the imperfection pattern.
xs_norm : np.ndarray
The meridian coordinate (`x`) normalized to be between ``0.`` and
``1.``.
ts : np.ndarray
The angles in radians representing the circumferential coordinate
(`\theta`).
funcnum : int, optional
The function used for the approximation (see function :func:`.calc_c0`)
Returns
-------
w0s : np.ndarray
An array with the same shape of ``xs_norm`` containing the calculated
imperfections.
Notes
-----
The inputs ``xs_norm`` and ``ts`` must be of the same size.
The inputs must satisfy ``c0.shape[0] == size*m0*n0``, where:
- ``size=2`` if ``funcnum==1 or funcnum==2``
- ``size=4`` if ``funcnum==3``
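    Examples
    --------
    A minimal usage sketch, assuming ``c0`` was previously obtained with
    :func:`.calc_c0` (the grid sizes are only illustrative)::

        thetas = np.linspace(-np.pi, np.pi, 360)
        zs = np.linspace(0., 1., 100)
        Ts, Zs = np.meshgrid(thetas, zs)
        w0s = fw0(m0, n0, c0, Zs, Ts, funcnum=2)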
"""
if xs_norm.shape != ts.shape:
raise ValueError('xs_norm and ts must have the same shape')
if funcnum==1:
size = 2
elif funcnum==2:
size = 2
elif funcnum==3:
size = 4
if c0.shape[0] != size*m0*n0:
raise ValueError('Invalid c0 for the given m0 and n0!')
try:
import _fit_data
w0s = _fit_data.fw0(m0, n0, c0, xs_norm.ravel(), ts.ravel(), funcnum)
except:
a = fa(m0, n0, xs_norm.ravel(), ts.ravel(), funcnum)
w0s = a.dot(c0)
return w0s.reshape(xs_norm.shape)
def transf_matrix(alphadeg, betadeg, gammadeg, x0, y0, z0):
r"""Calculates the transformation matrix
The transformation matrix `[T]` is used to transform a set of points
from one coordinate system to another.
Many routines in the ``desicos`` require a transformation matrix when
the coordinate system is different than :ref:`the default one
<figure_conecyl>`. In such cases the angles `\alpha, \beta, \gamma` and
the translations `\Delta x_0, \Delta y_0, \Delta z_0` represent how
the user's coordinate system differs from the default.
.. math::
[T] = \begin{bmatrix}
cos(\beta)cos(\gamma) &
sin(\alpha)sin(\beta)cos(\gamma) + cos(\alpha)sin(\gamma) &
sin(\alpha)sin(\gamma) - cos(\alpha)sin(\beta)cos(\gamma) &
\Delta x_0
\\
-cos(\beta)sin(\gamma) &
cos(\alpha)cos(\gamma) - sin(\alpha)sin(\beta)sin(\gamma)&
sin(\alpha)cos(\gamma) + cos(\alpha)sin(\beta)sin(\gamma) &
\Delta y_0
\\
sin(\beta) &
-sin(\alpha)cos(\beta) &
cos(\alpha)cos(\beta) &
\Delta z_0
\\
\end{bmatrix}
Parameters
----------
alphadeg : float
Rotation around the x axis, in degrees.
betadeg : float
Rotation around the y axis, in degrees.
gammadeg : float
Rotation around the z axis, in degrees.
x0 : float
Translation along the x axis.
y0 : float
Translation along the y axis.
z0 : float
Translation along the z axis.
Returns
-------
T : np.ndarray
The 3 by 4 transformation matrix.
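    Examples
    --------
    A minimal usage sketch (the angles, offsets and point arrays ``x, y, z``
    are only illustrative)::

        T = transf_matrix(alphadeg=5., betadeg=0., gammadeg=0.,
                          x0=10., y0=0., z0=0.)
        xnew, ynew, znew = T.dot(np.vstack((x, y, z, np.ones_like(x))))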
"""
a = deg2rad(alphadeg)
b = deg2rad(betadeg)
g = deg2rad(gammadeg)
return np.array([[cos(b)*cos(g),
(sin(a)*sin(b)*cos(g) + cos(a)*sin(g)),
                      (sin(a)*sin(g) - cos(a)*sin(b)*cos(g)),
                      x0],
                     [-cos(b)*sin(g),
                      (cos(a)*cos(g) - sin(a)*sin(b)*sin(g)),
                      (sin(a)*cos(g) + cos(a)*sin(b)*sin(g)),
                      y0],
                     [sin(b),
                      -sin(a)*cos(b),
                      cos(a)*cos(b),
                      z0]])
import os, sys, glob, re
import random
import numpy as np
import time
import pandas as pd
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision
from torchvision import models, transforms
from efficientnet_pytorch import EfficientNet
from albumentations.pytorch import ToTensorV2
basepath = '/USER/INFERENCE/CANCER/'
sys.path.append(basepath + 'src/')
from utils import seed_everything, divide_fold, my_model, get_transform
from dataloader_cancer import ImageDataset
from classifier import train,test
#trial_no = 1
seed=20
seed_everything(seed)
num_channels = 3
num_classes = 2
num_fold = 5
def main(N_fold, batch_size, lr, n_epochs, num_workers, split_ratio, model_type, weight_filename, trial_no, augmentation, small_size, shuffle_data, class_balance, skip_batch, prefix):
if not os.path.exists(basepath+'val_perf'):
os.makedirs(basepath+'val_perf')
#N_fold = 4
TRANSFORM = get_transform(augmentation)
'''
Dataset
'''
filenames = glob.glob('/DATA/data_cancer/train/*.jpg')
targets = [re.sub(r'^.+/','',x).replace('.jpg','').split('_')[-1] for _,x in enumerate(filenames)]
labels = []
for x in targets:
if x == 'C': labels.append(1)
else: labels.append(0)
num_train = len(labels)
train_indices = divide_fold(np.array(labels), num_fold)[N_fold]
if shuffle_data:
np.random.shuffle(train_indices)
print('** train_indices: ', train_indices)
val_indices = np.setdiff1d(range(num_train), train_indices)
del targets, labels
filenames = np.array(filenames)
print('** do_augment: ', augmentation)
if augmentation:
stack_train = []
num_train = len(train_indices)
#for aug in aug_transform.keys():
stack_train.append(ImageDataset('train', TRANSFORM['base'], augmentation, small_size, filenames[train_indices].tolist()))
if augmentation < 3:
for i in range(augmentation):
fn = filenames[np.random.choice(num_train, int(num_train/2), replace=False)]
stack_train.append(ImageDataset('train', TRANSFORM['affine'], augmentation, small_size, fn.tolist()))
for i in range(augmentation):
fn = filenames[np.random.choice(num_train, int(num_train/2), replace=False)]
stack_train.append(ImageDataset('train', TRANSFORM['flip'], augmentation, small_size, fn.tolist()))
elif augmentation == 3:
for i in range(augmentation):
fn = filenames[np.random.choice(num_train, int(num_train/4), replace=False)]
stack_train.append(ImageDataset('train', TRANSFORM['blur'], augmentation, small_size, fn.tolist()))
import numpy as np
from PIL import Image
from scipy import special
# PSF functions
def scalar_a(x):
if x == 0:
return 1.0
else:
return (special.jn(1,2*np.pi*x)/(np.pi*x))**2
a = np.vectorize(scalar_a)
def s_b(x, NA=0.8, n=1.33):
if x == 0:
return 0
else:
return (NA/n)**2*(special.jn(2,2*np.pi*x)/(np.pi*x))**2
b = np.vectorize(s_b)
def h00(r_o, phi=0, NA=0.8, n=1.33, phi_p=None):
if phi_p==None:
return a(r_o) + 2*b(r_o, NA, n)
else:
return a(r_o) + 4*b(r_o, NA, n)*(np.cos(phi - phi_p)**2)
def h20(r_o, phi=0, NA=0.8, n=1.33, phi_p=None):
if phi_p==None:
return (1/np.sqrt(5))*(-a(r_o) + 4*b(r_o, NA, n))
else:
return (1/np.sqrt(5))*(-a(r_o) + 8*b(r_o, NA, n)*(np.cos(phi - phi_p)**2))
def h22(r_o, phi=0, NA=0.8, n=1.33, phi_p=None):
if phi_p==None:
return np.zeros(r_o.shape)
else:
return np.sqrt(3.0/5.0)*a(r_o)*(np.cos(phi_p)**2 - np.sin(phi_p)**2)
def h2_2(r_o, phi=0, NA=0.8, n=1.33, phi_p=None):
if phi_p==None:
        return np.zeros(r_o.shape)
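# Minimal usage sketch (illustrative values): evaluate the unpolarized PSF
# h00 on a radial grid; r_o is in the normalized units assumed by a() and b().
if __name__ == "__main__":
    r_o = np.linspace(0.0, 2.0, 200)
    psf = h00(r_o, NA=0.8, n=1.33)
    print(psf[:5])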
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from jtmri.np import flatten_axes, iter_axes, apply_to_axes, mosaic
from functools import reduce
def seq_array(dims):
return np.arange(reduce(np.multiply, dims)).reshape(dims)
def test_flatten_axes_one_dim():
"""Flattening one dimension does not change the array"""
a = seq_array((5, 4, 3, 2))
b = flatten_axes(a, -1)
assert_array_equal(a, b)
def test_flatten_axes():
a = seq_array((5, 4, 3, 2))
b = flatten_axes(a, (2, 3))
c = seq_array((5, 4, 6))
assert (5,4,6) == b.shape
assert_array_equal(c, b)
@pytest.mark.skip(reason='flatten_axes needs to be fixed: '
'fails when swapping two non-adjacent dimensions')
def test_flatten_axies_2d():
a = seq_array((2, 2, 2))
f = flatten_axes(a, (0, 2))
expected = np.array([[0, 1, 4, 5],
[2, 3, 6, 7]])
    assert_array_equal(expected, f)
def test_flatten_axes_fully():
a = seq_array((5, 4, 3, 2))
assert_array_equal(np.arange(5*4*3*2), flatten_axes(a, (0, 1, 2, 3)))
def test_flatten_axes_masked():
arr = seq_array((5, 4, 3, 2))
a = np.ma.array(arr, mask=(arr % 2 == 0))
b = flatten_axes(a, (0, 1, 2, 3))
assert_array_equal(np.arange(arr.size), b.data)
assert_array_equal(np.arange(arr.size) % 2 == 0, b.mask)
def test_flatten_axes_swapped():
a = seq_array((5,4,3,2))
assert_array_equal(flatten_axes(a, (0, 3)), flatten_axes(a, (3, 0)))
def test_iter_axes():
a = seq_array((5,4,3))
arrs = list(iter_axes(a, -1))
assert 3 == len(arrs)
assert_array_equal(a[...,0], arrs[0])
assert_array_equal(a[...,1], arrs[1])
assert_array_equal(a[...,2], arrs[2])
def test_iter_axes_none():
a = np.ones((5,4))
assert 0 == len(list(iter_axes(a, [])))
def test_iter_axes_multiple():
a = seq_array((5,4,3,2))
arrs = list(iter_axes(a, [2,3]))
assert 6 == len(arrs)
assert_array_equal(a[:,:,0,0], arrs[0])
assert_array_equal(a[:,:,0,1], arrs[1])
assert_array_equal(a[:,:,1,0], arrs[2])
    assert_array_equal(a[:,:,1,1], arrs[3])
from sys import exit
import os
import h5py
import numpy as np
from scipy.stats import entropy, kstest, median_abs_deviation, cramervonmises
def create_directories(model_name):
# Define directories
path = os.getcwd() + '/galpro/' + str(model_name) + '/'
point_estimate_folder = 'point_estimates/'
posterior_folder = 'posteriors/'
validation_folder = 'validation/'
plot_folder = 'plots/'
# Create root directory
if not os.path.isdir(os.getcwd() + '/galpro/'):
os.mkdir(os.getcwd() + '/galpro/')
# Create model directory
os.mkdir(path)
os.mkdir(path + point_estimate_folder)
os.mkdir(path + posterior_folder)
os.mkdir(path + validation_folder)
os.mkdir(path + point_estimate_folder + plot_folder)
os.mkdir(path + posterior_folder + plot_folder)
os.mkdir(path + validation_folder + plot_folder)
def convert_1d_arrays(*arrays):
"""Convert given 1d arrays from shape (n,) to (n, 1) for compatibility with code."""
arrays = list(arrays)
for i in np.arange(len(arrays)):
array = arrays[i]
if array is not None:
arrays[i] = arrays[i].reshape(-1, 1)
return arrays
def load_point_estimates(path):
"""Loads saved point estimates."""
point_estimate_folder = 'point_estimates/'
if os.path.isfile(path + point_estimate_folder + 'point_estimates.h5'):
with h5py.File(path + point_estimate_folder + "point_estimates.h5", 'r') as f:
y_pred = f['point_estimates'][:]
print('Previously saved point estimates have been loaded.')
else:
print('Point estimates have not been found. Run point_estimates().')
exit()
return y_pred
def load_posteriors(path):
"""Loads saved posteriors."""
posterior_folder = 'posteriors/'
if os.path.isfile(path + posterior_folder + 'posteriors.h5'):
posteriors = h5py.File(path + posterior_folder + "posteriors.h5", "r")
print('Previously saved posteriors have been loaded.')
else:
print('No posteriors have been found. Run posterior() to generate posteriors.')
exit()
return posteriors
def load_validation(path):
"""Loads different calibrations"""
validation_folder = 'validation/'
if os.path.isfile(path + validation_folder + 'validation.h5'):
validation = h5py.File(path + validation_folder + "validation.h5", "r")
print('Previously saved validation has been loaded.')
else:
print('No validation has been found. Run validate().')
exit()
return validation
def get_pred_metrics(y_test, y_pred, no_features):
"""Calculates performance metrics for point predictions."""
metrics = np.empty(no_features)
for feature in np.arange(no_features):
nmad = median_abs_deviation(y_pred[:, feature]-y_test[:, feature], scale=1/1.4826)
metrics[feature] = nmad
return metrics
def get_pdf_metrics(pits, no_samples, no_features, no_bins, coppits=None):
"""Calculates performance metrics for PDFs."""
pit_outliers = np.empty(no_features)
pit_kld = np.empty(no_features)
pit_kst = np.empty(no_features)
pit_cvm = np.empty(no_features)
for feature in np.arange(no_features):
pit_pdf, pit_bins = np.histogram(pits[:, feature], density=True, bins=no_bins)
uniform_pdf = np.full(no_bins, 1.0/no_bins)
pit_kld[feature] = entropy(pit_pdf, uniform_pdf)
pit_kst[feature] = kstest(pits[:, feature], 'uniform')[0]
pit_cvm[feature] = cramervonmises(pits[:, feature], 'uniform').statistic
no_outliers = np.count_nonzero(pits[:, feature] == 0) + np.count_nonzero(pits[:, feature] == 1)
pit_outliers[feature] = (no_outliers / no_samples) * 100
if coppits is not None:
coppit_pdf, coppit_bins = np.histogram(coppits, density=True, bins=no_bins)
uniform_pdf = np.full(no_bins, 1.0 / no_bins)
coppit_kld = entropy(coppit_pdf, uniform_pdf)
coppit_kst = kstest(coppits, 'uniform')[0]
coppit_cvm = cramervonmises(coppits, 'uniform').statistic
no_outliers = len(set(np.where((pits == 0) | (pits == 1))[0]))
coppit_outliers = (no_outliers/no_samples) * 100
return coppit_outliers, coppit_kld, coppit_kst, coppit_cvm
return pit_outliers, pit_kld, pit_kst, pit_cvm
def get_quantiles(posteriors, no_samples, no_features):
"""Calculate the 16th, 50th and 84th quantiles."""
quantiles = np.empty((no_features, no_samples, 3))
    for feature in np.arange(no_features):
import operator
import pytest
from ...primitives import Int, Str, Any
from ...containers import List
from ...geospatial import ImageCollection
from ...identifier import parameter
from .. import MaskedArray, Array, DType, Scalar
import numpy as np
from ...core import ProxyTypeError
arr_fixture = [[1, 2], [3, 4]]
mask_fixture = [[True, False], [False, True]]
ma = MaskedArray([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]], False)
def test_init():
ma = MaskedArray(arr_fixture, mask_fixture)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_bool_mask():
ma = MaskedArray(arr_fixture, False)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_fill_value():
fill_value = 5
ma = MaskedArray(arr_fixture, mask_fixture, fill_value=fill_value)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
fill_value = Array([5, 6])
ma = MaskedArray(arr_fixture, mask_fixture, fill_value=fill_value)
def test_from_numpy():
np_ma = np.ma.masked_array([1, 2, 3], [True, True, False])
ma = MaskedArray.from_numpy(np_ma)
assert isinstance(ma, MaskedArray)
assert isinstance(ma, MaskedArray)
assert ma.params == ()
def test_init_params():
x = parameter("x", Int)
y = parameter("y", Int)
ma = MaskedArray(data=x, mask=mask_fixture, fill_value=y)
assert isinstance(ma, MaskedArray)
assert ma.params == (x, y)
@pytest.mark.parametrize(
"val",
[
[1, 2, 3],
        np.array([1, 2, 3]),
from .parser import FileBuffer, NEWLINE, get_mask_from_intervals
from .chromosome_map import *
from dataclasses import dataclass
import numpy as np
class DelimitedBuffer(FileBuffer):
DELIMITER = ord("\t")
COMMENT = ord("#")
def __init__(self, data, new_lines):
super().__init__(data, new_lines)
self._delimiters = np.concatenate(([0],
np.flatnonzero(self._data == self.DELIMITER),
self._new_lines))
self._delimiters.sort(kind="mergesort")
@classmethod
def from_raw_buffer(cls, chunk):
new_lines = np.flatnonzero(chunk==NEWLINE)
return cls(chunk[:new_lines[-1]+1], new_lines)
def get_integers(self, cols):
cols = np.asanyarray(cols)
integer_starts = self._delimiters[:-1].reshape(-1, self._n_cols)[:, cols]+1
integer_ends = self._delimiters[1:].reshape(-1, self._n_cols)[:, cols]
integers = self._extract_integers(integer_starts.ravel(), integer_ends.ravel())
return integers.reshape(-1, cols.size)
def _extract_integers(self, integer_starts, integer_ends):
digit_chars = self._move_intervals_to_2d_array(integer_starts, integer_ends, DigitEncoding.MIN_CODE)
n_digits = digit_chars.shape[-1]
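        # most-significant digit first: weight each digit column by its power
        # of ten and sum them via the matrix product below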
powers = np.uint32(10)**np.arange(n_digits)[::-1]
return DigitEncoding.from_bytes(digit_chars) @ powers
def get_text(self, col):
self.validate_if_not()
# delimiters = self._delimiters.reshape(-1, self._n_cols)
starts = self._delimiters[:-1].reshape(-1, self._n_cols)[:, col]+1
ends = self._delimiters[1:].reshape(-1, self._n_cols)[:, col]
return self._move_intervals_to_2d_array(starts, ends)
def get_text_range(self, col, start=0, end=None):
self.validate_if_not()
# delimiters = self._delimiters.reshape(-1, self._n_cols)
starts = self._delimiters[:-1].reshape(-1, self._n_cols)[:, col]+1+start
if end is not None:
ends = starts+end
else:
ends = self._delimiters[1:].reshape(-1, self._n_cols)[:, col]
return self._move_intervals_to_2d_array(starts.ravel(), ends.ravel())
def _validate(self):
chunk = self._data
delimiters = self._delimiters[1:]
n_delimiters_per_line = next(i for i, d in enumerate(delimiters) if chunk[d] == NEWLINE) + 1
self._n_cols = n_delimiters_per_line
last_new_line = next(i for i, d in enumerate(delimiters[::-1]) if chunk[d] == NEWLINE)
delimiters = delimiters[:delimiters.size-last_new_line]
assert delimiters.size % n_delimiters_per_line == 0, "irregular number of delimiters per line"
delimiters = delimiters.reshape(-1, n_delimiters_per_line)
assert np.all(chunk[delimiters[:, -1]] == NEWLINE)
self._validated = True
class StrandEncoding:
MIN_CODE = ord("+")
@classmethod
def from_bytes(cls, bytes_array):
        return (bytes_array & np.uint8(2))
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 16:41:37 2021
@author: elisabetta
"""
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans, MeanShift
# extended names of the clustering methods
meth_names = ['1D thresholding', '1D Mean-Shift', '2D k-means', '2D adjusted k-means']
def _compute_cluster(psds, freqs, ch_names= None, alpha_range=None, theta_range=None, method=4):
"""
Creates a cluster databese and compute the cluster
Parameters:
psds: array, shape (N_sensors, N_freqs)
power spectral matrix
freqs: array, shape (N_freqs,)
frequencies at which the psds is computed
ch_names: None | list of strings (default None)
names of the channels (must be ordered as they are in psds)
theta_range: tuple | list | array | None (default None)
            theta range used to compute the theta coefficients (eg: (5,7), [5,7], np.array([5,7])).
If None it is set automatically
alpha_range: tuple | list | array | None (default None)
            alpha range used to compute the alpha coefficients (eg: (9,11), [9,11], np.array([9,11])).
If None it is set automatically
method: 1, 2, 3, 4 (default 4)
clustering method
Returns:
tfbox: dictionary
Dictionary containing:\n
* the method used to compute the cluster
* the cluster: pandas dataframe (rows: alpha coefficients,theta coefficients,
clustering labels; columns:channel names)
* the transition frequency (tf)
"""
# Normalize power spectrum
psds = psds/psds.sum(axis=1).reshape((psds.shape[0], 1))
# individual alpha peak
f_in_idx = np.where((freqs >= 7) & (freqs <= 13))[0]
ap = f_in_idx[0] + np.argmax(psds.mean(axis=0)[f_in_idx])
ap = freqs[ap]
# define indices of the psds within the alpha and theta ranges
if alpha_range is None:
f_alpha_idx = np.where((freqs >= ap-1) & (freqs <= ap+1))[0]
elif len(alpha_range) != 2:
raise ValueError("len(alpha_range) must be 2")
elif alpha_range[0] < freqs[0] or alpha_range[-1] > freqs[-1]:
raise ValueError("alpha_range must be inside the interval [freqs[0], freqs[-1]]")
elif alpha_range[0] > alpha_range[-1]:
raise ValueError("alpha_range[-1] must be greater than alpha_range[0]")
else:
f_alpha_idx = np.where((freqs >= alpha_range[0]) & (freqs <= alpha_range[1]))[0]
if theta_range is None:
if ap-1 > 7:
f_theta_idx = np.where((freqs >= 5) & (freqs <= 7))[0]
else:
f_theta_idx = np.where((freqs >= ap-3) & (freqs < ap-1))[0]
elif len(theta_range) != 2:
raise ValueError("len(theta_range) must be 2")
elif theta_range[0] < freqs[0] or theta_range[-1] > freqs[-1]:
raise ValueError("theta_range must be inside the interval [freqs[0], freqs[-1]]")
elif theta_range[0] > theta_range[-1]:
raise ValueError("theta_range[-1] must be greater than theta_range[0]")
else:
f_theta_idx = np.where((freqs >= theta_range[0]) & (freqs <= theta_range[1]))[0]
    if ch_names is None: ch_names = ['ch'+'0'*(len(str(psds.shape[0]))-len(str(ch_idx+1)))+str(ch_idx+1) for ch_idx in range(psds.shape[0])]
# compute alpha and theta coefficients
alpha_coef = psds[:, f_alpha_idx].mean(axis=1)
theta_coef = psds[:, f_theta_idx].mean(axis=1)
    # define the labels associated with the cluster
labels = np.ones(len(ch_names), dtype=int)*2
if method == 1:
n_ch = 4
ratio_coef = alpha_coef/theta_coef
theta_idx = np.where(ratio_coef <= np.sort(ratio_coef)[n_ch-1])[0]
alpha_idx = np.where(ratio_coef >= np.sort(ratio_coef)[-n_ch])[0]
labels[theta_idx] = 0
labels[alpha_idx] = 1
elif method == 2:
ratio_coef = alpha_coef/theta_coef
kmeans1d = MeanShift(bandwidth=None).fit(ratio_coef.reshape((-1, 1)))
lab_count = 2
for label in range(max(kmeans1d.labels_)+1):
if kmeans1d.labels_[np.argsort(ratio_coef)[0]] == label:
theta_idx = np.where(kmeans1d.labels_ == label)[0]
labels[theta_idx] = 0
elif kmeans1d.labels_[np.argsort(ratio_coef)[-1]] == label:
alpha_idx = np.where(kmeans1d.labels_ == label)[0]
labels[alpha_idx] = 1
else:
tmp_idx = np.where(kmeans1d.labels_ == label)[0]
labels[tmp_idx] = lab_count
lab_count += 1
elif method == 3:
coef2d = np.zeros((len(alpha_coef), 2))
coef2d[:, 0] = alpha_coef
coef2d[:, 1] = theta_coef
        # fit k-means with two clusters
kmeans2d = KMeans(n_clusters=2, random_state=0).fit(coef2d)
if kmeans2d.cluster_centers_[0, 0] > kmeans2d.cluster_centers_[1, 0]:
alpha_label = 0
theta_label = 1
else:
alpha_label = 1
theta_label = 0
alpha_idx = np.where(kmeans2d.predict(coef2d) == alpha_label)[0]
theta_idx = np.where(kmeans2d.predict(coef2d) == theta_label)[0]
labels[theta_idx] = 0
labels[alpha_idx] = 1
elif method == 4:
coef2d = np.zeros((len(alpha_coef), 2))
coef2d[:, 0] = alpha_coef
coef2d[:, 1] = theta_coef
        # fit k-means with two clusters
kmeans2d = KMeans(n_clusters=2, random_state=0).fit(coef2d)
if kmeans2d.cluster_centers_[0, 0] > kmeans2d.cluster_centers_[1, 0]:
alpha_center = kmeans2d.cluster_centers_[0, :]
theta_center = kmeans2d.cluster_centers_[1, :]
else:
alpha_center = kmeans2d.cluster_centers_[1, :]
theta_center = kmeans2d.cluster_centers_[0, :]
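        # separate channels using the two lines that pass through each k-means
        # centre and are perpendicular to the segment joining the two centres;
        # channels falling between the two lines keep the unassigned label 2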
coeff_ang = -1/((alpha_center[1]-theta_center[1])/(alpha_center[0]-theta_center[0]))
if coeff_ang > 0:
alpha_idx = [ii for ii in range(len(alpha_coef)) if theta_coef[ii] < coeff_ang*(alpha_coef[ii]-alpha_center[0]) + alpha_center[1]]
theta_idx = [ii for ii in range(len(alpha_coef)) if theta_coef[ii] > coeff_ang*(alpha_coef[ii]-theta_center[0])+theta_center[1]]
else:
alpha_idx = [ii for ii in range(len(alpha_coef)) if theta_coef[ii] > coeff_ang*(alpha_coef[ii]-alpha_center[0]) + alpha_center[1]]
theta_idx = [ii for ii in range(len(alpha_coef)) if theta_coef[ii] < coeff_ang*(alpha_coef[ii]-theta_center[0]) + theta_center[1]]
labels[theta_idx] = 0
labels[alpha_idx] = 1
else:
raise ValueError("Non valid method input. Supported values are 1, 2, 3, 4 ")
cluster = pd.DataFrame(index=['alpha_coef', 'theta_coef', 'labels'], columns=ch_names)
cluster.loc['alpha_coef'] = alpha_coef
cluster.loc['theta_coef'] = theta_coef
cluster.loc['labels'] = labels
tfbox = {'cluster': cluster, 'method': method, 'tf': None}
return tfbox
def compute_transfreq(psds, freqs, ch_names=None, theta_range=None, alpha_range=None, method=4, iterative=True):
"""
Automatically compute transition frequency
Parameters:
psds: array, shape (N_sensors, N_freqs)
power spectral matrix
freqs: array, shape (N_freqs,)
frequencies at which the psds is computed
ch_names: None | list of strings (default None)
name of the channels (must be ordered as they are in psds)
theta_range: tuple | list | array | None (default None)
            theta range used to compute the theta coefficients (eg: (5,7), [5,7], np.array([5,7])).
If None it is set automatically
alpha_range: tuple | list | array | None (default None)
            alpha range used to compute the alpha coefficients (eg: (9,11), [9,11], np.array([9,11])).
If None it is set automatically
method: 1, 2, 3, 4 (default 4)
clustering method
iterative: bool (default True)
Whether to use the iterative method (default) or not
Returns:
tfbox: dictionary
Dictionary containing:\n
* the method used to compute the cluster
* the cluster: pandas dataframe (rows: alpha coefficients,theta coefficients,
clustering labels; columns:channel names)
* the transition frequency (tf)
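    Example:
        A minimal usage sketch, assuming psds, freqs and ch_names come from a
        spectral estimate of the recording:
            tfbox = compute_transfreq(psds, freqs, ch_names=ch_names)
            tf = tfbox['tf']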
"""
    if ch_names is None: ch_names = ['ch'+'0'*(len(str(psds.shape[0]))-len(str(ch_idx+1)))+str(ch_idx+1) for ch_idx in range(psds.shape[0])]
if not isinstance(iterative, bool):
raise ValueError("iterative must be a boolean")
# Normalize power spectrum
psds = psds/psds.sum(axis=1).reshape((psds.shape[0], 1))
# individual alpha peak
f_in_idx = np.where((freqs >= 7) & (freqs <= 13))[0]
ap = f_in_idx[0] + np.argmax(psds.mean(axis=0)[f_in_idx])
ap = freqs[ap]
# initialize needed quantities
err = np.inf
toll = max(freqs[1]-freqs[0], 0.1)
n_iter = 0
max_iter = 20
tf_new = 0
# while loop for computation of tf
while abs(err) > toll and n_iter < max_iter:
tfbox = _compute_cluster(psds, freqs, ch_names,
alpha_range=alpha_range, theta_range=theta_range, method=method)
cluster = tfbox['cluster']
tf_old = tf_new
theta_coef = cluster.loc['theta_coef'].values
alpha_coef = cluster.loc['alpha_coef'].values
theta_idx = np.where(cluster.loc['labels'] == 0)[0]
alpha_idx = np.where(cluster.loc['labels'] == 1)[0]
theta_psds = (psds[theta_idx, :]*(theta_coef[theta_idx]/theta_coef[theta_idx].sum()).reshape(-1, 1)).sum(axis=0)
alpha_psds = (psds[alpha_idx, :]*(alpha_coef[alpha_idx]/alpha_coef[alpha_idx].sum()).reshape(-1, 1)).sum(axis=0)
tf_new = 5
f_in_idx = np.where((freqs >= 5) & (freqs <= ap-0.5))[0]
psds_diff = alpha_psds - theta_psds
for f in np.flip(f_in_idx)[:-1]:
if psds_diff[f]*psds_diff[f-1] < 0:
if (abs(psds_diff[f]) < abs(psds_diff[f-1])) & (freqs[f] >= 5):
tf_new = freqs[f]
else:
tf_new = freqs[f-1]
break
n_iter = n_iter + 1
if tf_new == 5 and n_iter == 20:
tf_new = tf_old
# compute the error (if iterative is False the error is set to zero to esc the loop)
if iterative is True:
err = tf_new - tf_old
elif iterative is False:
err = 0
tfbox['tf'] = tf_new
tfbox['cluster'] = cluster
if ap-1 > tf_new:
alpha_range = [ap-1, ap+1]
else:
alpha_range = [tf_new, ap+1]
theta_range = [max(tf_new-3, freqs[0]), tf_new-1]
return tfbox
def compute_transfreq_manual(psds, freqs, theta_chs, alpha_chs, ch_names=None,
theta_range=None, alpha_range=None, method='user_method'):
"""
Compute transition frequency given a customezed cluster
Parameters:
psds: array, shape (N_sensors, N_freqs)
power spectral matrix
freqs: array, shape (N_freqs,)
frequencies at which the psds is computed
theta_chs: tuple | list of integers or string
            indices or names of the theta channels in the cluster
        alpha_chs: tuple | list of integers or string
            indices or names of the alpha channels in the cluster
ch_names: None | list of strings (default None)
names of the channels (must be ordered as they are in psds)
theta_range: tuple | list | array | None (default None)
            theta range used to compute the theta coefficients (eg: (5,7), [5,7], np.array([5,7])).
If None it is set automatically
alpha_range: tuple | list | array | None (default None)
            alpha range used to compute the alpha coefficients (eg: (9,11), [9,11], np.array([9,11])).
If None it is set automatically
method: str (default 'user_method')
name the user wants to assign to the customized cluster
Returns:
tfbox: dictionary
Dictionary containing:\n
* the method used to compute the cluster
* the cluster: pandas dataframe (rows: alpha coefficients,theta coefficients,
clustering labels; columns:channel names)
* the transition frequency (tf)
"""
if len((set(theta_chs)).intersection(set(alpha_chs))) != 0:
raise ValueError("theta_chs and alpha_chs must not have common elements")
if (ch_names is None and type(theta_chs[0]) is not int):
raise ValueError("if ch_names is None theta_chs must be a tuple or a list of integers, corresponding to the theta channel indices")
if (ch_names is None and type(alpha_chs[0]) is not int):
raise ValueError("if ch_names is None alpha_chs must be a tuple or a list of integers, corresponding to the alpha channel indices")
if ch_names is None: ch_names = ['ch'+'0'*(len(str(psds.shape[0]))-len(str(ch_idx+1)))+str(ch_idx+1) for ch_idx in range(psds.shape[0])]
if type(theta_chs[0]) is int: theta_chs = [ch_names[ii-1] for ii in theta_chs]
if type(alpha_chs[0]) is int: alpha_chs = [ch_names[ii-1] for ii in alpha_chs]
theta_idx = [ch_names.index(ch) for ch in theta_chs]
alpha_idx = [ch_names.index(ch) for ch in alpha_chs]
# Normalize power spectrum
psds = psds/psds.sum(axis=1).reshape((psds.shape[0], 1))
# individual alpha peak
f_in_idx = np.where((freqs >= 7) & (freqs <= 13))[0]
ap = f_in_idx[0] + np.argmax(psds.mean(axis=0)[f_in_idx])
ap = freqs[ap]
# define indices of the psds within the alpha and theta ranges
if alpha_range is None:
f_alpha_idx = np.where((freqs >= ap-1) & (freqs <= ap+1))[0]
elif len(alpha_range) != 2:
raise ValueError("len(alpha_range) must be 2")
elif alpha_range[0] < freqs[0] or alpha_range[-1] > freqs[-1]:
raise ValueError("alpha_range must be inside the interval [freqs[0], freqs[-1]]")
elif alpha_range[0] > alpha_range[-1]:
raise ValueError("alpha_range[-1] must be greater than alpha_range[0]")
else:
f_alpha_idx = np.where((freqs >= alpha_range[0]) & (freqs <= alpha_range[1]))[0]
if theta_range is None:
if ap-1 > 7:
f_theta_idx = np.where((freqs >= 5) & (freqs <= 7))[0]
else:
f_theta_idx = np.where((freqs >= ap-3) & (freqs < ap-1))[0]
elif len(theta_range) != 2:
raise ValueError("len(theta_range) must be 2")
elif theta_range[0] < freqs[0] or theta_range[-1] > freqs[-1]:
raise ValueError("theta_range must be inside the interval [freqs[0], freqs[-1]]")
elif theta_range[0] > theta_range[-1]:
raise ValueError("theta_range[-1] must be greater than theta_range[0]")
else:
f_theta_idx = np.where((freqs >= theta_range[0]) & (freqs <= theta_range[1]))[0]
alpha_coef = psds[:, f_alpha_idx].mean(axis=1)
theta_coef = psds[:, f_theta_idx].mean(axis=1)
labels = np.ones(len(ch_names), dtype=int)*2
labels[theta_idx] = 0
labels[alpha_idx] = 1
cluster = pd.DataFrame(index=['alpha_coef', 'theta_coef', 'labels'], columns=ch_names)
cluster.loc['alpha_coef'] = alpha_coef
cluster.loc['theta_coef'] = theta_coef
cluster.loc['labels'] = labels
theta_psds = (psds[theta_idx, :]*(theta_coef[theta_idx]/theta_coef[theta_idx].sum()).reshape(-1, 1)).sum(axis=0)
alpha_psds = (psds[alpha_idx, :]*(alpha_coef[alpha_idx]/alpha_coef[alpha_idx].sum()).reshape(-1, 1)).sum(axis=0)
tf_new = 5
f_in_idx = np.where((freqs >= 5) & (freqs <= ap-0.5))[0]
psds_diff = alpha_psds - theta_psds
    for f in np.flip(f_in_idx)[:-1]:
        if psds_diff[f]*psds_diff[f-1] < 0:
            if (abs(psds_diff[f]) < abs(psds_diff[f-1])) & (freqs[f] >= 5):
                tf_new = freqs[f]
            else:
                tf_new = freqs[f-1]
            break
    tfbox = {'cluster': cluster, 'method': method, 'tf': tf_new}
    return tfbox
'''
Regrid the Corbelli+2017 cloud catalogue asgn file
to the 14B-088
'''
import numpy as np
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
import os
from os.path import join as osjoin
from corner import hist2d, corner
import emcee
import matplotlib.pyplot as plt
import seaborn as sb
from paths import (fourteenB_HI_data_wGBT_path,
data_path, allfigs_path,
iram_co21_14B088_data_path)
from plotting_styles import (default_figure,
twocolumn_figure,
twocolumn_twopanel_figure,
onecolumn_figure)
from galaxy_params import gal_feath as gal
fig_path = allfigs_path("co_vs_hi")
if not os.path.exists(fig_path):
os.mkdir(fig_path)
# Load GMC catalogue from Corbelli+17
gmc_tab = Table.read(osjoin(data_path,
'Corbelli_17_catalogues',
'J_A+A_601_A146_table5.dat.fits'))
# Load in the Gaussian HI and CO fit table.
tab = Table.read(fourteenB_HI_data_wGBT_path("tables/hi_co_gaussfit_column_densities_perpix.fits"))
# Don't consider the "bad fits" that are probably due to multiple components
good_pts = np.logical_and(~tab['multicomp_flag_HI'],
~tab['multicomp_flag_CO'])
good_pts = np.logical_and(good_pts,
tab["sigma_HI"] > 3800)
# Minimum CO line width of one channel.
good_pts = np.logical_and(good_pts,
tab["sigma_CO"] >= 2600)
# Assign the cloud type based on the closest GMC location in the catalogue
cloud_posns = SkyCoord(gmc_tab['RAdeg'], gmc_tab['DEdeg'])
pix_posns = SkyCoord(tab['RA'], tab['Dec'])
# Just do this for every pixel, even if it's not in the good mask
cloud_types = []
for i in range(len(tab)):
cloud_idx = pix_posns[i].separation(cloud_posns).argmin()
cloud_types.append(gmc_tab['Type'][cloud_idx])
cloud_types = Column(cloud_types)
tab.add_column(cloud_types, name='cloud_type')
def bayes_linear(x, y, x_err, y_err, nWalkers=10, nBurn=100, nSample=1000,
nThin=5, conf_interval=[15.9, 84.1], fix_intercept=False):
'''
    Fit a line to the data with an MCMC sampler; the intercept is fixed to 0
    only when ``fix_intercept=True``.
'''
mean_scatter = np.mean(np.sqrt(x_err**2 + y_err**2))
std_scatter = np.std(np.sqrt(x_err**2 + y_err**2))
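    # the fit is parametrized by the line angle theta (slope = tan(theta)) and
    # an intrinsic scatter `var`; in _logprob, Delta is the squared residual
    # perpendicular to the line and Sigma is the measurement variance
    # projected onto that same direction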
if fix_intercept:
def _logprob(p, x, y, x_err, y_err):
theta, var = p[0], p[1]
if np.abs(theta - np.pi / 4) > np.pi / 4:
return -np.inf
if var < 0:
return -np.inf
Delta = (np.cos(theta) * y - np.sin(theta) * x)**2
Sigma = np.sin(theta)**2 * x_err**2 + np.cos(theta)**2 * y_err**2
lp = -0.5 * np.nansum(Delta / (Sigma + var)) - \
0.5 * np.nansum(np.log(Sigma + var))
return lp
ndim = 2
p0 = np.zeros((nWalkers, ndim))
p0[:, 0] = np.tan(np.nanmean(y / x)) + np.random.randn(nWalkers) * 0.1
p0[:, 1] = np.random.normal(mean_scatter, 0.1 * std_scatter,
size=nWalkers)
else:
def _logprob(p, x, y, x_err, y_err):
theta, bcos, var = p[0], p[1], p[2]
if np.abs(theta - np.pi / 4) > np.pi / 4:
return -np.inf
if var < 0:
return -np.inf
Delta = (np.cos(theta) * y - np.sin(theta) * x - bcos)**2
Sigma = (np.sin(theta))**2 * x_err**2 + \
(np.cos(theta))**2 * y_err**2
            lp = -0.5 * np.nansum(Delta / (Sigma + var)) - \
                0.5 * np.nansum(np.log(Sigma + var))
            return lp
from typing import Dict, List, Tuple
import torch
import numpy as np
import argparse
from torch import nn
import yaml
import pandas as pd
from sklearn.metrics import roc_auc_score
from adversarial.adversarial import AdversarialNetwork, Classifier, Discriminator
from adversarial.dataset import (
AdversarialDataset,
get_transforms
)
from adversarial.config import Config
from adversarial.utils import (
fix_all_seeds,
freeze_unfreeze,
get_ground_truth_vector
)
from torch.utils.data import DataLoader
def train_step(
model : nn.Module,
train_loader : DataLoader,
config : Config,
class_criterion : object,
disc_criterion : object,
extractor_criterion : object,
optimizer : torch.optim.Optimizer
) -> Tuple[float, float, float, float]:
model.train()
class_loss_accum, disc_loss_accum, extr_loss_accum = 0., 0., 0.
y_train = []
preds = []
for images, domains, labels in train_loader:
images = images.to(config.DEVICE)
domains = domains.to(config.DEVICE)
labels = labels.to(config.DEVICE)
# Set the gradients to zero before backprop step
optimizer.zero_grad()
# # # # # # # # # # # # # #
# Step 1: Classification #
# # # # # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, True)
freeze_unfreeze(model.discriminator, True)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_class = model(images)
y_preds_class = y_preds_class.to(config.DEVICE)
class_loss = class_criterion(y_preds_class.squeeze(), labels)
class_loss_accum += class_loss.item()
# Backward step
class_loss.backward()
optimizer.step()
optimizer.zero_grad()
y_train.append(labels.detach().cpu().numpy())
preds.append(y_preds_class.softmax(1).detach().cpu().numpy())
# # # # # # # # # # # # #
# Step 2: Discriminator #
# # # # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, False)
freeze_unfreeze(model.discriminator, True)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_disc = model.forward_disc(images)
y_preds_disc = y_preds_disc.to(config.DEVICE)
disc_loss = disc_criterion(y_preds_disc.squeeze(), domains)
disc_loss_accum += disc_loss.item()
# Backward step
disc_loss.backward()
optimizer.step()
optimizer.zero_grad()
# # # # # # # # # # #
# Step 3: Extractor #
# # # # # # # # # # #
freeze_unfreeze(model.feature_extractor, True)
freeze_unfreeze(model.discriminator, False)
freeze_unfreeze(model.classifier, True)
# Get predictions and calculate the loss
y_preds_extr = model.forward_disc(images)
y_preds_extr = y_preds_extr.to(config.DEVICE)
gt_vector = get_ground_truth_vector(labels, config.N_DOMAINS, config.N_CLASSES)
gt_vector = gt_vector.to(config.DEVICE)
extr_loss = extractor_criterion(y_preds_extr.squeeze(), gt_vector)
extr_loss_accum += extr_loss.item()
# Backward step
extr_loss.backward()
optimizer.step()
optimizer.zero_grad()
    y_train = np.concatenate(y_train)
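    # A plausible epilogue (an assumption, not from the original): average the
    # accumulated losses over the batches and score the epoch with ROC AUC,
    # which would match the declared Tuple[float, float, float, float] return:
    #     preds = np.concatenate(preds)
    #     auc = roc_auc_score(y_train, preds[:, 1])
    #     n = len(train_loader)
    #     return (class_loss_accum / n, disc_loss_accum / n,
    #             extr_loss_accum / n, auc)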
# function for plotting and saving figures
from __future__ import division, print_function
import numpy as np
import os
# babusca import via context
from context import smatrix
from context import g2 as g2calc
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator
from matplotlib.ticker import LinearLocator
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LogNorm
import matplotlib.ticker as ticker
from itertools import cycle
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.unicode'] = True
# mpl.rcParams['font.family'] = 'Helvetica'
def g1_coherent(ses, chli, chlo, d1s, directory, offset, ylim=None):
"""
Calculate, plot and save g2 calculation
"""
# defautls
if ylim is None:
ylim = ([1e-6, 1e6])
# init
g1s = np.zeros((len(ses), len(d1s)))
print('calc: init')
for i, se in enumerate(ses):
_, S1 = smatrix.one_particle(se, chli, chlo, d1s + offset)
g1s[i, :] = np.abs(S1) ** 2
print('calc: done')
print('layout: init')
f, (ax1) = plt.subplots(1, figsize=(6, 3))
# AX1
# limits
ax1.set_xlim([np.min(d1s), np.max(d1s)])
ax1.set_ylim(ylim)
# horizontal unit line
ax1.semilogy(d1s, np.ones(d1s.size), linewidth=1, color='#BBBBBB')
# res
for i, se in enumerate(ses):
ax1.semilogy(d1s, g1s[i, :], linewidth=1.2, label=se.label)
ax1.legend(loc="center left", fancybox=False, fontsize=15, edgecolor=None, frameon=False, bbox_to_anchor=(1, 0.5))
ax1.set_xlim([np.min(d1s), np.max(d1s)])
ax1.set_ylim(ylim)
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
# locators
ax1.xaxis.set_major_locator(MaxNLocator(nbins=6, prune=None))
# labels
ax1.set_ylabel(r'$g^{{(1)}}_{{{ins},{outs}}}$'.format(
ins=chli,
outs=chlo))
ax1.set_xlabel(r'$\delta^{(1)}$ (units of $\Gamma$)')
# update font size
update_font(ax1, 16)
# ax2
# plt.tight_layout()
print('layout: done')
print('save: init')
plt.savefig(directory + 'g1_{ins}{outs}_coherent_tau0.pdf'.format(ins=chli, outs=chlo),
bbox_inches='tight')
plt.close(f)
print('save: done')
def g2_coherent(ses, chlsi, chlso, d1s, d2s, directory, offset, ylims=None, yticks=2, yticks2=None):
"""
Calculate, plot and save g2 calculation
"""
# defautls
if ylims is None:
ylim1 = ([1e-6, 1e6])
ylim2 = ([1e-6, 1e6])
else:
ylim1, ylim2 = ylims
print(ylim2)
# init
g1s = np.zeros((len(ses), len(d1s)))
g2s = np.zeros((len(ses), len(d2s)))
print('calc: init')
for i, se in enumerate(ses):
_, S1 = smatrix.one_particle(se, chlsi[0], chlso[0], d1s + offset)
g1s[i, :] = np.abs(S1) ** 2
g2s[i, :] = g2calc.coherent_state_tau0(se, chlsi, chlso, d2s + 2 * offset)['g2']
print('calc: done')
print('layout: init')
f, (ax1, ax2) = plt.subplots(2, figsize=(6, 5))
# AX1
# limits
ax1.set_xlim([np.min(2 * d1s), np.max(2 * d1s)])
ax1.set_ylim(ylim1)
# horizontal unit line
ax1.semilogy(2 * d1s, np.ones(d1s.size), linewidth=1, color='#BBBBBB')
# res
ax1.semilogy(2 * d1s, g1s[0, :], linewidth=1.2, color='#444444')
ax1.set_xlim([np.min(2 * d1s), np.max(2 * d1s)])
ax1.set_ylim(ylim1)
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
# locators
ax1.xaxis.set_major_locator(MaxNLocator(nbins=6, prune=None))
# labels
ax1.set_ylabel(r'$g^{{(1)}}_{{{ins[0]},{outs[0]}}}$ (units of $f$)'.format(
ins=[c + 1 for c in chlsi],
outs=[c + 1 for c in chlso]))
ax1.set_xlabel(r'$2 \delta$ (units of $\Gamma$)')
# update font size
update_font(ax1, 16)
# ax2
# limits
ax2.set_ylim(ylim2)
ax2.set_xlim([np.min(d2s), np.max(d2s)])
# horizontal unit line
ax2.semilogy(d2s, np.ones(d2s.size), linewidth=1, color='#BBBBBB')
# linestyle
# linestyles = ['-', (1, (9, 1.2)), (1, (9, .7, 1, .7)), (1, (9, 3))]
linestyles = ['-', (1, (9, 1.5)), (1, (9, 1.5, 2, 1.5)), (1, (3, 1.5))]
ls = cycle(linestyles)
colors = plt.cm.inferno(np.linspace(.2, .8, len(ses)))
markers = ['', 'o', '^', 's', 'o']
markers = [''] * 8
for i, se in enumerate(ses):
# find label
lbl = None if not hasattr(se, 'label') else se.label
# ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=1.5, ls='-', color='#222222')
# ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=1.2, ls='-', color='#FFFFFF')
# ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=0.4, color=colors[i])
ax2.semilogy(d2s, np.abs(g2s[i, :]), label=lbl, linewidth=1.6, ls=next(ls), color=colors[i])
# ax2.semilogy(d2s[::128], np.abs(g2s[i, ::128]), linewidth=0, color=colors[i], marker=markers[i])
# ax2.semilogy(d2s, np.abs(g2s[i, :]), label=lbl, linewidth=1.3, ls=next(ls), color=colors[i])
# locators
# ax2.yaxis.set_major_locator(LogLocator(numticks=yticks))
if yticks2 is None:
yticks2 = [ylim2[0], 1, ylim2[1]]
ax2.set_yticks(yticks2)
ax2.xaxis.set_major_locator(MaxNLocator(nbins=6, prune=None))
# legend
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.85, box.height])
lgd = None
if hasattr(ses[0], 'label'):
lgd = ax2.legend(loc="center left", fancybox=False, fontsize=15, edgecolor=None, title=r"$U/\Gamma$", frameon=False, bbox_to_anchor=(1, 0.5))
# lgd = ax2.legend(loc="upper left", fancybox=False, fontsize=16, edgecolor='#222222', title=r"$U/\Gamma$")
plt.setp(lgd.get_title(), fontsize=15)
lgd.get_frame().set_alpha(0.0)
# labels
ax2.set_ylabel(r'$g^{{(2)}}_{{{ins[0]}{ins[1]},{outs[0]}{outs[1]}}}$'.format(
ins=[c + 1 for c in chlsi],
outs=[c + 1 for c in chlso]))
    ax2.set_xlabel(r'$2 \delta$ (units of $\Gamma$)')
# font size
update_font(ax2, 16)
plt.tight_layout()
print('layout: done')
print('save: init')
plt.savefig(directory + 'g2_{ins[0]}{ins[1]}{outs[0]}{outs[1]}_coherent_tau0.pdf'.format(ins=chlsi, outs=chlso),
bbox_inches='tight')
plt.close(f)
print('save: done')
def phi2_coherent(ses, chlsi, chlso, d1s, d2s, directory, offset, ylims=None, yticks=2):
    # defaults
if ylims is None:
ylim1 = ([1e-6, 1e6])
ylim2 = ([1e-6, 1e6])
else:
ylim1, ylim2 = ylims
# init
g1s = np.zeros((len(ses), len(d1s)))
g2s = np.zeros((len(ses), len(d2s)))
print('calc: init')
for i, se in enumerate(ses):
_, S1 = smatrix.one_particle(se, chlsi[0], chlso[0], d1s + offset)
g1s[i, :] = np.abs(S1) ** 2
g2s[i, :] = g2calc.coherent_state_tau0(se, chlsi, chlso, d2s + 2 * offset)['phi2']
print('calc: done')
print('layout: init')
f, (ax1, ax2) = plt.subplots(2, figsize=(6, 5))
# AX1
# limits
ax1.set_xlim([np.min(d1s), np.max(d1s)])
ax1.set_ylim(ylim1)
# horizontal unit line
ax1.semilogy(2 * d1s, np.ones(d1s.size), linewidth=1, color='#BBBBBB')
# res
ax1.semilogy(2 * d1s, g1s[0, :], linewidth=1.2, color='#444444')
ax1.set_xlim([np.min(d1s), np.max(d1s)])
ax1.set_ylim(ylim1)
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
# locators
ax1.xaxis.set_major_locator(MaxNLocator(nbins=6, prune=None))
# labels
ax1.set_ylabel(r'$g^{{(1)}}_{{{ins[0]},{outs[0]}}}$'.format(
ins=[c + 1 for c in chlsi],
outs=[c + 1 for c in chlso]))
ax1.set_xlabel(r'$2 \delta$ (units of $\Gamma$)')
# update font size
update_font(ax1, 16)
# ax2
# limits
ax2.set_ylim(ylim2)
ax2.set_xlim([np.min(d2s), np.max(d2s)])
# horizontal unit line
ax2.semilogy(d2s, np.ones(d2s.size), linewidth=1, color='#BBBBBB')
# linestyle
linestyles = ['-', (1, (9, 1.2)), (1, (9, .7, 1, .7)), (1, (9, 3))]
ls = cycle(linestyles)
colors = plt.cm.inferno(np.linspace(.2, .8, len(ses)))
for i, se in enumerate(ses):
# find label
lbl = None if not hasattr(se, 'label') else se.label
# ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=1.5, ls='-', color='#222222')
# ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=1.2, ls='-', color='#FFFFFF')
ax2.semilogy(d2s, np.abs(g2s[i, :]), linewidth=0.4, color=colors[i])
ax2.semilogy(d2s, np.abs(g2s[i, :]), label=lbl, linewidth=1.2, ls=next(ls), color=colors[i])
# locators
# ax2.yaxis.set_major_locator(LogLocator(numticks=yticks))
ax2.set_yticks([ylim2[0], 1, ylim2[1]])
ax2.xaxis.set_major_locator(MaxNLocator(nbins=6, prune=None))
# legend
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.85, box.height])
lgd = None
if hasattr(ses[0], 'label'):
lgd = ax2.legend(loc="center left", fancybox=False, fontsize=15, edgecolor=None, title=r"$U/\Gamma$", frameon=False, bbox_to_anchor=(1, 0.5))
# lgd = ax2.legend(loc="upper left", fancybox=False, fontsize=16, edgecolor='#222222', title=r"$U/\Gamma$")
plt.setp(lgd.get_title(), fontsize=15)
lgd.get_frame().set_alpha(0.0)
# labels
ax2.set_ylabel(r'$\phi^{{(2)}}_{{{ins[0]}{ins[1]},{outs[0]}{outs[1]}}}$'.format(
ins=[c + 1 for c in chlsi],
outs=[c + 1 for c in chlso]))
    ax2.set_xlabel(r'$2 \delta$ (units of $\Gamma$)')
# font size
update_font(ax2, 16)
plt.tight_layout()
print('layout: done')
print('save: init')
plt.savefig(directory + 'phi2_{ins[0]}{ins[1]}{outs[0]}{outs[1]}_coherent_tau0.pdf'.format(ins=chlsi, outs=chlso),
bbox_inches='tight')
plt.close(f)
print('save: done')
def g2_coherent_tau(se, chlsi, chlso, Es, taus, directory="", logscale=False, ticks=None, title=None, verbose=False):
"""
Plots intensity-intensity correlation g2 for weakly coherent state
as a function of times taus and two-photon energies.
Parameters
----------
se : scattering.Setup object
The current scattering setup
chlsi : tuple
The two channels of incoming photons.
chlso : tuple
The two channels of outgoing photons.
Es : array-like
List of two-photon energies
taus : array-like
List of times/position differences
directory : string
Directory for saving the view.
"""
g2s = np.zeros((len(Es), len(taus)))
for i, E in enumerate(Es):
g2s[i, :] = g2calc.coherent_state(se, chlsi, chlso, E, taus, verbose=verbose)['g2']
# g2
fig = plt.figure(figsize=(7.75, 4))
extra = ''
if title is None:
title = ''
if ticks is None:
ticks = [0.6, 0.8, 1.0, 1.2, 1.4]
if len(Es) > 15 and len(taus) > 15:
# create a nice colored 2d view
if logscale is False:
plt.pcolormesh(Es, taus, g2s.T, cmap='RdBu_r', rasterized=True,
vmin=ticks[0],
vmax=ticks[-1])
cb = plt.colorbar(ticks=ticks)
ticksom = [r'${}$'.format(i) for i in ticks]
cb.ax.set_yticklabels(ticksom)
# im = plt.imshow(Z, interpolation='bilinear', origin='lower',
# cmap=cm.gray, extent=(-3, 3, -2, 2))
levels = ticks
CS = plt.contour(g2s.T, levels,
linestyles=':',
colors='k',
origin='lower',
linewidths=.5,
extent=(Es[0], Es[-1], taus[0], taus[-1])
)
else:
ax = plt.pcolormesh(Es, taus, g2s.T, cmap='RdBu_r', rasterized=True,
norm=LogNorm(vmin=ticks[0], vmax=ticks[-1]))
cb = plt.colorbar(ticks=ticks)
levels = ticks
CS = plt.contour(g2s.T, levels,
linestyles=':',
colors='k',
origin='lower',
linewidths=.5,
extent=(Es[0], Es[-1], taus[0], taus[-1])
)
for t in cb.ax.get_yticklabels():
t.set_fontsize(18)
locx = ticker.MultipleLocator(base=(Es[-1] - Es[0]) / 5) # this locator puts ticks at regular intervals
locy = ticker.MultipleLocator(base=taus[-1] / 4) # this locator puts ticks at regular intervals
ax = plt.gca()
ax.xaxis.set_major_locator(locx)
ax.yaxis.set_major_locator(locy)
plt.xlabel(r'$2 \delta$ (units of $\Gamma$)')
plt.xlim([np.min(Es), np.max(Es)])
plt.ylabel(r'$\tau$')
plt.ylim([ | np.min(taus) | numpy.min |
import numpy as np
from unittest import expectedFailure
from unittest import TestCase
from zlib import crc32
from pycqed.measurement.randomized_benchmarking.clifford_group import(
clifford_lookuptable, clifford_group_single_qubit,
X,Y,Z, H, S, S2, CZ)
import pycqed.measurement.randomized_benchmarking.randomized_benchmarking \
as rb
from pycqed.measurement.randomized_benchmarking.clifford_decompositions \
import(gate_decomposition, epstein_fixed_length_decomposition)
from pycqed.measurement.randomized_benchmarking import \
two_qubit_clifford_group as tqc
from pycqed.measurement.randomized_benchmarking.generate_clifford_hash_tables import construct_clifford_lookuptable
np.random.seed(0)
test_indices_2Q = np.random.randint(0, high=11520, size=50)
# To test all elements of the 2 qubit clifford group use:
# test_indices_2Q = np.arange(11520)
class TestLookuptable(TestCase):
def test_unique_mapping(self):
for row in clifford_lookuptable:
self.assertFalse(len(row) > len(set(row)))
def test_sum_of_rows(self):
expected_sum = np.sum(range(len(clifford_group_single_qubit)))
for row in clifford_lookuptable:
self.assertEqual(np.sum(row), expected_sum)
def test_element_index_in_group(self):
for row in clifford_lookuptable:
for el in row:
self.assertTrue(el < len(clifford_group_single_qubit))
class TestCalculateNetClifford(TestCase):
def test_identity_does_nothing(self):
id_seq = | np.zeros(5) | numpy.zeros |
"""
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
<NAME>*, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import cv2, os, importlib, math
import os.path as osp
import numpy as np
import scipy.io as scio
import tensorflow as tf
def create_mtcnn_pb(sess):
pnet_fun = lambda img: sess.run(
("pnet/conv4-2/BiasAdd:0", "pnet/prob1:0"), feed_dict={"pnet/input:0": img}
)
rnet_fun = lambda img: sess.run(
("rnet/conv5-2/conv5-2:0", "rnet/prob1:0"), feed_dict={"rnet/input:0": img}
)
onet_fun = lambda img: sess.run(
("onet/conv6-2/conv6-2:0", "onet/conv6-3/conv6-3:0", "onet/prob1:0"),
feed_dict={"onet/input:0": img},
)
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
"""Detects faces in an image, and returns bounding boxes and points for them.
img: input image
minsize: minimum faces' size
pnet, rnet, onet: caffemodel
threshold: threshold=[th1, th2, th3], th1-3 are the thresholds for the three detection stages
factor: the factor used to create a scaling pyramid of face sizes to detect in the image.
"""
factor_count = 0
total_boxes = np.empty((0, 9))
points = np.empty(0)
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
# create scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(
out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0]
)
# inter-scale nms
pick = nms(boxes.copy(), 0.5, "Union")
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, "Union")
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (
tmp.shape[0] > 0
and tmp.shape[1] > 0
or tmp.shape[0] == 0
and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty((0, 9)), np.empty(0)  # no valid boxes survived this stage
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, "Union")
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1 : edy[k], dx[k] - 1 : edx[k], :] = img[
y[k] - 1 : ey[k], x[k] - 1 : ex[k], :
]
if (
tmp.shape[0] > 0
and tmp.shape[1] > 0
or tmp.shape[0] == 0
and tmp.shape[1] == 0
):
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty((0, 9)), np.empty(0)  # no valid boxes survived this stage
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack(
[total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]
)
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = (
np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
)
points[5:10, :] = (
np.tile(h, (5, 1)) * points[5:10, :]
+ np.tile(total_boxes[:, 1], (5, 1))
- 1
)
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, "Min")
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points
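# Illustrative usage sketch added for clarity; it is not part of the original
# detection code. It assumes `sess` is a tf.Session into which the frozen MTCNN
# graph has already been imported (so the pnet/rnet/onet tensor names resolve)
# and `img` is an RGB image as a numpy array (H, W, 3). The minsize, threshold
# and factor values below are commonly used defaults and are only assumptions.
def _example_detect_face_usage(sess, img):
    pnet, rnet, onet = create_mtcnn_pb(sess)
    minsize = 20                    # assumed minimum face size in pixels
    threshold = [0.6, 0.7, 0.7]     # assumed per-stage score thresholds
    factor = 0.709                  # assumed image-pyramid scale factor
    boxes, points = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    # boxes: (N, 5) rows of [x1, y1, x2, y2, score]; points: (10, N) landmarks
    return boxes, points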
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = | np.flipud(dx2) | numpy.flipud |
import numpy as np
def swap_cols(A, col1, col2):
if (col1 == col2):
return A
tmp = A[:, col1].copy()
A[:, col1] = A[:, col2]
A[:, col2] = tmp
return A
def swap_rows(A, row1, row2):
if (row1 == row2):
return A
tmp = A[row1, :].copy()
A[row1, :] = A[row2, :]
A[row2, :] = tmp
return A
def swap_entries(vec, ii, jj):
if ii == jj:
return vec
tmp = vec[ii]
vec[ii] = vec[jj]
vec[jj] = tmp
return vec
def get_block_diagonal_matrix_num_rows(matrix_blocks):
num_rows = 0
for ii in range(len(matrix_blocks)):
num_rows += matrix_blocks[ii].shape[0]
return num_rows
def get_block_diagonal_matrix_num_cols(matrix_blocks):
num_cols = 0
for ii in range(len(matrix_blocks)):
num_cols += matrix_blocks[ii].shape[1]
return num_cols
def pre_multiply_block_diagonal_matrix(matrix, matrix_blocks, block_trans):
num_blocks = len(matrix_blocks)
if (block_trans == True):
block_num_cols = get_block_diagonal_matrix_num_rows(matrix_blocks)
result_num_rows = get_block_diagonal_matrix_num_cols(matrix_blocks)
else:
block_num_cols = get_block_diagonal_matrix_num_cols(matrix_blocks)
result_num_rows = get_block_diagonal_matrix_num_rows(matrix_blocks)
if (block_num_cols != matrix.shape[0]):
msg = "pre_multiply_block_diagonal_matrix() Matrices sizes are "
msg += "inconsistent"
raise Exception(msg)
result = np.empty((result_num_rows, matrix.shape[1]), dtype=float)
sub_matrix_start_row = 0
sub_result_start_row = 0
for ii in range(num_blocks):
if (block_trans == True):
matrix_block_view = matrix_blocks[ii].T
else:
matrix_block_view = matrix_blocks[ii]
num_block_rows = matrix_block_view.shape[0]
num_block_cols = matrix_block_view.shape[1]
num_submatrix_rows = num_block_cols
sub_matrix = matrix[
sub_matrix_start_row:sub_matrix_start_row+num_submatrix_rows, :]
result[sub_result_start_row:sub_result_start_row+num_block_rows, :] =\
np.dot(matrix_block_view, sub_matrix)
sub_matrix_start_row += num_submatrix_rows
sub_result_start_row += num_block_rows
return result
def get_dense_block_diagonal_matrix(matrix_blocks):
num_rows = get_block_diagonal_matrix_num_rows(matrix_blocks)
num_cols = get_block_diagonal_matrix_num_cols(matrix_blocks)
result = np.zeros((num_rows, num_cols), dtype=float)
row_cnt = 0
col_cnt = 0
num_blocks = len(matrix_blocks)
for ii in range(num_blocks):
num_block_rows, num_block_cols = matrix_blocks[ii].shape
result[row_cnt:row_cnt+num_block_rows, col_cnt:col_cnt+num_block_cols] =\
matrix_blocks[ii]
row_cnt += num_block_rows
col_cnt += num_block_cols
return result
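# Added consistency sketch, illustrative only. Assuming the block-wise product
# is meant to agree with explicitly forming the dense block-diagonal matrix,
# the helper below compares the two code paths on small random blocks; the
# helper name and the block sizes are ours, not the module's.
def _check_block_diagonal_product(seed=0):
    rng = np.random.RandomState(seed)
    blocks = [rng.rand(2, 3), rng.rand(4, 2)]      # 6 total rows, 5 total cols
    matrix = rng.rand(5, 6)                        # rows must match block cols
    fast = pre_multiply_block_diagonal_matrix(matrix, blocks, False)
    dense = np.dot(get_dense_block_diagonal_matrix(blocks), matrix)
    assert np.allclose(fast, dense)
    return fast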
def row_reduce_degree_vandermonde_matrix(degree_vandermonde, L_factor, lu_row,
update_degree_specific_data_flag):
"""
If we are working with a new degree we must orthogonalise against
all points (min_q=0). Otherwise we only have to update
the orthogonalisation for the new row (min_q=lu_row-1).
"""
num_rows, num_cols = degree_vandermonde.shape
min_q = 0
if (not update_degree_specific_data_flag):
min_q = lu_row-1
for qq in range(min_q, lu_row):
if ((qq < lu_row-1) or (update_degree_specific_data_flag)):
degree_vandermonde[qq, :] /= L_factor[qq, qq]
degree_vandermonde[qq+1:, :] -= np.dot(
L_factor[qq+1:, qq:qq+1], degree_vandermonde[qq:qq+1, :])
return degree_vandermonde
def get_degree_basis_indices(num_vars, current_degree, degree_block_num,
update_degree_list, basis_degrees,
generate_degree_basis_indices, verbosity):
prev_degree = current_degree
current_degree, new_indices = generate_degree_basis_indices(
num_vars, degree_block_num)
if (update_degree_list):
basis_degrees.append(prev_degree)
else:
new_indices = np.empty((num_vars, 0), dtype=int)
return current_degree, new_indices, basis_degrees
def update_degree_specific_data(pts, permutations, selected_basis_indices,
new_indices, basis_cardinality, pce, lu_row,
H_factor_blocks, current_index_counter,
precond_weights):
# Make sure new indices have correct array_indices.
# This is necessary because I do not assume the basis indices
# are ordered in any way. I must ensure this is only done once
# per degree
selected_basis_indices = np.hstack((selected_basis_indices, new_indices))
basis_cardinality.append(selected_basis_indices.shape[1])
current_index_counter = 0
# Build new degree vandermonde
# The vandermonde only needs to be built once per degree.
# It needs to be permuted every time, but this can be done at
# the end of each iteration.
pce.set_indices(new_indices)
degree_vandermonde = pce.basis_matrix(pts)
if (precond_weights is not None):
degree_vandermonde = precondition_matrix(
pts, precond_weights, degree_vandermonde)
# TODO: Eventually make the initial row size smaller, then increment
# memory when needed.
current_block_num_initial_rows = min(
degree_vandermonde.shape[1], pts.shape[1]-lu_row)
current_H_factor_block = np.empty(
(current_block_num_initial_rows, degree_vandermonde.shape[1]),
dtype=float)
H_factor_blocks.append(current_H_factor_block)
current_block_num_rows = 0
return degree_vandermonde, H_factor_blocks, current_H_factor_block, \
current_block_num_rows, current_index_counter, selected_basis_indices
def compute_pivot_norms(degree_vandermonde, lu_row):
# We only want the submatrix that contains the rows (points)
# that have not already been chosen.
sub_vand_trans = degree_vandermonde[lu_row:, :].T
# Find the norms of each column
norms = np.linalg.norm(sub_vand_trans, axis=0)
assert norms.shape[0] == sub_vand_trans.shape[1]
return norms, sub_vand_trans
def find_next_best_index(norms, num_initial_pts_selected, num_initial_pts,
enforce_ordering_of_initial_points):
# Find the column with the largest norm. Note evec is
# defined 0,...,numTotalPts_-lu_row-1 for points
# lu_row+1,...,numTotalPts_
if (num_initial_pts_selected < num_initial_pts):
if (enforce_ordering_of_initial_points):
# Enforce the ordering of the initial points
next_index = 0
else:
# Choose the column with the largest norm that corresponds to
# a point in the initial point set.
next_index = np.argmax(
norms[:num_initial_pts-num_initial_pts_selected])
num_initial_pts_selected += 1
else:
next_index = np.argmax(norms)
return next_index, num_initial_pts_selected
def compute_inner_products(sub_vand_trans, norms, next_index):
# Compute inner products of each column with the chosen column
magic_row = sub_vand_trans[:, next_index] / norms[next_index]
inner_products = np.dot(magic_row, sub_vand_trans)
return inner_products, magic_row
def determine_if_low_rank(norms, next_index, degree_max_norm,
current_index_counter):
if (current_index_counter == 0):
degree_max_norm = norms[next_index]
low_rank = False
if ((current_index_counter != 0) and
(norms[next_index] < 0.001*degree_max_norm)):
low_rank = True
return low_rank, degree_max_norm
def update_factorization(next_index, inner_products, norms, magic_row,
num_current_indices, permutations, lu_row,
permuted_pts,
L_factor, U_factor, degree_vandermonde,
H_factor_blocks, current_block_num_rows,
current_index_counter, points_to_degree_map,
basis_degrees, current_H_factor_block, limited_memory,
precond_weights, verbosity):
# Update the LU permutations based on the chosen pivot
permutations = swap_entries(permutations, lu_row, lu_row+next_index)
# Update the permuted pts
permuted_pts = swap_cols(permuted_pts, lu_row, lu_row+next_index)
if (precond_weights is not None):
# Update the precondition weights
# TODO: if I make preconditioning degree dependent then I do not need
# to swap precondWeights as they are only applied once at the beginning
# of each degree
precond_weights = swap_entries(
precond_weights, lu_row, lu_row+next_index)
# Update the L factor of the LU factorization to be consistent
# with the new permutations
l_sub = L_factor[lu_row:, :lu_row]
if ((l_sub.shape[0] > 0) and (l_sub.shape[1] > 0)):
L_factor[lu_row:, :lu_row] = swap_rows(l_sub, 0, next_index)
# Update L_factor with inner products
inner_products = swap_entries(inner_products, 0, next_index)
inner_products[0] = norms[next_index]
# the following line accounts for 50% of runtime for large
# number of candidate samples
L_factor[lu_row:, lu_row] = inner_products
# Update U. That is, enforce orthogonality to all
# rows with indices < lu_row
# To do this we must find the inner products of all the other
# rows above the current row in degreeVandermonde_
if (lu_row > 0):
U_factor[:lu_row, lu_row] = np.dot(
degree_vandermonde[:lu_row, :], magic_row.T)
# Update the non-zero entries of the H matrix. Essentially these
# entries are the directions needed to orthogonalise the entries
# (basis blocks) in the LU factorization
current_H_factor_block[current_block_num_rows, :] = magic_row
H_factor_blocks[-1] = \
current_H_factor_block[:current_block_num_rows+1, :].copy()
current_block_num_rows += 1
current_index_counter += 1
if (current_index_counter >= num_current_indices):
update_degree_specific_data_flag = True
else:
sub_vand = degree_vandermonde[lu_row:lu_row +
inner_products.shape[0]+1, :]
degree_vandermonde[lu_row:lu_row+inner_products.shape[0]+1, :] = \
swap_rows(sub_vand, 0, next_index)
update_degree_specific_data_flag = False
if (verbosity > 2):
print(("Iteration: ", lu_row+1))
print("\t Adding point:")
print((permuted_pts[:, lu_row]))
points_to_degree_map.append(basis_degrees[-1])
lu_row += 1
return num_current_indices, permutations, lu_row, permuted_pts,\
L_factor, U_factor, degree_vandermonde,\
H_factor_blocks, current_block_num_rows, current_index_counter,\
points_to_degree_map, basis_degrees, current_H_factor_block,\
update_degree_specific_data_flag, precond_weights
def least_factorization_sequential_update(
permuted_pts, lu_row, current_degree, current_degree_basis_indices,
H_factor_blocks, update_degree_specific_data_flag, basis_degrees,
generate_degree_basis_indices, selected_basis_indices, permutations,
basis_cardinality, pce, L_factor, U_factor, num_initial_pts_selected,
num_initial_pts, current_index_counter,
points_to_degree_map, limited_memory, row_reduced_vandermonde_blocks,
assume_non_degeneracy, precond_weights, degree_vandermonde,
degree_max_norm, current_block_num_rows,
current_H_factor_block, enforce_all_initial_points_used,
enforce_ordering_of_initial_points, initial_pts_degenerate,
points_to_num_indices_map, verbosity):
num_vars = permuted_pts.shape[0]
if (lu_row >= permuted_pts.shape[1]):
msg = "least_factorization_sequential_update() "
msg += "Cannot proceed: all points have been added to the interpolant"
raise Exception(msg)
# Get the number of basis terms with degree equal to current_degree
if update_degree_specific_data_flag:
current_degree, current_degree_basis_indices, basis_degrees = \
get_degree_basis_indices(
num_vars, current_degree, len(
H_factor_blocks), True, basis_degrees,
generate_degree_basis_indices, verbosity)
if (update_degree_specific_data_flag and verbosity > 1):
print(("Incrementing degree to ", current_degree))
print(("\tCurrent number of points ", lu_row+1))
print(("\tCurrent number of terms ", selected_basis_indices.shape[1]))
print(("\tNew number of terms ", selected_basis_indices.shape[1] +
current_degree_basis_indices.shape[1]))
# Determine the number of indices of degree current_degree
num_current_indices = current_degree_basis_indices.shape[1]
# If there exist any indices in the pce basis with degree equal to the
# degree counter, then attempt to use these indices to interpolate some
# of the data
if (num_current_indices > 0):
# Update all the objects and other data structures that must
# be changes wwhen the degree of the interpolant is increased.
if (update_degree_specific_data_flag):
degree_vandermonde, H_factor_blocks, current_H_factor_block, \
current_block_num_rows, current_index_counter, \
selected_basis_indices = \
update_degree_specific_data(
permuted_pts, permutations, selected_basis_indices,
current_degree_basis_indices, basis_cardinality, pce,
lu_row,
H_factor_blocks, current_index_counter, precond_weights)
# Row-reduce degreeVandermonde_ according to previous
# elimination steps
degree_vandermonde = row_reduce_degree_vandermonde_matrix(
degree_vandermonde, L_factor, lu_row,
update_degree_specific_data_flag)
# Compute the pivots needed to update the LU factorization
norms, sub_vand_trans = compute_pivot_norms(degree_vandermonde, lu_row)
# Find the column of the degree_vandermonde with the largest norm.
next_index, num_initial_pts_selected = \
find_next_best_index(
norms, num_initial_pts_selected, num_initial_pts,
enforce_ordering_of_initial_points)
low_rank, degree_max_norm = determine_if_low_rank(
norms, next_index, degree_max_norm, current_index_counter)
if ((low_rank) and ((num_initial_pts_selected < num_initial_pts))):
initial_pts_degenerate = True
if enforce_ordering_of_initial_points:
msg = 'enforce_ordering_of_initial_points was set to True, '
msg += 'initial points are degenerate'
raise Exception(msg)
if (not low_rank):
# Compute the inner products necessary to update the LU
# factorization
inner_products, magic_row = compute_inner_products(
sub_vand_trans, norms, next_index)
# normalize pivot row in degreeVandermonde. The new row
# has already been computed and stored in magic_row
degree_vandermonde[lu_row+next_index, :] = magic_row
num_current_indices, permutations, lu_row, permuted_pts,\
L_factor, U_factor, degree_vandermonde,\
H_factor_blocks, current_block_num_rows, \
current_index_counter,\
points_to_degree_map, basis_degrees, current_H_factor_block,\
update_degree_specific_data_flag, precond_weights = \
update_factorization(
next_index, inner_products, norms, magic_row,
num_current_indices, permutations, lu_row, permuted_pts,
L_factor, U_factor, degree_vandermonde,
H_factor_blocks, current_block_num_rows,
current_index_counter,
points_to_degree_map, basis_degrees,
current_H_factor_block,
limited_memory, precond_weights, verbosity)
points_to_num_indices_map.append(selected_basis_indices.shape[1])
else:
update_degree_specific_data_flag = True
# num_initial_pts_selected was incremented in find_next_best_index
# but no point was actually added because the point was low rank,
# so decrement the counter here
if ((num_initial_pts_selected <= num_initial_pts) and
(num_initial_pts_selected > 0)):
num_initial_pts_selected -= 1
if (assume_non_degeneracy):
msg = "least_factorization_sequential_update() Factorization "
msg += "of new points was requested but new points were "
msg += "degenerate"
raise Exception(msg)
if (verbosity > 1):
print(("Low rank at lu_row ", lu_row,))
print(" incrementing degree counter")
if ((low_rank) or (current_index_counter >= num_current_indices)):
# If the next step will be on a higher degree (because low rank or
# the degree block has been filled) then deep copy
# current_H_factor_block to H_factor_blocks.
# current_H_factor_block is overwritten when the degree
# is increased. Copy (deep) the current degree block to H_factor
H_factor_blocks[-1] = \
current_H_factor_block[:current_block_num_rows, :].copy()
else:
update_degree_specific_data_flag = False
if ((update_degree_specific_data_flag) and (not limited_memory)):
# Store previous row_reduced vandermonde matrix
row_reduced_vandermonde_blocks.append(degree_vandermonde[:lu_row, :])
return permutations, lu_row, permuted_pts, L_factor, U_factor, \
H_factor_blocks,\
current_index_counter, points_to_degree_map, basis_degrees,\
update_degree_specific_data_flag, selected_basis_indices, \
current_degree_basis_indices, degree_vandermonde, \
current_block_num_rows,\
current_H_factor_block, degree_max_norm, num_initial_pts_selected,\
initial_pts_degenerate, current_degree, points_to_num_indices_map
def least_factorization_sequential(
pce, candidate_pts, generate_degree_basis_indices,
initial_pts=None, num_pts=None, verbosity=3,
preconditioning_function=False, assume_non_degeneracy=False,
enforce_all_initial_points_used=False,
enforce_ordering_of_initial_points=False):
if num_pts is None:
num_selected_pts = candidate_pts.shape[1]
else:
num_selected_pts = num_pts
# --------------------------------------------------------------------- #
# Initialization #
# --------------------------------------------------------------------- #
# Extract the basis indices of the pce. If non-zero these will be used
# to interpolate the data
# set_pce( pce );
# must clear selected basis indices in case it was set previously
selected_basis_indices = | np.empty((candidate_pts.shape[0], 0), dtype=int) | numpy.empty |
"""Calibration experiments."""
import argparse
import os
from os.path import exists, dirname
import logging
import warnings
import sys
from matplotlib import pyplot as plt
import numpy as np
import pickle as pkl
import tqdm
import lime.lime_tabular as baseline_lime_tabular
import shap
# Make sure we can get bayes explanations
parent_dir = dirname(os.path.abspath(os.getcwd()))
sys.path.append(parent_dir)
from bayes.explanations import BayesLocalExplanations, explain_many
from bayes.data_routines import get_dataset_by_name
from bayes.models import *
parser = argparse.ArgumentParser()
parser.add_argument("--kernel", required=True, help="The kernel, i.e., lime or shap.")
parser.add_argument("--dataset", required=True, help="The dataset to run on.")
parser.add_argument("--n_initial", default=100, type=int, help="The intial points to compute the calibration.")
parser.add_argument("--n_true", default=10_000, type=int, help="The amount of perturbations to compute the converged explanation.")
parser.add_argument("--n_threads", default=1, type=int, help="The number of threads to launch during the experiment.")
parser.add_argument("--num", type=int, default=None, help="The number of instances to run on. Leave set to None to run on all test instances.")
parser.add_argument("--verbose", action="store_true", help="Verbose output.")
parser.add_argument("--balance_background_dataset", action="store_true", help="Whether to balance the background sampling. This helps with tabular calibration.")
parser.add_argument("--seed", default=0, type=int)
def get_creds(initial, final, total_init=0.0, inside_init=0.0):
"""Computes the calibration from the initial and psuedo ground truth feature importances."""
total, inside = total_init, inside_init
for q, item in tqdm.tqdm(enumerate(initial)):
creds = item.creds
init_coef = item.coef_
for i, c in enumerate(item.coef_):
total += 1.0
if final[q][i] <= (init_coef[i] + creds[i]) and final[q][i] >= (init_coef[i] - creds[i]):
inside += 1
return inside / total, total, inside
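# Added toy illustration of get_creds; it is not used by the experiment. In the
# real script `initial` holds explanation objects from BayesLocalExplanations;
# here we only assume each item exposes `coef_` (point estimates) and `creds`
# (credible-interval half-widths), which is all get_creds touches. The numbers
# below are made up for illustration.
def _example_get_creds():
    from collections import namedtuple
    Expl = namedtuple("Expl", ["coef_", "creds"])
    initial = [Expl(coef_=np.array([0.5, -0.2]), creds=np.array([0.1, 0.1]))]
    final = [np.array([0.55, -0.35])]   # second coefficient falls outside its interval
    frac, total, inside = get_creds(initial, final)
    return frac                         # 0.5 for this toy case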
def run_calibration(args):
"""Runs the calibration experiment."""
# Get data and model
data = get_dataset_by_name(args.dataset)
if args.dataset in ["compas", "german"]:
image_dataset = False
model_and_data = process_tabular_data_get_model(data)
elif args.dataset[:5] in ["mnist"]:
image_dataset = True
model_and_data = process_mnist_get_model(data)
elif args.dataset[:8] == "imagenet":
image_dataset = True
model_and_data = process_imagenet_get_model(data)
else:
raise NotImplementedError
if image_dataset:
xtest = model_and_data["xtest"]
ytest = model_and_data["ytest"]
segs = model_and_data["xtest_segs"]
get_model = model_and_data["model"]
label = model_and_data["label"]
if args.num is None:
args.num = xtest.shape[0]
total, inside = 0.0, 0.0
for i in tqdm.tqdm(range(args.num)):
instance = xtest[i]
segments = segs[i]
cur_model = get_model(instance, segments)
xtrain = get_xtrain(segments)
# Get initial
exp_init = BayesLocalExplanations(training_data=xtrain,
data="image",
kernel=args.kernel,
categorical_features= | np.arange(xtrain.shape[1]) | numpy.arange |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for nussl/utils.py
"""
import unittest
import nussl
import numpy as np
from scipy import signal
class TestUtils(unittest.TestCase):
"""
"""
def test_find_peak_indices(self):
array = np.arange(0, 100)
peak = nussl.find_peak_indices(array, 1)[0]
assert peak == 99
array = np.arange(0, 100).reshape(10, 10)
peak = nussl.find_peak_indices(array, 3, min_dist=0)
assert peak == [[9, 9], [9, 8], [9, 7]]
def test_find_peak_values(self):
array = np.arange(0, 100)
peak = nussl.find_peak_values(array, 1)[0]
assert peak == 99
array = np.arange(0, 100).reshape(10, 10)
peak = nussl.find_peak_values(array, 3, min_dist=0)
assert peak == [99, 98, 97]
def test_add_mismatched_arrays(self):
long_array = np.ones((20,))
short_array = np.arange(10)
expected_result = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=float)
# Test basic cases
result = nussl.add_mismatched_arrays(long_array, short_array)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, long_array)
assert all(np.equal(result, expected_result))
expected_result = expected_result[:len(short_array)]
result = nussl.add_mismatched_arrays(long_array, short_array, truncate=True)
assert all( | np.equal(result, expected_result) | numpy.equal |
import pytest
import numpy as np
from cmstk.structure.atom import Atom, AtomCollection
def test_atom_collection():
"""Tests initialization of an AtomCollection object."""
assert AtomCollection().n_atoms == 0
atom0 = Atom(position=np.array([0, 0, 0]))
collection = AtomCollection(atoms=[atom0])
assert collection.n_atoms == 1
lst = list(collection)
assert len(lst) == 1
tup = tuple(collection)
assert len(tup) == 1
def test_atom_collection_setters():
"""Tests ability to set AtomCollection attributes."""
collection = AtomCollection()
atom0 = Atom(position=np.array([0, 0, 0]))
atom1 = Atom(position=np.array([1, 0, 0]))
collection.atoms = [atom0, atom1]
assert collection.n_atoms == 2
with pytest.raises(ValueError):
collection.charges = [0]
collection.charges = [1, 2]
assert collection.atoms[0].charge == 1
with pytest.raises(ValueError):
collection.magnetic_moments = [0]
collection.magnetic_moments = [1, 2]
assert collection.atoms[0].magnetic_moment == 1
with pytest.raises(ValueError):
collection.positions = [np.array([0, 0, 0])]
collection.positions = [np.array([1, 1, 1]), np.array([2, 2, 2])]
assert np.array_equal(collection.atoms[0].position, np.array([1, 1, 1]))
with pytest.raises(ValueError):
collection.symbols = ["Fe"]
collection.symbols = ["Fe", "Cr"]
assert collection.atoms[0].symbol == "Fe"
with pytest.raises(ValueError):
collection.velocities = [np.array([0, 0, 0])]
collection.velocities = [np.array([1, 1, 1]), np.array([2, 2, 2])]
assert np.array_equal(collection.atoms[0].velocity, np.array([1, 1, 1]))
def test_atom_collection_add_atom():
"""Tests behavior of the AtomCollection.add_atom() method."""
collection = AtomCollection(tolerance=0.01)
atom0 = Atom(position=np.array([0, 0, 0]))
collection.add_atom(atom0)
assert collection.n_atoms == 1
atom1 = Atom(position=np.array([0.001, 0.001, 0.001]))
with pytest.raises(ValueError):
collection.add_atom(atom1)
assert collection.n_atoms == 1
def test_atom_collection_remove_atom():
"""Tests behavior of the AtomCollection.remove_atom() method."""
collection = AtomCollection(tolerance=0.01)
removal_position = np.array([0, 0, 0])
with pytest.raises(ValueError):
collection.remove_atom(removal_position)
atom0 = Atom(position=np.array([0, 0, 0]))
collection.add_atom(atom0)
assert collection.n_atoms == 1
removal_position = np.array([1, 1, 1])
with pytest.raises(ValueError):
collection.remove_atom(removal_position)
removal_position = np.array([0, 0, 0])
collection.remove_atom(removal_position)
assert collection.n_atoms == 0
def test_atom_collection_concatenate():
"""Tests behavior of the AtomCollection.concatenate() method."""
atoms0 = [
Atom(position=np.array([0.0, 0.0, 0.0])),
Atom(position=np.array([1.0, 1.0, 1.0]))
]
atoms1 = [
Atom(position=np.array([0.0, 0.0, 0.0])),
Atom(position=np.array([2.0, 2.0, 2.0]))
]
collection0 = AtomCollection(atoms0)
collection1 = AtomCollection(atoms1)
with pytest.raises(ValueError):
collection0.concatenate(collection1)
offset = np.array([2.0, 2.0, 2.0])
collection0.concatenate(collection1, offset)
assert collection0.n_atoms == 4
assert np.array_equal(collection1.atoms[0].position, np.array([0, 0, 0]))
def test_atom_collection_sort_by_charge():
"""Tests behavior of the AtomCollection.sort_by_charge() method."""
atom0 = Atom(charge=0, position=np.array([0, 0, 0]))
atom1 = Atom(charge=1, position=np.array([1, 1, 1]))
atom2 = Atom(charge=2, position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom2, atom0])
collection.sort_by_charge(hl=False)
charges = collection.charges
assert charges[0] == 0
assert charges[2] == 2
collection.sort_by_charge(hl=True)
charges = collection.charges
assert charges[0] == 2
assert charges[2] == 0
def test_atom_collection_sort_by_magnetic_moment():
"""Tests behavior of the AtomCollection.sort_by_magnetic_moment() method."""
atom0 = Atom(magnetic_moment=0, position=np.array([0, 0, 0]))
atom1 = Atom(magnetic_moment=1, position=np.array([1, 1, 1]))
atom2 = Atom(magnetic_moment=2, position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom2, atom0])
collection.sort_by_magnetic_moment(hl=False)
moments = collection.magnetic_moments
assert moments[0] == 0
assert moments[2] == 2
collection.sort_by_magnetic_moment(hl=True)
moments = collection.magnetic_moments
assert moments[0] == 2
assert moments[2] == 0
def test_atom_collection_sort_by_mass():
"""Tests behavior of the AtomCollection.sort_by_mass() method."""
atom0 = Atom(mass=0, position=np.array([0, 0, 0]))
atom1 = Atom(mass=1, position=np.array([1, 1, 1]))
atom2 = Atom(mass=2, position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom0, atom2])
collection.sort_by_mass(hl=False)
masses = collection.masses
assert masses[0] == 0
assert masses[2] == 2
collection.sort_by_mass(hl=True)
masses = collection.masses
assert masses[0] == 2
assert masses[2] == 0
def test_atom_collection_sort_by_position():
"""Tests behavior of the AtomCollection.sort_by_position() method."""
atom0 = Atom(position=np.array([0, 0, 0]))
atom1 = Atom(position=np.array([1, 1, 1]))
atom2 = Atom(position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom2, atom0])
collection.sort_by_position(hl=False)
magnitudes = [np.linalg.norm(p) for p in collection.positions]
assert magnitudes[0] == 0
assert magnitudes[2] == 3.4641016151377544
collection.sort_by_position(hl=True)
magnitudes = [np.linalg.norm(p) for p in collection.positions]
assert magnitudes[0] == 3.4641016151377544
assert magnitudes[2] == 0
def test_atom_collection_sort_by_symbol():
"""Tests behavior of the AtomCollection.sort_by_symbol() method."""
atom0 = Atom(symbol="Fe", position=np.array([0, 0, 0]))
atom1 = Atom(symbol="Cr", position=np.array([1, 1, 1]))
atom2 = Atom(symbol="Ni", position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom2, atom0])
order = ["Fe", "Ni", "Cr"]
collection.sort_by_symbol(order)
symbols = collection.symbols
assert symbols[0] == "Fe"
assert symbols[2] == "Cr"
order = ["Fe", "Ni", "Mg"]
with pytest.raises(ValueError):
collection.sort_by_symbol(order)
order = ["Fe", "Ni"]
with pytest.raises(ValueError):
collection.sort_by_symbol(order)
order = ["Fe", "Fe", "Cr"]
with pytest.raises(ValueError):
collection.sort_by_symbol(order)
def test_atom_collection_sort_by_velocity():
"""Tests behavior of the AtomCollection.sort_by_velocity() method."""
atom0 = Atom(velocity=np.array([0, 0, 0]), position=np.array([0, 0, 0]))
atom1 = Atom(velocity=np.array([1, 1, 1]), position=np.array([1, 1, 1]))
atom2 = Atom(velocity=np.array([2, 2, 2]), position=np.array([2, 2, 2]))
collection = AtomCollection([atom1, atom2, atom0])
collection.sort_by_velocity(hl=False)
magnitudes = [np.linalg.norm(v) for v in collection.velocities]
assert magnitudes[0] == 0
assert magnitudes[2] == 3.4641016151377544
collection.sort_by_velocity(hl=True)
magnitudes = [np.linalg.norm(v) for v in collection.velocities]
assert magnitudes[0] == 3.4641016151377544
assert magnitudes[2] == 0
def test_atom_collection_translate():
"""Tests behavior of the AtomCollection.translate() method."""
atom0 = Atom(position=np.array([0.0, 0.0, 0.0]))
collection = AtomCollection(atoms=[atom0])
translation = np.array([0.5, 0.5, 0.5])
collection.translate(translation)
assert | np.array_equal(collection.atoms[0].position, translation) | numpy.array_equal |
from mahotas.edge import sobel
import pytest
import mahotas as mh
import numpy as np
def test_sobel_shape():
A = np.arange(100*100)
A = (A % 15)
A = A.reshape((100,100))
assert sobel(A).shape == A.shape
assert sobel(A, just_filter=True).shape == A.shape
def test_sobel_zeros():
A = np.zeros((15,100))
assert sobel(A).shape == A.shape
assert sobel(A).sum() == 0
def test_sobel():
I = np.array([
[0,0,0,0,0,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,0,1,0,0],
[0,0,0,0,0,0]])
E = sobel(I)
r,c = I.shape
for y,x in zip(*np.where(E)):
N = [I[y,x]]
if y > 0: N.append(I[y-1,x])
if x > 0: N.append(I[y,x-1])
if y < (r-1): N.append(I[y+1,x])
if x < (c-1): N.append(I[y,x+1])
assert len(set(N)) > 1
def test_zero_images():
assert np.isnan(sobel(np.zeros((16,16)))).sum() == 0
assert sobel(np.zeros((16,16)), just_filter=True).sum() == 0
def test_sobel_pure():
f = np.random.random((64, 128))
f2 = f.copy()
_ = mh.sobel(f)
assert np.all(f == f2)
def test_3d_error():
f = np.zeros((32,16,3))
with pytest.raises(ValueError):
sobel(f)
def test_dog():
im = mh.demos.load('lena')
im = im.mean(2)
edges = mh.dog(im)
assert edges.shape == im.shape
assert edges.any()
edges1 = mh.dog(im, sigma1=1.)
assert | np.any(edges != edges1) | numpy.any |
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from gym import spaces
from cyberbattle._env import cyberbattle_env
from cyberbattle._env.cyberbattle_env import EnvironmentBounds
def owned_nodes(obs: cyberbattle_env.Observation):
return np.nonzero(obs['nodes_privilegelevel'])[0]
def discovered_nodes_notowned(obs: cyberbattle_env.Observation):
return np.nonzero(obs['nodes_privilegelevel'] == 0)[0]
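# Added toy example for the two helpers above, illustrative only. A real
# cyberbattle observation has many more fields; the helpers only read
# 'nodes_privilegelevel', where 0 means discovered-but-not-owned and any
# positive level means owned.
def _example_node_helpers():
    obs = {'nodes_privilegelevel': np.array([0, 2, 0, 1])}
    owned = owned_nodes(obs)                    # -> array([1, 3])
    not_owned = discovered_nodes_notowned(obs)  # -> array([0, 2])
    return owned, not_owned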
class Feature(spaces.MultiDiscrete, ABC):
def __init__(self, ep: EnvironmentBounds, nvec):
self.ep = ep
super().__init__(nvec)
@property
def flat_size(self):
return np.prod(self.nvec)
@abstractmethod
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
raise NotImplementedError
class ConcatFeatures(Feature):
def __init__(self, ep: EnvironmentBounds, features: List[Feature]):
self.features = features
self.dim_sizes = np.concatenate([f.nvec for f in self.features])
super().__init__(ep, self.dim_sizes)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
feature_vector = [f.get(obs) for f in self.features]
feature_vector = np.concatenate(feature_vector)
# feature_vector = np.expand_dims(feature_vector, 0)
return feature_vector
class FeatureGlobalNodesProperties(Feature):
def __init__(self, ep: EnvironmentBounds):
super(FeatureGlobalNodesProperties, self).__init__(ep, [4] * ep.property_count * ep.maximum_node_count)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = []
for i in range(self.ep.maximum_node_count):
if i < len(obs['discovered_nodes_properties']):
features.append(np.copy(obs['discovered_nodes_properties'][i]) + 1)
else:
features.append(np.ones(self.ep.property_count))
return np.concatenate(features)
class FeatureGlobalCredentialCacheLength(Feature):
def __init__(self, ep: EnvironmentBounds):
super().__init__(ep, [ep.maximum_total_credentials])
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
return np.array([obs['credential_cache_length']])
class FeatureGlobalCredentialCache(Feature):
def __init__(self, ep: EnvironmentBounds):
super(FeatureGlobalCredentialCache, self).__init__(ep, [ep.maximum_node_count,
ep.port_count] * ep.maximum_total_credentials)
def get(self, obs: cyberbattle_env.Observation) -> np.ndarray:
features = [
obs['credential_cache_matrix'][i] if i < len(obs['credential_cache_matrix']) else np.zeros(2)
for i in range(self.ep.maximum_total_credentials)
]
return | np.concatenate(features) | numpy.concatenate |
import numpy as onp
import jax
import jax.numpy as np
import random
import os
import scipy.interpolate
import astropy.io.fits as pyfits
from redrock.templates import Template
from redrock.archetypes import Archetype
from chex import assert_shape
key = jax.random.PRNGKey(42)
mask_std_val = 1e2
def create_mask(x, x_var, indices, val=0.0):
mask = onp.ones(x.shape)
# mask[x == 0] = val
ind = np.where(np.logical_or(x_var <= 0, x_var == mask_std_val ** 2.0))
mask[ind] = val
fullindices = onp.asarray(indices)[:, None] + onp.arange(x.shape[1])[None, :]
mask[fullindices < 0] = val
# offs = - np.maximum(indices, np.zeros_like(indices))
# for io, off in enumerate(offs):
# mask[io, 0:off] = 0
return np.asarray(mask)
def interp(x_new, x, y):
return scipy.interpolate.interp1d(
x, y, fill_value="extrapolate", kind="linear", bounds_error=False
)(x_new)
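# Added minimal illustration of interp; it is not called by the pipeline.
# With kind="linear" and fill_value="extrapolate", points inside the grid are
# linearly interpolated and points outside it are linearly extrapolated.
def _example_interp():
    x = onp.array([0.0, 1.0, 2.0])
    y = onp.array([0.0, 2.0, 4.0])
    return interp(onp.array([0.5, 3.0]), x, y)  # -> array([1., 6.])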
def draw_uniform(samples, bins, desired_size):
"""
Draw uniform set of samples
"""
hist, bin_edges = np.histogram(samples, bins=bins)
avg_nb = int(desired_size / float(bins))
numbers = np.repeat(avg_nb, bins)
for j in range(4):
numbers[hist <= numbers] = hist[hist <= numbers]
nb_rest = desired_size - np.sum(numbers[hist <= numbers]) # * bins
avg_nb = round(nb_rest / np.sum(hist > numbers))
numbers[hist > numbers] = avg_nb
result = []
count = 0
for i in range(bin_edges.size - 1):
ind = samples >= bin_edges[i]
ind &= samples <= bin_edges[i + 1]
if ind.sum() > 0:
positions = np.where(ind)[0]
nb = min([numbers[i], ind.sum()])
result.append(jax.random.choice(positions, nb, replace=False))
return np.concatenate(result)
class DataPipeline:
"""
Pipeline for loading data
"""
def load_spectrophotometry(
self,
input_dir="./",
write_subset=False,
use_subset=False,
subsampling=1,
spec=True,
phot=True,
):
if use_subset:
suffix = "2.npy"
else:
suffix = ".npy"
self.input_dir = input_dir
self.lamgrid = onp.load(self.input_dir + "lamgrid.npy")
self.lam_phot_eff = onp.load(self.input_dir + "lam_phot_eff.npy")
self.lam_phot_size_eff = | onp.load(self.input_dir + "lam_phot_size_eff.npy") | numpy.load |
from CoaxialDrone import CoaxialCopter
from PIDcontroller import PIDController_with_ff
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
class DronewithPID(CoaxialCopter,PIDController_with_ff):
def __init__(self,
z_path,
z_dot_path,
z_dot_dot_path,
t,
dt,
Sensor
):
self.t = t
self.dt = dt
self.z_path = z_path
self.z_dot_path = z_dot_path
self.z_dot_dot_path = z_dot_dot_path
self.Sensor = Sensor
def PID_controller_with_measured_values(self,k_p,k_d,k_i,mass_err,sigma,use_measured_height=False):
# creating the co-axial drone object
Controlled_Drone=CoaxialCopter()
# array for recording the state history
drone_state_history = Controlled_Drone.X
# introducing a small error between the actual mass and the mass for which the path has been calculated
actual_mass = Controlled_Drone.m * mass_err
# creating the control system object
control_system = PIDController_with_ff(k_p,k_d,k_i)
# declaring the initial state of the drone with zero height and zero velocity
Controlled_Drone.X = np.array([0.0,0.0,0.0,0.0])
Drone_Sensor = self.Sensor(Controlled_Drone.X, 0.95)
observation_history = Controlled_Drone.X[0]
# executing the flight
for i in range(1,self.z_path.shape[0]-1):
# condition to use height observation to control the drone or
# use the magically given true state
if use_measured_height:
z_observation = Drone_Sensor.measure(Controlled_Drone.X[0],sigma)
u_bar = control_system.control(self.z_path[i],
z_observation,
self.z_dot_path[i],
Controlled_Drone.X[2],
self.z_dot_dot_path[i],
self.dt)
observation_history = np.vstack((observation_history,z_observation))
else:
u_bar = control_system.control(self.z_path[i],
Controlled_Drone.X[0],
self.z_dot_path[i],
Controlled_Drone.X[2],
self.z_dot_dot_path[i],
self.dt)
observation_history = np.vstack((observation_history,self.z_path[i]))
Controlled_Drone.set_rotors_angular_velocities(u_bar,0.0)
# calculating the new state vector
drone_state = Controlled_Drone.advance_state(self.dt, actual_mass)
# generating a history of vertical positions for the drone
drone_state_history = np.vstack((drone_state_history, drone_state))
plt.subplot(211)
plt.plot(self.t,self.z_path,linestyle='-',marker='.',color='red')
plt.plot(self.t[1:],drone_state_history[:,0],linestyle='-',color='blue',linewidth=3)
if use_measured_height:
plt.scatter(self.t[1:],observation_history[:,0],color='black',marker='.',alpha=0.3)
plt.grid()
if use_measured_height:
plt.title('Change in height (using measured value)').set_fontsize(20)
else:
plt.title('Change in height (ideal case)').set_fontsize(20)
plt.xlabel('$t$ [sec]').set_fontsize(20)
plt.ylabel('$z-z_0$ [$m$]').set_fontsize(20)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
if use_measured_height:
plt.legend(['Planned path','Executed path','Observed value'],fontsize = 14)
else:
plt.legend(['Planned path','Executed path'],fontsize = 14)
plt.show()
plt.subplot(212)
plt.plot(self.t[1:],abs(self.z_path[1:]-drone_state_history[:,0]),linestyle='-',marker='.',color='blue')
plt.grid()
plt.title('Error value ').set_fontsize(20)
plt.xlabel('$t$ [sec]').set_fontsize(20)
plt.ylabel('||$z_{target} - z_{actual}$|| [$m$]').set_fontsize(20)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.legend(['Error'],fontsize = 14)
plt.show()
def PID_controller_with_estimated_values(self,k_p,k_d,k_i,mass_err,sigma,alpha,use_estimated_height=False):
# creating the co-axial drone object
Controlled_Drone=CoaxialCopter()
# array for recording the state history
drone_state_history = Controlled_Drone.X
# introducing a small error between the actual mass and the mass for which the path has been calculated
actual_mass = Controlled_Drone.m * mass_err
# creating the control system object
control_system = PIDController_with_ff(k_p,k_d,k_i)
# declaring the initial state of the drone with zero height and zero velocity
Controlled_Drone.X = | np.array([0.0,0.0,0.0,0.0]) | numpy.array |
'''
Independent Component Analysis (ICA):
This script computes ICA using the INFOMAX criteria.
The preprocessing steps include demeaning and whitening.
'''
import numpy as np
from numpy import dot
from numpy.linalg import matrix_rank, inv
from numpy.random import permutation
from scipy.linalg import eigh
# Theano Imports
import theano.tensor as T
import theano
from theano import shared
# Global constants
EPS = 1e-18
MAX_W = 1e8
ANNEAL = 0.9
MAX_STEP = 500
MIN_LRATE = 1e-6
W_STOP = 1e-6
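# Added numpy-only sketch of the demean + whiten preprocessing mentioned in the
# module docstring. It mirrors the idea implemented in ica_gpu.__pca_whiten
# below (it is not the class method itself): after demeaning and PCA whitening,
# the retained components have approximately identity covariance. The sizes and
# helper name are arbitrary choices for illustration.
def _whitening_sanity_check(n_comp=3, n_obs=5, n_vox=2000, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.rand(n_obs, n_vox)
    x = x - x.mean(axis=1, keepdims=True)            # demean each observation
    cov = np.dot(x, x.T) / (n_vox - 1)
    w, v = eigh(cov, eigvals=(n_obs - n_comp, n_obs - 1))
    white = np.dot(np.diag(1.0 / np.sqrt(w)), v.T)   # whitening matrix
    x_white = np.dot(white, x)
    return np.dot(x_white, x_white.T) / (n_vox - 1)  # ~ identity (n_comp x n_comp)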
class ica_gpu(object):
"""
Infomax ICA for one data modality
"""
def __init__(self, n_comp=10, verbose=False):
# Theano initialization
self.T_weights = shared(np.eye(n_comp, dtype=np.float32))
self.T_bias = shared(np.ones((n_comp, 1), dtype=np.float32))
T_p_x_white = T.fmatrix()
T_lrate = T.fscalar()
T_block = T.fscalar()
T_unmixed = T.dot(self.T_weights, T_p_x_white) + T.addbroadcast(self.T_bias, 1)
T_logit = 1 - 2 / (1 + T.exp(-T_unmixed))
T_out = self.T_weights + T_lrate * \
T.dot(T_block * T.identity_like(self.T_weights) + T.dot(T_logit, T.transpose(T_unmixed)), self.T_weights)
T_bias_out = self.T_bias + T_lrate * T.reshape(T_logit.sum(axis=1), (-1, 1))
T_max_w = T.max(self.T_weights)
T_isnan = T.any(T.isnan(self.T_weights))
self.w_up_fun = theano.function([T_p_x_white, T_lrate, T_block],
[T_max_w, T_isnan],
updates=[(self.T_weights, T_out),
(self.T_bias, T_bias_out)],
allow_input_downcast=True)
T_matrix = T.fmatrix()
T_cov = T.dot(T_matrix, T.transpose(T_matrix))/T_block
self.cov_fun = theano.function([T_matrix, T_block], T_cov, allow_input_downcast=True)
self.loading = None
self.sources = None
self.weights = None
self.n_comp = n_comp
self.verbose = verbose
def __pca_whiten(self, x2d):
""" data Whitening
*Input
x2d : 2d data matrix of observations by variables
n_comp: Number of components to retain
*Output
Xwhite : Whitened X
white : whitening matrix (Xwhite = np.dot(white,X))
dewhite : dewhitening matrix (X = np.dot(dewhite,Xwhite))
"""
NSUB, NVOX = x2d.shape
x2d_demean = x2d - x2d.mean(axis=1).reshape((-1, 1))
# cov = dot(x2d_demean, x2d_demean.T) / ( NVOX -1 )
cov = self.cov_fun(x2d_demean, NVOX-1)
w, v = eigh(cov, eigvals=(NSUB-self.n_comp, NSUB-1))
D = np.diag(1./(np.sqrt(w)))
white = dot(D, v.T)
D = np.diag(np.sqrt(w))
dewhite = dot(v, D)
x_white = dot(white, x2d_demean)
return (x_white, white, dewhite)
def __w_update(self, x_white, lrate1):
""" Update rule for infomax
This function receives parameters to update W1
* Input
W1: unmixing matrix (must be a square matrix)
Xwhite1: whitened data
bias1: current estimated bias
lrate1: current learning rate
startW1: in case update blows up it will start again from startW1
* Output
W1: updated unmixing matrix
bias: updated bias
lrate1: updated learning rate
"""
error = 0
NVOX = x_white.shape[1]
NCOMP = x_white.shape[0]
block1 = int(np.floor(np.sqrt(NVOX / 3)))
permute1 = permutation(NVOX)
p_x_white = x_white[:, permute1].astype(np.float32)
for start in range(0, NVOX, block1):
if start + block1 < NVOX:
tt2 = start + block1
else:
tt2 = NVOX
block1 = NVOX - start
max_w, isnan = self.w_up_fun(p_x_white[:, start:tt2], lrate1, block1)
# Checking if W blows up
if isnan or max_w > MAX_W:
# print("Numeric error! restarting with lower learning rate")
lrate1 = lrate1 * ANNEAL
self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
self.T_bias.set_value(np.zeros((NCOMP, 1), dtype=np.float32))
error = 1
if lrate1 > 1e-6 and \
matrix_rank(x_white) < NCOMP:
# print("Data 1 is rank defficient"
# ". I cannot compute " +
# str(NCOMP) + " components.")
return (0, 1)
if lrate1 < 1e-6:
# print("Weight matrix may"
# " not be invertible...")
return (0, 1)
return(lrate1, error)
def __infomax(self, x_white):
"""Computes ICA infomax in whitened data
Decomposes x_white as x_white=AS
*Input
x_white: whitened data (Use PCAwhiten)
verbose: flag to print optimization updates
*Output
A : mixing matrix
S : source matrix
W : unmixing matrix
"""
NCOMP = self.n_comp
# Initialization
self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
weights = np.eye(NCOMP)
old_weights = np.eye(NCOMP)
d_weigths = np.zeros(NCOMP)
old_d_weights = np.zeros(NCOMP)
lrate = 0.005 / np.log(NCOMP)
self.T_bias.set_value( | np.zeros((NCOMP, 1), dtype=np.float32) | numpy.zeros |
r"""
Routines for displacing a molecular geometry by translation or
proper/improper rotation.
::
1
|
4
/ \
2 3
Example axes for displacements:
1. X1X4 stretch: :math:`r_{14}`
2. X1X4 torsion: :math:`r_{14}` (for motion of 2, 3)
3. X1X4X2 bend: :math:`r_{14} \times r_{24}`
4. X1 out-of-plane: :math:`r_{24} - r_{34}` or
:math:`(r_{24} \times r_{34}) \times r_{14}`
Each internal coordinate measurement has the option of changing the units
(see the constants module) or taking the absolute value.
"""
import operator as op
import pyparsing as pp
import numpy as np
import gimbal.constants as con
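# Added illustration of the example axes described in the module docstring
# above. It uses a toy geometry with atom 4 at the origin and returns the X1X4
# stretch axis r_14, the X1X4X2 bend axis r_14 x r_24 and the X1 out-of-plane
# axis r_24 - r_34. The coordinates are made up for illustration only.
def _example_axes():
    xyz = {1: np.array([0.0, 0.0, 1.0]),
           2: np.array([-1.0, 0.0, -1.0]),
           3: np.array([1.0, 0.0, -1.0]),
           4: np.array([0.0, 0.0, 0.0])}
    r14 = xyz[1] - xyz[4]
    r24 = xyz[2] - xyz[4]
    r34 = xyz[3] - xyz[4]
    return r14, np.cross(r14, r24), r24 - r34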
class VectorParser(object):
"""An object for defining and evaluating vector operations on
a cartesian geometry.
A new VectorParser instance takes a cartesian geometry as an
optional input. The instance can be called with a vector (no
action), 3x3 array (cross product) or string, parsed according
to the syntax in :func:`~VectorParser.generate_parser`.
Attributes
----------
xyz : (N, 3) array_like, optional
The cartesian geometry which defines the indices in parsed
expressions. If None, only expressions without indices can
be parsed.
unop : dict
A dictionary which defines unary operations.
bnadd : dict
A dictionary which defines binary addition operations.
bnmul : dict
A dictionary which defines binary multiplication operations.
bnop : dict
A dictionary which defines all binary operations.
axes : dict
A dictionary which defines cartesian axis labels.
expr : pyparsing.Forward
A pyparsing grammar used to evaluate expressions. Automatically
generated when xyz is set.
"""
def __init__(self, xyz=None):
self.unop = {'+': op.pos, '-': op.neg}
self.bnadd = {'+': op.add, '-': op.sub}
self.bnmul = {'*': op.mul, '/': op.truediv, 'o': np.dot, 'x': np.cross}
self.bnop = self.bnadd.copy()
self.bnop.update(self.bnmul)
self.axes = dict(X = np.array([1., 0., 0.]),
Y = np.array([0., 1., 0.]),
Z = np.array([0., 0., 1.]))
self.xyz = xyz
def __call__(self, inp, unit=False):
"""Evaluates an expression based on a string.
Parameters
----------
inp : str or array_like
A string or array used to specify an axis.
unit : bool, optional
Specifies if the axis is converted to a unit vector.
Returns
-------
float or ndarray
The result of the vector operation.
Raises
------
ValueError
If input is not a string, 3-vector or 3x3 array.
"""
if isinstance(inp, str):
u = self.expr.parseString(inp, parseAll=True)[0]
elif len(inp) == 3:
u = np.array(inp, dtype=float)
if u.size == 9:
u = np.cross(u[0] - u[1], u[2] - u[1])
else:
raise ValueError('Axis specification not recognized')
if unit:
return con.unit_vec(u)
else:
return u
@property
def xyz(self):
"""Gets the value of xyz."""
return self._xyz
@xyz.setter
def xyz(self, val):
"""Sets the value of xyz and generates the parser."""
self.expr = self.generate_parser(val)
self._xyz = val
def _eval_unary(self, tokens):
"""Evaluates unary operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching unary expression.
Returns
-------
float or ndarray
The expression after unary operation.
"""
vals = tokens[0]
return self.unop[vals[0]](vals[1])
def _eval_binary(self, tokens):
"""Evaluates binary operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching binary expression.
Returns
-------
float or ndarray
The expression after binary operation.
"""
vals = tokens[0]
newval = vals[0]
it = iter(vals[1:])
for oper in it:
newval = self.bnop[oper](newval, next(it))
return newval
def _eval_power(self, tokens):
"""Evaluates power operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching power expression.
Returns
-------
float or ndarray
The expression after power operation.
"""
vals = tokens[0]
newval = vals[-1]
for v in vals[-3::-2]:
newval = v**newval
return newval
def generate_parser(self, xyz=None):
"""Creates the pyparsing expression based on geometry.
The syntax is as follows:
- ``i+`` are indices of xyz and return vectors.
- ``i+.j`` are floating point numbers (j optional).
- ``i[j]`` is the j-th (scalar) element of xyz[i].
- ``X, Y, Z`` are unit vectors along x, y and z axes (uppercase only).
- ``+`` and ``-`` are addition/subtraction of vectors or scalars.
- ``*`` and ``/`` are multiplication/division of vectors and scalars
(elementwise).
- ``o`` and ``x`` are scalar/vector products of vectors only.
- ``^`` is the power of a vector/scalar by a scalar (elementwise).
- ``(`` and ``)`` specify order of operation.
- ``[i, j, k]`` gives a vector with scalar elements i, j and k.
Parameters
----------
xyz : (N, 3), array_like, optional
The cartesian geometry used in index expressions. If not
provided, strings containing indices will raise an error.
Returns
-------
pyparsing.Forward
A pyparsing grammar definition.
"""
expr = pp.Forward()
# operand types: int, int with index, float, axis or delimited list
intnum = pp.Word(pp.nums)
fltind = pp.Word(pp.nums) + '[' + pp.Word(pp.nums) + ']'
fltnum = pp.Combine(pp.Word(pp.nums) + '.' + pp.Optional(pp.Word(pp.nums)))
alphax = pp.oneOf(' '.join(self.axes))
dllist = pp.Suppress('[') + pp.delimitedList(expr) + pp.Suppress(']')
intnum.setParseAction(lambda t: xyz[int(t[0])])
fltind.setParseAction(lambda t: xyz[int(t[0])][int(t[2])])
fltnum.setParseAction(lambda t: float(t[0]))
alphax.setParseAction(lambda t: self.axes[t[0]])
dllist.setParseAction(lambda t: | np.array(t[:]) | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*
import os
import os.path
import re
import scipy.io as sio
import numpy as np
from mpi4py import MPI
from pyscf import gto
import decodense
# decodense variables
PARAMS = {
'prop': 'energy',
'basis': 'ccpvdz',
'xc': 'pbe0',
'loc': 'ibo-2',
'pop': 'iao',
'part': 'atoms'
}
UNIT = 'au'
N_ATOMS = 3
RST_FREQ = 50
# input / output
INPUT = os.getcwd() + '/water_therm_1000.mat'
OUTPUT = os.getcwd() + '/{:}_{:}_{:}_{:}_{:}_{:}/'.format(PARAMS['prop'], PARAMS['xc'] if PARAMS['xc'] != '' else 'hf', \
PARAMS['basis'], PARAMS['loc'] if PARAMS['loc'] != '' else 'can', \
PARAMS['pop'], PARAMS['part'], PARAMS['prop'])
def main():
"""
main program
"""
# mpi attributes
comm = MPI.COMM_WORLD
stat = MPI.Status()
rank = comm.Get_rank()
size = comm.Get_size()
assert 1 < size, 'script must be run in parallel: `mpiexec -np N ...`'
# init decomp object
decomp = decodense.DecompCls(**PARAMS)
# master
if rank == 0:
# write MPI parameters
print('\n MPI global size = {:}\n'.format(size))
# make output dir
if not os.path.isdir(OUTPUT):
restart = False
os.mkdir(OUTPUT)
else:
restart = True
# load in dataset
data = sio.loadmat(INPUT)
# number of slaves and tasks
n_slaves = size - 1
n_tasks = data['R'].shape[0]
# start_idx
if restart:
res_el = np.load(OUTPUT + 'elec.npy')
res_nuc = np.load(OUTPUT + 'nuc.npy')
from __future__ import division
import os, glob, time
import numpy as np
import h5py
from skimage.transform import resize, warp, AffineTransform
from skimage import measure
from transforms3d.euler import euler2mat
from transforms3d.affines import compose
from constants import *
def normalize(im_input):
im_output = im_input + 1000 # shift so that the air value becomes 0, since the HU of air is -1000
# Intensity crop
im_output[im_output < 0] = 0
im_output[im_output > 1600] = 1600 # Kind of arbitrary to select the range from -1000 to 600 in HU
im_output = im_output / 1600.0
return im_output
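# Worked example for normalize() (illustrative values): an input of
#   np.array([-1000., -200., 600., 3000.])
# becomes [0, 800, 1600, 1600] after the shift/clip and
#   array([0. , 0.5, 1. , 1. ])
# after dividing by 1600.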
def resize_images_labels(images, labels):
resized_images = resize_images(images)
# labels
size = (ALL_IM_SIZE[0], ALL_IM_SIZE[1] + CROP * 2, ALL_IM_SIZE[2] + CROP * 2)
resized_labels = np.zeros(size, dtype=np.float32)
for z in range(N_CLASSES):
roi = resize((labels == z + 1).astype(np.float32), size, mode='constant')
resized_labels[roi >= 0.5] = z + 1
resized_labels = resized_labels[:, CROP:-CROP, CROP:-CROP]
return resized_images, resized_labels
def resize_images(images):
size = (ALL_IM_SIZE[0], ALL_IM_SIZE[1] + CROP * 2, ALL_IM_SIZE[2] + CROP * 2)
resized_images = resize(images, size, mode='constant')
resized_images = resized_images[:, CROP:-CROP, CROP:-CROP]
return resized_images
def get_tform_coords(im_size):
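# Build homogeneous voxel coordinates centred on the volume midpoint; the result has shape
# (4, prod(im_size)) so it can be multiplied directly by a 4x4 affine matrix
# (as done in read_training_inputs below).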
coords0, coords1, coords2 = np.mgrid[:im_size[0], :im_size[1], :im_size[2]]
coords = np.array([coords0 - im_size[0] / 2, coords1 - im_size[1] / 2, coords2 - im_size[2] / 2])
return np.append(coords.reshape(3, -1), np.ones((1, np.prod(im_size))), axis=0)
def clean_contour(in_contour, is_prob=False):
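# Keep only the largest connected component of the (optionally thresholded) prediction;
# every smaller, spurious region is zeroed out while the main object is left untouched.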
if is_prob:
pred = (in_contour >= 0.5).astype(np.float32)
else:
pred = in_contour
labels = measure.label(pred)
area = []
for l in range(1, np.amax(labels) + 1):
area.append(np.sum(labels == l))
out_contour = in_contour
out_contour[np.logical_and(labels > 0, labels != np.argmax(area) + 1)] = 0
return out_contour
def restore_labels(labels, roi, read_info):
if roi == -1:
# Pad first, then resize to original shape
labels = np.pad(labels, ((0, 0), (CROP, CROP), (CROP, CROP)), 'constant')
restored_labels = np.zeros(read_info['shape'], dtype=np.float32)
for z in range(N_CLASSES):
roi = resize((labels == z + 1).astype(np.float32), read_info['shape'], mode='constant')
roi[roi >= 0.5] = 1
roi[roi < 0.5] = 0
roi = clean_contour(roi, is_prob=False)
restored_labels[roi == 1] = z + 1
else:
labels = clean_contour(labels, is_prob=True)
# Resize to extracted shape, then pad to original shape
labels = resize(labels, read_info['extract_shape'], mode='constant')
restored_labels = np.zeros(read_info['shape'], dtype=np.float32)
extract = read_info['extract']
restored_labels[extract[0][0] : extract[0][1], extract[1][0] : extract[1][1], extract[2][0] : extract[2][1]] = labels
return restored_labels
def read_testing_inputs(file, roi, im_size, output_path=None):
f_h5 = h5py.File(file, 'r')
if roi == -1:
images = np.asarray(f_h5['resized_images'], dtype=np.float32)
read_info = {}
read_info['shape'] = np.asarray(f_h5['images'], dtype=np.float32).shape
else:
images = np.asarray(f_h5['images'], dtype=np.float32)
output = h5py.File(os.path.join(output_path, 'All_' + os.path.basename(file)), 'r')
predictions = np.asarray(output['predictions'], dtype=np.float32)
output.close()
# Select the roi
roi_labels = (predictions == roi + 1).astype(np.float32)
nz = np.nonzero(roi_labels)
extract = []
for c in range(3):
start = np.amin(nz[c])
end = np.amax(nz[c])
r = end - start
extract.append((np.maximum(int(np.rint(start - r * 0.1)), 0),
np.minimum(int(np.rint(end + r * 0.1)), images.shape[c])))
extract_images = images[extract[0][0] : extract[0][1], extract[1][0] : extract[1][1], extract[2][0] : extract[2][1]]
read_info = {}
read_info['shape'] = images.shape
read_info['extract_shape'] = extract_images.shape
read_info['extract'] = extract
images = resize(extract_images, im_size, mode='constant')
f_h5.close()
return images, read_info
def read_training_inputs(file, roi, im_size):
f_h5 = h5py.File(file, 'r')
if roi == -1:
images = np.asarray(f_h5['resized_images'], dtype=np.float32)
labels = np.asarray(f_h5['resized_labels'], dtype=np.float32)
else:
images = np.asarray(f_h5['images'], dtype=np.float32)
labels = np.asarray(f_h5['labels'], dtype=np.float32)
f_h5.close()
if roi == -1:
# Select all
assert im_size == images.shape
translation = [0, np.random.uniform(-8, 8), np.random.uniform(-8, 8)]
rotation = euler2mat(np.random.uniform(-5, 5) / 180.0 * np.pi, 0, 0, 'sxyz')
scale = [1, np.random.uniform(0.9, 1.1), np.random.uniform(0.9, 1.1)]
warp_mat = compose(translation, rotation, scale)
tform_coords = get_tform_coords(im_size)
w = np.dot(warp_mat, tform_coords)
w[0] = w[0] + im_size[0] / 2
w[1] = w[1] + im_size[1] / 2
w[2] = w[2] + im_size[2] / 2
warp_coords = w[0:3].reshape(3, im_size[0], im_size[1], im_size[2])
final_images = warp(images, warp_coords)
nclass = int(np.amax(labels)) + 1
final_labels = np.empty(im_size + (nclass,), dtype=np.float32)
for z in range(1, nclass):
temp = warp((labels == z).astype(np.float32), warp_coords)
temp[temp < 0.5] = 0
temp[temp >= 0.5] = 1
final_labels[..., z] = temp
final_labels[..., 0] = np.amax(final_labels[..., 1:], axis=3) == 0
else:
# Select the roi
roi_labels = (labels == roi + 1).astype(np.float32)
# Rotate the images and labels
rotation = np.random.uniform(-15, 15)
shear = np.random.uniform(-5, 5)
tf = AffineTransform(rotation=np.deg2rad(rotation), shear=np.deg2rad(shear))
for z in range(images.shape[0]):
images[z] = warp(images[z], tf.inverse)
roi_labels[z] = warp(roi_labels[z], tf.inverse)
nz = np.nonzero(roi_labels)
extract = []
for c in range(3):
start = np.amin(nz[c])
# -*- coding:utf-8 -*-
# !/usr/bin/env python
# import argparse
import json
# import matplotlib.pyplot as plt
# import skimage.io as io
import cv2
# from labelme import utils
import numpy as np
import glob
import PIL.Image
from PIL import Image, ImageDraw
from tqdm import tqdm
import os
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
class labelme2coco(object):
def __init__(self, labelme_json=[], save_json_path='./train.json', label = [], image_path=""):
'''
:param labelme_json: list of file paths to all the labelme json annotation files
:param save_json_path: path where the generated coco-style json will be saved
'''
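# Illustrative usage (the paths below are assumptions for the example only; the label list
# mirrors the commented-out default further down):
#   json_list = glob.glob(r'D:/annotations/*.json')
#   labelme2coco(json_list, save_json_path='./train.json',
#                label=['face', 'hand', 'cigarette', 'cellphone'],
#                image_path=r'D:/images')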
self.labelme_json = labelme_json
self.save_json_path = save_json_path
self.images = []
self.categories = []
self.annotations = []
self.image_path = image_path
# self.data_coco = {}
# self.label = ['face', 'hand', 'cigarette', 'cellphone']
self.label = label
# self.label = ['cigarette']
# self.exclude_label = ['face', 'cellphone']
self.annID = 1
self.height = 0
self.width = 0
# count the number of annotated boxes for each class
self.stats = {}
self.save_json()
def data_transfer(self):
for num, json_file in enumerate(tqdm(self.labelme_json)):
# print(json_file + "\n")
with open(json_file, 'r', encoding='utf-8') as fp:
data = json.load(fp)  # load the json file
# skip the file if it contains no annotated objects
if (len(data['shapes']) == 0):
print(json_file)
continue
image_name = json_file.split("\\")[-1].split(".")[0] + ".jpg"
# include the parent folder in the path
# image_name = json_file.split("\\").split(".")[0] + ".jpg"
# self.images.append(self.image(data, num))
self.images.append(self.image_from_json(data, num, image_name))
for shapes in data['shapes']:
label = shapes['label']
# # skip specific classes
# if label in self.exclude_label:
# continue
if label not in self.stats.keys():
self.stats[label] = 0
self.stats[label] += 1
if label not in self.label:
# self.categories.append(self.categorie(label))
# self.label.append(label)
# print(label + " is not in label list!")
continue
hasFlag = False
for categorie in self.categories:
if label == categorie["name"]:
hasFlag = True
if not hasFlag:
self.categories.append(self.categorie(label))
points = shapes['points']  # the points come from a rectangle annotation and contain only two corners; they need to be converted to four points
# points.append([points[0][0],points[1][1]])
# points.append([points[1][0],points[0][1]])
self.annotations.append(self.annotation(points, label, num))
self.annID += 1
def image(self, data, num):
image = {}
# img = utils.img_b64_to_arr(data['imageData'])  # decode the original image data
# img=io.imread("F:\\阜康测试视频\\frame-16\\labelme\\test\\img\\" + data['imagePath'])  # open the image via its file path
# img = cv2.imread("F:\\阜康测试视频\\frame-16\\labelme\\test\\img\\" + data['imagePath'], 0)
img = cv2.imdecode(np.fromfile(os.path.join(self.image_path, data['imagePath']), dtype=np.uint8), -1)
height, width = img.shape[:2]
img = None
image['height'] = height
image['width'] = width
image['id'] = num + 1
# image['file_name'] = data['imagePath'].split('/')[-1]
image['file_name'] = data['imagePath']
self.height = height
self.width = width
return image
# get the image information directly from the json file
def image_from_json(self, data, num, image_name):
image = {}
image['height'] = data["imageHeight"]
image['width'] = data["imageWidth"]
image['id'] = num + 1
# image['file_name'] = data['imagePath'].split('/')[-1]
image['file_name'] = image_name
self.height = data["imageHeight"]
self.width = data["imageWidth"]
return image
def categorie(self, label):
categorie = {}
categorie['supercategory'] = 'None'
categorie['id'] = self.label.index(label) + 1  # 0 is reserved for the background by default
categorie['name'] = label
return categorie
def annotation(self, points, label, num):
annotation = {}
annotation['segmentation'] = [list(np.asarray(points).flatten())]
annotation['iscrowd'] = 0
annotation['image_id'] = num + 1
# annotation['bbox'] = str(self.getbbox(points))  # saving as a list raised an error when writing the json file (reason unknown)
# list(map(int, a[1:-1].split(','))) with a = annotation['bbox'] converts the string back into a list
annotation['bbox'] = list(map(float, self.getbbox(points)))
annotation['area'] = annotation['bbox'][2] * annotation['bbox'][3]
# annotation['category_id'] = self.getcatid(label)
annotation['category_id'] = self.getcatid(label)  # note: the original code defaulted this to 1
annotation['id'] = self.annID
return annotation
def getcatid(self, label):
for categorie in self.categories:
if label == categorie['name']:
return categorie['id']
return 1
def getbbox(self, points):
# img = np.zeros([self.height,self.width],np.uint8)
# cv2.polylines(img, [np.asarray(points)], True, 1, lineType=cv2.LINE_AA)  # draw the boundary lines
# cv2.fillPoly(img, [np.asarray(points)], 1)  # fill the polygon; interior pixel values are set to 1
polygons = points
mask = self.polygons_to_mask([self.height, self.width], polygons)
return self.mask2box(mask)
def mask2box(self, mask):
'''Compute the bounding box from a mask.
mask: [h, w] image consisting of 0s and 1s.
1 marks the object; only the row/column indices of the 1-pixels are needed
(the top-left and bottom-right row/column indices determine the bounding box).
'''
# np.where(mask==1)
index = np.argwhere(mask == 1)
rows = index[:, 0]
clos = index[:, 1]
# row/column index of the top-left corner
left_top_r = np.min(rows) # y
left_top_c = np.min(clos)  # x
import numpy as _numpy
from fdrtd.plugins.simon.caches.cache import Cache
from fdrtd.plugins.simon.microprotocols.microprotocol import Microprotocol
class MicroprotocolSecureMatrixMultiplication(Microprotocol):
def __init__(self, microservice, properties, myself):
super().__init__(microservice, properties, myself)
self.register_cache('input', Cache())
self.register_cache('dimX', Cache())
self.register_cache('dimY', Cache())
self.register_cache('intermediateV', Cache())
self.register_cache('intermediateW', Cache())
self.register_cache('final', Cache())
self.register_stage(0, ['input'], self.stage_0)
self.register_stage(1, ['dimX', 'dimY'], self.stage_1)
self.register_stage(2, ['intermediateV'], self.stage_2)
self.register_stage(3, ['intermediateW'], self.stage_3)
self.register_stage(4, ['final'], self.stage_4)
self.M = None
self.n = self.p = self.q = 0
self.v = None
def stage_0(self, args):
self.M = _numpy.array(args['input'])
self.network.broadcast(self.M.shape, 'dimX' if self.network.myself == 0 else 'dimY')
return 1, None
def stage_1(self, args):
self.p = args['dimX'][0]
self.n = args['dimX'][1]
if args['dimY'][0] != args['dimX'][1]:
raise RuntimeError("matrix shapes not compatible")
self.q = args['dimY'][1]
if self.network.myself == 0:
xt = self.M.transpose()
q, r = _numpy.linalg.qr(xt, mode='complete')
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum process tomography analysis
"""
from typing import List, Dict, Tuple
import time
import numpy as np
import scipy.linalg as la
from qiskit.result import marginal_counts, Counts
from qiskit.quantum_info import DensityMatrix, Choi, Operator
from qiskit.quantum_info.operators.base_operator import BaseOperator
from qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel
from qiskit_experiments.exceptions import AnalysisError
from qiskit_experiments.framework import BaseAnalysis, AnalysisResultData, Options
from .fitters import (
linear_inversion,
scipy_linear_lstsq,
scipy_gaussian_lstsq,
cvxpy_linear_lstsq,
cvxpy_gaussian_lstsq,
)
class TomographyAnalysis(BaseAnalysis):
"""Base analysis for state and process tomography experiments."""
_builtin_fitters = {
"linear_inversion": linear_inversion,
"scipy_linear_lstsq": scipy_linear_lstsq,
"scipy_gaussian_lstsq": scipy_gaussian_lstsq,
"cvxpy_linear_lstsq": cvxpy_linear_lstsq,
"cvxpy_gaussian_lstsq": cvxpy_gaussian_lstsq,
}
@classmethod
def _default_options(cls) -> Options:
"""Default analysis options
Analysis Options:
measurement_basis
(:class:`~qiskit_experiments.library.tomography.basis.BaseFitterMeasurementBasis`):
The measurement
:class:`~qiskit_experiments.library.tomography.basis.BaseFitterMeasurementBasis`
to use for tomographic reconstruction when running a
:class:`~qiskit_experiments.library.tomography.StateTomography` or
:class:`~qiskit_experiments.library.tomography.ProcessTomography`.
preparation_basis
(:class:`~qiskit_experiments.library.tomography.basis.BaseFitterPreparationBasis`):
The preparation
:class:`~qiskit_experiments.library.tomography.basis.BaseFitterPreparationBasis`
to use for tomographic reconstruction for
:class:`~qiskit_experiments.library.tomography.ProcessTomography`.
fitter (str or Callable): The fitter function to use for reconstruction.
This can be a string to select one of the built-in fitters, or a callable to
supply a custom fitter function. See the `Fitter Functions` section for
additional information.
fitter_options (dict): Any addition kwarg options to be supplied to the fitter
function. For documentation of available kargs refer to the fitter function
documentation.
rescale_positive (bool): If True rescale the state returned by the fitter
to be positive-semidefinite. See the `PSD Rescaling` section for
additional information (Default: True).
rescale_trace (bool): If True rescale the state returned by the fitter
have either trace 1 for :class:`~qiskit.quantum_info.DensityMatrix`,
or trace dim for :class:`~qiskit.quantum_info.Choi` matrices (Default: True).
target (Any): Optional, target object for fidelity comparison of the fit
(Default: None).
"""
options = super()._default_options()
options.measurement_basis = None
options.preparation_basis = None
options.fitter = "linear_inversion"
options.fitter_options = {}
options.rescale_positive = True
options.rescale_trace = True
options.target = None
return options
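# Illustrative usage (a sketch -- the experiment object itself is assumed to be built
# elsewhere; the option names are the ones documented above):
#   analysis = TomographyAnalysis()
#   analysis.set_options(fitter="cvxpy_gaussian_lstsq",
#                        fitter_options={"trace_preserving": True},
#                        rescale_positive=True)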
@classmethod
def _get_fitter(cls, fitter):
"""Return fitter function for named builtin fitters"""
if fitter is None:
raise AnalysisError("No tomography fitter given")
if not isinstance(fitter, str):
return fitter
if fitter in cls._builtin_fitters:
return cls._builtin_fitters[fitter]
raise AnalysisError(f"Unrecognized tomography fitter {fitter}")
def _run_analysis(self, experiment_data):
# Extract tomography measurement data
outcome_data, shot_data, measurement_data, preparation_data = self._fitter_data(
experiment_data.data()
)
# Get tomography fitter function
fitter = self._get_fitter(self.options.fitter)
fitter_opts = self.options.fitter_options
# Work around to set proper trace and trace preserving constraints for
# cvxpy fitter
if fitter in (cvxpy_linear_lstsq, cvxpy_gaussian_lstsq):
fitter_opts = fitter_opts.copy()
# Add default value for CVXPY trace constraint if no user value is provided
if "trace" not in fitter_opts:
if self.options.preparation_basis:
fitter_opts["trace"] = 2 ** len(preparation_data[0])
else:
fitter_opts["trace"] = 1
# By default add trace preserving constraint to cvxpy QPT fit
if "trace_preserving" not in fitter_opts and self.options.preparation_basis:
fitter_opts["trace_preserving"] = True
try:
t_fitter_start = time.time()
state, fitter_metadata = fitter(
outcome_data,
shot_data,
measurement_data,
preparation_data,
measurement_basis=self.options.measurement_basis,
preparation_basis=self.options.preparation_basis,
**fitter_opts,
)
t_fitter_stop = time.time()
if fitter_metadata is None:
fitter_metadata = {}
state = Choi(state) if self.options.preparation_basis else DensityMatrix(state)
fitter_metadata["fitter"] = fitter.__name__
fitter_metadata["fitter_time"] = t_fitter_stop - t_fitter_start
analysis_results = self._postprocess_fit(
state,
metadata=fitter_metadata,
target_state=self.options.target,
rescale_positive=self.options.rescale_positive,
rescale_trace=self.options.rescale_trace,
qpt=bool(self.options.preparation_basis),
)
except AnalysisError as ex:
raise AnalysisError(f"Tomography fitter failed with error: {str(ex)}") from ex
return analysis_results, []
@classmethod
def _postprocess_fit(
cls,
state,
metadata=None,
target_state=None,
rescale_positive=False,
rescale_trace=False,
qpt=False,
):
"""Post-process fitter data"""
# Get eigensystem of state
state_cls = type(state)
evals, evecs = cls._state_eigensystem(state)
# Rescale eigenvalues to be PSD
rescaled_psd = False
if rescale_positive and np.any(evals < 0):
scaled_evals = cls._make_positive(evals)
rescaled_psd = True
else:
scaled_evals = evals
# Rescale trace
trace = np.sqrt(len(scaled_evals)) if qpt else 1
sum_evals = np.sum(scaled_evals)
rescaled_trace = False
if rescale_trace and not np.isclose(sum_evals - trace, 0, atol=1e-12):
scaled_evals = trace * scaled_evals / sum_evals
rescaled_trace = True
# Compute state with rescaled eigenvalues
state_result = AnalysisResultData("state", state, extra=metadata)
state_result.extra["eigvals"] = scaled_evals
if rescaled_psd or rescaled_trace:
state = state_cls(evecs @ (scaled_evals * evecs).T.conj())
state_result.value = state
state_result.extra["raw_eigvals"] = evals
if rescaled_trace:
state_result.extra["trace"] = np.sum(scaled_evals)
state_result.extra["raw_trace"] = sum_evals
else:
state_result.extra["trace"] = sum_evals
# Results list
analysis_results = [state_result]
# Compute fidelity with target
if target_state is not None:
analysis_results.append(
cls._fidelity_result(scaled_evals, evecs, target_state, qpt=qpt)
)
# Check positive
analysis_results.append(cls._positivity_result(scaled_evals, qpt=qpt))
# Check trace preserving
if qpt:
analysis_results.append(cls._tp_result(scaled_evals, evecs))
return analysis_results
@staticmethod
def _state_eigensystem(state):
evals, evecs = la.eigh(state)
# Truncate eigenvalues to real part
evals = np.real(evals)
# Sort eigensystem from largest to smallest eigenvalues
sort_inds = np.flip(np.argsort(evals))
return evals[sort_inds], evecs[:, sort_inds]
@staticmethod
def _make_positive(evals, epsilon=0):
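# Standard eigenvalue "water-filling" truncation: negative eigenvalues (the input is expected
# to be sorted from largest to smallest) are clipped to zero and their accumulated weight is
# redistributed evenly over the remaining eigenvalues, so the overall sum (trace) is preserved.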
if epsilon < 0:
raise AnalysisError("epsilon must be non-negative.")
ret = evals.copy()
dim = len(evals)
idx = dim - 1
accum = 0.0
while idx >= 0:
shift = accum / (idx + 1)
if evals[idx] + shift < epsilon:
ret[idx] = 0
accum = accum + evals[idx]
idx -= 1
else:
for j in range(idx + 1):
ret[j] = evals[j] + shift
break
return ret
@staticmethod
def _positivity_result(evals, qpt=False):
"""Check if eigenvalues are positive"""
cond = np.sum(np.abs(evals[evals < 0]))
is_pos = bool(np.isclose(cond, 0))
name = "completely_positive" if qpt else "positive"
result = AnalysisResultData(name, is_pos)
if not is_pos:
result.extra = {"delta": cond}
return result
@staticmethod
def _tp_result(evals, evecs):
"""Check if QPT channel is trace preserving"""
size = len(evals)
dim = int(np.sqrt(size))
mats = np.reshape(evecs.T, (size, dim, dim), order="F")
kraus_cond = np.einsum("i,ija,ijb->ab", evals, mats.conj(), mats)
cond = np.sum(np.abs(la.eigvalsh(kraus_cond - np.eye(dim))))
is_tp = bool(np.isclose(cond, 0))
result = AnalysisResultData("trace_preserving", is_tp)
if not is_tp:
result.extra = {"delta": cond}
return result
@staticmethod
def _fidelity_result(evals, evecs, target, qpt=False):
"""Faster computation of fidelity from eigen decomposition"""
# Format target to statevector or densitymatrix array
trace = np.sqrt(len(evals)) if qpt else 1
name = "process_fidelity" if qpt else "state_fidelity"
if target is None:
raise AnalysisError("No target state provided")
if isinstance(target, QuantumChannel):
target_state = Choi(target).data / trace
elif isinstance(target, BaseOperator):
target_state = np.ravel(Operator(target), order="F") / np.sqrt(trace)
else:
target_state = np.array(target)
if target_state.ndim == 1:
rho = evecs @ (evals / trace * evecs).T.conj()
fidelity = np.real(target_state.conj() @ rho @ target_state)
else:
sqrt_rho = evecs @ (np.sqrt(evals / trace) * evecs).T.conj()
eig = la.eigvalsh(sqrt_rho @ target_state @ sqrt_rho)
fidelity = np.sum(np.sqrt(np.maximum(eig, 0))) ** 2
return AnalysisResultData(name, fidelity)
@staticmethod
def _fitter_data(
data: List[Dict[str, any]]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[np.ndarray]]:
"""Return list a tuple of basis, frequency, shot data"""
outcome_dict = {}
meas_size = None
prep_size = None
for datum in data:
# Get basis data
metadata = datum["metadata"]
meas_element = tuple(metadata["m_idx"])
prep_element = tuple(metadata["p_idx"]) if "p_idx" in metadata else tuple()
if meas_size is None:
meas_size = len(meas_element)
if prep_size is None:
prep_size = len(prep_element)
# Add outcomes
counts = Counts(marginal_counts(datum["counts"], metadata["clbits"])).int_outcomes()
basis_key = (meas_element, prep_element)
if basis_key in outcome_dict:
TomographyAnalysis._append_counts(outcome_dict[basis_key], counts)
else:
outcome_dict[basis_key] = counts
num_basis = len(outcome_dict)
measurement_data = np.zeros((num_basis, meas_size), dtype=int)
preparation_data = np.zeros((num_basis, prep_size), dtype=int)
# -*- coding: utf-8 -*-
# Author: <NAME>
# License: MIT
import os
import numpy as np
import scipy as sc
from ..tools import femio
from ..basefem import BaseFEM, get_file_path
class Periodic3D(BaseFEM):
"""A class for a finite element model of a 3D bi-periodic
medium using Gmsh_ and GetDP_.
.. _Gmsh:
http://gmsh.info/
.. _GetDP:
http://getdp.info/
"""
def __init__(
self,
analysis="direct",
A=1,
lambda0=1,
theta_deg=0.0,
phi_deg=0,
psi_deg=0,
period_x=1,
period_y=1,
thick_L1=0.1, #: flt: thickness layer 1 (superstrate)
thick_L2=0.1, #: flt: thickness layer 2
thick_L3=0.1, #: flt: thickness layer 3 (interp)
thick_L4=0.1, #: flt: thickness layer 4
thick_L5=0.1, #: flt: thickness layer 5
thick_L6=0.1, #: flt: thickness layer 6 (substrate)
PML_top=1.0, # : flt: thickness pml top
PML_bot=1.0, # : flt: thickness pml bot
a_pml=1, #: flt: PMLs parameter, real part
b_pml=1, #: flt: PMLs parameter, imaginary part
eps_L1=1 - 0 * 1j, #: flt: permittivity layer 1 (superstrate)
eps_L2=1 - 0 * 1j, #: flt: permittivity layer 2
eps_L3=1 - 0 * 1j, #: flt: permittivity layer 3
eps_L4=1 - 0 * 1j, #: flt: permittivity layer 4
eps_L5=1 - 0 * 1j, #: flt: permittivity layer 5
eps_L6=1 - 0 * 1j, #: flt: permittivity layer 6 (substrate)
el_order=1,
):
super().__init__()
self.dir_path = get_file_path(__file__)
self.analysis = analysis
self.A = A
self.lambda0 = lambda0
self.theta_deg = theta_deg
self.phi_deg = phi_deg
self.psi_deg = psi_deg
# opto-geometric parameters -------------------------------------------
#: flt: periods
self.period_x = period_x
self.period_y = period_y
self.thick_L1 = thick_L1 #: flt: thickness layer 1 (superstrate)
self.thick_L2 = thick_L2 #: flt: thickness layer 2
self.thick_L3 = thick_L3 #: flt: thickness layer 3 (interp)
self.thick_L4 = thick_L4 #: flt: thickness layer 4
self.thick_L5 = thick_L5 #: flt: thickness layer 5
self.thick_L6 = thick_L6 #: flt: thickness layer 6 (substrate)
self.PML_top = PML_top #: flt: thickness pml top
self.PML_bot = PML_bot #: flt: thickness pml bot
#: flt: PMLs parameter, real part
self.a_pml = a_pml #: flt: PMLs parameter, real part
self.b_pml = b_pml #: flt: PMLs parameter, imaginary part
self.eps_L1 = eps_L1 #: flt: permittivity layer 1 (superstrate)
self.eps_L2 = eps_L2 #: flt: permittivity layer 2
self.eps_L3 = eps_L3 #: flt: permittivity layer 3
self.eps_L4 = eps_L4 #: flt: permittivity layer 4
self.eps_L5 = eps_L5 #: flt: permittivity layer 5
self.eps_L6 = eps_L6 #: flt: permittivity layer 6 (substrate)
self.el_order = el_order
self.bg_mesh = False
# 2 #: design domain number (check .geo/.pro files)
self.dom_des = 5000
# postprocessing -------------------------------------------------
#: int: number of diffraction orders
#: for postprocessing diffraction efficiencies
self.N_d_order = 0
self.orders = False
self.cplx_effs = False
self.eff_verbose = False
#: int: number of x integration points
#: for postprocessing diffraction efficiencies
self.ninterv_integ = 60
#: int: number of z slices points
#: for postprocessing diffraction efficiencies
self.nb_slice = 3
#: flt: such that `scan_dist = min(h_sup, hsub)/scan_dist_ratio`
self.scan_dist_ratio = 5
self.dim = 3
self.adjoint = False
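# Illustrative instantiation (the numerical values are assumptions chosen only for the example):
#   fem = Periodic3D(lambda0=0.6, theta_deg=30.0, period_x=0.4, period_y=0.4,
#                    thick_L3=0.15, eps_L3=4.0 - 0.01j, eps_L6=2.25)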
@property
def celltype(self):
return "tetra"
@property
def zmin_interp(self):
return self.thick_L5 + self.thick_L4
@property
def zmax_interp(self):
return self.zmin_interp + self.thick_L3
@property
def scan_dist(self):
return min(self.thick_L1, self.thick_L6) / self.scan_dist_ratio
@property
def theta_0(self):
return np.pi / 180.0 * (self.theta_deg)
@property
def phi_0(self):
return np.pi / 180.0 * (self.phi_deg)
@property
def psi_0(self):
return np.pi / 180.0 * (self.psi_deg)
@property
def corners_des(self):
return (
-self.period_x / 2,
+self.period_x / 2,
-self.period_y / 2,
+self.period_y / 2,
+self.zmin_interp,
+self.zmax_interp,
)
# @property
# def N_d_order(self):
# N = self.d/self.lambda0 * (np.sqrt([self.eps_L1, self.eps_L6]) - np.sin(self.theta))
# return int(max(N))
def _make_param_dict(self):
param_dict = super()._make_param_dict()
layer_diopter = self.ancillary_problem()
nb_layer = 6
layer = []
for k1 in range(0, nb_layer):
layer.append({})
layer[0]["epsilon"] = self.eps_L1
layer[1]["epsilon"] = self.eps_L2
layer[2]["epsilon"] = self.eps_L3
layer[3]["epsilon"] = self.eps_L4
layer[4]["epsilon"] = self.eps_L5
layer[5]["epsilon"] = self.eps_L6
layer[0]["thickness"] = self.thick_L1
layer[1]["thickness"] = self.thick_L2
layer[2]["thickness"] = self.thick_L3
layer[3]["thickness"] = self.thick_L4
layer[4]["thickness"] = self.thick_L5
layer[5]["thickness"] = self.thick_L6
layer[nb_layer - 2]["hh"] = 0
layer[nb_layer - 1]["hh"] = (
layer[nb_layer - 2]["hh"] - layer[nb_layer - 1]["thickness"]
)
for k in range(nb_layer - 3, -1, -1):
layer[k]["hh"] = layer[k + 1]["hh"] + layer[k + 1]["thickness"]
for i5 in range(0, nb_layer):
param_dict["thick_L" + str(i5 + 1)] = layer[i5]["thickness"]
param_dict["hh_L" + str(i5 + 1)] = layer[i5]["hh"]
param_dict["PML_bot_hh"] = layer[-1]["hh"] - self.PML_bot
param_dict["PML_top_hh"] = layer[0]["hh"] + self.thick_L1
param_dict["Expj_subs_re"] = layer_diopter[1]["Psi"][0].real
param_dict["Exmj_subs_re"] = layer_diopter[1]["Psi"][1].real
param_dict["Eypj_subs_re"] = layer_diopter[1]["Psi"][2].real
param_dict["Eymj_subs_re"] = layer_diopter[1]["Psi"][3].real
param_dict["Ezpj_subs_re"] = layer_diopter[1]["Psi"][4].real
param_dict["Ezmj_subs_re"] = layer_diopter[1]["Psi"][5].real
param_dict["Expj_subs_im"] = layer_diopter[1]["Psi"][0].imag
param_dict["Exmj_subs_im"] = layer_diopter[1]["Psi"][1].imag
param_dict["Eypj_subs_im"] = layer_diopter[1]["Psi"][2].imag
param_dict["Eymj_subs_im"] = layer_diopter[1]["Psi"][3].imag
param_dict["Ezpj_subs_im"] = layer_diopter[1]["Psi"][4].imag
param_dict["Ezmj_subs_im"] = layer_diopter[1]["Psi"][5].imag
param_dict["gamma_subs_re"] = layer_diopter[1]["gamma"].real
param_dict["gamma_subs_im"] = layer_diopter[1]["gamma"].imag
param_dict["Expj_super_re "] = layer_diopter[0]["Psi"][0].real
param_dict["Exmj_super_re "] = layer_diopter[0]["Psi"][1].real
param_dict["Eypj_super_re "] = layer_diopter[0]["Psi"][2].real
param_dict["Eymj_super_re "] = layer_diopter[0]["Psi"][3].real
param_dict["Ezpj_super_re "] = layer_diopter[0]["Psi"][4].real
param_dict["Ezmj_super_re "] = layer_diopter[0]["Psi"][5].real
param_dict["Expj_super_im "] = layer_diopter[0]["Psi"][0].imag
param_dict["Exmj_super_im "] = layer_diopter[0]["Psi"][1].imag
param_dict["Eypj_super_im "] = layer_diopter[0]["Psi"][2].imag
param_dict["Eymj_super_im "] = layer_diopter[0]["Psi"][3].imag
param_dict["Ezpj_super_im "] = layer_diopter[0]["Psi"][4].imag
param_dict["Ezmj_super_im "] = layer_diopter[0]["Psi"][5].imag
param_dict["gamma_super_re "] = layer_diopter[0]["gamma"].real
param_dict["gamma_super_im "] = layer_diopter[0]["gamma"].imag
return param_dict
def compute_solution(self, **kwargs):
res_list = ["helmholtz_vector", "helmholtz_vector_modal"]
return super().compute_solution(res_list=res_list)
def postpro_absorption(self):
self.postprocess("postopQ")
path = self.tmppath("Q.txt")
Q = np.loadtxt(path, skiprows=0, usecols=[1]) + 1j * np.loadtxt(
path, skiprows=0, usecols=[1]
)
return Q.real
def _postpro_fields_cuts(self):
npt_integ = self.ninterv_integ + 1
nb_slice = self.nb_slice
path_t = self.tmppath("Etot_XYcut.out")
path_r = self.tmppath("Edif_XYcut.out")
if os.path.isfile(path_t):
os.remove(path_t)
if os.path.isfile(path_r):
os.remove(path_r)
self.postprocess("Ed" + " -order 2")
Ex_t2, Ey_t2, Ez_t2 = femio.load_table_vect(path_t)
Ex_t2 = Ex_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ey_t2 = Ey_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ez_t2 = Ez_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ex_r2, Ey_r2, Ez_r2 = femio.load_table_vect(path_r)
Ex_r2 = Ex_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ey_r2 = Ey_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ez_r2 = Ez_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
return Ex_r2, Ey_r2, Ez_r2, Ex_t2, Ey_t2, Ez_t2
def postpro_epsilon(self):
self.postprocess("postop_epsilon" + " -order 2")
def diffraction_efficiencies(self):
Ex_r2, Ey_r2, Ez_r2, Ex_t2, Ey_t2, Ez_t2 = self._postpro_fields_cuts()
npt_integ = self.ninterv_integ + 1
# print('gmsh cuts done !')
period_x, period_y = self.period_x, self.period_y
N_d_order = self.N_d_order
lambda0 = self.lambda0
theta_0 = self.theta_0
phi_0 = self.phi_0
nb_slice = self.nb_slice
x_t = np.linspace(-period_x / 2, period_x / 2, npt_integ)
x_r = x_t
y_t = np.linspace(-period_y / 2, period_y / 2, npt_integ)
y_r = y_t
decalage = 0
No_ordre = np.linspace(
-N_d_order + decalage, N_d_order + decalage, 2 * N_d_order + 1
)
Nb_ordre = No_ordre.shape[0]
alpha0 = 2 * np.pi / lambda0 * np.sin(theta_0) * np.cos(phi_0)
beta0 = 2 * np.pi / lambda0 * np.sin(theta_0) * np.sin(phi_0)
gamma0 = 2 * np.pi / lambda0 * np.cos(theta_0)
alphat = alpha0 + 2 * np.pi / period_x * No_ordre
betat = beta0 + 2 * np.pi / period_y * No_ordre
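# Grating equations for the diffracted orders (n, m):
#   alpha_n = alpha_0 + 2*pi*n / period_x
#   beta_m  = beta_0  + 2*pi*m / period_y
# The propagation constants gamma_nm = sqrt(k^2 - alpha_n^2 - beta_m^2) are filled in below
# for the transmission (substrate) and reflection (superstrate) half-spaces.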
gammatt = np.zeros((Nb_ordre, Nb_ordre), dtype=complex)
gammatr = np.zeros((Nb_ordre, Nb_ordre), dtype=complex)
AXsir = np.zeros((Nb_ordre, Nb_ordre, nb_slice), dtype=complex)
AXsit = np.zeros((Nb_ordre, Nb_ordre, nb_slice), dtype=complex)
nb_layer_diopter = 2
layer_diopter = []
for k1 in range(0, nb_layer_diopter):
layer_diopter.append({})
layer_diopter[0]["epsilon"] = self.eps_L1
layer_diopter[1]["epsilon"] = self.eps_L6
layer_diopter[0]["kp"] = (
2 * np.pi / lambda0 * np.sqrt(layer_diopter[0]["epsilon"])
)
layer_diopter[1]["kp"] = (
2 * np.pi / lambda0 * np.sqrt(layer_diopter[1]["epsilon"])
)
layer_diopter[0]["gamma"] = np.sqrt(
layer_diopter[0]["kp"] ** 2 - alpha0 ** 2 - beta0 ** 2
)
layer_diopter[1]["gamma"] = np.sqrt(
layer_diopter[1]["kp"] ** 2 - alpha0 ** 2 - beta0 ** 2
)
for nt in range(0, Nb_ordre):
for mt in range(0, Nb_ordre):
gammatt[nt, mt] = np.sqrt(
layer_diopter[-1]["kp"] ** 2 - alphat[nt] ** 2 - betat[mt] ** 2
)
for nr in range(0, Nb_ordre):
for mr in range(0, Nb_ordre):
gammatr[nr, mr] = np.sqrt(
layer_diopter[0]["kp"] ** 2 - alphat[nr] ** 2 - betat[mr] ** 2
)
for k11 in range(0, nb_slice):
Ex_t3 = Ex_t2[:, :, k11]
Ey_t3 = Ey_t2[:, :, k11]
Ez_t3 = Ez_t2[:, :, k11]
Ex_r3 = Ex_r2[:, :, k11]
Ey_r3 = Ey_r2[:, :, k11]
Ez_r3 = Ez_r2[:, :, k11]
Ex_t3 = np.transpose(Ex_t3.conjugate())
Ey_t3 = np.transpose(Ey_t3.conjugate())
Ez_t3 = np.transpose(Ez_t3.conjugate())
Ex_r3 = np.transpose(Ex_r3.conjugate())
Ey_r3 = np.transpose(Ey_r3.conjugate())
Ez_r3 = np.transpose(Ez_r3.conjugate())
ex_nm_r_inter = np.zeros((1, npt_integ), dtype=complex)[0, :]
ex_nm_t_inter = np.zeros((1, npt_integ), dtype=complex)[0, :]
"""
Created by <NAME> for project computational biology
"""
#import all the necessary modules
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
from astropy.table import Table
from sklearn.metrics import confusion_matrix, roc_curve, auc, roc_auc_score, classification_report, f1_score, precision_score
from sklearn import tree, preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold, StratifiedShuffleSplit
import numpy as np
from decimal import getcontext, Decimal
from sklearn.tree import export_graphviz
import graphviz
from imblearn.over_sampling import SMOTE
from random import randint
from inspect import signature
import warnings
warnings.filterwarnings("ignore")
def read_data(file_loc):
'''read the desired data from the csv file as a dataframe'''
dframe=pd.read_csv(file_loc)
return dframe
def remove_nan_dframe(dframe,class_sort):
'''remove patients from the dataframe which contain a Nan value in the specified column and return the new dataframe and the original indexes which were kept '''
drop_index=[]; #will contain all indexes which will have to be removed
kept=[]; #will contain all kept indexes
for i in dframe.index: #look at each separate patient
if isinstance(dframe.loc[i,class_sort], float) or dframe.loc[i,class_sort]=='Niet bekend': #a Nan is classified as a float in python
drop_index.append(i) #if it is a Nan the index will have to be removed
else:
kept.append(i) #if not a Nan the index will be kept
dframe=dframe.drop(drop_index,axis=0) #drop all Nan indexes
return dframe, kept
def remove_nan_markers(dframe,locs):
'''remove the patients with unknown concentrations of the tumor markers'''
drop_index=[]; #will contain all indexes which will have to be removed
TMs=dframe.columns[locs] #names of columns with the tumor markers
for marker in TMs: #look at each column which contains a TM
for pat in dframe.index: #look at each patient in the dataframe
if np.isnan(dframe.loc[pat,marker])==True and pat not in drop_index: #if the patient has a Nan as concentration, add to the list
drop_index.append(pat)
dframe=dframe.drop(drop_index,axis=0) #drop all patient with unknown TM(s)
return dframe
def remove_nan_int(dframe,cat='age'):
'''remove patients from the dataframe which contain a Nan value in the specified column with integers/floats and return the new dataframe'''
drop_index=[]; #will contain all indexes which will have to be removed
kept=[]; #will contain all kept indexes
for i in dframe.index: #look at each separate patient
if np.isnan(dframe.loc[i,cat])==True and i not in drop_index: #if the value is a Nan then add to list
drop_index.append(i) #if it is a Nan the index will have to be removed
else:
kept.append(i) #if not a Nan the index will be kept
dframe=dframe.drop(drop_index,axis=0) #drop all Nan indexes
return dframe, kept
def remove_nan_list(lis):
'''remove Nan values from the list and also specify which original indexes where kept '''
drop_index=[]; #will contain all indexes which will have to be removed
kept=[]; #will contain all kept indexes
r=len(lis)
for i in range(0,r): #look at each separate patient
if isinstance(lis[i], float):
drop_index.append(i)
else:
kept.append(i)
for index in sorted(drop_index, reverse=True): #elements need to be deleted from back to front to prevent indexing issues
del lis[index]
return lis, kept
def make_clustermap(dframe,remove,save_fig,locs,class_sort='lung_carcinoma'):
'''make a clustermap of the selected column in the dataframe together with the corresponding labels of each patient'''
if remove==True: #remove Nan's if specified
dframe, kept=remove_nan_dframe(dframe,class_sort)
cla=dframe[class_sort] #take the desired column
labels=cla.unique() #determine the unique strings in the column
lut = dict(zip(labels, 'rbgk')) #create dictionary of possible options and assign a color code to each
row_colors = cla.map(lut) #provides the corresponding color code for each of the patients and thus indicates the label
markers=dframe.iloc[:,locs] #Tumor markers
cmap = sns.diverging_palette(250, 10, n=9, as_cmap=True) #select a color pallete for the clustermap
g=sns.clustermap(markers, cmap=cmap, metric='euclidean', method='single', col_cluster=False, row_colors=row_colors, z_score=1) #make clustermap with normalization of the columns
for label in labels: #add the labels of each patient next to the clustermap
g.ax_col_dendrogram.bar(0, 0, color=lut[label],
label=label, linewidth=0)
g.ax_col_dendrogram.legend(loc='center', ncol=3)
g.ax_heatmap.set_title('Clustermap of the protein biomarkers with labels of the different known '+str(class_sort)+' classes')
if save_fig==True: #save the figure if wanted with a unique name to prevent overwriting files
x=datetime.datetime.now()
extra='_'.join([str(x.year),str(x.month),str(x.day),str(x.hour),str(x.minute),str(x.second)])
g.savefig('clustermap'+extra+'.png')
return
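# Illustrative call (a sketch; `marker_locations` is the helper used further below to find the
# tumor-marker columns):
#   locs = marker_locations(dframe)
#   make_clustermap(dframe, remove=True, save_fig=False, locs=locs, class_sort='lung_carcinoma')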
def approach_paper(dframe,thresholds,locs,category='lung_carcinoma'):
"""use the specified thresholds from the paper to classify each patient (LC=1 and no LC=0)"""
dframe, kept=remove_nan_dframe(dframe,category)
(rows,columns)=dframe.shape
truth=dframe[category]
if category=='lung_carcinoma':
labels=['No', 'Yes'] #determine unique labels
elif category=='cancer_type':
labels=['SCLC','NSCLC']
lut = dict(zip(labels, [0,1]))
ground = truth.map(lut) #the ground truth of each patient mapped with the labels dictionary to have a binary problem
gr=ground.tolist()
#statistics for each individual marker
PPVm=np.zeros(7)
NPVm=np.zeros(7)
sensm=np.zeros(7)
specm=np.zeros(7)
AUCm=np.zeros(7)
LC_results=np.zeros(rows) #results of the thresholding operation
for i in locs: #look at all tumor markers
TM=dframe.columns[i] #current marker
LC_marker=np.zeros(rows) #classification for each individual marker
if TM in thresholds.keys(): #see if a threshold is present for the tumor marker
for pat in range(0,rows): #look at each patient
if dframe.iloc[pat,i]>=thresholds[TM]: #if the TM concentration exceeds the threshold, add the patient to the list and classify as having LC
LC_results[pat]=1
LC_marker[pat]=1
P,N,S,E,_=evaluate_stats(gr,LC_marker,labels) #calculate the statistics for each individual marker
PPVm[i-6]=P[1]
NPVm[i-6]=N[1]
sensm[i-6]=S[1]
specm[i-6]=E[1]
AUCm[i-6]=roc_auc_score(gr,LC_marker)
print_stats_adv(PPVm,NPVm,sensm,specm,AUCm,dframe.columns[locs],'Individual thresholds',category_to_investigate) #provide the statistics in a table for each individual marker
predictions=LC_results
PPV,NPV,sensitivity,specificity,report=evaluate_stats(gr,predictions,labels) #evaluate the operation by calculating the programmed statistical values
A=roc_auc_score(gr,predictions)
AUC=[A,1-A]
print_stats_adv(PPV,NPV,sensitivity,specificity,AUC,labels,'Thresholds paper',category_to_investigate) #provide the statistics in a table
return PPV[1],NPV[1],sensitivity[1],specificity[1],report
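# Illustrative call of approach_paper (the threshold values and column names below are
# assumptions for the example only, not the thresholds reported in the paper):
#   thresholds = {'CEA (ng/ml)': 5.0, 'CYFRA21-1 (ng/ml)': 3.3}
#   PPV, NPV, sens, spec, report = approach_paper(dframe, thresholds, locs,
#                                                 category='lung_carcinoma')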
def plot_optimal(AUCs,thresholds,TMs,optimal):
'''plot the continuous AUC values against the corresponding thresholds for each marker'''
for i in range(0,len(AUCs.columns)): #loop over each marker
AUC_list=AUCs[TMs[i]].tolist() #take the right marker
opt=optimal[TMs[i]] #take the optimal threshold value
label=TMs[i].split(' ')
plt.figure()
plt.plot(thresholds, AUC_list, color='darkorange',label='optimal threshold: %0.2f ' % opt + label[1]) #plot the continuous AUCs
plt.xlabel('Threshold value '+label[1])
plt.ylabel('AUC')
plt.title('Threshold values versus AUC for the tumor marker: '+ label[0])
plt.plot([opt,opt],[min(AUC_list), max(AUC_list)],linestyle='--',color='black') #plot the optimal threshold value as a dashed line
plt.legend(loc="lower right")
plt.show()
return
def optimal_thres(dframe,locs,category='lung_carcinoma'):
'''determine the optimal thresholds for each marker by optimalization of the AUC'''
dframe, kept=remove_nan_dframe(dframe,category) #remove Nans
(rows,columns)=dframe.shape
TMs=dframe.columns[locs] #names of all tumor markers
threshold=np.linspace(0,1000,800) #define possible thresholds
AUCs=np.zeros((len(threshold),len(TMs))) #make room in memory for the AUCs
if category=='lung_carcinoma':
labels=['No', 'Yes'] #determine unique labels
elif category=='cancer_type':
labels=['SCLC','NSCLC']
lut = dict(zip(labels, [0,1]))
y_true=dframe[category].map(lut) #map the true classification to binary values
optimal=dict() #dictionary to store the best threshold values
for mi,marker in enumerate(locs): #look at each marker separately
for index,thres in enumerate(threshold): #loop over all of the possible threshold values
LC_result=np.zeros(rows) #make room in memory for classification
for pat in range(0,rows): #look at each patient
if dframe.iloc[pat,marker]>=thres: #classification process
LC_result[pat]=1
fpr, tpr, _ = roc_curve(y_true, LC_result) #determine roc of each threshold
AUCs[index,mi]=auc(fpr, tpr) #determine AUC of each threshold
place=np.argmax(AUCs[:,mi]) #determine index of best AUC
optimal[TMs[mi]] = threshold[place] #add optimal threshold to dictionary with the corresponding marker
AUCs=pd.DataFrame(AUCs,columns=TMs) #convert to dataframe
plot_optimal(AUCs,threshold,TMs,optimal) #plot the AUC values and optimal threshold
return optimal
def optimal_thresCV(dframe,locs,category='lung_carcinoma'):
'''determine the optimal threshold for each marker by applying cross validation and optimalization of the AUC'''
dframe, kept=remove_nan_dframe(dframe,category) #remove Nans
(rows,columns)=dframe.shape
TMs=dframe.columns[locs] #names of tumor markers
threshold=np.linspace(0,1000,800) #define threshold range
if category=='lung_carcinoma':
labels=['No', 'Yes'] #determine unique labels
elif category=='cancer_type':
labels=['SCLC','NSCLC']
lut = dict(zip(labels, [0,1]))
y_true=dframe[category].map(lut)
y_true=y_true.tolist()
y_true=np.array(y_true) #numpy array of the ground truth
skf = StratifiedKFold(n_splits=10) #initialization of the cross validation
overall_optimals=dict() #dictionary which will contain the best threshold for each marker
for mi,marker in enumerate(locs): #look at each marker
AUCs_CV=[] #will contain the AUCs of a marker
optimals=[] #optimal thresholds for each CV set
for train_index, test_index in skf.split(dframe, y_true): #apply cross validation
AUCs=np.zeros(len(threshold)) #will contain the AUCs for all thresholds of the training set
for index,thres in enumerate(threshold): #loop over all possible thresholds
LC_result=np.zeros(len(train_index)) #will contain classification for this threshold
for z,pat in enumerate(train_index): #loop over patients in training set
if dframe.iloc[pat,marker]>=thres: #classify
LC_result[z]=1
fpr, tpr, _ = roc_curve(y_true[train_index], LC_result) #roc for each threshold
AUCs[index]=auc(fpr, tpr) #add AUC to list for this training set
place=np.argmax(AUCs) #place best AUC for this training set
optimal=threshold[place] #optimal threshold for this CV training set
optimals.append(optimal) #extend the optimal thresholds for each CV set
predictions=np.zeros(len(test_index)) #make space in memory for this CV set
for idx,pat in enumerate(test_index): #look at each patient in the test set
if dframe.iloc[pat,marker]>=optimal: #classify with the optimal threshold determined for the training set
predictions[idx]=1
fpr_test, tpr_test, _ = roc_curve(y_true[test_index], predictions) #roc of this CV test set
AUCs_CV.append(auc(fpr_test, tpr_test)) #AUC of this CV test set
label=TMs[mi].split(' ')
plt.figure()
plt.scatter(optimals,AUCs_CV)
plt.xlabel('Threshold value '+label[1])
plt.ylabel('AUC')
plt.title('Threshold values of cross validated test set versus AUC for the individual tumor marker : '+ label[0])
plt.show()
spot=np.argmax(AUCs_CV) #place of optimal threshold for the marker after cross validation
overall_optimals[TMs[mi]]=optimals[spot] #optimal threshold for the marker after cross validation
return overall_optimals
def find_nearest(array, value, pos):
'''calculate the range of the threshold by taking into account the standard deviation of the max metric value'''
array = np.asarray(array)
diff = array - value #value to consider
top=diff[pos:] #threshold values above maximum
bot=diff[:pos] #threshold values below maximum
bot=bot<0
top=top<0
bot = np.flip(bot)  # flip so that argmax searches downwards starting from the position of the maximum
if len(top)>0:
top_idx=top.argmax()-1+pos #position where metric value is equal to max metric minus its std
else:
top_idx=len(array)
if len(bot)>0:
bot_idx=pos-bot.argmax()-1 #position where metric value is equal to max metric minus its std
else:
bot_idx=0
return bot_idx,top_idx
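# find_nearest brackets the plateau of thresholds whose metric stays within one standard
# deviation of the maximum: starting from the position `pos` of the best threshold it moves
# downwards (bot_idx) and upwards (top_idx) until the metric first drops below `value`,
# and returns the pair of indices bounding that plateau.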
def optimal_thresBoot(dframe,locs,category='lung_carcinoma',used_metric='AUC'):
'''determine the optimal threshold for each marker by applying Bootstrap and optimalization of the chosen metric'''
dframe, kept=remove_nan_dframe(dframe,category) #remove Nans
(rows,columns)=dframe.shape
TMs=dframe.columns[locs] #names of all tumor markers
threshold=np.linspace(0,1000,800) #define possible thresholds
if category=='lung_carcinoma':
labels=['No', 'Yes'] #determine unique labels
elif category=='cancer_type':
labels=['SCLC','NSCLC']
lut = dict(zip(labels, [0,1]))
y_true=dframe[category].map(lut) #map the true classification to binary values
k=5 #number of times bootstrap is applied
selection=dframe.index.tolist() #the indexes which can be selected to use
optimal_range=dict() #the optimal range for each threshold
optimal_means=dict() #the threshold value with highest mean
for mi,marker in enumerate(locs): #look at each marker separately
metric=np.zeros((len(threshold),k)) #make room in memory for the AUCs
for i in range(0,k): #applying Bootstrap multiple times
ti = [randint(0, len(dframe[TMs[mi]])-1) for p in range(0, len(dframe[TMs[mi]]))] #select random indices
train_index=[selection[z] for z in ti] #select the indexes to be used which are present in the dataframe
for index,thres in enumerate(threshold): #loop over all of the possible threshold values
LC_result=np.zeros(len(train_index)) #make room in memory for classification
y_res=np.zeros(len(train_index)) #the true results for this Bootstrap round
for ind,f_idx in enumerate(train_index): #look at each selected index
if (dframe.loc[f_idx,TMs[mi]])>=thres: #classification process
LC_result[ind]=1
y_res[ind]=y_true.loc[f_idx] #correct classification accompanying this selected index
if used_metric=='AUC':
fpr, tpr, _ = roc_curve(y_res, LC_result) #determine roc of each threshold
metric[index,i]=auc(fpr, tpr) #determine AUC of each threshold
elif used_metric=='F1':
metric[index,i]=f1_score(y_res,LC_result) #F1 score
elif used_metric=='precision':
metric[index,i]=precision_score(y_res,LC_result) #precision score
elif used_metric=='specificity':
_,_,_,specificity,_=evaluate_stats(y_res,LC_result,labels)
metric[index,i]=specificity[1]
means=np.mean(metric,axis=1) #calculate means of the metric
stand=np.std(metric,axis=1) #std of metric
#plot result for each individual marker
plt.errorbar(threshold,means,yerr=stand,linestyle='-',ecolor='black')
label=TMs[mi].split(' ')
plt.xlabel('Threshold value '+label[1])
plt.ylabel(used_metric)
plt.title('Threshold values versus '+used_metric+ ' with Bootstrap method for the tumor marker: '+ label[0])
plt.show()
if used_metric=='AUC':
spot=np.argmax(means) #place with highest mean metric score
t_range=means[spot]-np.abs(stand[spot]) #highest mean minus its standard deviation
bot,top=find_nearest(means,t_range,spot) #threshold indexes which match the calculated value
string='-'.join([str(threshold[bot]),str(threshold[top])]) #range written in a string
optimal_range[TMs[mi]]=string #add range to dict
optimal_means[TMs[mi]]=threshold[spot] #add best threshold considering mean metric to dict
elif used_metric=='F1' :
spot=np.argmax(means) #place with highest mean metric score
#
t_range=means[spot]-np.abs(stand[spot]) #highest mean minus its standard deviation
bot,top=find_nearest(means,t_range,spot) #threshold indexes which match the calculated value
string='-'.join([str(threshold[bot]),str(threshold[top])]) #range written in a string
optimal_range[TMs[mi]]=string #add range to dict
#
optimal_means[TMs[mi]]=threshold[spot] #add best threshold considering mean metric to dict
elif used_metric=='precision' or used_metric=='specificity':
means=np.where(means>0.98,0,means) #every metric value that is too high to be considered realistic is set to 0
spot=np.argmax(means) #place with highest mean metric score
#
t_range=means[spot]-np.abs(stand[spot]) #highest mean minus its standard deviation
bot,top=find_nearest(means,t_range,spot) #threshold indexes which match the calculated value
string='-'.join([str(threshold[bot]),str(threshold[top])]) #range written in a string
optimal_range[TMs[mi]]=string #add range to dict
#
optimal_means[TMs[mi]]=threshold[spot] #add best threshold considering mean metric to dict
return optimal_range,optimal_means
def visualize_DT(dtree,feature_names,class_names):
'''Visualization of the decision tree'''
export_graphviz(dtree, out_file='tree3.dot', feature_names = feature_names,class_names = class_names,rounded = True, proportion = False, precision = 2, filled = True)
#(graph,) = pydot.graph_from_dot_file('tree.dot')
#graph=graphviz.Source(dot_data)
#graphviz.render('dot','png','C:/Users/s164616/Documents/MATLAB/Project Computational Biology')
return
def prepare_data(dframe,cat,normalize,smote):
'''prepare the data for the classifier by applying mapping and splitting the data and if specified oversampling and/or normalization'''
dframe, kept=remove_nan_dframe(dframe,cat) #remove all Nan since these do not contribute to the classifier
extra=True #provide additional data to the classifiers of age and smoking history
if extra==True: #remove the Nan's for the ages and smoking history if data will have to be included
dframe,_=remove_nan_int(dframe,'age')
dframe,_=remove_nan_dframe(dframe,'smoking_history')
y_true=dframe[cat]
if cat=='lung_carcinoma':
labels=['No', 'Yes'] #determine unique labels
elif cat=='cancer_type':
labels=['SCLC','NSCLC']
#length=range(0,len(labels)) #provide a integer to each label
locs=marker_locations(dframe)
lut = dict(zip(labels, [0,1])) #create dictionary of possible options
markers=dframe.iloc[:,locs] #TM
TMs=markers.columns
y_true=y_true.map(lut) #convert each string to the corresponding integer in the dictionary
if extra==True:
ages=dframe['age']
#ages=np.rint(ages) #round the ages to the nearest integer
markers['age'] = ages #add the ages to the dataframe with the tumor markers
smoking=dframe['smoking_history']
transf={'Nooit':0,'Verleden':1,'Actief':2} #dictionary to map the smoking-history strings (never/former/active) to integers
smoking=smoking.map(transf) #map the strings in the list with the provided dictionary
markers['smoking_history'] = smoking #add the smoking history to the dataframe with the tumor markers
TMs=markers.columns #column names also include ages and smoking
X_train, X_test, y_train, y_test = train_test_split(markers.values, y_true, test_size=0.2, stratify=y_true) #split the data in a training set and a test set
col=markers.columns
X_train=pd.DataFrame(X_train,columns=col)
X_test=pd.DataFrame(X_test,columns=col)
if normalize==True and smote!=True: #scale each of the columns of the tumor markers
scaler = preprocessing.StandardScaler()
markers[TMs] = scaler.fit_transform(markers.values[:,0:len(TMs)])
scaler.fit(X_train.values[:,0:len(TMs)])
X_train[TMs] = scaler.transform(X_train.values[:,0:len(TMs)])
X_test[TMs] = scaler.transform(X_test.values[:,0:len(TMs)])
if smote==True: #apply synthetic Minority Over-sampling if specified (usually for skewed data distribution)
sm = SMOTE(random_state=42) #initialization
name=markers.columns #names of the TM's
markers,y_true=sm.fit_resample(markers,y_true)
markers=pd.DataFrame(markers,columns=TMs)
y_true=pd.DataFrame(y_true,columns=['class'])
X_train,y_train=sm.fit_resample(X_train,y_train) #apply operation and provide new data
X_train=pd.DataFrame(X_train,columns=name) #convert the TM list to a Dataframe
if normalize==True and smote==True: #scale each of the columns of the tumor markers
scaler2 = preprocessing.StandardScaler()
markers[TMs] = scaler2.fit_transform(markers.values[:,0:len(TMs)])
scaler2.fit(X_train.values[:,0:len(TMs)])
X_train[TMs] = scaler2.transform(X_train.values[:,0:len(TMs)])
X_test[TMs] = scaler2.transform(X_test.values[:,0:len(TMs)])
return markers, y_true, X_train, X_test, y_train, y_test, labels, lut
def det_CVscore(clf,markers,y_true,labels):
'''apply cross validation (Startified Shuffle Split) and determine the mean and standard deviation of the scores'''
n=100
sss = StratifiedShuffleSplit(n_splits=n, test_size=0.2)
score=[]
PPV=[]
NPV=[]
sensi=[]
speci=[]
print(type(markers))
print(type(y_true))
for train_index, test_index in sss.split(markers, y_true): #loop over each of the folds
clf.fit(markers.iloc[train_index],y_true.iloc[train_index]) #fit fold to classifier
pred=clf.predict_proba(markers.iloc[test_index]) #generate predictions
score.append(roc_auc_score(y_true.iloc[test_index],pred[:,1])) #add AUC score
P,N,se,sp,_=evaluate_stats(y_true.iloc[test_index],np.rint(pred[:,1]),labels) #calculate statistics
#ensure no NaN values
if np.isnan(P[1])==True:
P[1]=0
if np.isnan(N[1])==True:
N[1]=0
if np.isnan(se[1])==True:
se[1]=0
if np.isnan(sp[1])==True:
sp[1]=0
PPV.append(P[1])
NPV.append(N[1])
sensi.append(se[1])
speci.append(sp[1])
CV_score={'mean AUC':np.mean(score),'std AUC':np.std(score),'mean PPV':np.mean(PPV),'std PPV':np.std(PPV),'mean NPV':np.mean(NPV),'std NPV':np.std(NPV),'mean sensitivity':np.mean(sensi),'std sensitivity':np.std(sensi),'mean specificity':np.mean(speci),'std specificity':np.std(speci)}
return CV_score
def det_CVscore_sim(clf,markers,y_true):
'''apply cross validation score and determine the mean and standard deviation of the score'''
score=cross_val_score(clf,markers,y_true,cv=10,scoring='roc_auc') #cross validation step
score_f1=cross_val_score(clf,markers,y_true,cv=10,scoring='f1')
CV_score={'mean AUC':np.mean(score),'std AUC':np.std(score),'mean F1':np.mean(score_f1),'std F1':np.std(score_f1)}
return CV_score
def get_label(labels,cat):
'''provide the correct index of the positive label'''
if cat=='lung_carcinoma':
string='Yes'
elif cat=='cancer_type':
string='NSCLC'
for i in range(0,len(labels)):
if labels[i]==string:
Y_index=i
return Y_index
def decisionT(dframe,cat,save_roc):
'''Set up a decision tree classifier and train it after which predictions are made for the test set and statistics for this classification are calculated'''
markers, y_true, X_train, X_test, y_train, y_test, labels, lut=prepare_data(dframe,cat,normalize=False,smote=False) #prepare the data
X_train=markers
y_train=y_true
X_test=markers
y_test=y_true
clf = tree.DecisionTreeClassifier() #initialization of the classifier
CV_score=det_CVscore(clf,markers,y_true,labels) #cross validation
clf.fit(X_train,y_train) #fit classifier to training data
visualize_DT(clf,X_train.columns,labels)
Y_index=get_label(labels,cat)
assert clf.classes_[Y_index]==1 #ensure that the classifier has the correct label as the positive class
predictions=clf.predict(X_test) #use reshape(1,-1) on the array when predicting a single array
PPV,NPV,sensitivity,specificity,report=evaluate_stats(y_test,predictions,labels) #process the result and provide statistics
auc_DT=roc_auc(y_test,predictions,cat,save_roc,lut,classifier='Decision Tree classifier') #AUC and ROC curve of classification
print_stats_adv(PPV,NPV,sensitivity,specificity,False,labels,'Decision Tree classifier',cat) #show statistics in table
return auc_DT,PPV[Y_index],NPV[Y_index],sensitivity[Y_index],specificity[Y_index], report, CV_score
def Logistic_clas(dframe,cat,save_roc):
'''Set up a Logistic Regression classifier and train on data after which the predictions of the test data are evaluated'''
markers, y_true, X_train, X_test, y_train, y_test, labels, lut=prepare_data(dframe,cat,normalize=True,smote=False) #prepare data
X_train=markers
y_train=y_true
X_test=markers
y_test=y_true
clf = LogisticRegression(penalty='l2',solver='liblinear') #initialization of the classifier
CV_score=det_CVscore(clf,markers,y_true,labels) #cross validation
clf.fit(X_train,y_train) #fitting training set
Y_index=get_label(labels,cat)
assert clf.classes_[Y_index]==1 #ensure that the classifier has the correct label as the positive class
predictions=clf.predict_proba(X_test) #use reshape(1,-1) on the array when predicting a single array
predictions=predictions[:,Y_index]
PPV,NPV,sensitivity,specificity,report=evaluate_stats(y_test,np.rint(predictions),labels) #statistics
auc_LC=roc_auc(y_test,predictions,cat,save_roc,lut,classifier='Logistic Regression classifier') #AUC and ROC curve
print_stats_adv(PPV,NPV,sensitivity,specificity,False,labels,'Logistic Regression classifier',cat) #Table of statistics
return auc_LC,PPV[Y_index],NPV[Y_index],sensitivity[Y_index],specificity[Y_index], report, CV_score
def SVM_clas(dframe,cat,save_roc):
'''Set up a Supported vector machine classifier and train on data after which the predictions of the test data are evaluated'''
markers, y_true, X_train, X_test, y_train, y_test, labels, lut=prepare_data(dframe,cat,normalize=True,smote=False) #prepare data
X_train=markers
y_train=y_true
X_test=markers
y_test=y_true
clf = SVC(probability=True) #initialization of the classifier
CV_score=det_CVscore(clf,markers,y_true,labels)
clf.fit(X_train,y_train) #fitting training set
Y_index=get_label(labels,cat)
assert clf.classes_[Y_index]==1 #ensure that the classifier has the correct label as the positive class
predictions=clf.predict_proba(X_test) #use reshape(1,-1) on the array when predicting a single array
predictions=predictions[:,Y_index]
PPV,NPV,sensitivity,specificity,report=evaluate_stats(y_test,np.rint(predictions),labels) #statistics
auc_SVM=roc_auc(y_test,predictions,cat,save_roc,lut,classifier='SVM') #AUC and ROC curve
print_stats_adv(PPV,NPV,sensitivity,specificity,False,labels,'SVM',cat) #Table of statistics
return auc_SVM,PPV[Y_index],NPV[Y_index],sensitivity[Y_index],specificity[Y_index], report, CV_score
def Naive(dframe,cat,save_roc):
'''Set up a Gaussian Naive Bayes classifier and train on data after which the predictions of the test data are evaluated'''
markers, y_true, X_train, X_test, y_train, y_test, labels, lut=prepare_data(dframe,cat,normalize=True,smote=False) #prepare data
X_train=markers
y_train=y_true
X_test=markers
y_test=y_true
clf = GaussianNB() #initialization of the classifier
#clf=BernoulliNB()
CV_score=det_CVscore(clf,markers,y_true,labels)
clf.fit(X_train,y_train) #fitting training set
Y_index=get_label(labels,cat)
assert clf.classes_[Y_index]==1 #ensure that the classifier has the correct label as the positive class
predictions=clf.predict_proba(X_test) #use reshape(1,-1) on the array when predicting a single array
predictions=predictions[:,Y_index]
PPV,NPV,sensitivity,specificity,report=evaluate_stats(y_test, | np.rint(predictions) | numpy.rint |
import numpy as np
import scipy.fftpack as spfft
from scipy.signal import resample
from scipy import interpolate
from sklearn.linear_model import Lasso,OrthogonalMatchingPursuit
#import spams
import cvxpy as cvx
import recon_utils as utils
""" Reconstruction algorithms
This module contains functions that can reconstruct a signal from a
downsampled one. It contains implementations of compressed sensing,
interpolation and signal processing methods.
Compressed sensing
------------------
1. Orthogonal Matching Pursuit, with omp_recon and omp_batch_recon
2. l1 minimization solvers, with lasso_recon and cvx_recon
Spline interpolation
--------------------
1. zero_recon for a spline of 0th degree
2. linear_recon for a spline of 1st degree
3. quadratic_recon for a spline of 2nd degree
4. cubic_recon for a spline of 3rd degree
5. combined_recon for a combination of 0th and 1st degree splines
6. combined_con_recon, same as combined_recon but preserving
local convexity or concavity
Signal processing
-----------------
shannon_recon provides a reconstruction based on the shannon interpolation
and Fourier transform
"""
# ------- COMPRESSED SENSING FUNCTIONS -------
def omp_recon(
signal, ind, transform='dct', target_pts=120, L=20, eps=0,
numThreads=-1, retCoefs=False):
""" Performs an Orthogonal Matching Pursuit technique, with spams library
This algorithm is based on Compressed sensing theory and works as a
greedy algorithm to find the sparsest coefficients in a given transform that
fit the input signal.
Then it returns the inverse transform of these coefficients
Parameters
----------
signal : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
target_pts : integer
the number of points the reconstructed signal should have
L : integer
the number of nonzeros that are supposed to be in
the original signal's transform.
eps : float
see spams' OMP documentation.
numThreads : integer
number of threads to use (if -1, automatically chosen).
transform : 'dct' or 'dst'
the type of transform to use (discrete cosine or sine transform)
    retCoefs : boolean
if True, will return the coefficients of the transform
Returns
-------
Y : list
the reconstructed signal
sol : list
the coefficients of the reconstructed signal's transform
"""
X = np.asfortranarray(np.array([signal]).transpose(),dtype=np.float64)
# X contains all the signals to solve
if transform == 'dst':
# generating the transform matrix
phi = spfft.idst(np.identity(target_pts), axis=0)
else:
phi = spfft.idct(np.identity(target_pts), axis=0)
# generating the matrix phi for the problem y=phi.x
phi = phi[ind]
D = np.asfortranarray(phi)
# normalizing D
D = np.asfortranarray(
D / np.tile(np.sqrt((D*D).sum(axis=0)), (D.shape[0],1)),
dtype= np.float64)
alpha = spams.omp(X, D, L=L, eps=eps,
return_reg_path=False, numThreads=numThreads)
sol = np.array(alpha.toarray()).transpose() * 2
sol = sol[0]
indz = np.nonzero(sol)
if transform == 'dst':
Y = spfft.idst(sol)
else:
Y = spfft.idct(sol)
Y = utils.normalize(Y)
if retCoefs:
return (Y, sol)
else:
return Y
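# Hypothetical usage of the OMP reconstruction above (note it relies on the
# optional `spams` dependency, whose import is commented out at the top of
# this module); `signal` is an illustrative name:
# >>> ds, ind = signal[::6], list(range(0, 120, 6))   # downsampled signal
# >>> rec = omp_recon(ds, ind, transform='dct', target_pts=120, L=20)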
def omp_batch_recon(
signal, ind, target_pts, n_nonzero_coefs=20,
transform='dct', retCoefs=False):
""" Performs an Orthogonal Matching Pursuit technique, with batch approach
This algorithm is based on Compressed sensing theory and works as a
greedy algorithm to find the sparsest coefficients in a given transform that
fit the input signal.
Then it returns the inverse transform of these coefficients
Parameters
----------
signal : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
target_pts : integer
the number of points the reconstructed signal should have
n_nonzero_coefs : integer
the number of nonzeros that are supposed to be in
the original signal's transform.
transform : 'dct' or 'dst'
the type of transform to use (discrete cosine or sine transform)
    retCoefs : boolean
if True, will return the coefficients of the transform
Returns
-------
x : list
the reconstructed signal
coef : list
the coefficients of the reconstructed signal's transform
"""
if transform == 'dst':
phi = spfft.idst(np.identity(target_pts), axis=0)
else:
phi = spfft.idct(np.identity(target_pts), axis=0)
phi = phi[ind]
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(phi,signal)
coef = omp.coef_
if transform == 'dst':
x = spfft.idst(coef,axis=0) + np.mean(signal)
else:
x = spfft.idct(coef,axis=0) + np.mean(signal)
x = utils.normalize(x)
if retCoefs:
return(x,coef)
else:
return x
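# Illustrative call of the batch OMP variant above (ds/ind are hypothetical):
# >>> rec, coef = omp_batch_recon(ds, ind, target_pts=120,
# ...                             n_nonzero_coefs=20, retCoefs=True)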
def lasso_recon(signal, ind, target_pts=120, alpha=0.001, retCoefs=False):
""" Solves the l1 minimization problem with a lasso algorithm
It transforms the input signal using discrete cosine transform, then solves
a l1 minimization problem to find the sparsest coefficients that fit the
given signal. It returns the inverse transform of these coefficients.
Parameters
----------
signal : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
target_pts : integer
the number of points the reconstructed signal should have
alpha : float
the parameter alpha of scikit-learn's lasso method (see documentation)
    retCoefs : boolean
if True, will return the coefficients of the transform
Returns
-------
y : list
the reconstructed signal
coefs : list
the coefficients of the reconstructed signal's transform
"""
D = spfft.dct(np.eye(target_pts))
A = D[ind]
lasso = Lasso(alpha=alpha)
lasso.fit(A,signal)
coefs = lasso.coef_
y = spfft.idct(coefs)
y = utils.normalize(y)
if retCoefs:
return (y,coefs)
else:
return y
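# Hypothetical usage of the Lasso-based l1 reconstruction above:
# >>> rec = lasso_recon(ds, ind, target_pts=120, alpha=0.001)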
def cvx_recon(signal, ind, target_pts=120, retCoefs=False):
""" Solves the l1 minimization problem with CVXPY
It transforms the input signal using discrete cosine transform, then solves
a l1 minimization problem (with CVXPY, a convex minimization solver)
to find the sparsest coefficients that fit the given signal.
It returns the inverse transform of these coefficients.
Parameters
----------
signal : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
target_pts : integer
the number of points the reconstructed signal should have
    retCoefs : boolean
if True, will return the coefficients of the transform
Returns
-------
y : list
the reconstructed signal
x : list
the coefficients of the reconstructed signal's transform
"""
A = spfft.idct(np.identity(target_pts), norm='ortho', axis=0)
A = A[ind]
vx = cvx.Variable(target_pts)
objective = cvx.Minimize(cvx.norm(vx, 1))
constraints = [A*vx == signal]
prob = cvx.Problem(objective, constraints)
result = prob.solve(verbose=False)
x = np.array(vx.value)
x = np.squeeze(x)
y = spfft.idct(x, norm='ortho', axis=0)
y = utils.normalize(y)
if retCoefs:
return (y, x)
else:
return y
# ------- INTERPOLATION FUNCTIONS -------
def spline_recon(signal, ind, target_pts=120, kind='linear'):
""" Use splines to reconstruct the signal
It uses scipy.interpolate.interp1d to reconstruct a signal using splines.
Parameters
----------
signal : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
target_pts : integer
the number of points the reconstructed signal should have
kind = 'zero','linear','quadratic' or 'cubic'
the degree of the spline to use, see scipy.interpolate.interp1d
documentation.
Returns
-------
y : list
the reconstructed signal
"""
x = np.linspace(ind[0], ind[-1], target_pts)
f = interpolate.interp1d(ind, signal, kind=kind)
y = f(x)
return y
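# Illustrative call of the spline wrapper above (ds/ind are hypothetical):
# >>> y = spline_recon(ds, ind, target_pts=120, kind='cubic')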
def combined_recon(ds, ind, threshold, tolerance_ratio=0.1):
""" A combination of zero and linear spline interpolation
This function combines linear and zero spline interpolation, given a
signal that has been downsampled using a by percentage downsampling. It
uses the information given by this downsampling to choose between linear or
zero interpolation.
If the linear interpolation gives at least one point that has a variation
superior to the threshold, it will use zero interpolation instead.
Parameters
----------
ds : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
threshold : float
the threshold used for the by percentage downsampling
tolerance_ratio : float
this ratio increases the interval of points where linear interpolation
        is used. It prevents the algorithm from using zero interpolation if
        linear is closer to the actual signal. It computes a larger threshold:
``new_threshold = threshold*(1+tolerance_ratio)``
Returns
-------
y : list
the reconstructed signal
"""
# Generate the linear interpolation
x = np.arange(ind[-1]+1)
f_linear = interpolate.interp1d(ind, ds, kind='linear')
f_zero = interpolate.interp1d(ind, ds, kind='zero')
y_linear = f_linear(x)
y_zero = f_zero(x)
y = y_linear.copy()
# the new threshold, with some tolerance so we do not use
# zero interpolation for a small difference with the threshold
    tolerance = threshold * (1+tolerance_ratio)
# Check if the points are in the correct interval
last_ind = ind[0]
for i in range(1, len(ind)):
# j is the index of the points between 2 known points
j = last_ind + 1
out = False
var = np.abs(y_linear[j]-ds[i-1])
while (not out) and (j < ind[i]):
var = np.abs(y_linear[j]-ds[i-1])
if var > tolerance*np.abs(ds[i-1]):
out=True
j +=1
# if one point is outside the interval, use zero interpolation
# for the segment
if out:
for j in range(last_ind+1, ind[i]):
y[j] = y_zero[j]
last_ind = ind[i]
return y
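# Sketch of the combined reconstruction above, assuming the signal was
# downsampled "by percentage" with the same threshold (values are illustrative):
# >>> threshold = 0.05
# >>> rec = combined_recon(ds, ind, threshold, tolerance_ratio=0.1)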
def combined_fixed_recon(ds, ind, threshold, tolerance_ratio=0.1,plot=False):
""" A combination of zero and linear spline interpolation
This function combines linear and zero spline interpolation, given a
signal that has been downsampled using a by percentage downsampling. It
uses the information given by this downsampling to choose between linear or
zero interpolation.
If the linear interpolation gives at least one point that has a variation
superior to the threshold, it will use zero interpolation instead.
Parameters
----------
ds : list
the downsampled signal to reconstruct
ind : list
the list of indices corresponding to the position of
the downsampled points
threshold : float
the threshold used for the by percentage downsampling
tolerance_ratio : float
this ratio increases the interval of points where linear interpolation
        is used. It prevents the algorithm from using zero interpolation if
        linear is closer to the actual signal. It computes a larger threshold:
``new_threshold = threshold*(1+tolerance_ratio)``
Returns
-------
y : list
the reconstructed signal
"""
x = np.arange(ind[-1]+1)
tolerance = threshold * (1+tolerance_ratio)
f_linear = interpolate.interp1d(ind, ds, kind='linear')
f_pchip = interpolate.PchipInterpolator(ind,ds)
y = [ds[0]]
for i in range(1, len(ind)):
indx = np.arange(ind[i-1]+1, ind[i]+1, 1)
indx_conditions = (i-2 >= 0) and (i+1 < len(ind))
out = False
j = ind[i]-1
while (not out) and (j > ind[i-1]):
var = np.abs(f_linear(x[j]) - ds[i-1])
if var > tolerance*np.abs(ds[i-1]):
out = True
j -= 1
y_ = f_linear(indx)
if out:
#f_nearest = interpolate.interp1d([ind[i-1],ind[i]],[ds[i-1],ds[i]],kind='nearest')
#y_ = f_nearest(indx).tolist()
ind = | np.array(ind,dtype=int) | numpy.array |
import numpy as np
def euclidean_distances(X, Y=None, Y_norm_squared=None, X_norm_squared=None):
    '''
    Treat each row of the data as a sample and compute the Euclidean distances
    between the samples of the two matrices.
    :param X: matrix one
    :param Y: matrix two
    :param Y_norm_squared:
    :param X_norm_squared:
    :return: pairwise distance matrix
    '''
X = | np.array(X) | numpy.array |
#!/usr/bin/env python
"""Collection of simple functions useful in computational chemistry scripting.
Many of the following functions are used to make operations on xyz coordinates
of molecular structure. When refering to ``xyz_data`` bellow, the following
structures (also used in :py:mod:`~comp_chem_utils.molecule_data`) is assumed::
atom 1 label and corresponding xyz coordinate
atom 2 label and corresponding xyz coordinate
: : :
atom N label and corresponding xyz coordinate
For example the ``xyz_data`` of a Hydrogen molecule along the z-axis
should be passed as::
>>> xyz_data
[['H', 0.0, 0.0, 0.0], ['H', 0.0, 0.0, 1.0]]
"""
__author__="<NAME>"
__email__="<EMAIL>"
import os
import shutil
import numpy as np
from scipy.spatial.distance import pdist, cdist
from comp_chem_utils.periodic import element
def vel_auto_corr(vel, max_corr_index, tstep):
"""Calculate velocity autocorrelation function.
Args:
vel (list): The velocities along the trajectory are given as
a list of ``np.array()`` of dimensions N_atoms . 3.
max_corr_index (int): Maximum number of steps to consider
for the auto correlation function. In other words, it
corresponds to the number of points in the output function.
"""
max_step_index = len(vel) - max_corr_index
natoms = vel[0].shape[0]
G = np.zeros((max_corr_index), dtype='float64')
for itau in range(max_corr_index):
for it in range(max_step_index):
#for i in range(natoms):
# G[itau] += np.dot(vel[it][i,:], vel[it+itau][i,:])
G[itau] += np.trace(np.dot(vel[it],np.transpose(vel[it+itau])))
G = G / (natoms * max_corr_index * tstep)
xpts = np.arange(max_corr_index)
xpts = xpts * tstep
return xpts, G
def get_lmax_from_atomic_charge(charge):
"""Return the maximum angular momentum based on the input atomic charge.
This function is designed to return LMAX in a CPMD input file.
Args:
charge (int): Atomic charge.
Returns:
'S' for H or He; 'P' for second row elements; and 'D' for heavier elements.
"""
if charge <= 2:
# H or He
lmax = 'S'
elif charge <= 10:
lmax = 'P'
else:
lmax = 'D'
return lmax
def get_file_as_list(filename, raw=False):
"""Read a file and return it as a list of lines (str).
By default comments (i.e. lines starting with #)
and empty lines are ommited. This can be changed
by setting ``raw=True``
Args:
filename (str): Name of the file to read.
raw (bool, optional): To return the file as it is,
i.e. with comments and blank lines. Default
is ``raw=False``.
Returns:
A list of lines (str)."""
lines = []
with open(filename,'r') as myfile:
for line in myfile:
if raw:
lines.append(line)
else:
# remove empty lines
if line.strip():
# remove comments
if line.strip()[0] != '#':
lines.append(line)
return lines
def make_new_dir(dirn):
"""Make new empty directory.
If the directory already exists it is erased and replaced.
Args:
dirn (str): Name for the new directory (can include path).
"""
if not os.path.exists(dirn):
os.makedirs(dirn)
else:
try:
os.removedirs(dirn)
except(OSError):
print("WARNING: erasing (not empty) directory! {}".format(dirn))
shutil.rmtree(dirn)
os.makedirs(dirn)
def center_of_mass(xyz_data):
"""Calculate center of mass of a molecular structure based on xyz coordinates.
Args:
xyz_data (list): xyz atomic coordinates arranged as described above.
Returns:
3-dimensional ``np.array()`` containing the xyz coordinates of the
center of mass of the molecular structure. The unit of the center of mass
matches the xyz input units (usually Angstroms).
"""
totM = 0.0
COM = np.zeros((3))
for line in xyz_data:
symbol = line[0]
coord = np.array(line[1:])
mass = element(symbol).mass
totM += mass
COM += coord*mass
COM = COM/totM
return COM
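# Worked example for the helper above, using the H2 molecule from the module
# docstring (equal masses, so the centre of mass is the midpoint):
# >>> center_of_mass([['H', 0.0, 0.0, 0.0], ['H', 0.0, 0.0, 1.0]])
# array([0. , 0. , 0.5])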
def change_vector_norm(fix, mob, R):
"""Scale a 3-D vector defined by two points in space to have a new norm R.
The input vector is defined by a fix position in 3-D space ``fix``,
and a mobile position ``mob``. The function returns a new mobile
position such that the new vector has the norm R.
Args:
fix (np.array): xyz coordinates of the fix point.
mob (np.array): Original xyz coordinates of the mobile point.
R (float): Desired norm for the new vector.
Returns:
        The new mobile position as an ``np.array()`` of dimension 3.
"""
unit = mob - fix
unit = unit/np.linalg.norm(unit)
# return new position
return fix + R * unit
def change_vector_norm_sym(pos1, pos2, R):
"""Symmetric version of change_vector_norm function.
In other word both positions are modified symmetrically.
"""
unit = pos2 - pos1
norm = np.linalg.norm(unit)
unit = unit/norm
shift = (R-norm)/2.0
# return new position
new_pos1 = pos1 - unit * shift
new_pos2 = pos2 + unit * shift
return new_pos1, new_pos2
def get_rmsd(xyz_data1, xyz_data2):
"""Calculate RMSD between two sets of coordinates.
The Root-mean-square deviation of atomic positions is calculated as
.. math::
RMSD = \\sqrt{ \\frac{1}{N} \\sum_{i=1}^N \\delta_{i}^{2} }
    Where ``\\delta_i`` is the distance between atom i in ``xyz_data1`` and in
``xyz_data2``.
Args:
xyz_data1 (list): List of atomic coordinates for the first structure
arranged as described above for xyz_data.
xyz_data2 (list): Like ``xyz_data1`` but for the second structure.
Returns:
The RMSD (float).
"""
rmsd = 0
for c1, c2 in zip(xyz_data1, xyz_data2):
d1 = np.array([c1[x] for x in range(1,4)])
d2 = np.array([c2[x] for x in range(1,4)])
vector = d2 - d1
rmsd += np.dot(vector, vector)
rmsd = rmsd/(len(xyz_data1))
return np.sqrt(rmsd)
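# Quick check of the RMSD helper above: identical structures give 0.0, and two
# H2 geometries shifted by 1.0 along z give an RMSD of 1.0:
# >>> a = [['H', 0.0, 0.0, 0.0], ['H', 0.0, 0.0, 1.0]]
# >>> b = [['H', 0.0, 0.0, 1.0], ['H', 0.0, 0.0, 2.0]]
# >>> get_rmsd(a, a), get_rmsd(a, b)
# (0.0, 1.0)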
def get_distance(xyz_data, atoms, box_size=None):
"""Calculate distance between two atoms in xyz_data.
Args:
xyz_data (list): xyz atomic coordinates arranged as described above.
atoms (list): list of two indices matching the two rows of the
        xyz_data for which the distance should be calculated.
Returns:
Distance between the two atoms in the list as a ``float``, the
unit will match the input unit in the ``xyz_data``.
"""
coord1 = np.array([xyz_data[atoms[0]][x] for x in range(1,4)])
coord2 = np.array([xyz_data[atoms[1]][x] for x in range(1,4)])
vector = coord2 - coord1
if box_size:
for i,x in enumerate(vector):
if abs(x) > box_size/2.0:
vector[i] = box_size - abs(x)
return np.linalg.norm(vector)
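# Example for the distance helper above, with the H2 molecule from the module
# docstring:
# >>> xyz = [['H', 0.0, 0.0, 0.0], ['H', 0.0, 0.0, 1.0]]
# >>> get_distance(xyz, [0, 1])
# 1.0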
def get_distance_matrix(xyz_data, box_size=None):
# make np.array
natoms = len(xyz_data)
coord = np.zeros((natoms,3), dtype='float')
for i,line in enumerate(xyz_data):
coord[i,:] = line[1:]
if box_size:
        # number of unique pairs (i < j); rows are filled in the same order
        # as scipy's condensed pdist output
        npairs = natoms * (natoms - 1) // 2
        matrix = np.zeros((npairs,3), dtype='float')
        ij = 0
        for i in range(natoms):
            for j in range(i+1,natoms):
                matrix[ij,:] = coord[i,:] - coord[j,:]
                ij += 1
        # find out which element to shift:
        # basically shift is an array of same shape as matrix
        # with zeros every where except where the elements of
        # matrix are larger than box_size/2.0
        # in that case shift has the value box_size
        # (work on absolute displacements so negative components are
        # wrapped correctly as well, i.e. minimum image convention)
        matrix = np.abs(matrix)
        shift = box_size * (matrix > box_size/2.0).astype(int)
        # we can now shift the matrix as follows:
        matrix = abs(shift - matrix)
        # and get the distances...
        matrix = np.linalg.norm(matrix, axis=1)
else:
matrix = pdist(coord)
return matrix
def get_distance_matrix_2(xyz_data1, xyz_data2, box_size=None):
# repeat as above for 2 different sets of coordinates
nat1 = len(xyz_data1)
coord1 = np.zeros((nat1,3), dtype='float')
for i,line in enumerate(xyz_data1):
coord1[i,:] = line[1:]
nat2 = len(xyz_data2)
coord2 = np.zeros((nat2,3), dtype='float')
for i,line in enumerate(xyz_data2):
coord2[i,:] = line[1:]
if box_size:
matrix = np.zeros((nat1,nat2,3), dtype='float')
for i in range(nat1):
for j in range(nat2):
                matrix[i,j,:] = coord1[i,:] - coord2[j,:]
        # find out which element to shift:
        # basically shift is an array of same shape as matrix
        # with zeros every where except where the elements of
        # matrix are larger than box_size/2.0
        # in that case shift has the value box_size
        # (work on absolute displacements so negative components are
        # wrapped correctly as well, i.e. minimum image convention)
        matrix = np.abs(matrix)
        shift = box_size * (matrix > box_size/2.0).astype(int)
        # we can now shift the matrix as follows:
        matrix = abs(shift - matrix)
        # and get the distances...
        matrix = np.linalg.norm(matrix, axis=2)
else:
matrix = cdist(coord1, coord2)
return matrix
def get_angle(xyz_data, atoms):
"""Calculate angle between three atoms in xyz_data.
Args:
xyz_data (list): xyz atomic coordinates arranged as described above.
atoms (list): list of three indices matching the rows of the
        xyz_data for which the angle should be calculated.
Returns:
Angle between the three atoms in the list as a ``float`` in degrees.
"""
coord1 = np.array([xyz_data[atoms[0]][x] for x in range(1,4)])
coord2 = np.array([xyz_data[atoms[1]][x] for x in range(1,4)])
coord3 = np.array([xyz_data[atoms[2]][x] for x in range(1,4)])
vec1 = coord1-coord2
vec2 = coord3-coord2
return np.degrees( np.arccos( | np.dot(vec1,vec2) | numpy.dot |
#!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import ctypes
import struct
import csv
import os, sys
sys.path.insert(0, os.getcwd())
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
DLRM_INTERACTIONS_PLUGIN_LIBRARY="build/plugins/DLRMInteractionsPlugin/libdlrminteractionsplugin.so"
if not os.path.isfile(DLRM_INTERACTIONS_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_INTERACTIONS_PLUGIN_LIBRARY),
"Please build the DLRM Interactions plugin."
))
ctypes.CDLL(DLRM_INTERACTIONS_PLUGIN_LIBRARY)
DLRM_BOTTOM_MLP_PLUGIN_LIBRARY="build/plugins/DLRMBottomMLPPlugin/libdlrmbottommlpplugin.so"
if not os.path.isfile(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY),
"Please build the DLRM Bottom MLP plugin."
))
ctypes.CDLL(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY)
from importlib import import_module
from code.common import logging, dict_get, BENCHMARKS
from code.common.builder import BenchmarkBuilder
from code.dlrm.tensorrt.calibrator import DLRMCalibrator
import json
import numpy as np
import torch
class DLRMBuilder(BenchmarkBuilder):
INTERLEAVED_TOP_MLP = True
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(4 << 30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.DLRM, workspace_size=workspace_size)
with open("code/dlrm/tensorrt/mlperf_40m.limit.json") as f:
self.dlrm_config = json.load(f)
logging.info("DLRM config: {:}".format(self.dlrm_config))
self.num_numerical_inputs = self.dlrm_config["num_numerical_features"]
self.num_features = len(self.dlrm_config["categorical_feature_sizes"])
self.num_interactions = (self.num_features + 1) * self.num_features // 2
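        # pairwise interactions among the categorical embeddings plus the
        # bottom-MLP output vector: C(num_features + 1, 2) dot products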
self.embedding_size = self.dlrm_config["embedding_dim"]
self.embedding_rows = self.dlrm_config["categorical_feature_sizes"]
self.embedding_rows_bound = 40000000
self.embedding_rows = [min(i, self.embedding_rows_bound) for i in self.embedding_rows]
self.embedding_rows_total = np.sum(np.array(self.embedding_rows))
self.bottom_mlp_channels = self.dlrm_config["bottom_mlp_sizes"]
self.bottom_mlp_names = ["bot_l.0", "bot_l.2", "bot_l.4"]
self.top_mlp_input_size = (self.num_interactions + self.embedding_size + 31) // 32 * 32
self.top_mlp_channels = self.dlrm_config["top_mlp_sizes"]
self.top_mlp_names = ["top_l.0", "top_l.2", "top_l.4", "top_l.6", "top_l.8"]
self.model_filepath = "build/models/dlrm/tb00_40M.pt"
self.embedding_weights_binary_filepath = "build/models/dlrm/40m_limit/dlrm_embedding_weights_int8_v3.bin"
self.model_without_embedding_weights_filepath = "build/models/dlrm/40m_limit/model_test_without_embedding_weights_v3.pt"
self.row_frequencies_binary_filepath = "build/models/dlrm/40m_limit/row_frequencies.bin"
self.row_frequencies_src_dir = "build/models/dlrm/40m_limit/row_freq"
self.embedding_weights_on_gpu_part = self.args.get("embedding_weights_on_gpu_part", 1.0)
self.use_row_frequencies = True if self.embedding_weights_on_gpu_part < 1.0 else False
self.num_profiles = self.args.get("gpu_inference_streams", 1)
if self.precision == "fp16":
self.apply_flag(trt.BuilderFlag.FP16)
elif self.precision == "int8":
self.apply_flag(trt.BuilderFlag.INT8)
if self.precision == "int8":
# Get calibrator variables
calib_batch_size = dict_get(self.args, "calib_batch_size", default=512)
calib_max_batches = dict_get(self.args, "calib_max_batches", default=500)
force_calibration = dict_get(self.args, "force_calibration", default=False)
cache_file = dict_get(self.args, "cache_file", default="code/dlrm/tensorrt/calibrator.cache")
preprocessed_data_dir = dict_get(self.args, "preprocessed_data_dir", default="build/preprocessed_data")
calib_data_dir = os.path.join(preprocessed_data_dir, "criteo/full_recalib/val_data_128000")
# Set up calibrator
self.calibrator = DLRMCalibrator(calib_batch_size=calib_batch_size, calib_max_batches=calib_max_batches,
force_calibration=force_calibration, cache_file=cache_file, data_dir=calib_data_dir)
self.builder_config.int8_calibrator = self.calibrator
self.cache_file = cache_file
self.need_calibration = force_calibration or not os.path.exists(cache_file)
else:
self.need_calibration = False
def calibrate(self):
"""
Generate a new calibration cache, overriding the input batch size to 2 needed for interleaving
"""
self.need_calibration = True
self.calibrator.clear_cache()
self.initialize()
# Generate a dummy engine to generate a new calibration cache.
for input_idx in range(self.network.num_inputs):
input_shape = self.network.get_input(input_idx).shape
input_shape[0] = 2 # need even-numbered batch size for interleaving
self.network.get_input(input_idx).shape = input_shape
self.builder.build_engine(self.network, self.builder_config)
def parse_calibration(self):
# Parse the calibration file, set dynamic range on all network tensors
if not os.path.exists(self.cache_file):
return
np127=np.float32(127.0)
with open(self.cache_file, "rb") as f:
lines=f.read().decode('ascii').splitlines()
calibration_dict = {}
for line in lines:
split=line.split(':')
if len(split)!=2:
continue
tensor = split[0]
dynamic_range=np.uint32(int(split[1], 16)).view(np.dtype('float32')).item()*127.0
calibration_dict[tensor] = dynamic_range
return calibration_dict
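    # The cache parsed above is assumed to store one "tensor_name: hex_value"
    # entry per line, where the hex value is the float32 bit pattern of the
    # per-tensor scale; multiplying by 127 recovers the dynamic range.
    # A hypothetical entry might look like:
    #   bot_l.0.relu.output: 3c23d70a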
def add_mlp(self, input_tensor, input_size, num_channels, names, last_relu=False, useConvForFC=False):
for i, num_channel in enumerate(num_channels):
weights = self.weights[names[i] + ".weight"].numpy()
input_size_suggested_by_weights = weights.shape[1]
if input_size > input_size_suggested_by_weights:
weights = np.concatenate((weights, np.zeros((weights.shape[0], input_size - input_size_suggested_by_weights), dtype=weights.dtype)), 1)
if useConvForFC:
layer = self.network.add_convolution(input_tensor, num_channel, (1, 1),
weights, self.weights[names[i] + ".bias"].numpy())
else:
layer = self.network.add_fully_connected(input_tensor, num_channel,
weights, self.weights[names[i] + ".bias"].numpy())
layer.name = names[i]
layer.get_output(0).name = names[i] + ".output"
if i != len(num_channels) - 1 or last_relu:
layer = self.network.add_activation(layer.get_output(0), trt.ActivationType.RELU)
layer.name = names[i] + ".relu"
layer.get_output(0).name = names[i] + ".relu.output"
input_size = num_channel
input_tensor = layer.get_output(0)
return layer
def add_fused_bottom_mlp(self, plugin_name, input_tensor, input_size, num_channels, names):
plugin = None
output_tensor_name = ""
dynamic_range_dict = self.parse_calibration()
for plugin_creator in trt.get_plugin_registry().plugin_creator_list:
if plugin_creator.name == plugin_name:
plugin_fileds = []
plugin_fileds.append(trt.PluginField("inputChannels", np.array([input_size], dtype=np.int32), trt.PluginFieldType.INT32))
for i, num_channel in enumerate(num_channels):
weights = self.weights[names[i] + ".weight"].numpy()
input_size_suggested_by_weights = weights.shape[1]
if input_size > input_size_suggested_by_weights:
weights = np.concatenate((weights, np.zeros((weights.shape[0], input_size - input_size_suggested_by_weights), dtype=weights.dtype)), 1)
plugin_fileds.append(trt.PluginField("weights" + str(i), weights, trt.PluginFieldType.FLOAT32))
plugin_fileds.append(trt.PluginField("biases" + str(i), self.weights[names[i] + ".bias"].numpy(), trt.PluginFieldType.FLOAT32))
output_tensor_name = names[i] + ".relu.output"
if i != len(num_channels) - 1:
plugin_fileds.append(trt.PluginField("dynamicRange" + str(i), np.array([dynamic_range_dict[output_tensor_name]], dtype=np.float32), trt.PluginFieldType.FLOAT32))
plugin = plugin_creator.create_plugin(name=plugin_name, field_collection=trt.PluginFieldCollection(plugin_fileds))
return plugin, output_tensor_name
def get_dlrm_interactions_plugin(self, plugin_name, tableOffsets, interactionsOutputInterleaved):
plugin = None
for plugin_creator in trt.get_plugin_registry().plugin_creator_list:
if plugin_creator.name == plugin_name:
embeddingSize_field = trt.PluginField("embeddingSize", np.array([self.embedding_size], dtype=np.int32), trt.PluginFieldType.INT32)
embeddingRows_field = trt.PluginField("embeddingRows", np.array([self.embedding_rows_total], dtype=np.int32), trt.PluginFieldType.INT32)
reducedPrecisionIO_field = trt.PluginField("reducedPrecisionIO", np.array([0 if self.need_calibration else (1 if self.precision == "fp16" else 2)], dtype=np.int32), trt.PluginFieldType.INT32)
embeddingWeightsOnGpuPart_field = trt.PluginField("embeddingWeightsOnGpuPart", np.array([self.embedding_weights_on_gpu_part], dtype=np.float32), trt.PluginFieldType.FLOAT32)
interactionsOutputInterleaved_field = trt.PluginField("interactionsOutputInterleaved", np.array([1 if interactionsOutputInterleaved else 0], dtype=np.int32), trt.PluginFieldType.INT32)
tableOffsets_field = trt.PluginField("tableOffsets", tableOffsets, trt.PluginFieldType.INT32)
embeddingWeightsFilepath_field = trt.PluginField("embeddingWeightsFilepath", np.array(list(self.embedding_weights_binary_filepath.encode()), dtype=np.int8), trt.PluginFieldType.CHAR)
if self.use_row_frequencies:
rowFrequenciesFilepath_field = trt.PluginField("rowFrequenciesFilepath", np.array(list(self.row_frequencies_binary_filepath.encode()), dtype=np.int8), trt.PluginFieldType.CHAR)
else:
rowFrequenciesFilepath_field = trt.PluginField("rowFrequenciesFilepath", np.array(list("".encode()), dtype=np.int8), trt.PluginFieldType.CHAR)
field_collection = trt.PluginFieldCollection([embeddingSize_field, embeddingRows_field, reducedPrecisionIO_field, embeddingWeightsOnGpuPart_field, interactionsOutputInterleaved_field, tableOffsets_field, embeddingWeightsFilepath_field, rowFrequenciesFilepath_field])
plugin = plugin_creator.create_plugin(name=plugin_name, field_collection=field_collection)
return plugin
def dump_embedding_weights_to_binary_file(self):
logging.info("Writing quantized embedding weights to " + self.embedding_weights_binary_filepath)
with open(self.embedding_weights_binary_filepath,'wb') as f:
f.write(struct.pack('i', self.num_features))
# Calculate the maximum absolute value of embedding weights for each table
mults = np.ndarray(shape=(self.num_features))
for feature_id in range(self.num_features):
weight_tensor_name = "emb_l." + str(feature_id) + ".weight"
embeddings = self.weights[weight_tensor_name].numpy()
maxAbsVal = abs(max(embeddings.max(), embeddings.min(), key=abs))
mults[feature_id] = 127.5 / maxAbsVal
embeddingsScale = 1.0 / mults[feature_id]
f.write(struct.pack('f', embeddingsScale))
for feature_id in range(self.num_features):
weight_tensor_name = "emb_l." + str(feature_id) + ".weight"
embeddings = self.weights[weight_tensor_name].numpy()
if (embeddings.shape[0] != self.embedding_rows[feature_id]):
raise IOError("Expected " + str(self.embedding_rows[feature_id]) + " embedding rows, but got " + str(embeddings.shape[0]) + " rows for feature " + str(feature_id))
embeddingsQuantized = np.minimum(np.maximum(np.rint(np.multiply(embeddings, mults[feature_id])), -127), 127).astype('int8')
# Remove the embedding weights, we don't need them any longer
del self.weights[weight_tensor_name]
# Write quantized embeddings to file
embeddingsQuantized.tofile(f)
def dump_row_frequencies_to_binary_file(self):
with open(self.row_frequencies_binary_filepath,'wb') as f:
f.write(struct.pack('i', self.num_features))
for feature_id in range(self.num_features):
f.write(struct.pack('i', self.embedding_rows[feature_id]))
row_frequencies_source_filepath = self.row_frequencies_src_dir + "/" + "table_" + str(feature_id + 1) + ".csv"
with open(row_frequencies_source_filepath, 'r') as infile:
reader = csv.reader(infile)
rowIdToFreqDict = {rows[0]:rows[1] for rows in reader}
# if (len(rowIdToFreqDict) != self.embedding_rows[feature_id]):
# raise IOError("Expected " + str(self.embedding_rows[feature_id]) + " embedding rows, but got " + str(len(rowIdToFreqDict)) + " row frequencies for feature " + str(feature_id))
for row_id in range(self.embedding_rows[feature_id]):
if not str(row_id) in rowIdToFreqDict:
f.write(struct.pack('f', 0))
# raise IOError("Cannot find frequency for row " + str(row_id) + " for feature " + str(feature_id))
else:
f.write(struct.pack('f', float(rowIdToFreqDict[str(row_id)])))
def initialize(self):
useConvForFC_bottom = (self.precision == "int8")
useConvForFC_top = (self.precision == "int8")
interactionsOutputInterleaved = False if self.need_calibration or self.input_dtype != "int8" else True
# Check if we should split the model into the binary file with embedding weights quantized and model without embeddings
if not (os.path.isfile(self.embedding_weights_binary_filepath) and os.path.isfile(self.model_without_embedding_weights_filepath)):
logging.info("Loading checkpoint from " + self.model_filepath)
self.weights = torch.load(self.model_filepath, map_location="cpu")["state_dict"]
self.dump_embedding_weights_to_binary_file()
logging.info("Writing model without embedding weights to " + self.model_without_embedding_weights_filepath)
torch.save(self.weights, self.model_without_embedding_weights_filepath)
del self.weights
# Dump row frequencies to file in binary format
if self.use_row_frequencies and not os.path.isfile(self.row_frequencies_binary_filepath):
logging.info("Writing row frequencies to " + self.row_frequencies_binary_filepath)
self.dump_row_frequencies_to_binary_file()
# Load weights
self.weights = torch.load(self.model_without_embedding_weights_filepath, map_location="cpu")
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Numerical input
numerical_input = self.network.add_input("numerical_input", trt.DataType.FLOAT, (-1, self.num_numerical_inputs, 1, 1))
if not self.need_calibration:
if self.input_dtype == "int8":
numerical_input.dtype = trt.int8
elif self.input_dtype == "fp16":
numerical_input.dtype = trt.float16
if self.input_format == "linear":
numerical_input.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
elif self.input_format == "chw4":
numerical_input.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
elif self.input_format == "chw32":
numerical_input.allowed_formats = 1 << int(trt.TensorFormat.CHW32)
# Bottom MLP
if self.need_calibration or self.input_dtype != "int8":
bottom_mlp = self.add_mlp(numerical_input, self.num_numerical_inputs, self.bottom_mlp_channels, self.bottom_mlp_names,
last_relu=True, useConvForFC=useConvForFC_bottom)
else:
            bottom_mlp_plugin, output_tensor_name = self.add_fused_bottom_mlp("DLRM_BOTTOM_MLP_TRT", numerical_input, self.num_numerical_inputs, self.bottom_mlp_channels, self.bottom_mlp_names)
            bottom_mlp = self.network.add_plugin_v2([numerical_input], bottom_mlp_plugin)
            bottom_mlp.get_output(0).name = output_tensor_name
bottom_mlp_shuffle = self.network.add_shuffle(bottom_mlp.get_output(0))
bottom_mlp_shuffle.reshape_dims = trt.Dims((-1, 1, self.embedding_size))
# Index input
index_input = self.network.add_input("index_input", trt.DataType.INT32, (-1, self.num_features))
# Embedding lookup and interactions
dlrm_interactions_plugin = self.get_dlrm_interactions_plugin("DLRM_INTERACTIONS_TRT", np.cumsum( | np.array([0] + self.embedding_rows[:-1]) | numpy.array |
"""
Models for causal set graphs.
Available methods:
minkowski_interval(N, D)
de_sitter_interval(N, D, eta_0, eta_1)
causal_set_graph(R, p)
"""
# Copyright (C) 2016 by
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["<NAME> (<EMAIL>)"])
import networkx as nx
import numpy as np
import dagology as dag
__all__ = ['causal_set_graph',
'minkowski_interval',
'de_sitter_interval']
def causal_set_graph(R, p=1.0, periodic=None):
"""
Create a Causal Set DAG from a set of coordinates, an NxD numpy array
Parameters
----------
R - coordinates of points
p - probability with which allowed edges appear
periodic - list - the periodic size of each dimension
Notes
-----
    We are assuming a conformal spacetime - i.e. lightcones are straight lines
and therefore can calculate whether two points should be connected using
the Minkowski metric.
"""
G = nx.DiGraph()
N, D = R.shape
edgelist = []
for i in range(N):
G.add_node(i, position=tuple(R[i]))
for j in range(N):
if R[i, 0] < R[j, 0]:
if p == 1. or p > np.random.random():
if periodic:
if dag.minkowski_periodic(R[i], R[j], periodic) < 0:
edgelist.append([i,j])
else:
if dag.minkowski(R[i], R[j]) < 0:
edgelist.append([i,j])
G.add_edges_from(edgelist)
return G
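# Hypothetical usage sketch combining the helpers in this module:
# >>> R = minkowski_interval_scatter(100, 2)   # 100 points in a 2D interval
# >>> G = causal_set_graph(R, p=1.0)
# >>> G.number_of_nodes()
# 100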
def minkowski_interval_scatter(N, D, fix_ends=True):
""" Scatter N points in a D dimensional interval in Minkowski space
Parameters
----------
N - number of points
D - dimension of spacetime
fix_ends - if True, have points at start and end of interval
Notes
-----
Throw points into a unit box rejecting those outside the interval
Repeat until N points have been reached
Note that this is inefficient for large D"""
R = np.random.random((N, D))
a = np.zeros(D)
a[1:] = 0.5
b = np.zeros(D)
b[0] = 1.
b[1:] = 0.5
if fix_ends:
R[0] = a
R[1] = b
i_start = 2
else:
i_start = 0
for i in range(i_start, N):
while (dag.minkowski(a, R[i, :]) > 0) or ((dag.minkowski(R[i, :], b) > 0)):
R[i, :] = | np.random.random(D) | numpy.random.random |
import tensorflow as tf
import data_loader_recsys
import generator_recsys
import utils
import shutil
import time
import math
import eval
import numpy as np
import argparse
# You can run it directly, first training and then evaluating
# nextitrec_generate.py can only be run when the model parameters are saved, i.e.,
# save_path = saver.save(sess,
# "Data/Models/generation_model/model_nextitnet.ckpt".format(iter, numIters))
# if you are dealing with a very huge industry dataset, e.g., several hundred million items, you may have memory problems during training, but it
# can be easily solved by simply changing the last layer: you do not need to calculate the cross entropy loss
# based on the whole item vector. Similarly, you can also change the last layer (use tf.nn.embedding_lookup or gather) in the prediction phase
# if you want to just rank the recalled items instead of all items. The current code should be okay if the item size < 5 million.
# Strongly suggest running codes on GPU with more than 10G memory!!!
# if your session data is very long, e.g., >50, and you find it may not have very strong internal sequence properties, you can consider generating subsequences
def generatesubsequence(train_set):
# create subsession only for training
subseqtrain = []
for i in range(len(train_set)):
# print(x_train[i]
seq = train_set[i]
lenseq = len(seq)
# session lens=100 shortest subsession=5 realvalue+95 0
for j in range(lenseq - 2):
subseqend = seq[:len(seq) - j]
subseqbeg = [0] * j
subseq = np.append(subseqbeg, subseqend)
# beginseq=padzero+subseq
# newsubseq=pad+subseq
subseqtrain.append(subseq)
x_train = np.array(subseqtrain) # list to ndarray
del subseqtrain
# Randomly shuffle data
np.random.seed(10)
shuffle_train = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffle_train]
print("generating subsessions is done!")
return x_train
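# Illustrative behaviour of the padding scheme above for one short session
# (item ids are hypothetical): [1, 2, 3, 4, 5] is expanded, before shuffling,
# into the rows
#   [1, 2, 3, 4, 5]
#   [0, 1, 2, 3, 4]
#   [0, 0, 1, 2, 3]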
def main(datapath=None):
parser = argparse.ArgumentParser()
parser.add_argument('--top_k', type=int, default=5,
help='Sample from top k predictions')
parser.add_argument('--beta1', type=float, default=0.9,
help='hyperpara-Adam')
# history_sequences_20181014_fajie_smalltest.csv
parser.add_argument('--datapath', type=str, default='Data/Session/musicl_20.csv',
# parser.add_argument('--datapath', type=str, default='Data/Session/user-filter-20000items-session5.csv',
help='data path')
parser.add_argument('--eval_iter', type=int, default=100,
help='Sample generator output every x steps')
parser.add_argument('--save_para_every', type=int, default=10000,
help='save model parameters every')
parser.add_argument('--tt_percentage', type=float, default=0.2,
help='0.2 means 80% training 20% testing')
parser.add_argument('--is_generatesubsession', type=bool, default=True,
                        help='whether to generate subsessions, e.g., 12345-->01234,00123,00012. It may be useful for some very long sequences')
args = parser.parse_args()
if datapath:
dl = data_loader_recsys.Data_Loader({'model_type': 'generator', 'dir_name': datapath})
else:
        # the argument parser above only defines --datapath, so use it here as well
        dl = data_loader_recsys.Data_Loader({'model_type': 'generator', 'dir_name': args.datapath})
        datapath = args.datapath
all_samples = dl.item
items = dl.item_dict
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
all_samples = all_samples[shuffle_indices]
# Split train/test set
dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
train_set, valid_set = all_samples[:dev_sample_index], all_samples[dev_sample_index:]
if args.is_generatesubsession:
train_set = generatesubsequence(train_set)
model_para = {
# if you changed the parameters here, also do not forget to change paramters in nextitrec_generate.py
'item_size': len(items),
'dilated_channels': 100,
# if you use nextitnet_residual_block, you can use [1, 4, ],
# if you use nextitnet_residual_block_one, you can tune and i suggest [1, 2, 4, ], for a trial
# when you change it do not forget to change it in nextitrec_generate.py
'dilations': [1, 2, 4],
'kernel_size': 3,
'learning_rate': 0.001,
'batch_size': 32,
'iterations': 256,
'is_negsample': False # False denotes no negative sampling
}
print("\n-------------------------------")
print("model: NextItRec")
print("is_generatesubsession:", args.is_generatesubsession)
print("train_set.shape[0]:", train_set.shape[0])
print("train_set.shape[1]:", train_set.shape[1])
print("dataset:", datapath)
print("batch_size:", model_para['batch_size'])
print("embedding_size:", model_para['dilated_channels'])
print("learning_rate:", model_para['learning_rate'])
print("-------------------------------\n")
itemrec = generator_recsys.NextItNet_Decoder(model_para)
itemrec.train_graph(model_para['is_negsample'])
optimizer = tf.train.AdamOptimizer(model_para['learning_rate'], beta1=args.beta1).minimize(itemrec.loss)
itemrec.predict_graph(model_para['is_negsample'], reuse=True)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
maxmrr5, maxmrr20, maxhit5, maxhit20, maxndcg5, maxndcg20 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for iter in range(model_para['iterations']):
# train
train_loss = []
batch_no_train = 0
batch_size = model_para['batch_size']
start = time.time()
t1 = time.time()
print("Iter:%d\ttotal train batch:%d" % (iter, round(len(train_set)/batch_size)))
while (batch_no_train + 1) * batch_size < len(train_set):
train_batch = train_set[batch_no_train * batch_size: (batch_no_train + 1) * batch_size, :]
_, loss, results = sess.run(
[optimizer, itemrec.loss,
itemrec.arg_max_prediction],
feed_dict={
itemrec.itemseq_input: train_batch
})
train_loss.append(loss)
batch_no_train += 1
t3 = time.time() - start
if t3 > 300:
print("batch_no_train: %d, total_time: %.2f" % (batch_no_train, t3))
if batch_no_train % 10 == 0:
t2 = time.time()
print("batch_no_train: %d, time:%.2fs, loss: %.4f" % (batch_no_train, t2 - t1, np.mean(train_loss)))
t1 = time.time()
end = time.time()
print("train LOSS: %.4f, time: %.2fs" % (np.mean(train_loss), end - start))
# test
test_loss = []
batch_no_test = 0
formrr5, forhit5, forndcg5, formrr20, forhit20, forndcg20 = [], [], [], [], [], []
_maxmrr5, _maxmrr20, _maxrecall5, _maxrecall20, _maxndcg5, _maxndcg20 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
start = time.time()
print("Iter:%d total test batch:%d" % (iter, round(len(valid_set) / batch_size)))
while (batch_no_test + 1) * batch_size < len(valid_set):
_formrr5, _forhit5, _forndcg5, _formrr20, _forhit20, _forndcg20 = [], [], [], [], [], []
test_batch = valid_set[batch_no_test * batch_size: (batch_no_test + 1) * batch_size, :]
[probs], loss = sess.run(
[[itemrec.g_probs], [itemrec.loss_test]],
feed_dict={
itemrec.input_predict: test_batch
})
test_loss.append(loss)
batch_no_test += 1
batch_out = []
for line in test_batch:
batch_out.append(line[-1])
rank_l, batch_predict, _recall5, _recall20, _mrr5, _mrr20, _ndcg5, _ndcg20 \
= utils.cau_recall_mrr_org(probs, batch_out)
forhit5.append(_recall5)
formrr5.append(_mrr5)
forndcg5.append(_ndcg5)
forhit20.append(_recall20)
formrr20.append(_mrr20)
forndcg20.append(_ndcg20)
'''
for bi in range(probs.shape[0]):
pred_items_5 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k) # top_k=5
pred_items_20 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k + 15)
true_item = item_batch[bi][-1]
predictmap_5 = {ch: i for i, ch in enumerate(pred_items_5)}
pred_items_20 = {ch: i for i, ch in enumerate(pred_items_20)}
rank_5 = predictmap_5.get(true_item)
rank_20 = pred_items_20.get(true_item)
if rank_5 == None:
formrr5.append(0.0)
forhit5.append(0.0)
forndcg5.append(0.0)
_formrr5.append(0.0)
_forhit5.append(0.0)
_forndcg5.append(0.0)
else:
MRR_5 = 1.0 / (rank_5 + 1)
Rec_5 = 1.0
ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)
formrr5.append(MRR_5)
forhit5.append(Rec_5)
forndcg5.append(ndcg_5)
_formrr5.append(MRR_5)
_forhit5.append(Rec_5)
_forndcg5.append(ndcg_5)
if rank_20 == None:
formrr20.append(0.0)
forhit20.append(0.0)
forndcg20.append(0.0)
_formrr20.append(0.0)
_forhit20.append(0.0)
_forndcg20.append(0.0)
else:
MRR_20 = 1.0 / (rank_20 + 1)
Rec_20 = 1.0
ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)
formrr20.append(MRR_20)
forhit20.append(Rec_20)
forndcg20.append(ndcg_20)
_formrr20.append(MRR_20)
_forhit20.append(Rec_20)
_forndcg20.append(ndcg_20)
'''
# if np.mean(_forndcg5) > _maxndcg5 or np.mean(_forndcg20) > _maxndcg20:
if np.mean(_ndcg5) > _maxndcg5 or np.mean(_ndcg20) > _maxndcg20:
_maxmrr5 = np.mean(_mrr5) # (_formrr5)
_maxrecall5 = np.mean(_recall5) # (_forhit5)
_maxndcg5 = np.mean(_ndcg5) # (_forndcg5)
_maxmrr20 = np.mean(_mrr20) # (_formrr20)
_maxrecall20 = np.mean(_recall20) # (_forhit20)
_maxndcg20 = np.mean(_ndcg20) # (_forndcg20)
print("\t\tin batch recall5=%.4f mrr5=%.4f ndcg5=%.4f" % (_maxrecall5, _maxmrr5, _maxndcg5))
print("\t\tin batch recall20=%.4f mrr20=%.4f ndcg20=%.4f" % (_maxrecall20, _maxmrr20, _maxndcg20))
thismrr5 = np.mean(formrr5) # sum(formrr5) / float(len(formrr5))
thismrr20 = np.mean(formrr20) # (formrr20) / float(len(formrr20))
thishit5 = np.mean(forhit5) # sum(forhit5) / float(len(forhit5))
thishit20 = np.mean(forhit20) # sum(forhit20) / float(len(forhit20))
thisndcg5 = | np.mean(forndcg5) | numpy.mean |
from __future__ import division, print_function, absolute_import
import numpy as np
from copy import deepcopy
from ipsolver._constraints import (NonlinearConstraint,
LinearConstraint,
BoxConstraint)
from ipsolver._canonical_constraint import (_parse_constraint,
to_canonical,
empty_canonical_constraint)
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
class TestParseConstraint(TestCase):
def test_equality_constraint(self):
kind = ("equals", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [0, 1, 2])
assert_array_equal(val_eq, [10, 20, 30])
assert_array_equal(ineq, [])
assert_array_equal(val_ineq, [])
assert_array_equal(sign, [])
def test_greater_constraint(self):
kind = ("greater", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [-1, -1, -1])
kind = ("greater", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [-1, -1])
def test_less_constraint(self):
kind = ("less", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [1, 1, 1])
kind = ("less", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [1, 1])
def test_interval_constraint(self):
kind = ("interval", [10, 20, 30], [50, 60, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2, 0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30, 50, 60, 70])
assert_array_equal(sign, [-1, -1, -1, 1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0, 2])
assert_array_equal(val_ineq, [10, 30, 50, 70])
assert_array_equal(sign, [-1, -1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0])
assert_array_equal(val_ineq, [10, 30, 50])
assert_array_equal(sign, [-1, -1, 1])
kind = ("interval", [-np.inf, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [2, 0])
assert_array_equal(val_ineq, [30, 50])
assert_array_equal(sign, [-1, 1])
class TestToCanonical(TestCase):
def test_empty_constraint(self):
x = [1, 2, 3]
canonical = empty_canonical_constraint(x, 3)
assert_array_equal(canonical.n_eq, 0)
assert_array_equal(canonical.n_ineq, 0)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq, [])
assert_array_equal(c_eq, [])
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_ineq, np.empty((0, 3)))
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility, [])
def test_box_to_canonical_conversion(self):
box = BoxConstraint(("interval", [10, 20, 30], [50, np.inf, 70]),
[False, False, False])
x = [1, 2, 3]
x = box.evaluate_and_initialize(x)
canonical = to_canonical(box)
assert_array_equal(canonical.n_eq, 0)
assert_array_equal(canonical.n_ineq, 5)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq, [10-1,
20-2,
30-3,
1-50,
3-70])
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_ineq.toarray(), [[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[1, 0, 0],
[0, 0, 1]])
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq.toarray(), canonical.J_ineq0.toarray())
assert_array_equal(J_eq.toarray(), canonical.J_eq0.toarray())
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility,
[False, False, False, False, False])
def test_linear_to_canonical_conversion(self):
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
linear = LinearConstraint(A, ("interval",
[10, 20, 30],
[10, np.inf, 70]),
[False, False, False])
x = [1, 2, 3, 4]
x = linear.evaluate_and_initialize(x)
canonical = to_canonical(linear)
assert_array_equal(canonical.n_eq, 1)
assert_array_equal(canonical.n_ineq, 3)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_eq, [1+4+9+16-10])
assert_array_equal(c_ineq, [20-5*1-6*4,
30-7*1-8*3,
7*1+8*3-70])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_eq, [[1, 2, 3, 4]])
assert_array_equal(J_ineq, [[-5, 0, 0, -6],
[-7, 0, -8, 0],
[7, 0, 8, 0]])
assert_array_equal(J_ineq, canonical.J_ineq0)
assert_array_equal(J_eq, canonical.J_eq0)
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility,
[False, False, False])
def test_nonlinear_to_canonical_conversion(self):
f1 = 10
g1 = np.array([1, 2, 3, 4])
H1 = np.eye(4)
f2 = 1
g2 = np.array([1, 1, 1, 1])
H2 = np.zeros((4, 4))
f3 = 12
g3 = np.array([1, 0, 0, 1])
H3 = np.diag([1, 2, 3, 4])
def fun(x):
return np.array([f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x),
f2 + g2.dot(x) + 1/2*H2.dot(x).dot(x),
f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x)])
def jac(x):
return np.vstack([g1 + H1.dot(x),
g2 + H2.dot(x),
g3 + H3.dot(x)])
def hess(x, v):
return v[0]*H1 + v[1]*H2 + v[2]*H3
nonlinear = NonlinearConstraint(fun,
("interval",
[10, 20, 30],
[10, np.inf, 70]),
jac, hess,
False)
x = [1, 2, 3, 4]
x = nonlinear.evaluate_and_initialize(x)
canonical = to_canonical(nonlinear)
assert_array_equal(canonical.n_eq, 1)
assert_array_equal(canonical.n_ineq, 3)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq,
[20-(f2 + g2.dot(x) + 1/2*H2.dot(x).dot(x)),
30-(f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x)),
f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x) - 70])
assert_array_equal(c_eq,
[f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x) - 10])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_eq, np.atleast_2d(g1 + H1.dot(x)))
assert_array_equal(J_ineq, np.vstack([-(g2 + H2.dot(x)),
-(g3 + H3.dot(x)),
g3 + H3.dot(x)]))
v_eq = np.array([10])
v_ineq = np.array([5, 6, 3])
assert_array_equal(canonical.hess(x, v_eq, v_ineq),
10*H1 + (-5)*H2 + (-6+3)*H3)
v_eq = np.array([50])
v_ineq = np.array([4, -2, 30])
assert_array_equal(canonical.hess(x, v_eq, v_ineq),
50*H1 + (-4)*H2 + (2+30)*H3)
assert_array_equal(canonical.enforce_feasibility,
[False, False, False])
nonlinear = NonlinearConstraint(fun,
("interval",
[10, 20, 30],
[20, 20, 70]),
jac, hess,
False)
x = [1, 2, 3, 4]
x = nonlinear.evaluate_and_initialize(x)
canonical = to_canonical(nonlinear)
assert_array_equal(canonical.n_eq, 1)
assert_array_equal(canonical.n_ineq, 4)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_eq,
[f2 + g2.dot(x) + 1/2*H2.dot(x).dot(x) - 20])
assert_array_equal(c_ineq,
[10-(f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x)),
30-(f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x)),
f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x) - 20,
f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x) - 70])
assert_array_equal(c_ineq, canonical.c_ineq0)
        assert_array_equal(c_eq, canonical.c_eq0)
import torch
from time import ctime
import os
from torch.utils.tensorboard import SummaryWriter
import logging
from augmentations.simclr_transform import SimCLRTransform
from util.torchlist import ImageFilelist
from augmentations import TestTransform
import numpy as np
from torchvision.datasets import CIFAR10
def tiny_imagenet(data_root, img_size=64, train=True, transform=None):
"""
TinyImageNet dataset
"""
train_kv = "train_kv_list.txt"
test_kv = "val_kv_list.txt"
if train:
train_dataset = ImageFilelist(root=data_root, flist=os.path.join(data_root, train_kv), transform=transform)
return train_dataset
else:
train_dataset = ImageFilelist(root=data_root, flist=os.path.join(data_root, train_kv), transform=TestTransform(img_size))
test_dataset = ImageFilelist(root=data_root, flist=os.path.join(data_root, test_kv), transform=TestTransform(img_size))
return train_dataset, test_dataset
def positive_mask(batch_size):
"""
Create a mask for masking positive samples
:param batch_size:
:return: A mask that can segregate 2(N-1) negative samples from a batch of N samples
"""
N = 2 * batch_size
mask = torch.ones((N, N), dtype=torch.bool)
mask[torch.eye(N).byte()] = 0
for i in range(batch_size):
mask[i, batch_size + i] = 0
mask[batch_size + i, i] = 0
return mask
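# Illustrative sketch (added, not part of the original module): for batch_size = 2
# the mask above is a 4x4 boolean matrix with the main diagonal and the two
# positive-pair off-diagonals set to False, so only negatives survive:
#
#   >>> positive_mask(2)
#   tensor([[False,  True, False,  True],
#           [ True, False,  True, False],
#           [False,  True, False,  True],
#           [ True, False,  True, False]])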
def summary_writer(args, log_dir=None, filename_suffix=''):
"""
Create a tensorboard SummaryWriter
"""
if log_dir is None:
args.log_dir = os.path.join(args.train.save_dir, "{}_bs_{}".format(args.train.backbone, args.train.batchsize),
ctime().replace(' ', '_'))
mkdir(args.log_dir)
else:
args.log_dir = log_dir
writer = SummaryWriter(log_dir=args.log_dir, filename_suffix=filename_suffix)
print("logdir = {}".format(args.log_dir))
return writer
def mkdir(path):
"""
Creates new directory if not exists
@param path: folder path
"""
if not os.path.exists(path):
print("creating {}".format(path))
os.makedirs(path, exist_ok=True)
def logger(args, filename=None):
"""
Creates a basic config of logging
@param args: Namespace instance with parsed arguments
@param filename: None by default
"""
if filename is None:
filename = os.path.join(args.log_dir, 'train.log')
else:
filename = os.path.join(args.log_dir, filename)
logging.basicConfig(filename=filename, level=logging.DEBUG, format='%(message)s')
print("logfile created")
def log(msg):
"""
print and log console messages
@param msg: string message
"""
print(msg)
logging.debug(msg)
def save_checkpoint(state_dict, args, epoch, filename=None):
"""
@param state_dict: model state dictionary
@param args: system arguments
@param epoch: epoch
@param filename: filename for saving the checkpoint. Do not include whole path as path is appended in the code
"""
if filename is None:
path = os.path.join(args.log_dir + "/" + "checkpoint_{}.pth".format(epoch))
else:
path = os.path.join(args.log_dir + "/" + filename)
torch.save(state_dict, path)
log("checkpoint saved at {} after {} epochs".format(path, epoch))
return path
class CIFAR10Imbalanced(CIFAR10):
"""@author <NAME>
    CIFAR10 dataset with a long-tailed (imbalanced) class distribution.
Params
------
num_classes: int
Default 10. The number of classes in the dataset.
"""
def __init__(self, gamma=0.2, n_min=250, n_max=5000, num_classes=10, **kwargs):
super(CIFAR10Imbalanced, self).__init__(**kwargs)
log("\n The gamma value for imbalanced CIFAR10: {} \n".format(gamma))
self.num_classes = num_classes
self.gamma = gamma
self.n_min = n_min
self.n_max = n_max
self.imbalanced_dataset()
def imbalanced_dataset(self):
X = np.array([[1, -self.n_max], [1, -self.n_min]])
Y = np.array([self.n_max, self.n_min * 10 ** (self.gamma)])
a, b = np.linalg.solve(X, Y)
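        # Note (added explanation): the two equations pin the per-class counts
        # n_c = a / (b + c**gamma) so that class 1 receives n_max samples and
        # class 10 (matching the hard-coded 10**gamma above) receives n_min
        # samples; intermediate classes decay smoothly between the two.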
classes = list(range(1, self.num_classes + 1))
imbal_class_counts = []
for c in classes:
num_c = int(np.round(a / (b + (c) ** (self.gamma))))
print(c, num_c)
imbal_class_counts.append(num_c)
targets = np.array(self.targets)
# Get class indices
class_indices = [np.where(targets == i)[0] for i in range(self.num_classes)]
# Get imbalanced number of instances
imbal_class_indices = [class_idx[:class_count] for class_idx, class_count in
zip(class_indices, imbal_class_counts)]
imbal_class_indices = | np.hstack(imbal_class_indices) | numpy.hstack |
# MIT License
# Copyright 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
"""An interface for interacting with Morpheus"""
import os
import time
import json
from subprocess import Popen
from typing import Iterable, List, Tuple, Callable, Dict, Union
import imageio
import numpy as np
import tensorflow.compat.v1 as tf
from astropy.io import fits
from matplotlib.colors import hsv_to_rgb
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.filters import sobel
from skimage.measure import regionprops
from skimage.morphology import watershed
from tqdm import tqdm
tf.disable_eager_execution()
import morpheus.core.helpers as helpers
import morpheus.core.model as model
class Classifier:
"""The primary interface for the use of Morpheus.
Images can be classified by calling
:py:meth:`~morpheus.classifier.Classifier.classify` and passing
numpy arrays or string FITS file locations.
After an image this this class offers some post processing functionality by
generating segmentation maps using
:py:meth:`~morpheus.classifier.Classifier.segmap_from_classified`, colorized
morphological classifications using
:py:meth:`~morpheus.classifier.Classifier.colorize_classification`, and
generating catalogs using
:py:meth:`~morpheus.classifier.Classifier.catalog_from_classified`.
For more examples, see the `documentation <https://morpheus-astro.readthedocs.io/>`_.
"""
__graph = None
__session = None
__X = tf.placeholder(tf.float32, shape=[None, 40, 40, 4])
@staticmethod
def classify(
h: Union[np.ndarray, str] = None,
j: Union[np.ndarray, str] = None,
z: Union[np.ndarray, str] = None,
v: Union[np.ndarray, str] = None,
out_dir: str = None,
batch_size: int = 1000,
out_type: str = "rank_vote",
gpus: List[int] = None,
cpus: int = None,
parallel_check_interval: float = 1,
) -> dict:
"""Generates per-pixel classifications from input images.
Args:
h (Union[np.ndarray, str]): The H band image or the path to it
j (Union[np.ndarray, str]): The J band image or the path to it
v (Union[np.ndarray, str]): The V band image or the path to it
z (Union[np.ndarray, str]): The Z band image or the path to it
out_dir (str): If provided, a directory to save the output to
batch_size (int): The size of the batches to use when classifying the input
out_type (str): The method by which to aggregate classifications
for a single pixel. Can be one of "rank_vote",
"mean_var", or "both"
gpus (List[int]): The GPU ids to use for parallel classification
the ids can be found using ``nvidia-smi``
cpus (int): The number of cpus to use for parallel classification.
parallel_check_interval (float): If running a parallel job, how often
to check on the running sub-processes
in minutes.
Returns:
Dictionary containing the classification output for the given input
Raises:
ValueError if both gpus and cpus are given
ValueError if mixed string and numpy arrays are given for h, j, v, z
ValueError if h, j, v, or z are None
"""
Classifier._variables_not_none(["h", "j", "v", "z"], [h, j, v, z])
are_files = Classifier._valid_input_types_is_str(h, j, v, z)
workers, is_gpu = Classifier._validate_parallel_params(gpus, cpus)
if are_files:
hduls, [h, j, v, z] = Classifier._parse_files(h, j, v, z)
if out_dir is None:
out_dir = "."
else:
hduls = []
if len(workers) == 1:
classified = Classifier._classify_arrays(
h=h,
j=j,
v=v,
z=z,
out_type=out_type,
out_dir=out_dir,
batch_size=batch_size,
)
else:
if out_dir is None:
out_dir = "."
Classifier._build_parallel_classification_structure(
[h, j, v, z], workers, batch_size, out_dir, out_type
)
Classifier._run_parallel_jobs(
workers, is_gpu, out_dir, parallel_check_interval
)
Classifier._stitch_parallel_classifications(workers, out_dir, out_type)
classification_hduls, classified = Classifier._retrieve_classifications(
out_dir, out_type
)
hduls.extend(classification_hduls)
for hdul in hduls:
hdul.close()
return classified
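    # Illustrative usage sketch (added; file names are assumptions, not part of
    # the original module):
    #
    #   classified = Classifier.classify(h="h.fits", j="j.fits",
    #                                    v="v.fits", z="z.fits",
    #                                    out_dir="out", out_type="rank_vote")
    #   # `classified` maps morphology/output names to per-pixel arrays.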
@staticmethod
def catalog_from_classified(
classified: dict,
flux: np.ndarray,
segmap: np.ndarray,
aggregation_scheme: Callable = None,
out_file: str = None,
) -> List[Dict]:
"""Creates a catalog of sources and their morphologies.
Args:
classified (dict): A dictionary containing the output from morpheus.
flux (np.ndarray): The corresponding flux image in H band
segmap (np.ndarray): A labeled segmap where every pixel with a
value > 0 is associated with a source.
aggregation_scheme (func): Function that takes three arguments `classified`,
`flux`, and `segmap`, same as this
function, then returns a numpy array
containing the morphological classification
in the following order-spheroid, disk,
irregular, and point source/compact. If
                                       None, then the default flux-weighted
                                       aggregation scheme is used.
out_file (str): a location to save the catalog. Can either be .csv
or .json. Anything else will raise a ValueError.
Returns:
A JSON-compatible list of dictionary objects with the following keys:
{
'id': the id from the segmap
'location': a (y,x) location -- the max pixel within the segmap
'morphology': a dictionary containing the morphology values.
}
"""
if out_file:
if out_file.endswith((".csv", ".json")):
is_csv = out_file.endswith(".csv")
else:
raise ValueError("out_file must end with .csv or .json")
if aggregation_scheme is None:
aggregation_scheme = Classifier.aggregation_scheme_flux_weighted
catalog = []
for region in regionprops(segmap, flux):
_id = region.label
if _id < 1:
continue
img = region.intensity_image
seg = region.filled_image
start_y, start_x, end_y, end_x = region.bbox
dat = {}
for k in classified:
dat[k] = classified[k][start_y:end_y, start_x:end_x].copy()
classification = aggregation_scheme(dat, img, seg)
masked_flux = img * seg
# https://stackoverflow.com/a/3584260
y, x = np.unravel_index(masked_flux.argmax(), masked_flux.shape)
y, x = int(start_y + y), int(start_x + x)
catalog.append(
{"id": _id, "location": [y, x], "morphology": classification}
)
if out_file:
with open(out_file, "w") as f:
if is_csv:
f.write("source_id,y,x,sph,dsk,irr,ps\n")
for c in catalog:
csv = "{},{},{},{},{},{},{}\n"
f.write(
csv.format(
c["id"],
c["location"][0],
c["location"][1],
c["morphology"][0],
c["morphology"][1],
c["morphology"][2],
c["morphology"][3],
)
)
else:
json.dump(catalog, f)
return catalog
# TODO: make the output file with the FITS helper if the output dir is used.
@staticmethod
def segmap_from_classified(
classified: dict,
flux: np.ndarray,
bkg_src_threshold: float = 0.0,
out_dir: str = None,
min_distance: int = 20,
mask: np.ndarray = None,
deblend: bool = True,
) -> np.ndarray:
"""Generate a segmentation map from the classification output.
For more information about the segmentation process, see:
https://arxiv.org/abs/1906.11248
Args:
            classified (dict): A dictionary containing the output from Morpheus.
flux (np.ndarray): The flux to use when making the segmap
bkg_src_threshold (float): The max value that a background
classification pixel can take and be
considered a source. The default is 0.
Should be between [0,1]
out_dir (str): A path to save the segmap in.
min_distance (int): The minimum distance for deblending
            mask (np.ndarray): A boolean mask indicating which pixels should
                               be included in the segmentation.
            deblend (bool): If ``True``, perform deblending as described in
                            step 2 of the algorithm description. If ``False``, return the
segmap without deblending.
Returns:
A np.ndarray segmentation map
"""
if bkg_src_threshold < 0 or bkg_src_threshold >= 1:
err_msg = [
"Invalid value for `bkg_src_threshold`, use a value in the ",
"range [0, 1)",
]
            raise ValueError("".join(err_msg))
bkg = classified["background"]
markers = np.zeros_like(flux, dtype=np.uint8)
print("Building Markers...")
if mask is None:
mask = classified["n"] > 0
is_bkg = np.logical_and(bkg == 1, mask)
is_src = np.logical_and(bkg <= bkg_src_threshold, mask)
markers[is_bkg] = 1
markers[is_src] = 2
sobel_img = sobel(bkg)
print("Watershedding...")
segmented = watershed(sobel_img, markers, mask=mask) - 1
segmented[np.logical_not(mask)] = 0
labeled, _ = ndi.label(segmented)
labeled[np.logical_not(mask)] = -1
if deblend:
labeled = Classifier._deblend(labeled, flux, min_distance)
if out_dir:
fits.PrimaryHDU(data=labeled).writeto(os.path.join(out_dir, "segmap.fits"))
return labeled
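    # Illustrative pipeline sketch (added; the file name is an assumption):
    # feed the classification output into the segmentation and catalog helpers
    # in sequence:
    #
    #   flux = fits.getdata("h.fits")
    #   segmap = Classifier.segmap_from_classified(classified, flux)
    #   catalog = Classifier.catalog_from_classified(classified, flux, segmap)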
@staticmethod
def colorize_classified(
classified: dict, out_dir: str = None, hide_unclassified: bool = True
) -> np.ndarray:
"""Makes a color image from the classification output.
The colorization scheme is defined in HSV and is as follows:
* Spheroid = Red
* Disk = Blue
* Irregular = Green
* Point Source = Yellow
The hue is set to be the color associated with the highest ranked class
for a given pixel. The saturation is set to be the difference between the
highest ranked class and the second highest ranked class for a given
pixel. For example, if the top two classes have nearly equal values given
by the classifier, then the saturation will be low and the pixel will
appear more white. If the top two classes have very different
values, then the saturation will be high and the pixel's color will be
vibrant and not white. The value for a pixel is set to be 1-bkg, where
bkg is value given to the background class. If the background class has
a high value, then the pixel will appear more black. If the background
value is low, then the pixel will take on the color given by the hue and
saturation values.
Args:
            classified (dict): A dictionary containing the output from Morpheus.
out_dir (str): a path to save the image in.
hide_unclassified (bool): If true, black out the edges of the image
that are unclassified. If false, show the
borders as white.
Returns:
A [width, height, 3] array representing the RGB image.
"""
red = 0.0 # spheroid
blue = 0.7 # disk
yellow = 0.18 # point source
green = 0.3 # irregular
shape = classified["n"].shape
colors = np.array([red, blue, green, yellow])
morphs = np.dstack(
[classified[i] for i in helpers.LabelHelper.MORPHOLOGIES[:-1]]
)
ordered = np.argsort(-morphs, axis=-1)
hues = np.zeros(shape)
sats = np.zeros(shape)
vals = 1 - classified["background"]
# the classifier doesn't return values for this area so black it out
if hide_unclassified:
vals[0:5, :] = 0
vals[-5:, :] = 0
vals[:, 0:5] = 0
vals[:, -5:] = 0
for i in tqdm(range(shape[0])):
for j in range(shape[1]):
hues[i, j] = colors[ordered[i, j, 0]]
sats[i, j] = (
morphs[i, j, ordered[i, j, 0]] - morphs[i, j, ordered[i, j, 1]]
)
hsv = np.dstack([hues, sats, vals])
rgb = hsv_to_rgb(hsv)
if out_dir:
png = (rgb * 255).astype(np.uint8)
imageio.imwrite(os.path.join(out_dir, "colorized.png"), png)
return rgb
@staticmethod
def _retrieve_classifications(
out_dir: str, out_type: str
) -> Tuple[List[fits.HDUList], dict]:
f_names = []
for morph in helpers.LabelHelper.MORPHOLOGIES:
if out_type in ["mean_var", "both"]:
f_names.extend(
[
os.path.join(out_dir, f"{morph}_mean.fits"),
os.path.join(out_dir, f"{morph}_var.fits"),
]
)
if out_type in ["rank_vote", "both"]:
f_names.append(os.path.join(out_dir, f"{morph}.fits"))
f_names.append(os.path.join(out_dir, "n.fits"))
hduls, arrs = helpers.FitsHelper.get_files(f_names)
classified = {
os.path.split(n)[1].replace(".fits", ""): a for n, a in zip(f_names, arrs)
}
return hduls, classified
@staticmethod
def _valid_input_types_is_str(
h: Union[np.ndarray, str] = None,
j: Union[np.ndarray, str] = None,
z: Union[np.ndarray, str] = None,
v: Union[np.ndarray, str] = None,
):
in_types = {type(val) for val in [h, j, z, v]}
if len(in_types) > 1:
raise ValueError(
"Mixed input type usuage. Ensure all are numpy arrays or strings."
)
t = in_types.pop()
if t in [np.ndarray, str]:
return t == str
else:
raise ValueError("Input type must either be numpy array or string")
# NEW API ==================================================================
@staticmethod
def _classify_arrays(
h: np.ndarray = None,
j: np.ndarray = None,
z: np.ndarray = None,
v: np.ndarray = None,
out_dir: str = None,
batch_size: int = 1000,
out_type: str = "rank_vote",
) -> Dict:
"""Classify numpy arrays using Morpheus.
Args:
h (np.ndarray): the H band values for an image
j (np.ndarray): the J band values for an image
z (np.ndarray): the Z band values for an image
v (np.ndarray): the V band values for an image
out_dir (str): The location where to save the output files
if None returns the output in memory only.
            batch_size (int): the number of image sections to process at a time
out_type (str): how to process the output from Morpheus. If
'mean_var' record output using mean and variance, If
'rank_vote' record output as the normalized vote
count. If 'both' record both outputs.
Returns:
A dictionary containing the output classifications.
Raises:
ValueError if out_type is not one of ['mean_var', 'rank_vote', 'both']
"""
Classifier._variables_not_none(["h", "j", "z", "v"], [h, j, z, v])
Classifier._arrays_same_size([h, j, z, v])
if out_type not in ["mean_var", "rank_vote", "both"]:
raise ValueError("Invalid value for `out_type`")
mean_var = out_type in ["mean_var", "both"]
rank_vote = out_type in ["rank_vote", "both"]
shape = h.shape
hduls = []
data = {}
if out_dir:
if mean_var:
hs, ds = helpers.FitsHelper.create_mean_var_files(shape, out_dir)
hduls.extend(hs)
data.update(ds)
if rank_vote:
hs, ds = helpers.FitsHelper.create_rank_vote_files(shape, out_dir)
hduls.extend(hs)
data.update(ds)
hs, ds = helpers.FitsHelper.create_n_file(shape, out_dir)
hduls.extend(hs)
data.update(ds)
else:
if mean_var:
data.update(helpers.LabelHelper.make_mean_var_arrays(shape))
if rank_vote:
data.update(helpers.LabelHelper.make_rank_vote_arrays(shape))
data.update(helpers.LabelHelper.make_n_array(shape))
indicies = helpers.LabelHelper.windowed_index_generator(*shape)
window_y, window_x = helpers.LabelHelper.UPDATE_MASK_N.shape
batch_estimate = shape[0] - window_y + 1
batch_estimate *= shape[1] - window_x + 1
batch_estimate = batch_estimate // batch_size
pbar = tqdm(total=batch_estimate, desc="classifying", unit="batch")
while True:
batch = []
batch_idx = []
for _ in range(batch_size):
try:
y, x = next(indicies)
except StopIteration:
break
combined = np.array(
[img[y : y + window_y, x : x + window_x] for img in [h, j, v, z]]
)
batch.append(Classifier._standardize_img(combined))
batch_idx.append((y, x))
if not batch:
break
batch = np.array(batch)
labels = Classifier._call_morpheus(batch)
helpers.LabelHelper.update_labels(data, labels, batch_idx, out_type)
pbar.update()
if rank_vote:
helpers.LabelHelper.finalize_rank_vote(data)
for hdul in hduls:
hdul.close()
return data
@staticmethod
def _standardize_img(img: np.ndarray) -> np.ndarray:
"""Standardizes an input img to mean 0 and unit variance.
Uses the formula described in:
https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization
Args:
img (np.ndarray): the input array to standardize
Returns:
The standardized input
"""
num = img - img.mean()
        denom = max(img.std(), 1 / np.sqrt(np.prod(img.shape)))
        return num / denom
'''This is the core module for HRA functionality. This will perform all HRA
calcs, and return the appropriate outputs.
'''
import logging
import os
import collections
import math
import datetime
from matplotlib import pyplot as plt
import re
import random
import numpy
from osgeo import gdal, ogr, osr
import pygeoprocessing.geoprocessing
LOGGER = logging.getLogger('invest_natcap.habitat_risk_assessment.hra_core')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
def execute(args):
'''
This provides the main calculation functionaility of the HRA model. This
will call all parts necessary for calculation of final outputs.
Inputs:
args- Dictionary containing everything that hra_core will need to
complete the rest of the model run. It will contain the following.
args['workspace_dir']- Directory in which all data resides. Output
and intermediate folders will be subfolders of this one.
args['h_s_c']- The same as intermediate/'h-s', but with the addition
of a 3rd key 'DS' to the outer dictionary layer. This will map to
a dataset URI that shows the potentially buffered overlap between
the habitat and stressor. Additionally, any raster criteria will
be placed in their criteria name subdictionary. The overall
structure will be as pictured:
{(Habitat A, Stressor 1):
{'Crit_Ratings':
{'CritName':
{'Rating': 2.0, 'DQ': 1.0, 'Weight': 1.0}
},
'Crit_Rasters':
{'CritName':
{
'DS': "CritName Raster URI",
'Weight': 1.0, 'DQ': 1.0
}
},
'DS': "A-1 Dataset URI"
}
}
args['habitats']- Similar to the h-s dictionary, a multi-level
dictionary containing all habitat-specific criteria ratings and
rasters. In this case, however, the outermost key is by habitat
name, and habitats['habitatName']['DS'] points to the rasterized
habitat shapefile URI provided by the user.
args['h_s_e']- Similar to the h_s_c dictionary, a multi-level
dictionary containing habitat-stressor-specific criteria ratings
and shapes. The same as intermediate/'h-s', but with the addition
of a 3rd key 'DS' to the outer dictionary layer. This will map to
a dataset URI that shows the potentially buffered overlap between
the habitat and stressor. Additionally, any raster criteria will
be placed in their criteria name subdictionary.
args['risk_eq']- String which identifies the equation to be used
for calculating risk. The core module should check for
possibilities, and send to a different function when deciding R
dependent on this.
args['max_risk']- The highest possible risk value for any given pairing
of habitat and stressor.
args['max_stress']- The largest number of stressors that the user
believes will overlap. This will be used to get an accurate
estimate of risk.
args['aoi_tables']- May or may not exist within this model run, but if
it does, the user desires to have the average risk values by
stressor/habitat using E/C axes for each feature in the AOI layer
specified by 'aoi_tables'. If the risk_eq is 'Euclidean', this will
create risk plots, otherwise it will just create the standard HTML
table for either 'Euclidean' or 'Multiplicative.'
args['aoi_key']- The form of the word 'Name' that the aoi layer uses
for this particular model run.
args['warnings']- A dictionary containing items which need to be
acted upon by hra_core. These will be split into two categories.
'print' contains statements which will be printed using
logger.warn() at the end of a run. 'unbuff' is for pairs which
should use the unbuffered stressor file in lieu of the decayed
rated raster.
{'print': ['This is a warning to the user.', 'This is another.'],
'unbuff': [(HabA, Stress1), (HabC, Stress2)]
}
Outputs:
--Intermediate--
These should be the temp risk and criteria files needed for the
final output calcs.
--Output--
/output/maps/recov_potent_H[habitatname].tif- Raster layer
depicting the recovery potential of each individual habitat.
/output/maps/cum_risk_H[habitatname]- Raster layer depicting the
cumulative risk for all stressors in a cell for the given
habitat.
/output/maps/ecosys_risk- Raster layer that depicts the sum of all
cumulative risk scores of all habitats for that cell.
/output/maps/[habitatname]_HIGH_RISK- A raster-shaped shapefile
containing only the "high risk" areas of each habitat, defined
as being above a certain risk threshold.
Returns nothing.
'''
inter_dir = os.path.join(args['workspace_dir'], 'intermediate')
output_dir = os.path.join(args['workspace_dir'], 'output')
LOGGER.info('Applying CSV criteria to rasters.')
crit_lists, denoms = pre_calc_denoms_and_criteria(
inter_dir,
args['h_s_c'],
args['habitats'],
args['h_s_e'])
LOGGER.info('Calculating risk rasters for individual overlaps.')
# Need to have the h_s_c dict in there so that we can use the H-S pair DS to
# multiply against the E/C rasters in the case of decay.
risk_dict = make_risk_rasters(
args['h_s_c'],
args['habitats'],
inter_dir, crit_lists,
denoms,
args['risk_eq'],
args['warnings'])
    # Know at this point that the non-core has re-created the output directory
# So we can go ahead and make the maps directory without worrying that
# it will throw an 'already exists.'
maps_dir = os.path.join(output_dir, 'Maps')
os.mkdir(maps_dir)
LOGGER.info('Calculating habitat risk rasters.')
# We will combine all of the h-s rasters of the same habitat into
# cumulative habitat risk rastersma db return a list of the DS's of each,
# so that it can be read into the ecosystem risk raster's vectorize.
h_risk_dict, h_s_risk_dict = make_hab_risk_raster(maps_dir, risk_dict)
LOGGER.info('Making risk shapefiles.')
# Also want to output a polygonized version of high and low risk areas in
# each habitat. Will polygonize everything that falls above a certain
# percentage of the total raster risk, or below that threshold. These can
# then be fed into different models.
num_stress = make_risk_shapes(
maps_dir,
crit_lists,
h_risk_dict,
h_s_risk_dict,
args['max_risk'],
args['max_stress'])
LOGGER.info('Calculating ecosystem risk rasters.')
    # Now, combine all of the habitat rasters into one overall ecosystem
    # raster using the DS's from the previous function.
make_ecosys_risk_raster(maps_dir, h_risk_dict)
# Recovery potential will use the 'Recovery' subdictionary from the
# crit_lists and denoms dictionaries
make_recov_potent_raster(maps_dir, crit_lists, denoms)
if 'aoi_tables' in args:
LOGGER.info('Creating subregion maps and risk plots.')
# Let's pre-calc stuff so we don't have to worry about it in the middle
# of the file creation.
avgs_dict, aoi_names = pre_calc_avgs(
inter_dir,
risk_dict,
args['aoi_tables'],
args['aoi_key'],
args['risk_eq'],
args['max_risk'])
aoi_pairs = rewrite_avgs_dict(avgs_dict, aoi_names)
tables_dir = os.path.join(output_dir, 'HTML_Plots')
os.mkdir(tables_dir)
make_aoi_tables(tables_dir, aoi_pairs)
if args['risk_eq'] == 'Euclidean':
make_risk_plots(
tables_dir,
aoi_pairs,
args['max_risk'],
args['max_stress'],
num_stress,
len(h_risk_dict))
# Want to clean up the intermediate folder containing the added r/dq*w
# rasters, since it serves no purpose for the users.
# unecessary_folder = os.path.join(inter_dir, 'ReBurned_Crit_Rasters')
# shutil.rmtree(unecessary_folder)
# Want to remove that AOI copy that we used for ID number->name translation.
#if 'aoi_tables' in args:
# unnecessary_file = os.path.join(inter_dir, 'temp_aoi_copy.shp')
# os.remove(unnecessary_file)
# Want to print out our warnings as the last possible things in the
# console window.
for text in args['warnings']['print']:
LOGGER.warn(text)
def make_risk_plots(out_dir, aoi_pairs, max_risk, max_stress, num_stress, num_habs):
'''
This function will produce risk plots when the risk equation is
euclidean.
Args:
out_dir (string): The directory into which the completed risk plots should
be placed.
aoi_pairs (dictionary):
{'AOIName':
[(HName, SName, E, C, Risk), ...],
....
}
max_risk (float): Double representing the highest potential value for a
single h-s raster. The amount of risk for a given Habitat raster
would be SUM(s) for a given h.
max_stress (float): The largest number of stressors that the user
believes will overlap. This will be used to get an accurate
estimate of risk.
        num_stress (dict): A dictionary that simply associates every habitat
with the number of stressors associated with it. This will help us
determine the max E/C we should be expecting in our overarching
ecosystem plot.
Returns:
None
Outputs:
A set of .png images containing the matplotlib plots for every H-S
combination. Within that, each AOI will be displayed as plotted by
(E,C) values.
A single png that is the "ecosystem plot" where the E's for each AOI
        are the summed E values across all habitats (and likewise for C).
'''
def plot_background_circle(max_value):
circle_color_list = [(6, '#000000'),
(5, '#780000'),
(4.75, '#911206'),
(4.5, '#AB2C20'),
(4.25, '#C44539'),
(4, '#CF5B46'),
(3.75, '#D66E54'),
(3.5, '#E08865'),
(3.25, '#E89D74'),
(3, '#F0B686'),
(2.75, '#F5CC98'),
(2.5, '#FAE5AC'),
(2.25, '#FFFFBF'),
(2, '#EAEBC3'),
(1.75, '#CFD1C5'),
(1.5, '#B9BEC9'),
(1.25, '#9FA7C9'),
(1, '#8793CC'),
(0.75, '#6D83CF'),
(0.5, '#5372CF'),
(0.25, '#305FCF')]
index = 0
for radius, color in circle_color_list:
index += 1
linestyle = 'solid' if index % 2 == 0 else 'dashed'
cir = plt.Circle(
(0, 0),
edgecolor='.25',
linestyle=linestyle,
radius=radius * max_value / 3.75,
fc=color)
plt.gca().add_patch(cir)
def jigger(E, C):
'''
Want to return a fractionally offset set of coordinates so that
each of the text related to strings is slightly offset.
Range of x: E <= x <= E+.1
Range of y: C-.1 <= y <= C+.1
'''
x = E + random.random() * .1
y = C + ((random.random() * .4) - .2)
return (x, y)
# Create plots for each combination of AOI, Hab
plot_index = 0
for aoi_name, aoi_list in aoi_pairs.iteritems():
LOGGER.debug("AOI list for %s: %s" % (aoi_name, aoi_list))
fig = plt.figure(plot_index)
plot_index += 1
plt.suptitle(aoi_name)
fig.text(0.5, 0.04, 'Exposure', ha='center', va='center')
fig.text(0.06, 0.5, 'Consequence', ha='center', va='center',
rotation='vertical')
hab_index = 0
curr_hab_name = aoi_list[0][0]
# Elements look like: (HabName, StressName, E, C, Risk)
for element in aoi_list:
if element == aoi_list[0]:
# Want to have two across, and make sure there are enough
# spaces going down for each of the subplots
plt.subplot(int(math.ceil(num_habs / 2.0)),
2, hab_index)
plot_background_circle(max_risk)
plt.title(curr_hab_name)
plt.xlim([-.5, max_risk])
plt.ylim([-.5, max_risk])
hab_name = element[0]
if curr_hab_name == hab_name:
plt.plot(
element[2], element[3], 'k^',
markerfacecolor='black', markersize=8)
plt.annotate(
element[1], xy=(element[2], element[3]),
xytext=jigger(element[2], element[3]))
continue
# We get here once we get to the next habitat
hab_index += 1
plt.subplot(int(math.ceil(num_habs/2.0)),
2, hab_index)
plot_background_circle(max_risk)
curr_hab_name = hab_name
plt.title(curr_hab_name)
plt.xlim([-.5, max_risk])
plt.ylim([-.5, max_risk])
# We still need to plot the element that gets us here.
plt.plot(
element[2],
element[3],
'k^',
markerfacecolor='black',
markersize=8)
plt.annotate(
element[1],
xy=(element[2], element[3]),
xytext=jigger(element[2], element[3]))
out_uri = os.path.join(
out_dir, 'risk_plot_' + 'AOI[' + aoi_name + '].png')
plt.savefig(out_uri, format='png')
# Create one ecosystem megaplot that plots the points as summed E,C from
# a given habitat, AOI pairing. So each dot would be (HabitatName, AOI1)
# for all habitats in the ecosystem.
plot_index += 1
max_tot_risk = max_risk * max_stress * num_habs
plt.figure(plot_index)
plt.suptitle("Ecosystem Risk")
plot_background_circle(max_tot_risk)
points_dict = {}
for aoi_name, aoi_list in aoi_pairs.items():
for element in aoi_list:
if aoi_name in points_dict:
points_dict[aoi_name]['E'] += element[2]
points_dict[aoi_name]['C'] += element[3]
else:
points_dict[aoi_name] = {}
                points_dict[aoi_name]['E'] = element[2]
                points_dict[aoi_name]['C'] = element[3]
for aoi_name, p_dict in points_dict.items():
# Create the points which are summed AOI's across all Habitats.
plt.plot(p_dict['E'], p_dict['C'], 'k^',
markerfacecolor='black', markersize=8)
plt.annotate(
aoi_name,
xy=(p_dict['E'], p_dict['C']),
xytext=(p_dict['E'], p_dict['C']+0.07))
plt.xlim([0, max_tot_risk])
plt.ylim([0, max_tot_risk])
plt.xlabel("Exposure (Cumulative)")
plt.ylabel("Consequence (Cumulative)")
out_uri = os.path.join(out_dir, 'ecosystem_risk_plot.png')
plt.savefig(out_uri, format='png')
def make_aoi_tables(out_dir, aoi_pairs):
'''
This function will take in an shapefile containing multiple AOIs, and
output a table containing values averaged over those areas.
Input:
out_dir- The directory into which the completed HTML tables should be
placed.
aoi_pairs- Replacement for avgs_dict, holds all the averaged values on
a H, S basis.
{'AOIName':
[(HName, SName, E, C, Risk), ...],
....
}
Output:
A set of HTML tables which will contain averaged values of E, C, and
risk for each H, S pair within each AOI. Additionally, the tables will
contain a column for risk %, which is the averaged risk value in that
area divided by the total potential risk for a given pixel in the map.
Returns nothing.
'''
filename = os.path.join(
out_dir,
'Sub_Region_Averaged_Results_[%s].html'
% datetime.datetime.now().strftime("%Y-%m-%d_%H_%M"))
file = open(filename, "w")
file.write("<html>")
file.write("<title>" + "InVEST HRA" + "</title>")
file.write("<CENTER><H1>" + "Habitat Risk Assessment Model" +
"</H1></CENTER>")
file.write("<br>")
file.write("This page contains results from running the InVEST Habitat \
Risk Assessment model." + "<p>" + "Each table displays values on a \
per-habitat basis. For each overlapping stressor within the model, the \
averages for the desired sub-regions are presented. C, E, and Risk values \
are calculated as an average across a given subregion. Risk Percentage is \
calculated as a function of total potential risk within that area.")
file.write("<br><br>")
file.write("<HR>")
# Now, all of the actual calculations within the table. We want to make one
# table for each AOi used on the subregions shapefile.
for aoi_name, aoi_list in aoi_pairs.items():
file.write("<H2>" + aoi_name + "</H2>")
file.write('<table border="1", cellpadding="5">')
# Headers row
file.write(
"<tr><b><td>Habitat Name</td><td>Stressor Name</td>" +
"<td>E</td><td>C</td><td>Risk</td><td>Risk %</td></b></tr>")
# Element looks like (HabName, StressName, E, C, Risk)
for element in aoi_list:
file.write("<tr>")
file.write("<td>" + element[0] + "</td>")
file.write("<td>" + element[1] + "</td>")
file.write("<td>" + str(round(element[2], 2)) + "</td>")
file.write("<td>" + str(round(element[3], 2)) + "</td>")
file.write("<td>" + str(round(element[4], 2)) + "</td>")
file.write("<td>" + str(round(element[5] * 100, 2)) + "</td>")
file.write("</tr>")
# End of the AOI-specific table
file.write("</table>")
# End of the page.
file.write("</html>")
file.close()
def rewrite_avgs_dict(avgs_dict, aoi_names):
'''
Aftermarket rejigger of the avgs_dict setup so that everything is AOI
centric instead. Should produce something like the following:
{'AOIName':
[(HName, SName, E, C, Risk, R_Pct), ...],
....
}
'''
pair_dict = {}
for aoi_name in aoi_names:
pair_dict[aoi_name] = []
for h_name, h_dict in avgs_dict.items():
for s_name, s_list in h_dict.items():
for aoi_dict in s_list:
if aoi_dict['Name'] == aoi_name:
pair_dict[aoi_name].append((
h_name,
s_name,
aoi_dict['E'],
aoi_dict['C'],
aoi_dict['Risk'],
aoi_dict['R_Pct']))
return pair_dict
def pre_calc_avgs(inter_dir, risk_dict, aoi_uri, aoi_key, risk_eq, max_risk):
'''
This funtion is a helper to make_aoi_tables, and will just handle
pre-calculation of the average values for each aoi zone.
Input:
inter_dir- The directory which contains the individual E and C rasters.
We can use these to get the avg. E and C values per area. Since we
don't really have these in any sort of dictionary, will probably
just need to explicitly call each individual file based on the
names that we pull from the risk_dict keys.
risk_dict- A simple dictionary that maps a tuple of
(Habitat, Stressor) to the URI for the risk raster created when the
various sub components (H/S/H_S) are combined.
{('HabA', 'Stress1'): "A-1 Risk Raster URI",
('HabA', 'Stress2'): "A-2 Risk Raster URI",
...
}
aoi_uri- The location of the AOI zone files. Each feature within this
file (identified by a 'name' attribute) will be used to average
an area of E/C/Risk values.
risk_eq- A string identifier, either 'Euclidean' or 'Multiplicative'
that tells us which equation should be used for calculation of
risk. This will be used to get the risk value for the average E
and C.
max_risk- The user reported highest risk score present in the CSVs.
Returns:
avgs_dict- A multi level dictionary to hold the average values that
will be placed into the HTML table.
{'HabitatName':
{'StressorName':
[{'Name': AOIName, 'E': 4.6, 'C': 2.8, 'Risk': 4.2},
{...},
...
]
},
....
}
aoi_names- Quick and dirty way of getting the AOI keys.
'''
# Since we know that the AOI will be consistent across all of the rasters,
# want to create the new int field, and the name mapping dictionary upfront
driver = ogr.GetDriverByName('ESRI Shapefile')
aoi = ogr.Open(aoi_uri)
cp_aoi_uri = os.path.join(inter_dir, 'temp_aoi_copy.shp')
cp_aoi = driver.CopyDataSource(aoi, cp_aoi_uri)
layer = cp_aoi.GetLayer()
field_defn = ogr.FieldDefn('BURN_ID', ogr.OFTInteger)
layer.CreateField(field_defn)
name_map = {}
count = 0
ids = []
for feature in layer:
ids.append(count)
name = feature.items()[aoi_key]
feature.SetField('BURN_ID', count)
name_map[count] = name
count += 1
layer.SetFeature(feature)
layer.ResetReading()
# Now we will loop through all of the various pairings to deal with all
# their component parts across our AOI. Want to make sure to use our new
# field as the index.
avgs_dict = {}
avgs_r_sum = {}
# Set a temp filename for the AOI raster.
aoi_rast_uri = pygeoprocessing.geoprocessing.temporary_filename()
# Need an arbitrary element upon which to base the new raster.
arb_raster_uri = next(risk_dict.itervalues())
LOGGER.debug("arb_uri: %s" % arb_raster_uri)
# Use the first overlap raster as the base for the AOI
pygeoprocessing.geoprocessing.new_raster_from_base_uri(
arb_raster_uri,
aoi_rast_uri,
'GTiff',
-1,
gdal.GDT_Float32)
# This rasterize should burn a unique burn ID int to each. Need to have a
# dictionary which associates each burn ID with the AOI 'name' attribute
# that's required.
pygeoprocessing.geoprocessing.rasterize_layer_uri(
aoi_rast_uri,
cp_aoi_uri,
option_list=["ATTRIBUTE=BURN_ID", "ALL_TOUCHED=TRUE"])
for pair in risk_dict:
h, s = pair
if h not in avgs_dict:
avgs_dict[h] = {}
avgs_r_sum[h] = {}
if s not in avgs_dict[h]:
avgs_dict[h][s] = []
# Just going to have to pull explicitly. Too late to go back and
# rejigger now.
e_rast_uri = os.path.join(
inter_dir, "H[" + h + ']_S[' + s +
']_E_Risk_Raster.tif')
c_rast_uri = os.path.join(
inter_dir, "H[" + h + ']_S[' + s +
']_C_Risk_Raster.tif')
# Now, we are going to modify the e value by the spatial overlap value.
# Get S.O value first.
h_rast_uri = os.path.join(inter_dir, 'Habitat_Rasters', h + '.tif')
hs_rast_uri = os.path.join(
inter_dir, 'Overlap_Rasters', "H[" +
h + ']_S[' + s + '].tif')
LOGGER.debug("Entering new funct.")
rast_uri_list = [e_rast_uri, c_rast_uri, h_rast_uri, hs_rast_uri]
rast_labels = ['E', 'C', 'H', 'H_S']
over_pix_sums = aggregate_multi_rasters_uri(
aoi_rast_uri,
rast_uri_list,
rast_labels,
[0])
LOGGER.debug("%s,%s:%s" % (h, s, over_pix_sums))
LOGGER.debug("Exiting new funct.")
for burn_value in over_pix_sums:
subregion_name = name_map[burn_value]
# For a given layer under the AOI, first list item is #of pix,
# second is pix sum
if over_pix_sums[burn_value]['H'][0] == 0:
frac_over = 0.
else:
# Casting to float because otherwise we end up with integer
# division issues.
frac_over = over_pix_sums[burn_value]['H_S'][0] / float(
over_pix_sums[burn_value]['H'][0])
s_o_score = max_risk * frac_over + (1-frac_over)
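            # Worked example (added for clarity): with max_risk = 3 and half of
            # the habitat overlapped (frac_over = 0.5), the spatial overlap
            # score is 3 * 0.5 + 0.5 = 2.0; full overlap gives 3, no overlap 1.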
if frac_over == 0.:
e_score = 0.
# Know here that there is overlap. So now check whether we have
# scoring from users. If no, just use spatial overlap.
else:
e_mean = (over_pix_sums[burn_value]['E'][1] /
over_pix_sums[burn_value]['E'][0])
if e_mean == 0.:
e_score = s_o_score
# If there is, want to average the spatial overlap into
# everything else.
else:
e_score = (e_mean + s_o_score) / 2
# If there's no habitat, my E is 0 (indicating that there's no
# spatial overlap), then my C and risk scores should also be 0.
# Setting E to 0 should cascade to also make risk 0.
if e_score == 0.:
avgs_dict[h][s].append(
{'Name': subregion_name, 'E': 0.,
'C': 0.})
else:
c_mean = (over_pix_sums[burn_value]['C'][1] /
over_pix_sums[burn_value]['C'][0])
avgs_dict[h][s].append(
{'Name': subregion_name, 'E': e_score,
'C': c_mean})
for h, hab_dict in avgs_dict.iteritems():
for s, sub_list in hab_dict.iteritems():
for sub_dict in sub_list:
# For the average risk, want to use the avg. E and C values
# that we just got.
if risk_eq == 'Euclidean':
c_val = 0 if sub_dict['C'] == 0. else sub_dict['C'] - 1
e_val = 0 if sub_dict['E'] == 0. else sub_dict['E'] - 1
r_val = math.sqrt((c_val)**2 + (e_val)**2)
else:
r_val = sub_dict['C'] * sub_dict['E']
sub_dict['Risk'] = r_val
if sub_dict['Name'] in avgs_r_sum[h]:
avgs_r_sum[h][sub_dict['Name']] += r_val
else:
avgs_r_sum[h][sub_dict['Name']] = r_val
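    # Worked example (added for clarity): under the 'Euclidean' equation a
    # subregion with average E = 3 and C = 2 gets
    # risk = sqrt((3-1)**2 + (2-1)**2) = sqrt(5) ~= 2.24, while the
    # 'Multiplicative' equation would give 3 * 2 = 6.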
for h, hab_dict in avgs_dict.iteritems():
for s, sub_list in hab_dict.iteritems():
for sub_dict in sub_list:
# Want to avoid div by 0 errors if there is none of a particular
# habitat within a subregion. Thus, if the total for risk for a
# habitat is 0, just return 0 as a percentage too.
curr_total_risk = avgs_r_sum[h][sub_dict['Name']]
if curr_total_risk == 0.:
sub_dict['R_Pct'] = 0.
else:
sub_dict['R_Pct'] = sub_dict['Risk']/curr_total_risk
return avgs_dict, name_map.values()
def aggregate_multi_rasters_uri(aoi_rast_uri, rast_uris, rast_labels, ignore_value_list=[]):
'''Will take a stack of rasters and an AOI, and return a dictionary
containing the number of overlap pixels, and the value of those pixels for
each overlap of raster and AOI.
Input:
aoi_uri- The location of an AOI raster which MUST have individual ID
numbers with the attribute name 'BURN_ID' for each feature on the
map.
rast_uris- List of locations of the rasters which should be overlapped
with the AOI.
rast_labels- Names for each raster layer that will be retrievable from
the output dictionary.
ignore_value_list- Optional argument that provides a list of values
which should be ignored if they crop up for a pixel value of one
of the layers.
Returns:
layer_overlap_info-
{AOI Data Value 1:
{rast_label: [#of pix, pix value],
rast_label: [200, 2567.97], ...
            }
        }
'''
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(aoi_rast_uri)
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(aoi_rast_uri)
rast_uris = [aoi_rast_uri] + rast_uris
# Want to create a set of temporary filenames, just need to be sure to
# clean them up at the end.
temp_rast_uris = [
pygeoprocessing.geoprocessing.temporary_filename() for _ in range(len(rast_uris))]
pygeoprocessing.geoprocessing.align_dataset_list(
rast_uris,
temp_rast_uris,
['nearest'] * len(rast_uris),
cell_size,
"dataset",
0,
dataset_to_bound_index=0)
rast_ds_list = [gdal.Open(uri) for uri in temp_rast_uris]
rast_bands = [ds.GetRasterBand(1) for ds in rast_ds_list]
# Get the AOI to use for line by line, then cell by cell iterration.
aoi_band = rast_bands[0]
n_cols = aoi_band.XSize
n_rows = aoi_band.YSize
# Set up numpy arrays that currently hold only zeros, but will be used for
# each row read.
aoi_row = numpy.zeros((1, n_cols), numpy.float64, 'C')
rows_dict = {}
for layer_name in rast_labels:
rows_dict[layer_name] = numpy.zeros((1, n_cols), numpy.float64, 'C')
    # Now iterate through every cell of the AOI, aggregate everything that's
    # under it, and store that.
    # This defaultdict lets us initialize
    # layer_overlap_info[aoi_pix][layer_name] = [0, 0.] lazily.
layer_overlap_info = collections.defaultdict(
lambda: collections.defaultdict(lambda: list([0, 0.])))
for row_index in range(n_rows):
aoi_band.ReadAsArray(
yoff=row_index,
win_xsize=n_cols,
win_ysize=1,
buf_obj=aoi_row)
for idx, layer_name in enumerate(rast_labels):
rast_bands[idx+1].ReadAsArray(
yoff=row_index,
win_xsize=n_cols,
win_ysize=1,
buf_obj=rows_dict[layer_name])
for aoi_pix_value in numpy.unique(aoi_row):
if aoi_pix_value == nodata:
continue
aoi_mask = (aoi_row == aoi_pix_value)
for layer_name in rast_labels:
valid_rows_dict_mask = (
rows_dict[layer_name] != nodata) & aoi_mask
for ignore_value in ignore_value_list:
valid_rows_dict_mask = valid_rows_dict_mask & (
rows_dict[layer_name] != ignore_value)
layer_sum = numpy.sum(
rows_dict[layer_name][valid_rows_dict_mask])
layer_count = numpy.count_nonzero(valid_rows_dict_mask)
layer_overlap_info[aoi_pix_value][layer_name][0] += layer_count
layer_overlap_info[aoi_pix_value][layer_name][1] += layer_sum
return layer_overlap_info
def make_recov_potent_raster(dir, crit_lists, denoms):
'''
This will do the same h-s calculation as used for the individual E/C
calculations, but instead will use r/dq as the equation for each criteria.
The full equation will be:
SUM HAB CRITS( r/dq )
---------------------
SUM HAB CRITS( 1/dq )
Input:
dir- Directory in which the completed raster files should be placed.
crit_lists- A dictionary containing pre-burned criteria which can be
combined to get the E/C for that H-S pairing.
{'Risk': {
'h_s_c': {
(hab1, stressA):
["indiv num raster URI",
"raster 1 URI", ...],
(hab1, stressB): ...
},
'h': {
hab1: ["indiv num raster URI", "raster 1 URI"],
...
},
'h_s_e': { (hab1, stressA): ["indiv num raster URI"]
}
}
'Recovery': { hab1: ["indiv num raster URI", ...],
hab2: ...
}
}
denoms- Dictionary containing the combined denominator for a given
H-S overlap. Once all of the rasters are combined, each H-S raster
can be divided by this.
{'Risk': {
'h_s_c': {
(hab1, stressA): {
'CritName': 2.0, ...},
(hab1, stressB): {'CritName': 1.3, ...}
},
'h': { hab1: {'CritName': 1.3, ...},
...
},
'h_s_e': { (hab1, stressA): {'CritName': 1.3, ...}
}
}
'Recovery': { hab1: {'critname': 1.6, ...}
hab2: ...
}
}
Output:
A raster file for each of the habitats included in the model displaying
the recovery potential within each potential grid cell.
Returns nothing.
'''
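    # Worked example (added for clarity): two recovery criteria with
    # (r = 3, dq = 2) and (r = 2, dq = 1) give
    # (3/2 + 2/1) / (1/2 + 1/1) = 3.5 / 1.5 ~= 2.33.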
# Want all of the unique habitat names
habitats = denoms['Recovery'].keys()
# First, going to try doing everything all at once. For every habitat,
# concat the lists of criteria rasters.
for h in habitats:
curr_list = crit_lists['Recovery'][h]
curr_crit_names = map(lambda uri: re.match(
'.*\]_([^_]*)',
os.path.splitext(os.path.basename(uri))[0]).group(1), curr_list)
curr_denoms = denoms['Recovery'][h]
def add_recov_pix(*pixels):
'''We will have burned numerator values for the recovery potential
equation. Want to add all of the numerators (r/dq), then divide by
the denoms added together (1/dq).'''
value = numpy.zeros(pixels[0].shape)
denom_val = numpy.zeros(pixels[0].shape)
all_nodata = numpy.zeros(pixels[0].shape, dtype=numpy.bool)
all_nodata[:] = True
for i in range(len(pixels)):
valid_mask = pixels[i] != -1
value = numpy.where(valid_mask, pixels[i] + value, value)
denom_val = numpy.where(
valid_mask,
curr_denoms[curr_crit_names[i]] + denom_val,
denom_val)
# Bitwise and- if both are true, will still return True
all_nodata = ~valid_mask & all_nodata
            # turn off divide by zero warning because we probably will divide
# by zero
olderr = numpy.seterr(divide='ignore')
result = numpy.where(denom_val != 0, value / denom_val, 0.0)
# return numpy error state to old value
numpy.seterr(**olderr)
# mask out nodata stacks
return numpy.where(all_nodata, -1, result)
'''
all_nodata = True
for p in pixels:
if p not in [-1., -1]:
all_nodata = False
if all_nodata:
return -1.
value = 0.
denom_val = 0.
for i in range(0, len(pixels)):
p = pixels[i]
if p not in [-1., -1]:
value += p
denom_val += curr_denoms[curr_crit_names[i]]
if value in [0, 0.]:
return 0
else:
value = value / denom_val
return value'''
# Need to get the arbitrary first element in order to have a pixel size
# to use in vectorize_datasets. One hopes that we have at least 1 thing
# in here.
pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(curr_list[0])
out_uri = os.path.join(dir, 'recov_potent_H[' + h + '].tif')
pygeoprocessing.geoprocessing.vectorize_datasets(
curr_list,
add_recov_pix,
out_uri,
gdal.GDT_Float32,
-1.,
pixel_size,
"union",
resample_method_list=None,
dataset_to_align_index=0,
aoi_uri=None,
vectorize_op=False)
def make_ecosys_risk_raster(dir, h_dict):
'''
This will make the compiled raster for all habitats within the ecosystem.
The ecosystem raster will be a direct sum of each of the included habitat
rasters.
Input:
dir- The directory in which all completed should be placed.
h_dict- A dictionary of raster dataset URIs which can be combined to
create an overall ecosystem raster. The key is the habitat name,
and the value is the dataset URI.
{'Habitat A': "Overall Habitat A Risk Map URI",
'Habitat B': "Overall Habitat B Risk URI"
...
}
Output:
ecosys_risk.tif- An overall risk raster for the ecosystem. It will
be placed in the dir folder.
Returns nothing.
'''
# Need a straight list of the values from h_dict
h_list = h_dict.values()
pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(h_list[0])
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(h_list[0])
out_uri = os.path.join(dir, 'ecosys_risk.tif')
def add_e_pixels(*pixels):
'''
Sum all risk pixels to make a single habitat raster out of all the
h-s overlap rasters.
'''
value = numpy.zeros(pixels[0].shape)
all_nodata = numpy.zeros(pixels[0].shape, dtype=numpy.bool)
all_nodata[:] = True
for i in range(len(pixels)):
valid_mask = pixels[i] != -1
value = numpy.where(valid_mask, pixels[i] + value, value)
all_nodata = ~valid_mask & all_nodata
return numpy.where(all_nodata, -1, value)
pygeoprocessing.geoprocessing.vectorize_datasets(
h_list,
add_e_pixels,
out_uri,
gdal.GDT_Float32,
-1.,
pixel_size,
"union",
resample_method_list=None,
dataset_to_align_index=0,
aoi_uri=None,
vectorize_op=False)
def make_risk_shapes(dir, crit_lists, h_dict, h_s_dict, max_risk, max_stress):
'''
This function will take in the current rasterized risk files for each
habitat, and output a shapefile where the areas that are "HIGH RISK" (high
percentage of risk over potential risk) are the only existing polygonized
areas.
Additonally, we also want to create a shapefile which is only the
"low risk" areas- actually, those that are just not high risk (it's the
combination of low risk areas and medium risk areas).
Since the pygeoprocessing.geoprocessing function can only take in ints, want to predetermine
what areas are or are not going to be shapefile, and pass in a raster that
is only 1 or nodata.
Input:
dir- Directory in which the completed shapefiles should be placed.
crit_lists- A dictionary containing pre-burned criteria which can be
combined to get the E/C for that H-S pairing.
{'Risk': {
'h_s_c': { (hab1, stressA): ["indiv num raster URI",
"raster 1 URI", ...],
(hab1, stressB): ...
},
'h': {
hab1: ["indiv num raster URI", "raster 1 URI"],
...
},
'h_s_e': {(hab1, stressA): ["indiv num raster URI"]
}
}
'Recovery': { hab1: ["indiv num raster URI", ...],
hab2: ...
}
}
h_dict- A dictionary that contains raster dataset URIs corresponding
to each of the habitats in the model. The key in this dictionary is
the name of the habiat, and it maps to the open dataset.
h_s_dict- A dictionary that maps a habitat name to the risk rasters
for each of the applicable stressors.
{'HabA': ["A-1 Risk Raster URI", "A-2 Risk Raster URI", ...],
'HabB': ["B-1 Risk Raster URI", "B-2 Risk Raster URI", ...], ...
}
max_risk- Double representing the highest potential value for a single
h-s raster. The amount of risk for a given Habitat raster would be
SUM(s) for a given h.
max_stress- The largest number of stressors that the user believes will
overlap. This will be used to get an accurate estimate of risk.
Output:
Returns two shapefiles for every habitat, one which shows features only
for the areas that are "high risk" within that habitat, and one which
shows features only for the combined low + medium risk areas.
Return:
num_stress- A dictionary containing the number of stressors being
associated with each habitat. The key is the string name of the
habitat, and it maps to an int counter of number of stressors.
'''
# For each h, want to know how many stressors are associated with it. This
# allows us to not have to think about whether or not a h-s pair was zero'd
# out by weighting or DQ.
num_stress = collections.Counter()
for pair in crit_lists['Risk']['h_s_c']:
h, _ = pair
if h in num_stress:
num_stress[h] += 1
else:
num_stress[h] = 1
    # This is the user defined threshold overlap of stressors, multiplied by the
# maximum potential risk for any given overlap between habitat and stressor
# This yields a user defined threshold for risk.
user_max_risk = max_stress * max_risk
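    # Worked example (added for clarity): with max_risk = 3 and max_stress = 2,
    # user_max_risk = 6, so a cumulative habitat pixel is "high risk" when its
    # value is at least .666 * 6 ~= 4, "medium" between roughly 2 and 4, and
    # "low" below roughly 2.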
def high_risk_raster(*pixels):
# H_Raster is first in the stack.
high_h_mask = numpy.where(
pixels[0] != -1,
pixels[0] / float(user_max_risk) >= .666,
False)
high_hs = numpy.zeros(pixels[0].shape, dtype=numpy.bool)
for i in range(1, len(pixels)):
high_hs = high_hs | (pixels[i] / float(max_risk) >= .666)
return numpy.where(high_hs | high_h_mask, 3, -1)
'''#We know that the overarching habitat pixel is the first in the list
h_pixel = pixels[0]
h_percent = float(h_pixel)/ user_max_risk
#high risk is classified as the top third of risk
if h_percent >= .666:
return 1
#If we aren't getting high risk from just the habitat pixel,
#want to secondarily check each of the h_s pixels.
for p in pixels[1::]:
p_percent = float(p) / max_risk
if p_percent >= .666:
return 1
#If we get here, neither the habitat raster nor the h_s_raster are
#considered high risk. Can return nodata.
return -1.'''
def med_risk_raster(*pixels):
med_h_mask = numpy.where(
pixels[0] != -1,
(pixels[0] / float(user_max_risk) < .666) &
(pixels[0] / float(user_max_risk) >= .333),
False)
med_hs = numpy.zeros(pixels[0].shape, dtype=numpy.bool)
for i in range(1, len(pixels)):
med_hs = med_hs | \
((pixels[i] / float(max_risk) < .666) &
(pixels[i] / float(max_risk) >= .333))
return numpy.where(med_hs | med_h_mask, 2, -1)
'''#We know that the overarching habitat pixel is the first in the list
h_pixel = pixels[0]
h_percent = float(h_pixel)/ user_max_risk
#medium risk is classified as the middle third of risk
if .333 <= h_percent < .666:
return 1
#If we aren't getting medium risk from just the habitat pixel,
#want to secondarily check each of the h_s pixels.
for p in pixels[1::]:
p_percent = float(p) / max_risk
if .333 <= p_percent < .666:
return 1
#If we get here, neither the habitat raster nor the h_s_raster are
#considered med risk. Can return nodata.
return -1.'''
def low_risk_raster(*pixels):
low_h_mask = numpy.where(
pixels[0] != -1,
(pixels[0] / float(user_max_risk) < .333) &
(pixels[0] / float(user_max_risk) >= 0),
False)
low_hs = numpy.zeros(pixels[0].shape, dtype=numpy.bool)
for i in range(1, len(pixels)):
low_hs = (low_hs |
((pixels[i] / float(user_max_risk) < .333) &
(pixels[i] / float(user_max_risk) >= 0)))
return numpy.where(low_hs | low_h_mask, 1, -1)
'''#We know that the overarching habitat pixel is the first in the list
h_pixel = pixels[0]
h_percent = float(h_pixel)/ user_max_risk
#low risk is classified as the lowest third of risk
if 0. <= h_percent < .333:
return 1
#If we aren't getting low risk from just the habitat pixel,
#want to secondarily check each of the h_s pixels.
for p in pixels[1::]:
p_percent = float(p) / max_risk
if 0. <= p_percent < .333:
return 1
#If we get here, neither the habitat raster nor the h_s_raster are
#considered low risk. Can return nodata.
return -1.'''
def combo_risk_raster(*pixels):
# We actually know that there will be a l_pix, m_pix, and h_pix
# But it's easier to just loop through all of them.
combo_risk = numpy.zeros(pixels[0].shape)
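# Hedged sketch (added; not the original InVEST code): the inputs stacked here are the
# low/medium/high rasters built above (classes 1/2/3 with -1 as nodata), so one
# plausible way to finish this function is to keep the highest class present per pixel.
combo_risk.fill(-1)
for pix in pixels:
    combo_risk = numpy.where(pix > combo_risk, pix, combo_risk)
return combo_risk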
import numpy as np
import pandas as pd
def classify_prices(discount):
    price_classification = []
    for d in discount:
        d = float(d)
        if d <= 0:
            category = 'no_discount'
        elif d <= 0.1:
            category = 'discounted'
        elif d <= 0.2:
            category = 'good_deal'
        else:
            category = 'buy_now'
        price_classification.append(category)
    return price_classification
def calculate_discount(current, reference):
list_discount = []
for i in range(len(current)):
c = current[i]
r = reference[i]
discount = (r - c)/r
list_discount.append(discount)
return list_discount
def read_files(current_price_filename, reference_price_filename):
with open(current_price_filename,encoding='utf-8')as f:
data = np.loadtxt(f,delimiter=',')
with open(reference_price_filename, encoding='utf-8')as f:
data1 = np.loadtxt(f, delimiter=',')
current = np.array(data, dtype=int)  # the np.int alias is removed in NumPy 1.24+
reference = np.array(data1, dtype=int)
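# Illustrative usage sketch (added; not part of the original exercise). The file I/O
# above is truncated in this dump, so the example sticks to the two complete helpers:
def _example_price_report(current_prices, reference_prices):
    discounts = calculate_discount(current_prices, reference_prices)
    return list(zip(discounts, classify_prices(discounts)))
# _example_price_report([90, 100], [100, 100]) -> [(0.1, 'discounted'), (0.0, 'no_discount')]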
import numpy as np
from keychest.keychestenv import KeyChestEnvironment
# the environment is NP-hard due to reduction from TSP
# (imagine a setting where only 1 combination leads to enough food)
# https://en.wikipedia.org/wiki/Travelling_salesman_problem
# this file provides a heuristic "solving" the environment (to some extent)
def features_for_obs(obs):
"""Features for an observation."""
objects = KeyChestEnvironment.OBJECTS
def get_map(obj):
idx = objects.index(obj)
return obs[:, :, idx]
def get_where(obj):
m = get_map(obj)
return list(zip(*np.where(m)))
def get_where1(obj):
r = get_where(obj)
assert len(r) == 1
return r[0]
ppos = get_where1('player')
def is_present(obj):
r = get_where(obj)
return len(r) > 0
def items_at_player():
player_location = ppos
result = []
for i, obj in enumerate(objects):
if obs[player_location[0], player_location[1], i]:
result.append(obj)
return result
assert obs.shape[2] == len(objects)
result = {}
result['player_position_x'] = ppos[0]
result['player_position_y'] = ppos[1]
if is_present('lamp_on'):
result['lamp_state'] = 1
elif is_present('lamp_off'):
result['lamp_state'] = 0
else:
result['lamp_state'] = -1
items = items_at_player()
result['at_food'] = 'food' in items
result['at_key'] = 'key' in items
result['at_chest'] = 'chest' in items
result['at_button'] = 'button' in items
result['health'] = np.sum(get_map('health'))
result['keys_collected'] = np.sum(get_map('keys_collected'))
return result
def max_reward(env):
"""Return reward upper bound."""
obs = env.reset()
objects = KeyChestEnvironment.OBJECTS
def get_map(obs, obj):
return obs[:, :, objects.index(obj)]
max_reward_ = 0
rd = env.reward_dict
n_keys = np.sum(get_map(obs, 'key'))
n_chests = np.sum(get_map(obs, 'chest'))
n_food = np.sum(get_map(obs, 'food'))
if 'key_collected' in rd:
max_reward_ += n_keys * rd['key_collected']
if 'food_collected' in rd:
max_reward_ += n_food * rd['food_collected']
if 'chest_opened' in rd:
max_reward_ += min(n_keys, n_chests) * rd['chest_opened']
max_reward_ += (n_food * env.engine.food_efficiency + env.engine.initial_health) * rd['step']
return max_reward_
def hardcoded_policy_step(env, do_print=False):
"""Get a step by a hardcoded policy."""
obs = env.engine.observation
objects = env.engine.OBJECTS
def get_map(obs, obj):
return obs[:, :, objects.index(obj)]
def get_objects(obs, obj):
return list(zip(*np.where(get_map(obs, obj))))
def closest_object(obs, obj):
ppos = get_objects(obs, 'player')[0]
objects = get_objects(obs, obj)
if objects:
distances = np.linalg.norm(np.array(objects) - np.array(ppos), axis=1, ord=1)
closest_idx = np.argmin(distances)
else:
distances = []
closest_idx = -1
result = {'distances': distances, 'ppos': ppos, 'objects': objects,
'closest_idx': closest_idx,
'n': len(objects)}
if closest_idx >= 0:
result['smallest_distance'] = distances[closest_idx]
result['closest_object'] = objects[closest_idx]
return result
health = features_for_obs(obs)['health']
keys = features_for_obs(obs)['keys_collected']
button_pos = closest_object(obs, 'button')['closest_object']
key_info = closest_object(obs, 'key')
chest_info = closest_object(obs, 'chest')
food_info = closest_object(obs, 'food')
ppos = key_info['ppos']
if do_print:
print("Health", health, "Keys", keys)
def dist_to(v):
return np.linalg.norm(np.array(ppos) - v, ord=1)
# have keys -> going for a chest
if keys > 0 and chest_info['n']:
target = chest_info['closest_object']
if do_print:
print('Going to the chest!', target)
elif key_info['n']:
target = key_info['closest_object']
if do_print:
print('Going for the key!', target)
else:
target = button_pos
if do_print:
print("Going for the button", target)
if do_print:
print("Dist", dist_to(target))
# overriding target if there is food
def health_alert():
if health < 3:
return True
if health < dist_to(target) * 2:
return True
return False
if health_alert() and food_info['n']:
target = food_info['closest_object']
if do_print:
print('Going for food, hungry')
dx, dy = np.array(target)
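# The line above is cut at the API call in this dump. A plausible continuation (an
# assumption, not the original code) is to step greedily toward the target along the
# dominant axis; the action names below are hypothetical and would need to be mapped
# onto KeyChestEnvironment's actual action encoding:
#     dx, dy = np.array(target) - np.array(ppos)
#     if abs(dx) >= abs(dy):
#         action = 'down' if dx > 0 else 'up'
#     else:
#         action = 'right' if dy > 0 else 'left'
#     return action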
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script for plotting various data sets from the servo controllers."""
import collections
import ctypes
import importlib
import signal
import socket
import sys
import time
from makani.avionics.common import aio
from makani.avionics.common import pack_avionics_messages as avionics_messages
from makani.avionics.common import servo_types
from makani.avionics.firmware.monitors import servo_types as servo_monitor_types
from makani.avionics.network import aio_node
from makani.avionics.servo.firmware import r22_types
from makani.lib.python import c_helpers
from makani.lib.python import ctype_util
import numpy
from PySide import QtCore
from PySide import QtGui
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
servo_status_bits = c_helpers.EnumHelper('ServoStatusFlag', servo_types,
'kServoStatus')
servo_warning_bits = c_helpers.EnumHelper('ServoWarningFlag', servo_types,
'kServoWarning')
servo_error_bits = c_helpers.EnumHelper('ServoErrorFlag', servo_types,
'kServoError')
r22_status_bits = c_helpers.EnumHelper('R22StatusBit', r22_types)
# Must be included after PySide in order to force pyqtgraph to use it.
pyqtgraph = importlib.import_module('pyqtgraph')
dockarea = importlib.import_module('pyqtgraph.dockarea')
Alias = collections.namedtuple('Alias', ['base_name'])
Operation = collections.namedtuple('Operation', ['function', 'dtype'])
def MultiplyOp(param, factor):
return Operation(lambda c, s, d: factor * d[s][param][c], numpy.float64)
def CountsToDegreesOp(param, counts):
return Operation(
lambda c, s, d: numpy.mod(d[s][param][c] * 360.0 / counts, 360.0) - 180.0,
numpy.float64)
def RadiansToDegreesOp(param):
return MultiplyOp(param, 180.0 / numpy.pi)
def DeltaFirstSourceOp(param):
return Operation(lambda c, s, d: d[s][param][c] - d[0][param][c],
numpy.float64)
class PlotDockArea(dockarea.DockArea):
"""Create plot dock area for GUI.
This class handles all plotting functionality in the main dock area. Add
the instantiated object to a QtGui layout, and then call the appropriate
create plots function(s).
"""
def __init__(self):
super(PlotDockArea, self).__init__()
self._lines = {}
self._bits = {}
self._plots = []
def _AddLine(self, node, name, plot):
"""Add a new data signal to plot redraw list."""
if node not in self._lines:
self._lines[node] = []
self._lines[node].append((name, plot))
def _AddBit(self, node, name, bit, offset, plot):
"""Add a new bit data signal to plot redraw list from a bit field."""
if node not in self._bits:
self._bits[node] = []
self._bits[node].append((name, bit, offset, plot))
def _GetPlotPen(self, servos, signals):
"""Helper function to generate different pen colors."""
signals = [i for i, sig in enumerate(signals) if sig]
pen = {}
for i, (_, node) in enumerate(servos):
for j, sig in enumerate(signals):
pen[(node, sig)] = i * len(signals) + j
return (pen, len(pen))
def _NewPlot(self, title):
"""Helper function to generate new plots with default options."""
dock = dockarea.Dock(name=title)
glw = pyqtgraph.GraphicsLayoutWidget()
dock.addWidget(glw)
p = glw.addPlot(title=title)
p.showGrid(True, True)
p.setMouseEnabled(x=False)
self._plots.append(p)
return (p, dock)
def _PlotAngleBias(self, servos):
"""Plot servo angle bias estimates (for paired servos)."""
return self.CreatePlot(servos, 'Angle bias', 'Bias', 'deg', ['ang_bias'])
def _PlotVelocity(self, servos):
"""Plot servo velocity measurements."""
return self.CreatePlot(servos, 'Velocity', 'Velocity', 'deg/s', ['vel_m'])
def _PlotCurrent(self, servos):
"""Plot servo current measurements."""
return self.CreatePlot(servos, 'Current', 'Current', 'A',
['cur_m', 'cur_limit', 'cur_nlimit'])
def _PlotLineVoltage(self, servos):
"""Plot servo line voltage measurements."""
return self.CreatePlot(servos, 'Line voltage', 'Line voltage', 'V',
['v_lv_in_a', 'v_lv_in_b', 'v_servo'])
def _PlotAngleError(self, servos):
"""Plot servo angle error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Angle error (rel {0})'.format(servos[0][0]),
'Angle', 'deg', ['ang_err'])
def _PlotVelocityError(self, servos):
"""Plot servo velocity error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Velocity error (rel {0})'.format(servos[0][0]),
'Velocity', 'deg/s', ['vel_err'])
def _PlotCurrentError(self, servos):
"""Plot servo current error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Current error (rel {0})'.format(servos[0][0]),
'Current', 'A', ['cur_err'])
def CreateLogicPlot(self, servos, title, bits):
"""Plot status bits."""
(p, dock) = self._NewPlot(title)
p.addLegend()
p.setLabel('bottom', 'Time')
p.setLabel('left', 'Status bits')
p.setYRange(0.0, len(bits))
p.setMouseEnabled(x=False, y=False)
(pen, pens) = self._GetPlotPen(servos, [True] * len(bits))
for name, node in servos:
for idx, bitinfo in enumerate(bits):
param, enum, shortname = bitinfo
value = enum.Value(shortname)
# Determine which bit is set.
bit = bin(value)[::-1].index('1')
bit_name = name + ' ' + shortname
self._AddBit(node, param, bit, len(bits) - idx - 1,
p.plot(name=bit_name, pen=(pen[(node, idx)], pens)))
return dock
def CreatePlot(self, actuators, title, ylabel, yunit, params):
"""Plot a list of params."""
(p, dock) = self._NewPlot(title)
p.addLegend()
p.setLabel('bottom', 'Time')
p.setLabel('left', ylabel, yunit)
(pen, pens) = self._GetPlotPen(actuators, [True] * len(params))
for name, node in actuators:
for i, param in enumerate(params):
plot_label = name + ' ' + param
self._AddLine(node, param,
p.plot(name=plot_label, pen=(pen[(node, i)], pens)))
return dock
def Redraw(self, data, history):
"""Redraw all plots with new data."""
data.lock()
a, b = data.GetIndices(history)
for node, line in self._lines.items():
for (name, plot) in line:
plot.setData(x=data.time[a:b], y=data.GetData(name, node, a, b))
for node, line in self._bits.items():
for (name, bit, offset, plot) in line:
plot.setData(x=data.time[a:b],
y=offset + 0.9 * data.GetDataBit(name, bit, node, a, b))
data.unlock()
for p in self._plots:
p.setXRange(-history, 0.0)
def ClearPlots(self):
"""Clear all plot windows."""
self._lines = {}
self._bits = {}
self._plots = []
# pylint: disable=invalid-name
if self.topContainer:
self.topContainer.close()
self.topContainer = None
def CreateCommandPlots(self, servos):
"""Create servo command plots."""
return [self.CreatePlot(servos, 'Angle command', 'Angle', 'deg',
['ang_m', 'ang_cmd'])]
def CreateEstimatorPlots(self, servos):
"""Create servo estimator plots."""
return [self.CreatePlot(servos, 'Angle estimate', 'Angle', 'deg',
['ang_m', 'ang_est']),
self._PlotVelocity(servos)]
def CreateStatusPlots(self, servos):
"""Create servo status bit plots."""
servo_status = [('flags.status', servo_status_bits, 'Paired'),
('flags.status', servo_status_bits, 'Commanded'),
('flags.status', servo_status_bits, 'Armed'),
('flags.status', servo_status_bits, 'Reset'),
('flags.warning', servo_warning_bits, 'PairTimeout'),
('flags.warning', servo_warning_bits, 'PairFailed')]
r22_supply = ['ShortCircuitDetected',
'OverVoltage',
'UnderVoltage',
'CurrentOutputLimited',
'VoltageOutputLimited']
r22_feedback = ['FeedbackError',
'MotorPhasingError',
'EnableInputNotActive',
'DriveFault']
return [
self.CreateLogicPlot(servos, 'Servo status', servo_status),
self.CreateLogicPlot(
servos, 'R22 supply',
[('r22.status_bits', r22_status_bits, x) for x in r22_supply]),
self.CreateLogicPlot(
servos, 'R22 feedback',
[('r22.status_bits', r22_status_bits, x) for x in r22_feedback])]
def CreateCurrentPlots(self, servos):
"""Create servo current plots."""
return [self._PlotCurrent(servos)]
def CreateVoltagePlots(self, servos):
"""Create servo voltage plots."""
return [self._PlotLineVoltage(servos)]
def CreatePairedPlots(self, servos):
"""Create paired servo plots."""
return [self._PlotAngleBias(servos),
self._PlotAngleError(servos),
self._PlotVelocityError(servos),
self._PlotCurrentError(servos)]
def BuildPlotStack(self, plots, position):
"""Add plots to stack and bring first plot to foreground."""
self.addDock(plots[0], position)
for prev, plot in enumerate(plots[1:]):
self.addDock(plot, 'below', plots[prev])
# Bring first plot to foreground.
if len(plots) > 1:
stack = plots[0].container().stack
current = stack.currentWidget()
current.label.setDim(True)
stack.setCurrentWidget(plots[0])
plots[0].label.setDim(False)
class MainWindow(QtGui.QMainWindow):
"""Create main window for GUI.
This class handles the main window, user interface, and plot display.
"""
def __init__(self, history=60):
super(MainWindow, self).__init__()
self._threads = []
self._history = history
self._redraw_timer = QtCore.QTimer(self)
self._InitUserInterface(history)
self.connect(self._redraw_timer, QtCore.SIGNAL('timeout()'), self._Redraw)
def _InitUserInterface(self, history):
"""Initialize widgets and layout of user interface."""
central_widget = QtGui.QWidget(self)
# Command line.
command_cbox = QtGui.QComboBox(self)
command_cbox.setEditable(True)
command_cbox.lineEdit().returnPressed.connect(self._HandleCommandRequest)
self._command_cbox = command_cbox
# Time history.
history_sbox = QtGui.QSpinBox(self)
history_sbox.setRange(1, history)
history_sbox.setSingleStep(1)
history_sbox.setSuffix(' s')
history_sbox.setValue(history)
history_sbox.valueChanged.connect(self._HandleHistoryLength)
self._history_sbox = history_sbox
# Refresh rate.
refresh_sbox = QtGui.QSpinBox(self)
refresh_sbox.setSuffix(' Hz')
refresh_sbox.setValue(20)
refresh_sbox.valueChanged.connect(self._HandleRedrawRate)
self._refresh_sbox = refresh_sbox
# Pause button.
self._pause_btn = QtGui.QPushButton('Pause', self)
self._pause_btn.clicked.connect(self._HandlePauseButton)
# Plot area.
self._plots = PlotDockArea()
# Status message.
self._status_message = QtGui.QLabel('', self)
# Layout.
hbox = QtGui.QHBoxLayout()
hbox.addWidget(QtGui.QLabel('Command:', self))
hbox.addWidget(self._command_cbox, stretch=1)
hbox.addWidget(QtGui.QLabel('History:', self))
hbox.addWidget(self._history_sbox)
hbox.addWidget(QtGui.QLabel('Refresh:', self))
hbox.addWidget(self._refresh_sbox)
hbox.addWidget(self._pause_btn)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self._plots, stretch=1)
central_widget.setLayout(vbox)
# Main window.
self.setCentralWidget(central_widget)
self.setGeometry(300, 150, 1200, 1000)
self.setWindowTitle('Servo Plotter')
self.statusBar().addWidget(self._status_message)
self._SetRedrawRate(refresh_sbox.value())
self.show()
def _SelectServoSources(self, sources, history):
"""Close existing plots, then create new plots for specified servos."""
self._TryCloseThreads()
self._servo_status = ServoStatusBuffer(allowed_sources=sources,
period=0.01, history=history)
self._servo_status.start()
self._data_source = self._servo_status
self._threads.append(self._servo_status)
def _SelectMotorSources(self, sources, history):
"""Close existing plots, then create new plots for specified motors."""
self._TryCloseThreads()
self._motor_status = MotorStatusBuffer(allowed_sources=sources,
period=0.01, history=history)
self._motor_status.start()
self._data_source = self._motor_status
self._threads.append(self._motor_status)
def _HandleCommandRequest(self):
"""Handle a user command from text entry."""
text = self._command_cbox.currentText()
try:
command, param = text.split(' ', 1)
except ValueError:
command = text
param = ''
handlers = {'select': self._HandleSelectCommand}
command = command.lower()
if command in handlers:
handlers[command](param)
return
self._PrintError('Unknown command: %s' % command)
def _HandleSelectCommand(self, param):
"""Handle user select command."""
params = param.split()
# Possible nodes to select.
servo_nodes = [n for n, _ in aio_node_helper
if n.startswith('kAioNodeServo')]
motor_nodes = [n for n, _ in aio_node_helper
if n.startswith('kAioNodeMotor')]
# Parse node selection.
selected_servos = [s for s in params
if 'kAioNodeServo' + s.capitalize() in servo_nodes]
selected_motors = [m for m in params
if 'kAioNodeMotor' + m.capitalize() in motor_nodes]
for s in selected_servos + selected_motors:
params.remove(s)
plots = []
actuators = []
if selected_servos and selected_motors:
# Only one source type can be selected at a time.
self._PrintError('Only one source type (servos or motors) can be '
'selected at a time')
elif selected_servos:
# Servos were selected.
sources = ['kAioNodeServo' + s.capitalize() for s in selected_servos]
actuators = [(aio_node_helper.ShortName(s), aio_node_helper.Value(s))
for s in sources]
# Select data sources.
self._SelectServoSources(sources, self._history)
# Possible plots to select.
plot_types = {'cmd': (self._plots.CreateCommandPlots, 'top'),
'est': (self._plots.CreateEstimatorPlots, 'top'),
'status': (self._plots.CreateStatusPlots, 'top'),
'cur': (self._plots.CreateCurrentPlots, 'top'),
'volt': (self._plots.CreateVoltagePlots, 'top'),
'paired': (self._plots.CreatePairedPlots, 'bottom')}
# Parse plot selection.
plot_params = [p for p in params if p.lower() in plot_types]
for p in plot_params:
params.remove(p)
# Custom plots.
for p in [p for p in params if p in self._servo_status.GetParams()]:
params.remove(p)
def _GenerateCustomPlot(actuators, p=p):
return [self._plots.CreatePlot(actuators, p, p, '', [p])]
plots.append((_GenerateCustomPlot, 'top'))
# Add default plot selection and add standard plot types to plot list.
if not plot_params and not plots:
plot_params = ['cmd']
for p in plot_params:
plots.append(plot_types[p.lower()])
self._PrintMessage('Selected servos: %s' % ', '.join(selected_servos))
elif selected_motors:
# Motors were selected.
sources = ['kAioNodeMotor' + s.capitalize() for s in selected_motors]
actuators = [(aio_node_helper.ShortName(s), aio_node_helper.Value(s))
for s in sources]
# Select data sources.
self._SelectMotorSources(sources, self._history)
# Custom plots.
for p in [p for p in params if p in self._motor_status.GetParams()]:
params.remove(p)
def _GenerateCustomPlot(actuators, p=p):
return [self._plots.CreatePlot(actuators, p, p, '', [p])]
plots.append((_GenerateCustomPlot, 'top'))
self._PrintMessage('Selected motors: %s' % ', '.join(selected_motors))
else:
# No nodes were selected.
self._PrintError('No nodes were selected')
if params:
self._PrintError('Unknown parameters: %s' % ' '.join(params))
# Create plots.
stacks = {}
self._plots.ClearPlots()
for plot in plots:
func, stack = plot
if stack not in stacks:
stacks[stack] = []
stacks[stack].extend(func(actuators))
for key, value in stacks.items():
self._plots.BuildPlotStack(value, key)
def _HandleRedrawRate(self, value):
"""Handle change to plot refresh rate."""
self._SetRedrawRate(value)
def _HandleHistoryLength(self, value):
"""Handle change to history length."""
pass
def _HandlePauseButton(self):
"""Handle toggling of pause button."""
if self._pause_btn.text() == 'Pause':
self._StopRedraw()
else:
self._StartRedraw()
def _SetRedrawRate(self, hz):
"""Set plot redraw rate."""
if hz > 0:
self._redraw_timer.start(int(1000.0 / hz))
self._StartRedraw()
else:
self._StopRedraw()
def _StartRedraw(self):
"""Start plot redraw timer."""
self._redraw_timer.start()
palette = self._pause_btn.palette()
palette.setColor(QtGui.QPalette.Button, QtCore.Qt.green)
self._pause_btn.setText('Pause')
self._pause_btn.setAutoFillBackground(True)
self._pause_btn.setPalette(palette)
self._pause_btn.update()
def _StopRedraw(self):
"""Stop plot redraw timer."""
self._redraw_timer.stop()
palette = self._pause_btn.palette()
palette.setColor(QtGui.QPalette.Button, QtCore.Qt.red)
self._pause_btn.setText('Paused')
self._pause_btn.setAutoFillBackground(True)
self._pause_btn.setPalette(palette)
self._pause_btn.update()
def _Redraw(self):
"""Redraw plots."""
if hasattr(self, '_data_source'):
self._plots.Redraw(self._data_source, self._history_sbox.value())
def _PrintMessage(self, msg):
"""Print status message."""
self._status_message.setText(msg)
def _PrintError(self, error):
"""Print error message."""
self._PrintMessage('ERROR: ' + error)
def _TryCloseThreads(self):
"""Try to close running threads."""
for thread in self._threads:
thread.should_exit = True
for thread in self._threads:
if thread.isRunning():
thread.wait(2000)
if thread.isRunning():
self._PrintError('Could not terminate {:s}'.format(thread))
self.close()
self._threads = []
def closeEvent(self, event):
"""Override close event in order to close threads."""
self._TryCloseThreads()
event.accept()
class AioDataStream(QtCore.QThread, QtCore.QMutex):
"""Handle incoming AIO data.
This class provides a general interface to handling a circular buffer of
network data.
"""
def __init__(self, allowed_sources, message_type, message_template, period,
history, parent=None):
QtCore.QThread.__init__(self, parent)
QtCore.QMutex.__init__(self)
self.should_exit = False
self._half_size = int(numpy.ceil(history / period))
self._buffer_size = 2 * self._half_size
self._period = period
self._head = 0
self._timestamp = time.time()
self._source_map = {aio_node_helper.Value(x): i
for i, x in enumerate(allowed_sources)}
self._aio_client = aio.AioClient(message_types=[message_type],
allowed_sources=allowed_sources,
timeout=0.2)
self.time = period * numpy.arange(-self._half_size + 1, 1)
self._data = [None] * len(allowed_sources)
self._derived_params = collections.OrderedDict()
message_dict = ctype_util.CTypeToPython(message_template)
for i in range(len(self._data)):
self._data[i] = self._InitBuffers(message_dict)
def run(self): # QThread Virtual function.
"""Poll for new messages."""
while not self.should_exit:
try:
(_, header, message) = self._aio_client.Recv()
self.lock()
message_dict = ctype_util.CTypeToPython(message)
self.HandleMessage(header, message_dict, time.time())
self.unlock()
except socket.timeout:
pass
self._aio_client.Close()
def HandleMessage(self, header, message, timestamp):
"""Handle new messages."""
row = self._source_map[header.source]
dt = timestamp - self._timestamp
count = min(int(dt / self._period), self._half_size)
if count:
# Advance position in circular buffer.
self.ZeroOrderHold(self._head, count)
shadow = numpy.mod(self._head + self._half_size, self._buffer_size)
self.ZeroOrderHold(shadow, count)
self._head = numpy.mod(self._head + count, self._buffer_size)
self._timestamp = timestamp
self.ExtractData(row, self._head, header.type, message)
shadow = numpy.mod(self._head + self._half_size, self._buffer_size)
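# Truncated here in this dump. Presumably the same sample is also written at the
# shadow position so that both halves of the circular buffer stay consistent (an
# inference from the paired ZeroOrderHold calls above, not checked against the
# original Makani source):
self.ExtractData(row, shadow, header.type, message)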
import numpy as np
from statsmodels.tools.decorators import (cache_readonly,
cache_writable, resettable_cache)
from scipy import optimize
from numpy import dot, identity, kron, log, zeros, pi, exp, eye, abs, empty
from numpy.linalg import inv, pinv
import statsmodels.base.model as base
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams, _ma_transparams,
_ma_invtransparams)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.sandbox.regression.numdiff import (approx_fprime,
approx_fprime_cs, approx_hess, approx_hess_cs)
from statsmodels.tsa.kalmanf import KalmanFilter
from scipy.stats import t, norm
from scipy.signal import lfilter
try:
from kalmanf import kalman_loglike
fast_kalman = 1
except ImportError:
    fast_kalman = 0
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
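def _unpack_params_example():
    # Added illustration (not part of statsmodels): with order=(2, 1), k_trend=1 and
    # k_exog=0 the packed vector is laid out as [const, ar.L1, ar.L2, ma.L1].
    trend, exog, ar, ma = _unpack_params(np.array([0.5, 0.6, -0.2, 0.3]), (2, 1), 1, 0)
    assert list(trend) == [0.5] and list(ar) == [0.6, -0.2] and list(ma) == [0.3]
    assert exog.size == 0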
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order):
k_ar, k_ma = order
exog = data.exog
if exog is not None:
exog_names = data._get_names(data._orig_exog) or []
else:
exog_names = []
ar_lag_names = util.make_lag_names(data.ynames, k_ar, 0)
ar_lag_names = [''.join(('ar.', i))
for i in ar_lag_names]
ma_lag_names = util.make_lag_names(data.ynames, k_ma, 0)
ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]
trend_name = util.make_lag_names('', 0, k_trend)
exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
return exog_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog),1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
class ARMA(tsbase.TimeSeriesModel):
"""
Autoregressive Moving Average ARMA(p,q) Model
Parameters
----------
endog : array-like
The endogenous variable.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method.
"""
def __init__(self, endog, exog=None, dates=None, freq=None):
super(ARMA, self).__init__(endog, exog, dates, freq)
if exog is not None:
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the lag length selected according
to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E. J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p,q,k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
armod = AR(endog).fit(ic='bic', trend='nc')
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'), arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp+q,p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q==0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
return start_params
def _fit_start_params(self, order, method):
if method != 'css-mle': # use Hannan-Rissanen to get start params
start_params = self._fit_start_params_hr(order)
else: # use CSS to get start params
func = lambda params: -self.loglike_css(params)
#start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
start_params = self._fit_start_params_hr(order)
if self.transparams:
start_params = self._invtransparams(start_params)
bounds = [(None,)*2]*sum(order)
mlefit = optimize.fmin_l_bfgs_b(func, start_params,
approx_grad=True, m=12, pgtol=1e-7, factr=1e3,
bounds = bounds, iprint=-1)
start_params = self._transparams(mlefit[0])
return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
loglike = self.loglike
#if self.transparams:
# params = self._invtransparams(params)
#return approx_fprime(params, loglike, epsilon=1e-5)
return approx_fprime_cs(params, loglike)
def hessian(self, params):
"""
Compute the Hessian at params.
Notes
-----
This is a numerical approximation.
"""
loglike = self.loglike
#if self.transparams:
# params = self._invtransparams(params)
if not fast_kalman or self.method == "css":
return approx_hess_cs(params, loglike, epsilon=1e-5)
else:
return approx_hess(params, self.loglike, epsilon=1e-3)[0]
def _transparams(self, params):
"""
Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
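# Note (added; not in statsmodels): _transparams and _invtransparams are intended to be
# inverse maps between the optimizer's unconstrained space and the stationary/invertible
# region, so for a constructed model `mod` and any unconstrained vector p,
#     np.allclose(mod._invtransparams(mod._transparams(p)), p)
# should hold up to floating-point error.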
def _get_predict_start(self, start):
# do some defaults
if start is None:
if 'mle' in self.method:
start = 0
else:
start = self.k_ar
if 'mle' not in self.method:
if start < self.k_ar:
raise ValueError("Start must be >= k_ar")
return super(ARMA, self)._get_predict_start(start)
def geterrors(self, params):
"""
Get the errors of the ARMA process.
Parameters
----------
params : array-like
The fitted ARMA parameters
order : array-like
3 item iterable, with the number of AR, MA, and exogenous
parameters, including the trend
"""
#start = self._get_predict_start(start) # will be an index of a date
#end, out_of_sample = self._get_predict_end(end)
params = np.asarray(params)
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
if 'mle' in self.method: # use KalmanFilter to get errors
(y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params, self)
errors = KalmanFilter.geterrors(y,k,k_ar,k_ma, k_lags, nobs,
Z_mat, m, R_mat, T_mat, paramsdtype)
if isinstance(errors, tuple):
errors = errors[0] # non-cython version returns a tuple
else: # use scipy.signal.lfilter
y = self.endog.copy()
k = self.k_exog + self.k_trend
if k > 0:
y -= dot(self.exog, params[:k])
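# Truncated here in this dump. The CSS branch conventionally continues by unpacking the
# AR/MA coefficients and filtering y to obtain the one-step prediction errors, roughly
# (a sketch of the standard recursion, not the exact statsmodels code):
#     _, _, arparams, maparams = _unpack_params(params, (k_ar, k_ma),
#                                               self.k_trend, self.k_exog)
#     b, a = np.r_[1, -arparams], np.r_[1, maparams]
#     errors = lfilter(b, a, y)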
# encoding: utf-8
'''
Created on Nov 26, 2015
@author: tal
Based in part on:
Learn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py
See https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm
'''
from __future__ import print_function, division, unicode_literals
import os
import errno
from collections import Counter
from hashlib import sha256
import re
import json
import itertools
import logging
import requests
import numpy as np
from numpy.random import choice as random_choice, randint as random_randint, shuffle as random_shuffle, seed as random_seed, rand
from numpy import zeros as np_zeros # pylint:disable=no-name-in-module
from keras.models import Sequential, load_model
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout, recurrent
from keras.callbacks import Callback
# Set a logger for the module
LOGGER = logging.getLogger(__name__) # Every log will use the module name
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.DEBUG)
random_seed(123) # Reproducibility
class Configuration(object):
"""Dump stuff here"""
CONFIG = Configuration()
#pylint:disable=attribute-defined-outside-init
# Parameters for the model:
CONFIG.input_layers = 2
CONFIG.output_layers = 2
CONFIG.amount_of_dropout = 0.2
CONFIG.hidden_size = 500
CONFIG.initialization = "he_normal" # : Gaussian initialization scaled by fan-in (He et al., 2014)
CONFIG.number_of_chars = 100
CONFIG.max_input_len = 60
CONFIG.inverted = True
# parameters for the training:
CONFIG.batch_size = 100 # As the model changes in size, play with the batch size to best fit the process in memory
CONFIG.epochs = 500 # due to mini-epochs.
CONFIG.steps_per_epoch = 1000 # This is a mini-epoch. Using News 2013 an epoch would need to be ~60K.
CONFIG.validation_steps = 10
CONFIG.number_of_iterations = 10
#pylint:enable=attribute-defined-outside-init
# DIGEST = sha256(json.dumps(CONFIG.__dict__, sort_keys=True)).hexdigest()
# Parameters for the dataset
MIN_INPUT_LEN = 5
AMOUNT_OF_NOISE = 0.2 / CONFIG.max_input_len
CHARS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .")
PADDING = "☕"
DATA_FILES_PATH = "~/Downloads/data"
DATA_FILES_FULL_PATH = os.path.expanduser(DATA_FILES_PATH)
DATA_FILES_URL = "http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2013.en.shuffled.gz"
NEWS_FILE_NAME_COMPRESSED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.shuffled.gz") # 1.1 GB
NEWS_FILE_NAME_ENGLISH = "news.2013.en.shuffled"
NEWS_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, NEWS_FILE_NAME_ENGLISH)
NEWS_FILE_NAME_CLEAN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.clean")
NEWS_FILE_NAME_FILTERED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.filtered")
NEWS_FILE_NAME_SPLIT = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.split")
NEWS_FILE_NAME_TRAIN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.train")
NEWS_FILE_NAME_VALIDATE = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.validate")
CHAR_FREQUENCY_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "char_frequency.json")
SAVED_MODEL_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "keras_spell_e{}.h5") # an HDF5 file
# Some cleanup:
NORMALIZE_WHITESPACE_REGEX = re.compile(r'[^\S\n]+', re.UNICODE) # match all whitespace except newlines
RE_DASH_FILTER = re.compile(r'[\-\˗\֊\‐\‑\‒\–\—\⁻\₋\−\﹣\-]', re.UNICODE)
RE_APOSTROPHE_FILTER = re.compile(r"'|[ʼ՚'‘’‛❛❜ߴߵ`‵´ˊˋ{}{}{}{}{}{}{}{}{}]".format(chr(768), chr(769), chr(832),
                                                                                  chr(833), chr(2387), chr(5151),
                                                                                  chr(5152), chr(65344), chr(8242)),
                                  re.UNICODE)
RE_LEFT_PARENTH_FILTER = re.compile(r'[\(\[\{\⁽\₍\❨\❪\﹙\(]', re.UNICODE)
RE_RIGHT_PARENTH_FILTER = re.compile(r'[\)\]\}\⁾\₎\❩\❫\﹚\)]', re.UNICODE)
ALLOWED_CURRENCIES = """¥£₪$€฿₨"""
ALLOWED_PUNCTUATION = """-!?/;"'%&<>.()[]{}@#:,|=*"""
RE_BASIC_CLEANER = re.compile(r'[^\w\s{}{}]'.format(re.escape(ALLOWED_CURRENCIES), re.escape(ALLOWED_PUNCTUATION)), re.UNICODE)
# pylint:disable=invalid-name
def download_the_news_data():
"""Download the news data"""
LOGGER.info("Downloading")
try:
os.makedirs(os.path.dirname(NEWS_FILE_NAME_COMPRESSED))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
with open(NEWS_FILE_NAME_COMPRESSED, "wb") as output_file:
response = requests.get(DATA_FILES_URL, stream=True)
total_length = response.headers.get('content-length')
downloaded = percentage = 0
print("»"*100)
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
downloaded += len(data)
output_file.write(data)
new_percentage = 100 * downloaded // total_length
if new_percentage > percentage:
print("☑", end="")
percentage = new_percentage
print()
def uncompress_data():
"""Uncompress the data files"""
import gzip
with gzip.open(NEWS_FILE_NAME_COMPRESSED, 'rb') as compressed_file:
with open(NEWS_FILE_NAME_COMPRESSED[:-3], 'wb') as outfile:
outfile.write(compressed_file.read())
def add_noise_to_string(a_string, amount_of_noise):
"""Add some artificial spelling mistakes to the string"""
if rand() < amount_of_noise * len(a_string):
# Replace a character with a random character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position + 1:]
if rand() < amount_of_noise * len(a_string):
# Delete a character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + a_string[random_char_position + 1:]
if len(a_string) < CONFIG.max_input_len and rand() < amount_of_noise * len(a_string):
# Add a random character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position:]
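# Truncated here in this dump. The published DeepSpell script continues, from memory
# (treat this as an assumption), with a transposition branch before returning, roughly:
#     if rand() < amount_of_noise * len(a_string):
#         # Transpose 2 characters
#         pos = random_randint(len(a_string) - 1)
#         a_string = (a_string[:pos] + a_string[pos + 1] + a_string[pos]
#                     + a_string[pos + 2:])
#     return a_string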
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from copy import copy, deepcopy
from pathlib import Path
import neurom as nm
import numpy as np
import morphio
from neurom.core.morphology import Morphology, graft_morphology, iter_segments
from numpy.testing import assert_array_equal
SWC_PATH = Path(__file__).parent.parent / 'data/swc/'
def test_simple():
nm.load_morphology(str(SWC_PATH / 'simple.swc'))
def test_load_morphology_pathlib():
nm.load_morphology(SWC_PATH / 'simple.swc')
def test_load_morphology_from_other_morphologies():
filename = SWC_PATH / 'simple.swc'
expected_points = [[ 0., 0., 0., 1.],
[ 0., 5., 0., 1.],
[ 0., 5., 0., 1.],
[-5., 5., 0., 0.],
[ 0., 5., 0., 1.],
[ 6., 5., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., -4., 0., 1.],
[ 0., -4., 0., 1.],
[ 6., -4., 0., 0.],
[ 0., -4., 0., 1.],
[-5., -4., 0., 0.]]
assert_array_equal(nm.load_morphology(nm.load_morphology(filename)).points,
expected_points)
assert_array_equal(nm.load_morphology(Morphology(filename)).points,
expected_points)
assert_array_equal(nm.load_morphology(morphio.Morphology(filename)).points,
expected_points)
def test_for_morphio():
Morphology(morphio.mut.Morphology())
morphio_m = morphio.mut.Morphology()
morphio_m.soma.points = [[0,0,0], [1,1,1], [2,2,2]]
morphio_m.soma.diameters = [1, 1, 1]
neurom_m = Morphology(morphio_m)
assert_array_equal(neurom_m.soma.points,
[[0., 0., 0., 0.5],
[1., 1., 1., 0.5],
[2., 2., 2., 0.5]])
neurom_m.soma.points = [[1, 1, 1, 1],
[2, 2, 2, 2]]
assert_array_equal(neurom_m.soma.points,
[[1, 1, 1, 1],
[2, 2, 2, 2]])
def _check_cloned_morphology(m, m2):
# check if two morphs are identical
# soma
assert isinstance(m2.soma, type(m.soma))
assert m.soma.radius == m2.soma.radius
for v1, v2 in zip(m.soma.iter(), m2.soma.iter()):
assert np.allclose(v1, v2)
# ________
# /
# \ /
# \ /
# \/
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD
# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040
sns.set(style='darkgrid')
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)
# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi],
labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()
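# Added illustration (not in the original figure script): a bare-bones estimate of the
# local mean shown above, obtained by cubic-splining the extrema directly. Edge effects
# are ignored here; handling them properly is what the EMD edge_effect options are for.
_max_idx = pseudo_utils.max_bool_func_1st_order_fd()
_min_idx = pseudo_utils.min_bool_func_1st_order_fd()
_upper_env = CubicSpline(pseudo_alg_time[_max_idx], pseudo_alg_time_series[_max_idx])(pseudo_alg_time)
_lower_env = CubicSpline(pseudo_alg_time[_min_idx], pseudo_alg_time_series[_min_idx])(pseudo_alg_time)
_local_mean_estimate = 0.5 * (_upper_env + _lower_env)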
knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)
# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots_uniform)):
axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()
# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots)):
axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots[i])):
axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()
# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1)
max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1)
min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1)
min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101)
dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101)
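# slope-based edge effect illustrated in this figure: s1 is the slope of the
# line through the last maximum and the second-to-last minimum, s2 the slope
# through the last maximum and the last minimum; these slopes are used to
# extrapolate an extra maximum and minimum beyond the right edge of the signal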
s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1
max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1)
max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101)
dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101)
dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101)
s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2
min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1)
min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101)
dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time)
dash_4 = np.linspace(slope_based_maximum, slope_based_minimum)
maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash)
maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash)
maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash)
maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101)
maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time)
minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash)
minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash)
minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash)
minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101)
minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \
time_series[time == minima_x[-1]]
improved_slope_based_maximum_time = time[-1]
improved_slope_based_maximum = time_series[-1]
improved_slope_based_minimum_time = slope_based_minimum_time
improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time -
improved_slope_based_maximum_time)
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
-0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
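# characteristic-wave edge effects illustrated in this figure: A1, A2 are half
# the peak-to-trough amplitudes and P1, P2 twice the maximum-to-minimum
# spacings of the last two oscillations; the Huang wave rescales the end of the
# signal by A1/A2 and P1/P2, while the Coughlin wave is a pure cosine of
# amplitude A1 and period P1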
A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2
A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2
P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2])
P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1])
Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1]
Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1]
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
Average_max = (maxima_y[-2] + maxima_y[-1]) / 2
Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
Average_min = (minima_y[-2] + minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave)
Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd()
Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave)
Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd()
Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd()
Huang_max_time = Huang_time[Huang_max_bool]
Huang_max = Huang_wave[Huang_max_bool]
Huang_min_time = Huang_time[Huang_min_bool]
Huang_min = Huang_wave[Huang_min_bool]
Coughlin_max_time = Coughlin_time[Coughlin_max_bool]
Coughlin_max = Coughlin_wave[Coughlin_max_bool]
Coughlin_min_time = Coughlin_time[Coughlin_min_bool]
Coughlin_min = Coughlin_wave[Coughlin_min_bool]
max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101)
max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time)
min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101)
min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
min_2_x = minima_y[-2] * np.ones_like(min_2_x_time)
dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101)
dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x)
max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y)
min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
"""
Created on Tue Nov 06 09:36:44 2018
@author: c.massari
"""
import numpy as np
from scipy.stats import norm
def ssi(df_SM, acc_per=1, df_var='sm'):
# Group data by desired accumulation period and interpolate
month_values = df_SM[df_var].resample('M').mean()
month_values = month_values.interpolate()
accum_period = month_values.rolling(acc_per).mean()
SSI = accum_period.copy()
    mesi = np.arange(1, 13, 1)
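    # The loop below is a minimal sketch of the usual SSI construction and is
    # assumed, not taken verbatim from the original source: per calendar month,
    # the empirical CDF of the accumulated series is mapped onto
    # standard-normal quantiles with norm.ppf.
    for jj in mesi:
        # accumulated values falling in calendar month jj
        dfM = accum_period[accum_period.index.month == jj].dropna()
        n = len(dfM)
        if n == 0:
            continue
        vals = dfM.values
        # empirical (Gringorten) plotting positions of each value
        ranks = np.array([np.sum(vals <= v) for v in vals])
        probs = (ranks - 0.44) / (n + 0.12)
        # standardized index: standard-normal quantile of the empirical CDF
        SSI.loc[dfM.index] = norm.ppf(probs)
    return SSI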
'''
python functions to do various useful date processing/manipulation
'''
import numpy as np
from scipy.special import erf
import fitsio
import glob
import os
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
from desitarget.io import read_targets_in_tiles
from desitarget.sv3 import sv3_targetmask
from LSS.Cosmo import distance
def tile2rosette(tile):
if tile < 433:
return (tile-1)//27
else:
if tile >= 433 and tile < 436:
return 13
if tile >= 436 and tile < 439:
return 14
if tile >= 439 and tile < 442:
return 15
if tile >= 442 and tile <=480:
return (tile-442)//3
if tile > 480:
return tile//30
return 999999 #shouldn't be any more?
def calc_rosr(rosn,ra,dec):
#given rosetter number and ra,dec, calculate distance from center
roscen = {0:(150.100,2.182),1:(179.6,0),2:(183.1,0),3:(189.9,61.8),4:(194.75,28.2)\
,5:(210.0,5.0),6:(215.5,52.5),7:(217.8,34.4),8:(216.3,-0.6),9:(219.8,-0.6)\
,10:(218.05,2.43),11:(242.75,54.98),12:(241.05,43.45),13:(245.88,43.45),14:(252.5,34.5)\
,15:(269.73,66.02),16:(194.75,24.7),17:(212.8,-0.6),18:(269.73,62.52),19:(236.1,43.45)}
ra = ra*np.pi/180.
dec = dec*np.pi/180.
rac,decc = roscen[rosn]
rac = rac*np.pi/180.
decc = decc*np.pi/180.
cd = np.sin(dec)*np.sin(decc)+np.cos(dec)*np.cos(decc)*np.cos(rac-ra)
ad = np.arccos(cd)*180./np.pi
if ad > 2.5:
print(rosn,ra,dec,rac,decc)
return ad
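# Minimal usage sketch for the two helpers above (the tile id and coordinates
# are illustrative values only, not taken from any catalog): map a tile to its
# rosette, then measure the angular distance, in degrees, of a position from
# that rosette centre.
def _example_rosette_distance(tile=1, ra=150.5, dec=2.0):
    rosn = tile2rosette(tile)
    return rosn, calc_rosr(rosn, ra, dec)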
def combtile_spec(tiles,outf='',rel='daily'):
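    '''
    Stack the per-tile spectroscopic tables returned by combspecdata for every
    tile in `tiles` into a single table written to `outf`; tiles already
    present in an existing output file are skipped, and rows with
    TARGETID <= 0 are dropped.
    '''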
s = 0
n = 0
if os.path.isfile(outf):
specd = Table.read(outf)
s = 1
tdone = np.unique(specd['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile,zdate in zip(tiles[tmask]['TILEID'],tiles[tmask]['LASTNIGHT']):
zdate = str(zdate)
tspec = combspecdata(tile,zdate,rel=rel)
tspec['TILEID'] = tile
if s == 0:
specd = tspec
s = 1
else:
specd = vstack([specd,tspec],metadata_conflicts='silent')
specd.sort('TARGETID')
kp = (specd['TARGETID'] > 0)
specd = specd[kp]
n += 1
print(tile,n,len(tiles[tmask]),len(specd))
specd.write(outf,format='fits', overwrite=True)
def combspecdata(tile,zdate,specroot='/global/cfs/cdirs/desi/spectro/redux/',rel='daily' ):
#put data from different spectrographs together, one table for fibermap, other for z
coaddir=specroot+rel+'/tiles/cumulative/'
specs = []
#find out which spectrograph have data
for si in range(0,10):
try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
fitsio.read(ff)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
print('spectrographs with data:')
print(specs)
if len(specs) == 0:
return None
tspec = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tn = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tspec = vstack([tspec,tn],metadata_conflicts='silent')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
#tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBER','FIBERSTATUS','PRIORITY','FA_TARGET','FA_TYPE',\
#'OBJTYPE','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','NIGHT','EXPID','MJD','SV3_DESI_TARGET','SV3_BGS_TARGET'])
tspec = join(tspec,tf,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
tspec = join(tspec,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
print(len(tspec),len(tf))
#tspec['LOCATION'] = tf['LOCATION']
#tspec['FIBERSTATUS'] = tf['FIBERSTATUS']
#tspec['PRIORITY'] = tf['PRIORITY']
return tspec
def combfibmap(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrograph have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
return tf
def combfibmap_and_scores(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrograph have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
tf = join(tf,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
return tf
def goodlocdict(tf):
'''
Make a dictionary to map between location and priority
tf should come from combspecdata above
'''
wloc = tf['FIBERSTATUS'] == 0
print(str(len(tf[wloc])) + ' locations with FIBERSTATUS 0')
goodloc = tf[wloc]['LOCATION']
pdict = dict(zip(tf['LOCATION'], tf['PRIORITY'])) #to be used later for randoms
return pdict,goodloc
def cutphotmask(aa,bits):
print(str(len(aa)) +' before imaging veto' )
keep = (aa['NOBS_G']>0) & (aa['NOBS_R']>0) & (aa['NOBS_Z']>0)
for biti in bits:
keep &= ((aa['MASKBITS'] & 2**biti)==0)
aa = aa[keep]
print(str(len(aa)) +' after imaging veto' )
return aa
def combtiles_wdup(tiles,mdir='',fout='',tarcol=['RA','DEC','TARGETID','SV3_DESI_TARGET','SV3_BGS_TARGET','SV3_MWS_TARGET','SUBPRIORITY','PRIORITY_INIT','TARGET_STATE','TIMESTAMP','ZWARN','PRIORITY']):
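    '''
    For each tile, read the MTL target info (as of the fiberassign MTLTIME)
    and join it to the POTENTIAL_ASSIGNMENTS extension of the fiberassign
    file, keeping duplicates across tiles; the results are stacked and written
    to `fout`, skipping tiles already present there.
    '''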
s = 0
n = 0
if os.path.isfile(fout):
tarsn = Table.read(fout)
s = 1
tdone = np.unique(tarsn['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ts = str(tile).zfill(6)
faf = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz'
fht = fitsio.read_header(faf)
wt = tiles['TILEID'] == tile
#tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'])
tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'],columns=tarcol)
#tars.keep_columns(tarcols)
#tars = tars[[b for b in tarcol]]
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
tars = join(tars,tt,keys=['TARGETID'])
tars['TILEID'] = tile
tars['ZWARN'].name = 'ZWARN_MTL'
if s == 0:
tarsn = tars
s = 1
else:
tarsn = vstack([tarsn,tars],metadata_conflicts='silent')
tarsn.sort('TARGETID')
n += 1
print(tile,n,len(tiles[tmask]),len(tarsn))
tarsn.write(fout,format='fits', overwrite=True)
def gettarinfo_type(faf,tars,goodloc,pdict,tp='SV3_DESI_TARGET'):
#get target info
#in current files on SVN, TARGETS has all of the necessary info on potential assignments
#no more, so commented out
#tt = Table.read(faf,hdu='TARGETS')
#tt.keep_columns(['TARGETID','FA_TARGET','FA_TYPE','PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
#if len(tt) != len(tfa):
# print('!!!mismatch between targets and potential assignments, aborting!!!')
# return None
#tt = join(tt,tfa,keys=['TARGETID'])
wgt = (np.isin(tt['LOCATION'],goodloc))
print(str(len(np.unique(tt[wgt]['LOCATION']))) + ' good locations')
print('comparison of number targets, number of targets with good locations')
print(len(tt),len(tt[wgt]))
tt = tt[wgt]
tt = join(tt,tars,keys=['TARGETID'],table_names = ['_AVAIL', ''], uniq_col_name='{col_name}{table_name}')
#Mark targets that actually got assigned fibers
tfall = Table.read(faf,hdu='FIBERASSIGN')
tfall.keep_columns(['TARGETID','LOCATION','PRIORITY'])
tt = join(tt,tfall,keys=['TARGETID'],join_type='left',table_names = ['', '_ASSIGNED'], uniq_col_name='{col_name}{table_name}')
wal = tt['LOCATION_ASSIGNED']*0 == 0
tt['LOCATION'][wal] = tt['LOCATION_ASSIGNED'][wal]
tt['LOCATION_AVAIL'][wal] = tt['LOCATION_ASSIGNED'][wal]
#print('differences between assigned locations')
#print(np.unique(tt['LOCATION_AVAIL'][wal]-tt['LOCATION_ASSIGNED'][wal]))
#print(tt.columns)
tt = unique(tt,keys=['TARGETID']) #cut to unique target ids
#print(tarf)
#tars = Table.read(tarf)
#tars.remove_columns(['Z','ZWARN'])#,'PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
#we want to get these from the zbest file that is specific to the tile and thus when it was observed
#tfa = unique(tfa[wgt],keys=['TARGETID'])
#wtype = ((tt[tp] & 2**tarbit) > 0) #don't cut by type here any more
#tt = tt[wtype]
#tfa = join(tfa,tt,keys=['TARGETID'])
#tft = join(tft,tt,keys=['TARGETID'])
#print(str(len(tfa)) +' unique targets with good locations and at '+str(len(np.unique(tfa['LOCATION'])))+' unique locations and '+str(len(tft))+ ' total unique targets at '+str(len(np.unique(tft['LOCATION']))) +' unique locations ')
#wgl = np.isin(tfa['LOCATION_ASSIGNED'],goodloc)
#wtype = ((tfa[tp] & 2**tarbit) > 0)
#wtfa = wgl & wtype
#print('number of assigned fibers at good locations '+str(len(tfa[wtfa])))
wal = tt['LOCATION_ASSIGNED']*0 == 0
print('number of assigned fibers '+str(len(tt[wal])))
print('number of unique target id '+str(len(np.unique(tt[wal]['TARGETID']))))
print('max priority of assigned '+str(np.max(tt[wal]['PRIORITY_ASSIGNED'])))
#tt[wal]['LOCATION'] = tt[wal]['LOCATION_ASSIGNED']
#tt[wal]['LOCATION_AVAIL'] = tt[wal]['LOCATION_ASSIGNED']
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#print('are location_avail and location_assigned the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION_ASSIGNED'], tt[wal]['LOCATION_AVAIL']))
tt['LOCATION_ASSIGNED'] = np.zeros(len(tt),dtype=int)
tt['LOCATION_ASSIGNED'][wal] = 1
wal = tt['LOCATION_ASSIGNED'] == 1
print('number of assigned fibers '+str(len(tt[wal]))+' (check to match agrees with above)')
wal = tt['LOCATION']*0 == 0
print('number of locations from z file '+str(len(tt[wal]))+' (check to match agrees with above)')
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#tt['PRIORITY_ASSIGNED'] = np.vectorize(pdict.__getitem__)(tt['LOCATION'])
return tt
def find_znotposs(dz):
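    '''
    Return the list of TILELOCID values to veto: locations at which no target
    of this class was ever observed and which host at least one target that
    was never observed on any tile, i.e. locations where assignment was not
    possible because of priorities.
    '''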
dz.sort('TARGETID')
tidnoz = []
tids = np.unique(dz['TARGETID'])
ti = 0
i = 0
print('finding targetids that were not observed')
while i < len(dz):
za = 0
while dz[i]['TARGETID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tidnoz.append(tids[ti])
if ti%30000 == 0:
print(ti)
ti += 1
selnoz = np.isin(dz['TARGETID'],tidnoz)
tidsb = np.unique(dz[selnoz]['TILELOCID'])
#dz = dz[selnoz]
dz.sort('TILELOCID')
tids = np.unique(dz['TILELOCID'])
print('number of targetids with no obs '+str(len(tidnoz)))
tlidnoz = []
lznposs = []
ti = 0
i = 0
while i < len(dz):
za = 0
while dz[i]['TILELOCID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tlidnoz.append(tids[ti])
#if np.isin(tids[ti],tidsb):
# lznposs.append(tids[ti])
if ti%30000 == 0:
print(ti,len(tids))
ti += 1
#the ones to veto are now the join of the two
wtbtlid = np.isin(tlidnoz,tidsb)
tlidnoz = np.array(tlidnoz)
lznposs = tlidnoz[wtbtlid]
print('number of locations where assignment was not possible because of priorities '+str(len(lznposs)))
return lznposs
def count_tiles_better(fs,dr,pd,rann=0,specrel='daily',fibcol='COADD_FIBERSTATUS'):
'''
from files with duplicates that have already been sorted by targetid, quickly go
through and get the multi-tile information
dr is either 'dat' or 'ran'
returns file with TARGETID,NTILE,TILES,TILELOCIDS
'''
#fs = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_specwdup_Alltiles.fits')
#wf = fs['FIBERSTATUS'] == 0
wf = fs[fibcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
if dr == 'dat':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_tarspecwdup_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/datcomb_'+pd+'ntileinfo.fits'
if dr == 'ran':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/rancomb_'+str(rann)+pd+'wdupspec_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/random'+str(rann)+'/rancomb_'+pd+'ntileinfo.fits'
wg = np.isin(fj['TILELOCID'],gtl)
fjg = fj[wg]
tids = np.unique(fjg['TARGETID'])
nloc = []#np.zeros(len(np.unique(f['TARGETID'])))
nt = []
tl = []
tli = []
ti = 0
i = 0
while i < len(fjg):
tls = []
tlis = []
nli = 0
while fjg[i]['TARGETID'] == tids[ti]:
nli += 1
tls.append(fjg[i]['TILEID'])
tlis.append(fjg[i]['TILELOCID'])
i += 1
if i == len(fjg):
break
nloc.append(nli)
tlsu = np.unique(tls)
tlisu = np.unique(tlis)
nt.append(len(tlsu))
tl.append("-".join(tlsu.astype(str)))
tli.append("-".join(tlisu.astype(str)))
if ti%100000 == 0:
print(ti)
ti += 1
tc = Table()
tc['TARGETID'] = tids
tc['NTILE'] = nt
tc['TILES'] = tl
tc['TILELOCIDS'] = tli
return tc
def count_tiles(tiles,catdir,pd,ttp='ALL',imask=False):
'''
For list of tileids, simply track the tiles a target shows up as available in
pd is dark or bright
just output targetid and tiles, meant to be matched to other processing
don't worry about what was assigned, purpose is to just count tile overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun.keep_columns(['TARGETID','TILELOCID'])
print(len(fgun),len(np.unique(fgun['TARGETID'])))
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILES'] = aa
ai = np.chararray(len(fgun),unicode=True,itemsize=300)
tlids = np.copy(fgun['TILELOCID']).astype('<U300')
fgun['TILELOCIDS'] = tlids
if s == 0:
fgu = fgun
s =1
else:
fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu = unique(fgu,keys='TARGETID')#,keep='last')
#I think this works when the ordering is the same; things got messed up other places with sorts
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles),len(fgu))
cnt += 1
fu = fgu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#np.unique()#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print(np.unique(fu['TILES']))
fu.write(catdir+'Alltiles_'+pd+'_tilelocs.dat.fits',format='fits', overwrite=True)
def combtiles(tiles,catdir,tp,tmask,tc='SV3_DESI_TARGET',ttp='ALL',imask=False):
'''
For list of tileids, combine data generated per tile , taking care of overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
if tp != 'dark' and tp != 'bright':
wt = (fgun[tc] & tmask[tp]) > 0
fgun = fgun[wt]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun['TILELOCID_ASSIGNED'] = np.zeros(len(fgun))
wm = fgun['LOCATION_ASSIGNED'] == 1
fgun['TILELOCID_ASSIGNED'][wm] = fgun['TILELOCID'][wm]
nl,nla = countloc(fgun)
fgun['ZPOSS'] = np.zeros(len(fgun)).astype(int)
if tp != 'dark' and tp != 'bright':
#fgun['LOC_NOTBLOCK'] = np.zeros(len(fgun)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
was = ~np.isin(fgun['LOCATION_AVAIL'],locsna)
#fgun['LOC_NOTBLOCK'][was] = 1
wg = was
fgun['ZPOSS'][wg] = 1
#fgun.sort('ZPOSS')
#aa = np.chararray(len(fgun),unicode=True,itemsize=100)
#aa[:] = str(tile)
fgun['TILE'] = int(tile)
#fgun['TILES'] = aa
#tlids = np.copy(fgun['TILELOCID']).astype('<U300')
#fgun['TILELOCIDS'] = tlids
#print('sum of assigned,# of unique TILELOCID (should match)')
#print(np.sum(fgun['LOCATION_ASSIGNED'] == 1),len(np.unique(fgun['TILELOCID'])))
#ai = np.chararray(len(fgun),unicode=True,itemsize=300)
#
#
if s == 0:
fgu = fgun
s =1
else:
#fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
#wn = fgu['PRIORITY_ASSIGNED']*0 != 0
#wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
#fgu[wn]['PRIORITY_ASSIGNED'] = 0
#fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED'] #create this column so assigned always show up in order of highest priority
#wa = fgu['LOCATION_ASSIGNED'] == 1
#wa &= fgu['PRIORITY_ASSIGNED'] >= 2000 #this was put SV2 to ignore BGS repeats
#fa = fgu[wa]
#print(len(fa),len(np.unique(fa['TARGETID'])))
#fgu.sort('sort')
#fgu = unique(fgu,keys='TARGETID',keep='last')
#dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
#didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
#fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if tp != 'dark' and tp != 'bright':
# fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
# fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
#aa[:] = '-'+str(tile)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
#fgu['TILES'][didsc] = ms #add the tile info
#aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
#aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
#fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles))#,np.sum(fgu['LOCATION_ASSIGNED']),len(fgu),len(np.unique(fgu['TILELOCID'])),np.sum(fgu['ZPOSS']))#,np.unique(fgu['TILELOCIDS'])
cnt += 1
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#tlids = np.copy(fgu['TILELOCID']).astype('<U300')
#fgu['TILELOCIDS'] = tlids
tsnrcol = 'TSNR2_'+tp
if tp == 'ELG_HIP':
tsnrcol = 'TSNR2_ELG'
if tp == 'BGS_ANY':
tsnrcol = 'TSNR2_BGS'
wt = (fgu[tsnrcol] == 1e20) | (fgu[tsnrcol]*0 != 0)
print('number with bad tsnrcol is '+str(len(fgu[wt])))
fgu[tsnrcol][wt] = 0
wn = fgu['PRIORITY_ASSIGNED']*0 != 0
wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
    fgu['PRIORITY_ASSIGNED'][wn] = 0
fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED']*fgu[tsnrcol] #create this column so assigned always show up in order of highest priority
if tp != 'dark' and tp != 'bright':
#wa = fgu['LOCATION_ASSIGNED'] == 1
#print('ZPOSS for LOCATION_ASSIGNED = 1:')
#print(np.unique(fgu[wa]['ZPOSS']))
fgu['sort'] = fgu['sort']*fgu['ZPOSS']-fgu['ZPOSS']
wa = fgu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fgu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fgu['ZPOSS'] == 1
natloc = ~np.isin(fgu[wp]['TILELOCID'],loclz)
print('number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
fgu.sort('sort')
#fgu.sort('ZPOSS')
fu = unique(fgu,keys='TARGETID')#,keep='last')
tidsu = fu['TARGETID']#[wp][natloc]
tids = fgu['TARGETID']
if tp != 'dark' and tp != 'bright':
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
nalz = ~np.isin(fu['TILELOCID'],loclz)
natloc = wp & nalz#~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
tlocs = fgu['TILELOCID']
ntl = []
ch = 0
bl = 0
print(len(tidsu),len(natloc))
for ii in range(0,len(tidsu)):
#if wp[ii] & natloc[ii]:
if natloc[ii]:
bl += 1
tid = tidsu[ii]
wt = tids == tid
tls = tlocs[wt]
s = 0
for tl in tls:
if s == 0:
if np.isin(tl,loclz):
#wu = fu['TARGETID'] == tid
fu[ii]['TILELOCID'] = tl
#ntl.append(tl)
ch += 1
s = 1
if ii%10000 == 0:
print(ii,len(tidsu),ch,bl)
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
natloc = ~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique and reassignment, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
#print(len(np.unique(fgu['TARGETID'])),np.sum(fgu['LOCATION_ASSIGNED']))
# tiles = fgu['TILES']
# tilesu = fu['TILES']
# tlids = fgu['TILELOCIDS']
# tlidsu = fu['TILELOCIDS']
#
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# otl = tlidsu[ii]
# tt = tiles[wt]
# tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# for ti in tti:
# if ti != otl:
# tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
# fu['TILELOCIDS'] = tlidsu
#
# #wa = fu['LOCATION_ASSIGNED'] == 1
# #wa &= fu['PRIORITY_ASSIGNED'] >= 2000
print(np.sum(fu['LOCATION_ASSIGNED']))
#need to resort tile string
# fl = np.chararray(len(fu),unicode=True,itemsize=100)
# for ii in range(0,len(fu)):
# tl = fu['TILES'][ii]
# tls = tl.split('-')#.astype('int')
# tli = tls[0]
# if len(tls) > 1:
# #tls = tls.astype('int')
# tls.sort()
# tli = tls[0]
# for i in range(1,len(tls)):
# tli += '-'+tls[i]
# #else:
# # tli = tls
# #print(tli)
# fl[ii] = tli
#
# fu['TILES'] = fl
#print(np.unique(fu['TILES']))
# print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
#fu.write(catdir+tp+'Alltiles_'+pd+'_full.dat.fits',format='fits', overwrite=True)
fu.write(catdir+'/datcomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def countloc(aa):
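    '''
    Count, per LOCATION, the number of available targets (nl) and the number
    of those that were assigned (nla); returns the two arrays indexed by
    location id.
    '''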
locs = aa['LOCATION_AVAIL']
locsa = aa['LOCATION_ASSIGNED']
la = np.max(locs)+1
nl = np.zeros(la)
nla = np.zeros(la)
for i in range(0,len(aa)):
nl[locs[i]] += 1
nla[locs[i]] += locsa[i]
return nl,nla
def combran_wdup(tiles,rann,randir,tp,sv3dir,specf,keepcols=[]):
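    '''
    For each tile, join the FAVAIL (potential assignments) extension of the
    random fiberassign file to the corresponding random targets, stack the
    results (keeping duplicates across tiles) and write them out; the stacked
    table is then joined to the per-fiber spectroscopic info in `specf` and
    written to a separate *wdupspec file.
    '''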
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
outf = randir+str(rann)+'/rancomb_'+tp+'wdup_Alltiles.fits'
if os.path.isfile(outf):
fgu = Table.read(outf)
#tarsn.keep_columns(['RA','DEC','TARGETID''LOCATION','FIBER','TILEID'])
s = 1
tdone = np.unique(fgu['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fa = Table.read(ffa,hdu='FAVAIL')
ffna = Table.read(ffna)
fgun = join(fa,ffna,keys=['TARGETID'])
#fgun.remove_columns(delcols)
td += 1
fgun['TILEID'] = int(tile)
fgun.keep_columns(['RA','DEC','TARGETID','LOCATION','FIBER','TILEID'])
if s == 0:
fgu = fgun
s = 1
else:
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu.sort('TARGETID')
print(tile,td, len(tiles), len(fgun),len(fgu))
else:
print('did not find '+ffa)
if len(tiles[tmask]['TILEID']) > 0:
fgu.write(outf,format='fits', overwrite=True)
#specf = Table.read(sv3dir+'datcomb_'+tp+'_specwdup_Alltiles.fits')
specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION']
specf.keep_columns(keepcols)
#specf.keep_columns(['ZWARN','LOCATION','TILEID','TILELOCID','FIBERSTATUS','FIBERASSIGN_X','FIBERASSIGN_Y','PRIORITY','DELTA_X','DELTA_Y','EXPTIME','PSF_TO_FIBER_SPECFLUX','TSNR2_ELG_B','TSNR2_LYA_B','TSNR2_BGS_B','TSNR2_QSO_B','TSNR2_LRG_B','TSNR2_ELG_R','TSNR2_LYA_R','TSNR2_BGS_R','TSNR2_QSO_R','TSNR2_LRG_R','TSNR2_ELG_Z','TSNR2_LYA_Z','TSNR2_BGS_Z','TSNR2_QSO_Z','TSNR2_LRG_Z','TSNR2_ELG','TSNR2_LYA','TSNR2_BGS','TSNR2_QSO','TSNR2_LRG'])
fgu = join(fgu,specf,keys=['LOCATION','TILEID','FIBER'])
fgu.sort('TARGETID')
outf = sv3dir+'/rancomb_'+str(rann)+tp+'wdupspec_Alltiles.fits'
print(outf)
fgu.write(outf,format='fits', overwrite=True)
def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False):
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
for tile,zdate in zip(tiles['TILEID'],tiles['ZDATE']):
tspec = combfibmap_and_scores(tile,zdate)
pdict,gloc = goodlocdict(tspec)
tspec.keep_columns(['LOCATION','FIBERSTATUS','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','TSNR2_ELG','TSNR2_LRG','TSNR2_QSO','TSNR2_BGS'])
dt = ddir+'ALL'+str(tile)+'_full.dat.fits'
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fd = Table.read(dt)
# print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
#gloc = np.unique(fd['LOCATION_AVAIL']) #bad locations already removed from this files
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd),len(gloc))
if tp != 'dark' and tp != 'bright':
wt = (fd[tc] & tmask[tp]) > 0
fd = fd[wt]
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
nl,nla = countloc(fd)
#commenting out zfailure stuff, not vetoing randoms based on that
#wzf = fd['ZWARN'] != 0
#wzf &= fd['ZWARN'] != 999999
#wzf &= fd['ZWARN']*0 == 0
#loc_fail = np.unique(fd[wzf]['LOCATION'])
#print('number of zfail locations',len(loc_fail))
#
#print(np.sum(fd['LOCATION_ASSIGNED']),len(np.unique(fd['LOCATION_AVAIL'])),np.sum(nla),np.sum(nl))
#
#find the locations that were requested by type but not assigned
fa = Table.read(ffa,hdu='FAVAIL')
wg = np.isin(fa['LOCATION'],gloc)
fa = fa[wg]
fa = join(fa,tspec,keys=['LOCATION'],join_type='left')
#fa['FIBER_GOOD'] = np.zeros(len(fa)).astype(int)
#fa['FIBER_GOOD'][wg] = 1
#fa['Z_NOTBAD'] = np.zeros(len(fa)).astype(int)
#wnzf = ~np.isin(fa['LOCATION'],loc_fail)
#fa['Z_NOTBAD'][wnzf] = 1
fa['ZPOSS'] = np.zeros(len(fa)).astype(int)
#fa['ZPOSSNOTBAD'] = np.zeros(len(fa)).astype(int)
if tp != 'dark' and tp != 'bright':
#fa['LOC_NOTBLOCK'] = np.zeros(len(fa)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
ntloc = len(gloc)-len(locsna)#-len(loc_fail)
print('total number of assignable positions',ntloc)
was = ~np.isin(fa['LOCATION'],locsna)
#fa['LOC_NOTBLOCK'][was] = 1
#wg &= was
fa['ZPOSS'][was] = 1
#fa['ZPOSSNOTBAD'][was&wnzf] = 1
#if maskzfail:
# wg &= wnzf
#wzt = wpr & ~wzf & ~wna
#fg = fa[wg]
#print(len(fa),np.sum(fa['ZPOSSNOTBAD']))
#fg = fa
#print('before,after vetoing locations:')
#print(len(fa),len(fg))
#if tp != 'dark' and tp != 'bright':
# fa.sort('ZPOSS')
#else:
# fg.sort('FIBER_GOOD')
fgun = unique(fa,keys=['TARGETID'],keep='last')
ffna = Table.read(ffna)
fgun = join(fgun,ffna,keys=['TARGETID'])
fgun.remove_columns(delcols)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
print(tile,td, len(tiles), str(len(fgun))+' unique new randoms')
td += 1
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILE'] = int(tile)
fgun['TILES'] = aa
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION']
if s == 0:
fgu = fgun
s = 1
else:
fv = vstack([fgu,fgun],metadata_conflicts='silent')
fgo = fgu.copy()
fgu = unique(fv,keys='TARGETID')#,keep='last')
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if this works, can save vetoing until the end
fgu['TSNR2_ELG'][didsc] = np.maximum(fgu['TSNR2_ELG'][didsc],fgun['TSNR2_ELG'][dids])
fgu['TSNR2_QSO'][didsc] = np.maximum(fgu['TSNR2_QSO'][didsc],fgun['TSNR2_QSO'][dids])
fgu['TSNR2_BGS'][didsc] = np.maximum(fgu['TSNR2_BGS'][didsc],fgun['TSNR2_BGS'][dids])
fgu['TSNR2_LRG'][didsc] = np.maximum(fgu['TSNR2_LRG'][didsc],fgun['TSNR2_LRG'][dids])
if tp != 'dark' and tp != 'bright':
#fgu['FIBER_GOOD'][didsc] = np.maximum(fgu['FIBER_GOOD'][didsc],fgun['FIBER_GOOD'][dids])
#fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
#fgu['Z_NOTBAD'][didsc] = np.maximum(fgu['Z_NOTBAD'][didsc],fgun['Z_NOTBAD'][dids])
fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#fgu['ZPOSSNOTBAD'][didsc] = np.maximum(fgu['ZPOSSNOTBAD'][didsc],fgun['ZPOSSNOTBAD'][dids])
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
print(str(len(fgu))+' unique total randoms')
else:
print('did not find '+ffa)
#fgu.sort('ZPOSS')
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#fu = unique(fgu,keys=['TARGETID'])#,keep='last')
fu = fgu
#fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
#return True
# tiles = fgu['TILES']
# tilesu = fu['TILES']
#tlids = fgu['TILELOCIDS']
#tlidsu = fu['TILELOCIDS']
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# #otl = tlidsu[ii]
# tt = tiles[wt]
# #tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# #for ti in tti:
# # if ti != otl:
# # tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
#fu['TILELOCIDS'] = tlidsu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
NT = np.zeros(len(fgu))
ros = np.zeros(len(fgu))
print('counting tiles and finding rosette')
for ii in range(0,len(fu['TILES'])): #not sure why, but this only works when using loop for Table.read but array option works for fitsio.read
NT[ii] = np.char.count(fu['TILES'][ii],'-')+1
ti = int(fu['TILES'][ii].split('-')[0])
ros[ii] = tile2rosette(ti)
fu['NTILE'] = NT
fu['rosette_number'] = ros
print(np.unique(fu['rosette_number'],return_counts=True))
fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def mkfullran(fs,indir,rann,imbits,outf,tp,pd,bit,desitarg='SV3_DESI_TARGET',tsnr= 'TSNR2_ELG',notqso='',qsobit=4,fbcol='COADD_FIBERSTATUS'):
'''
indir is directory with inputs
rann is the random file number (0-17)
imbits are the maskbits for the imaging veto mask
outf is the name (including full path) of the output file
tp is the target type
pd is the program, dark or bright
bit is the bit to use to select to the target type
randir doesn't get used anymore
desitarg is the column to use to select the target type
tsnr is the tsnr2 used for this sample
'''
#first, need to find locations to veto based on data
#the same is done in mkfulldat
#fs = fitsio.read(indir+'datcomb_'+pd+'_specwdup_Alltiles.fits')
wf = fs[fbcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
#gtl now contains the list of good locations
#we now want to load in the bigger data file with all the target info
#we use it to find the locations where observations of the given type were not possible and then mask them
zf = indir+'datcomb_'+pd+'_tarspecwdup_Alltiles.fits'
dz = Table.read(zf)
wtype = ((dz[desitarg] & bit) > 0)
if notqso == 'notqso':
wtype &= ((dz[desitarg] & qsobit) == 0)
wg = np.isin(dz['TILELOCID'],gtl)
dz = dz[wtype&wg]
print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
lznp = find_znotposs(dz)
#lznp will later be used to veto
#load in random file
zf = indir+'/rancomb_'+str(rann)+pd+'wdupspec_Alltiles.fits'
dz = Table.read(zf)
#load in tileloc info for this random file and join it
zfpd = indir+'/rancomb_'+str(rann)+pd+'_Alltilelocinfo.fits'
dzpd = Table.read(zfpd)
dz = join(dz,dzpd,keys=['TARGETID'])
print('length before cutting to good positions '+str(len(dz)))
#cut to good and possible locations
wk = ~np.isin(dz['TILELOCID'],lznp)
wk &= np.isin(dz['TILELOCID'],gtl)
dz = dz[wk]
print('length after cutting to good positions '+str(len(dz)))
#get all the additional columns desired from original random files through join
tarf = Table.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/random'+str(rann)+'/alltilesnofa.fits')
delcols = ['RA','DEC','DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT','NUMOBS_INIT','SCND_TARGET',\
'NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
tarf.remove_columns(delcols)
dz = join(dz,tarf,keys=['TARGETID'])
#apply imaging vetos
dz = cutphotmask(dz,imbits)
print('length after cutting to based on imaging veto mask '+str(len(dz)))
#sort by tsnr, like done for data, so that the highest tsnr are kept
dz.sort(tsnr)
dz = unique(dz,keys=['TARGETID'],keep='last')
print('length after cutting to unique TARGETID '+str(len(dz)))
dz['rosette_number'] = 0
dz['rosette_r'] = 0
for ii in range(0,len(dz)):
rosn = tile2rosette(dz[ii]['TILEID'])
rosd = calc_rosr(rosn,dz[ii]['RA'],dz[ii]['DEC']) #calculates distance in degrees from the rosette center
dz[ii]['rosette_number'] = rosn
dz[ii]['rosette_r'] = rosd
print(np.unique(dz['NTILE']))
dz.write(outf,format='fits', overwrite=True)
def mkfulldat(fs,zf,imbits,tdir,tp,bit,outf,ftiles,azf='',desitarg='SV3_DESI_TARGET',specver='daily',notqso='',qsobit=4,bitweightfile=None):
'''
zf is the name of the file containing all of the combined spec and target info compiled already
imbits is the list of imaging mask bits to mask out
tdir is the directory for the targets
tp is the target type
bit is the SV3_{type}_MASK bit to use for select the correct target type
outf is the full path + name for the output file
ftiles is the name of the file containing information on, e.g., how many tiles each target was available on
azf is the file name for OII flux info (relevant for ELGs only)
desitarg is the column to use for the target type cut (all use SV3_DESI_TARGET except BGS_BRIGHT)
specver is the version of the pipeline used for the redshift info; only 'daily' exists for now
'''
#from desitarget.mtl import inflate_ledger
if tp[:3] == 'BGS' or tp[:3] == 'MWS':
pd = 'bright'
tscol = 'TSNR2_BGS'
else:
pd = 'dark'
tscol = 'TSNR2_ELG'
#load in the appropriate dark/bright combined spec file and use to denote the tileid + location that had good observations:
#fs = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specver+'/datcomb_'+pd+'_specwdup_Alltiles.fits')
if specver == 'daily':
fbcol = 'FIBERSTATUS'
if specver == 'everest':
fbcol = 'COADD_FIBERSTATUS'
wf = fs[fbcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
#gtl now contains the list of 'good' tilelocid
#read in the big combined data file
dz = Table.read(zf)
#find the rows that satisfy the target type
wtype = ((dz[desitarg] & bit) > 0)
if notqso == 'notqso':
print('removing QSO targets')
wtype &= ((dz[desitarg] & qsobit) == 0)
#find the rows that are 'good' tilelocid
wg = np.isin(dz['TILELOCID'],gtl)
print(len(dz[wtype]))
print(len(dz[wg]))
#down-select to target type of interest and good tilelocid
dz = dz[wtype&wg]
print('length after selecting type and fiberstatus == 0 '+str(len(dz)))
print('length of unique targetid after selecting type and fiberstatus == 0 '+str(len(np.unique(dz['TARGETID']))))
#find targets that were never available at the same location as a target of the same type that got assigned to a good location
#those that were never available are assumed to have 0 probability of assignment so we want to veto this location
lznp = find_znotposs(dz)
wk = ~np.isin(dz['TILELOCID'],lznp)#dz['ZPOSS'] == 1
dz = dz[wk] #0 probability locations now vetoed
print('length after priority veto '+str(len(dz)))
print('joining to full imaging')
ftar = Table.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+pd+'_targets.fits')
ftar.keep_columns(['TARGETID','EBV','FLUX_G','FLUX_R','FLUX_Z','FLUX_IVAR_G','FLUX_IVAR_R','FLUX_IVAR_Z','MW_TRANSMISSION_G','MW_TRANSMISSION_R',\
'MW_TRANSMISSION_Z','FRACFLUX_G','FRACFLUX_R','FRACFLUX_Z','FRACMASKED_G','FRACMASKED_R','FRACMASKED_Z','FRACIN_G','FRACIN_R',\
'FRACIN_Z','NOBS_G','NOBS_R','NOBS_Z','PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GALDEPTH_G','GALDEPTH_R','GALDEPTH_Z','FLUX_W1',\
'FLUX_W2','FLUX_IVAR_W1','FLUX_IVAR_W2','MW_TRANSMISSION_W1','MW_TRANSMISSION_W2','ALLMASK_G','ALLMASK_R','ALLMASK_Z','FIBERFLUX_G',\
'FIBERFLUX_R','FIBERFLUX_Z','FIBERTOTFLUX_G','FIBERTOTFLUX_R','FIBERTOTFLUX_Z','WISEMASK_W1','WISEMASK_W2','MASKBITS',\
'RELEASE','BRICKID','BRICKNAME','BRICK_OBJID','MORPHTYPE','PHOTSYS'])
dz = join(dz,ftar,keys=['TARGETID'])
print('length after join to full targets (should be same) '+str(len(dz)))
#apply imaging veto mask
dz = cutphotmask(dz,imbits)
#load in file with information about where repeats occurred and join it
dtl = Table.read(ftiles)
dtl.keep_columns(['TARGETID','NTILE','TILES','TILELOCIDS'])
dz = join(dz,dtl,keys='TARGETID')
#find the rows where we have spectroscopic observations
wz = dz['ZWARN'] != 999999 #this is what the null column becomes
wz &= dz['ZWARN']*0 == 0 #just in case of nans
#mark them as having LOCATION_ASSIGNED
dz['LOCATION_ASSIGNED'] = np.zeros(len(dz)).astype('bool')
dz['LOCATION_ASSIGNED'][wz] = 1
#find the TILELOCID that were assigned and mark them as so
tlids = np.unique(dz['TILELOCID'][wz])
wtl = np.isin(dz['TILELOCID'],tlids)
dz['TILELOCID_ASSIGNED'] = 0
dz['TILELOCID_ASSIGNED'][wtl] = 1
print('number of unique targets at assigned tilelocid:')
print(len(np.unique(dz[wtl]['TARGETID'])))
#get OII flux info for ELGs
if tp == 'ELG' or tp == 'ELG_HIP':
if azf != '':
arz = fitsio.read(azf,columns=[fbcol,'TARGETID','LOCATION','TILEID','OII_FLUX','OII_FLUX_IVAR','SUBSET','DELTACHI2'])
st = []
for i in range(0,len(arz)):
st.append(arz['SUBSET'][i][:4])
st = np.array(st)
wg = arz[fbcol] == 0
wg &= st == "thru"
arz = arz[wg]
o2c = np.log10(arz['OII_FLUX'] * np.sqrt(arz['OII_FLUX_IVAR']))+0.2*np.log10(arz['DELTACHI2'])
w = (o2c*0) != 0
w |= arz['OII_FLUX'] < 0
o2c[w] = -20
#arz.keep_columns(['TARGETID','LOCATION','TILEID','o2c','OII_FLUX','OII_SIGMA'])#,'Z','ZWARN','TSNR2_ELG'])
arz = Table(arz)
arz['o2c'] = o2c
dz = join(dz,arz,keys=['TARGETID','LOCATION','TILEID'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['', '_OII'])
dz.remove_columns(['SUBSET','DELTACHI2_OII',fbcol+'_OII'])
print('check length after merge with OII strength file:' +str(len(dz)))
if tp[:3] == 'QSO':
if azf != '':
arz = Table.read(azf)
arz.keep_columns(['TARGETID','LOCATION','TILEID','Z','ZERR','Z_QN'])
print(arz.dtype.names)
#arz['TILE'].name = 'TILEID'
dz = join(dz,arz,keys=['TARGETID','TILEID','LOCATION'],join_type='left',uniq_col_name='{col_name}{table_name}',table_names=['','_QF'])
dz['Z'].name = 'Z_RR' #rename the original redrock redshifts
dz['Z_QF'].name = 'Z' #the redshifts from the quasar file should be used instead
#sort and then cut to unique targetid; sort prioritizes observed targets and then TSNR2
dz['sort'] = dz['LOCATION_ASSIGNED']*dz[tscol]+dz['TILELOCID_ASSIGNED']
dz.sort('sort')
dz = unique(dz,keys=['TARGETID'],keep='last')
if tp == 'ELG' or tp == 'ELG_HIP':
        print('number of masked OII rows (hopefully matches number not assigned) '+ str(np.sum(dz['o2c'].mask)))
if tp == 'QSO':
print('number of good z according to qso file '+str(len(dz)-np.sum(dz['Z'].mask)))
print('length after cutting to unique targetid '+str(len(dz)))
print('LOCATION_ASSIGNED numbers')
print(np.unique(dz['LOCATION_ASSIGNED'],return_counts=True))
print('TILELOCID_ASSIGNED numbers')
print(np.unique(dz['TILELOCID_ASSIGNED'],return_counts=True))
probl = np.zeros(len(dz))
#get completeness based on unique sets of tiles
compa = []
tll = []
ti = 0
    print('getting completeness')
#sorting by tiles makes things quicker with while statements below
dz.sort('TILES')
nts = len(np.unique(dz['TILES']))
tlsl = dz['TILES']
tlslu = np.unique(tlsl)
laa = dz['LOCATION_ASSIGNED']
i = 0
while i < len(dz):
tls = []
tlis = []
nli = 0
nai = 0
while tlsl[i] == tlslu[ti]:
nli += 1 #counting unique targetids within the given TILES value
nai += laa[i] #counting the number assigned
i += 1
if i == len(dz):
break
if ti%1000 == 0:
print('at tiles '+str(ti)+' of '+str(nts))
cp = nai/nli #completeness is number assigned over number total
compa.append(cp)
tll.append(tlslu[ti])
ti += 1
#turn the above into a dictionary and apply it
comp_dicta = dict(zip(tll, compa))
fcompa = []
for tl in dz['TILES']:
fcompa.append(comp_dicta[tl])
    dz['COMP_TILE'] = np.array(fcompa)
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: lcpp-dev
# language: python
# name: lcpp-dev
# ---
# %% [markdown]
# # Basic processing of Sentinel V mooring
# %%
import xarray as xr
import numpy as np
import utils
import matplotlib.pyplot as plt
import scipy.stats as stats
import utm
from scipy.ndimage import gaussian_filter
def mode(x, **kwargs):
mout = np.squeeze(stats.mode(x, axis=1)[0])
return mout
def interval_to_mid(intervals):
"""
Parameters
----------
intervals : 1D numpy array
An array of pandas Interval objects.
Returns
-------
mids : 1D numpy array
Midpoints of the intervals.
"""
return np.array([v.mid for v in intervals])
# %% [markdown]
# Load datasets and do some basic conversion of times and variables.
# %%
sV = xr.open_dataset("../proc/ABLE_sentinel_2018_enu.nc")
sV = sV.set_coords(["lon", "lat"])
sV["time"] = utils.POSIX_to_datetime(sV.time.values).astype(np.datetime64)
x, y, *_ = utm.from_latlon(sV.lat, sV.lon)
sV = sV.assign_coords({"x": x, "y": y})
virt = xr.open_dataset("../proc/ABLE_sentinel_RBRvirtuoso_2018.nc")
virt = virt.set_coords(["lon", "lat"])
virt["time"] = utils.POSIX_to_datetime(virt.time.values).astype(np.datetime64)
sbe = xr.open_dataset("../proc/ABLE_sentinel_SBE37_2018.nc")
sbe = sbe.set_coords(["lon", "lat"])
sbe["time"] = utils.POSIX_to_datetime(sbe.time.values).astype(np.datetime64)
# %% [markdown]
# Define some parameters and simple thresholds for processing.
# %%
pmin = 125 # Minimum pressure to keep
dpdtmax = 0.4e-9 # Maximum rate of change of pressure to keep
cut_ends = 2 # Number of points on either end to remove after applying other thresholds
dt = 10 # Bin size for time average [s]
# %% [markdown]
# Apply the thresholds to remove some data.
# %%
is_deep = sV.p > pmin
is_slow = np.fabs(sV.p.differentiate("time")) < dpdtmax
keep = is_deep & is_slow
sVp = sV.isel(time=keep).isel(time=slice(cut_ends, -cut_ends))
# %%
sVp.p.plot.line('.')
# %% [markdown]
# ## Old quality control
#
# Note [Marion's document](https://escholarship.org/content/qt6xd149s8/qt6xd149s8.pdf)
# %%
# # qc_err0 = 0.3
# # qc_err1 = 0.5
# qc_err = 0.15 # error velocity
# qc_q = 110 # correlation
# qc_uv = 2.0 # horizontal velocity
# qc_w = 1.5 # vertical velocity
# qc_a = 30 # echo intensity
# %%
# qc_u_bad = np.abs(sVp.u) > qc_uv
# qc_v_bad = np.abs(sVp.v) > qc_uv
# qc_w_bad = np.abs(sVp.w) > qc_w
# qc_vv_bad = np.abs(sVp.vv) > qc_w
# qc_err_bad = np.abs(sVp.err) > qc_err
# qc_q1_good = sVp.q1 > qc_q
# qc_q2_good = sVp.q2 > qc_q
# qc_q3_good = sVp.q3 > qc_q
# qc_q4_good = sVp.q4 > qc_q
# qc_q_bad = (qc_q1_good.astype(int) + qc_q2_good.astype(int) + qc_q3_good.astype(int) + qc_q4_good.astype(int)) <= 3
# %%
# uv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_u_bad.astype(int) + qc_v_bad.astype(int)) > 1
# w_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_w_bad.astype(int)) > 1
# vv_reject = (qc_q_bad.astype(int) + qc_err_bad.astype(int) + qc_vv_bad.astype(int)) > 1
# %%
# fig, axs = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 10))
# uv_reject.plot(ax=axs[0])
# w_reject.plot(ax=axs[1])
# vv_reject.plot(ax=axs[2])
# %% [markdown]
# Remove velocity using QC.
# %%
# sVqc = sVp.copy()
# u = sVqc.u.values
# u[uv_reject] = np.nan
# sVqc["u"] = (sVqc.u.dims, u, sVqc.u.attrs)
# v = sVqc.v.values
# v[uv_reject] = np.nan
# sVqc["v"] = (sVqc.v.dims, v, sVqc.v.attrs)
# w = sVqc.w.values
# w[w_reject] = np.nan
# sVqc["w"] = (sVqc.w.dims, w, sVqc.w.attrs)
# vv = sVqc.vv.values
# vv[vv_reject] = np.nan
# sVqc["vv"] = (sVqc.vv.dims, vv, sVqc.vv.attrs)
# %% [markdown]
# ## New cut off data above surface
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(sVp.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((sVp.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = sVp[var].where(sVp.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*sVp.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
sVp[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dsl.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(sVp.time, dmingood.min(axis=1), color="k")
# %%
good = dmingood.min(axis=1)
# Make a new dataset without surface
sVs = sVp.copy()
# Loop over the 2D datavars
mask = sVp.distance < xr.DataArray(good, dims={"time": sVp.time})
for var in sVp.data_vars:
if sVp[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
sVs[var] = sVp[var].where(mask)
# Remove distances where there is no good data
sVs = sVs.isel(distance=mask.any("time"))
# %% [markdown]
# ## New quality control
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = sVs.q1 + sVs.q2 + sVs.q3 + sVs.q4
qgood = qsum > qthresh
vqgood = sVs.vq.values > vqthresh
sVqc = sVs.copy()
egood = np.abs(sVs.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(sVs.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
sVqc[var] = sVs[var].where(ebgood)
sVqc["vv"] = sVs.vv.where(vebgood)
# %% [markdown]
# ## Time binning
# %% [markdown]
# Bin average data to reduce size and errors.
#
# First make bins.
# %%
# Time bin start and end to nearest minute. This will cut off some data.
tstart = (sVqc.time[0].values + np.timedelta64(30, 's')).astype('datetime64[m]')
tend = sVqc.time[-1].values.astype('datetime64[m]')
timebins = np.arange(tstart, tend, np.timedelta64(dt, 's'))
# %% [markdown]
# Group and take mean.
# %%
gb = sVqc.groupby_bins("time", timebins)
sVa = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sVa["time_bins"] = interval_to_mid(sVa.time_bins.values).astype("datetime64[s]")
sVa = sVa.rename({"time_bins": "time"})
# %% [markdown]
# Mean of heading should be performed using circular mean. (Technically, so should pitch and roll, but for small angles the noncircular mean is ok)
# %%
sVa["heading"] = (["time"], sVqc.heading.groupby_bins("time", timebins).reduce(stats.circmean, high=360.).values)
# %% [markdown]
# ## Old cut off data above surface
#
# Use a simple echo intensity threshold to find the maximum.
# %%
# dmin = 60. # Minimum distance above which to look for the maximum
# nroll = 120 # Number of points in rolling mode window
# fcut = 0.1 # Extra distance to remove (1 - fcut)*dcut
# %%
# sVa.va.isel(time=10000).plot.line('.')
# %% [markdown]
# Identify echo maximum in each beam, using a rolling mode to smooth out data.
# %%
# # fig, ax = plt.subplots()
# dcuts = []
# for var in ["a1", "a2", "a3", "a4", "va"]:
# am = sVa[var].where(sVa.distance > dmin)
# imax = am.argmax(dim="distance", skipna=True)
# dmax = am.distance[imax]
# ro = dmax.rolling(time=nroll, min_periods=1, center=True)
# dm = ro.reduce(mode)
# dcut = (1 - fcut)*dm
# # ax.plot(sVa.time, dmax, 'r')
# # ax.plot(sVa.time, dm, 'orange')
# # ax.plot(sVa.time, dcut, 'g')
# dcuts.append(dcut.values)
# %%
# dcuts = np.stack(dcuts, axis=1)
# # Use only the vertical beam for finding the surface.
# dcut_min = dcuts[:, 4]
# dcut_min = xr.DataArray(dcut_min, dims={"time": sVa.time})
# %% [markdown]
# Mask and remove data above distance threshold.
# %%
# sVm = sVa.where(sVa.distance < dcut_min)
# # The masking process converts some variables to 2D, change them back...
# sVm["p"] = sVa.p
# sVm["t"] = sVa.t
# sVm["pitch"] = sVa.pitch
# sVm["rol"] = sVa.rol
# sVm["heading"] = sVa.heading
# sVm = sVm.isel(distance=~np.isnan(sVm.u).all(axis=0))
# %% [markdown]
# ## Plotting time series
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVm.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.w.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[3], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
fig, ax = plt.subplots(figsize=(12, 3))
sVm_.p.plot(ax=ax)
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-10T11:00"))
sVm_ = sVm.sel(time=timeslice)
fig, axs = plt.subplots(8, 1, figsize=(15, 25), sharex=True)
sVm_.u.plot(ax=axs[0], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.v.plot(ax=axs[1], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.vv.plot(ax=axs[2], x="time", vmin=-0.2, vmax=0.2, cmap="coolwarm")
sVm_.a1.plot(ax=axs[3], x="time")
sVm_.a2.plot(ax=axs[4], x="time")
sVm_.a3.plot(ax=axs[5], x="time")
sVm_.a4.plot(ax=axs[6], x="time")
sVm_.va.plot(ax=axs[7], x="time")
fig, axs = plt.subplots(3, 1, figsize=(11, 8))
sVm_.heading.plot(ax=axs[0])
sVm_.rol.plot(ax=axs[1])
sVm_.pitch.plot(ax=axs[2])
# %% [markdown]
# # Plug in other instruments to dataset
#
# Group and bin average.
# %%
gb = virt.groupby_bins("time", timebins)
virta = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
virta["time_bins"] = interval_to_mid(virta.time_bins.values).astype("datetime64[ms]")
virta = virta.rename({"time_bins": "time"})
gb = sbe.groupby_bins("time", timebins)
sbea = gb.mean(skipna=True, keep_attrs=True)
# Use mid time as dimension, rather than Interval.
sbea["time_bins"] = interval_to_mid(sbea.time_bins.values).astype("datetime64[ms]")
sbea = sbea.rename({"time_bins": "time"})
# %% [markdown]
# Look at a couple of plots.
# %%
fig, ax = plt.subplots(figsize=(12, 3))
virta.turb.plot(ax=ax)
fig, axs = plt.subplots(3, 1, figsize=(12, 10), sharex=True)
sbea.p.plot(ax=axs[0])
sbea.t.plot(ax=axs[1])
sbea.SP.plot(ax=axs[2])
# %% [markdown]
# Assign other data to the sentinel dataset.
# %%
ds = sVa.copy()
# %%
ds["turb_RBR"] = (sVa.p.dims, virta.turb, virta.turb.attrs)
ds["SP_SBE37"] = (sVa.p.dims, sbea.SP, sbea.SP.attrs)
ds["C_SBE37"] = (sVa.p.dims, sbea.C, sbea.C.attrs)
ds["t_SBE37"] = (sVa.p.dims, sbea.t, sbea.t.attrs)
ds["p_SBE37"] = (sVa.p.dims, sbea.p, sbea.p.attrs)
# %% [markdown]
# Try a plot...
# %%
fig, ax = plt.subplots()
ds.p_SBE37.plot(ax=ax)
ds.p.plot(ax=ax, yincrease=False)
# %% [markdown]
# Estimate some more thermodynamic variables.
# %%
import gsw
# %%
ds["SA_SBE37"] = (ds.p.dims, gsw.SA_from_SP(ds.SP_SBE37, ds.p_SBE37, ds.lon, ds.lat), {"units": "g/kg", "long_name": "Absolute_salinity"})
ds["CT_SBE37"] = (ds.p.dims, gsw.CT_from_t(ds.SA_SBE37, ds.t_SBE37, ds.p_SBE37), {"units": "deg C", "long_name": "Conservative_temperature"})
ds["z_SBE37"] = (ds.p.dims, gsw.z_from_p(ds.p_SBE37, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_SBE37"] = (ds.p.dims, -ds.z_SBE37, {"units": "m", "long_name": "depth"})
ds["z_ADCP"] = (ds.p.dims, gsw.z_from_p(ds.p, ds.lat), {"units": "m", "long_name": "height"})
ds["depth_ADCP"] = (ds.p.dims, -ds.z_ADCP, {"units": "m", "long_name": "depth"})
ds["z"] = (ds.distance.dims, ds.distance + ds.z_ADCP.mean(dim="time"), {"units": "m", "long_name": "height"})
ds["depth"] = (ds.distance.dims, -ds.z, {"units": "m", "long_name": "depth"})
ds = ds.set_coords(["z", "depth"])
# %% [markdown]
# Save dataset to netcdf.
# %%
ds.to_netcdf("../proc/ABLE_sentinel_mooring_2018.nc")
# %% [markdown]
# ## Examine a short segment of the dataset
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, axs = plt.subplots(4, 1, figsize=(15, 10), sharex=True, sharey=True)
ds_.u.plot(ax=axs[0], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.a3.plot(ax=axs[1], y="depth", x="time", yincrease=False)
ds_.vv.plot(ax=axs[2], y="depth", x="time", yincrease=False, vmin=-0.2, vmax=0.2, cmap="coolwarm")
ds_.va.plot(ax=axs[3], y="depth", x="time", yincrease=False)
fig, axs = plt.subplots(4, 1, figsize=(11.7, 10), sharex=True)
ds_.p_SBE37.plot(ax=axs[0])
ds_.CT_SBE37.plot(ax=axs[1])
ds_.turb_RBR.plot(ax=axs[2])
ds_.pitch.plot(ax=axs[3])
# %% [markdown]
# Compare echo intensity near bottom for different beams.
# %%
dist = 5
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice).sel(distance=dist, method="nearest")
fig, ax = plt.subplots(figsize=(11, 4))
ds_.a1.plot(ax=ax, label="beam 1")
ds_.a2.plot(ax=ax, label="beam 2")
ds_.a3.plot(ax=ax, label="beam 3")
ds_.a4.plot(ax=ax, label="beam 4")
ds_.va.plot(ax=ax, label="beam v")
ax.set_ylabel("Echo intensity")
ax.legend()
# %%
timeslice = slice(np.datetime64("2018-09-05T08:00"), np.datetime64("2018-09-05T12:00"))
ds_ = ds.sel(time=timeslice)
fig, ax = plt.subplots(figsize=(10, 10))
for i in range(0, ds_.time.size, 50):
ds__ = ds_.isel(time=i)
ds__.va.plot(ax=ax, label=ds__.time.values.astype("datetime64[s]"))
ax.legend(loc="upper left", bbox_to_anchor=(1, 1))
# %%
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import numpy as np
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
# def cc(arg):
# return mcolors.to_rgba(arg, alpha=0.6)
xs = ds_.distance.values
verts = []
zs = []
for i in range(0, ds_.time.size, 100):
ds__ = ds_.isel(time=i)
time = (ds__.time - ds_.time[0]).astype(float)/1e9
zs.append(time)
ys = ds__.va.values
ys[0], ys[-1] = 0, 0
verts.append(list(zip(xs, ys)))
# zs = [0.0, 1.0, 2.0, 3.0]
# for z in zs:
# ys = np.random.rand(len(xs))
# ys[0], ys[-1] = 0, 0
# verts.append(list(zip(xs, ys)))
poly = PolyCollection(verts) # facecolors=[cc('r'), cc('g'), cc('b'), cc('y')]
poly.set_alpha(0.2)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('Distance')
ax.set_xlim3d(0, xs.max())
ax.set_ylabel('Y')
ax.set_ylim3d(0, zs[-1])
ax.set_zlabel('Z')
ax.set_zlim3d(0, 200)
ax.view_init(elev=30., azim=30)
plt.show()
# %%
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
timeslice = slice(np.datetime64("2018-09-05T10:00"), np.datetime64("2018-09-05T10:45"))
ds_ = ds.sel(time=timeslice)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(projection='3d')
T, D = np.meshgrid(ds_.distance.values, (ds_.time.values - ds_.time[0].values).astype(float)/1e9)
# Plot a basic wireframe.
ax.plot_wireframe(T, D, ds_.a2.values, rstride=1, cstride=1)
ax.view_init(elev=45., azim=120)
# %% [markdown]
# # New QC
# %%
tslice = slice(np.datetime64("2018-09-07T10:00"), np.datetime64("2018-09-07T11:00"))
# tslice = slice(np.datetime64("2018-09-04T10:00"), np.datetime64("2018-09-04T11:00"))
# tslice = slice(np.datetime64("2018-09-11T14:00"), np.datetime64("2018-09-11T16:00"))
# tslice = slice(np.datetime64("2018-09-10T03:00"), np.datetime64("2018-09-10T04:00"))
enu = sVp.sel(time=tslice)
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.u.plot(ax=axs[0], **hvel_kwargs)
enu.v.plot(ax=axs[1], **hvel_kwargs)
enu.w.plot(ax=axs[2], **vvel_kwargs)
enu.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enu.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enu.q1.plot(ax=axs[0])
enu.q2.plot(ax=axs[1])
enu.q3.plot(ax=axs[2])
enu.q4.plot(ax=axs[3])
enu.vq.plot(ax=axs[4])
for ax in axs:
ax.set_xlabel("")
# %%
dthresh = 100.
sidelobe_pct = 1 - np.cos(np.deg2rad(enu.beamAngle))
var_names = ["a1", "a2", "a3", "a4", "va"]
nroll = 5
dmingood = np.full((enu.time.size, len(var_names)), np.nan)
fig, axs = plt.subplots(len(var_names), 1, figsize=(14, 3*len(var_names)))
for i, var in enumerate(var_names):
idxmax = enu[var].where(enu.distance > dthresh).argmax("distance")
dmax = sVp.distance[idxmax]
dsl = (1 - sidelobe_pct)*enu.distance[idxmax]
# dmax = dmax.where(dmax > dthresh)
dmode = dsl.rolling(time=nroll, min_periods=1, center=True).reduce(mode)
enu[var].plot(ax=axs[i])
dmingood[:, i] = dmode
dmode.plot(ax=axs[i], color="r")
axs[i].set_title("")
for i in range(len(var_names)):
axs[i].plot(enu.time, dmingood.min(axis=1), color="k")
# %%
fig, axs = plt.subplots(3, 1, figsize=(22, 9))
enu.heading.plot(ax=axs[0], marker='.', linestyle="")
enu.rol.plot(ax=axs[1])
enu.pitch.plot(ax=axs[2])
# %%
# Make a new dataset without surface
enus = enu.copy()
# Loop over the 2D datavars
mask = enu.distance < xr.DataArray(dmingood.min(axis=1), dims={"time": enu.time})
for var in enu.data_vars:
if enu[var].dims == ('distance', 'time'):
print(f"Masking {var}.")
enus[var] = enu[var].where(mask)
# Remove distances where there is no good data
enus = enus.isel(distance=mask.any("time"))
# %%
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(5, 1, sharex=True, figsize=(22, 17))
enus.u.plot(ax=axs[0], **hvel_kwargs)
enus.v.plot(ax=axs[1], **hvel_kwargs)
enus.w.plot(ax=axs[2], **vvel_kwargs)
enus.vv.plot(ax=axs[3], **vvel_kwargs)
np.abs(enus.err).plot(ax=axs[4], vmin=0, vmax=0.2)
for ax in axs:
ax.set_xlabel("")
# %%
from scipy.ndimage import gaussian_filter
# %%
errthresh = 0.2 # Blur around these errors
errthresh_high = 0.2 # Always remove these errors
maskthresh = 0.35 # Blurred mask threshold
qthresh = 300
vqthresh = 35
sigma = (2, 5)
qsum = enus.q1 + enus.q2 + enus.q3 + enus.q4
qgood = qsum > qthresh
vqgood = enus.vq.values > vqthresh
enueb = enus.copy()
egood = np.abs(enus.err) < errthresh
egood_filt = gaussian_filter(egood.values.astype(float), sigma)
ebgood = (egood_filt > maskthresh) & (np.abs(enus.err) < errthresh_high) & qgood
vebgood = (egood_filt > maskthresh) & vqgood
var_names = ["u", "v", "w", "err"]
for var in var_names:
enueb[var] = enus[var].where(ebgood)
enueb["vv"] = enus.vv.where(vebgood)
# %%
fig, ax = plt.subplots(1, 1, figsize=(22, 3.5))
ax.pcolormesh(egood_filt)
ax.contour(egood_filt, [maskthresh], colors="r")
ax.contour(qgood, [0.5], colors="g")
ax.contour(vqgood, [0.5], colors="b")
# %% tags=[]
hvel_kwargs = dict(vmin=-0.3, vmax=0.3, cmap="coolwarm")
vvel_kwargs = dict(vmin=-0.1, vmax=0.1, cmap="coolwarm")
fig, axs = plt.subplots(8, 1, sharex=True, figsize=(22, 28))
enueb.u.plot(ax=axs[0], **hvel_kwargs)
enus.u.plot(ax=axs[1], **hvel_kwargs)
enueb.v.plot(ax=axs[2], **hvel_kwargs)
enus.v.plot(ax=axs[3], **hvel_kwargs)
enueb.w.plot(ax=axs[4], **vvel_kwargs)
enus.w.plot(ax=axs[5], **vvel_kwargs)
enueb.vv.plot(ax=axs[6], **vvel_kwargs)
enus.vv.plot(ax=axs[7], **vvel_kwargs)
for ax in axs:
ax.set_xlabel("")
# %% [markdown]
# # Beam separation
# %%
z = sVp.distance[sVp.distance < 120]
angle = np.deg2rad(sVp.beamAngle)
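# %% [markdown]
# A minimal sketch of the horizontal beam separation, assuming the usual Janus geometry in which each slanted beam sits at `beamAngle` from the vertical: at distance z along the instrument axis a slanted beam is displaced horizontally by z*tan(beamAngle), so two opposing beams are separated by 2*z*tan(beamAngle).

# %%
separation = 2*z*np.tan(angle)
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(z, separation)
ax.set_xlabel("Distance from instrument [m]")
ax.set_ylabel("Opposing-beam separation [m]")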
import numpy as np
import json
import copy
import os
def rotate_box_by_angle_up_direction(box, rotation_angle):
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
box[0:3] = np.dot(box[0:3], rotation_matrix)
box[6:9] = np.dot(box[6:9], rotation_matrix)
box[9:] = np.dot(box[9:], rotation_matrix)
return box
def rotate_box_by_angle_straight_direction(box, rotation_angle):
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[1, 0, 0],
[0,cosval, sinval],
[0,-sinval, cosval]])
box[0:3] = np.dot(box[0:3], rotation_matrix)
box[6:9] = np.dot(box[6:9], rotation_matrix)
box[9:] = np.dot(box[9:], rotation_matrix)
return box
def rotate_edge_by_angle_up_direction(edge, rotation_angle):
""" Rotate the point cloud along up direction with certain angle.
Input:
Nx3 array
Return:
Nx3 array
"""
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
edge[0:3] = np.dot(edge[0:3], rotation_matrix)
    edge[3:6] = np.dot(edge[3:6], rotation_matrix)
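# Illustrative example (an assumption about the layout: box[0:3] is a position and
# box[6:9], box[9:12] are direction vectors, which is why box[3:6] is not rotated):
if __name__ == "__main__":
    demo_box = np.array([1., 0., 0., 2., 2., 2., 1., 0., 0., 0., 1., 0.])
    rotated = rotate_box_by_angle_up_direction(demo_box.copy(), np.pi / 2)
    print(np.round(rotated, 6))  # x-direction entries rotate into the z-direction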
# From Caoxiang's CoilPy
# copied 11 Jan 2021
import numpy as np
class FourSurf(object):
'''
toroidal surface in Fourier representation
R = \sum RBC cos(mu-nv) + RBS sin(mu-nv)
Z = \sum ZBC cos(mu-nv) + ZBS sin(mu-nv)
'''
def __init__(self, xm=[], xn=[], rbc=[], zbs=[], rbs=[], zbc=[]):
"""Initialization with Fourier harmonics.
Parameters:
xm -- list or numpy array, array of m index (default: [])
xn -- list or numpy array, array of n index (default: [])
rbc -- list or numpy array, array of radial cosine harmonics (default: [])
zbs -- list or numpy array, array of z sine harmonics (default: [])
rbs -- list or numpy array, array of radial sine harmonics (default: [])
zbc -- list or numpy array, array of z cosine harmonics (default: [])
"""
self.xm = np.atleast_1d(xm)
self.xn = np.atleast_1d(xn)
self.rbc = np.atleast_1d(rbc)
self.rbs = np.atleast_1d(rbs)
self.zbc = np.atleast_1d(zbc)
self.zbs = np.atleast_1d(zbs)
self.mn = len(self.xn)
return
@classmethod
def read_focus_input(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the FOCUS format input file 'plasma.boundary'
Parameters:
filename -- string, path + name to the FOCUS input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
with open(filename, 'r') as f:
line = f.readline() #skip one line
line = f.readline()
num = int(line.split()[0]) #harmonics number
nfp = int(line.split()[1]) #number of field periodicity
nbn = int(line.split()[2]) #number of Bn harmonics
xm = []
xn = []
rbc = []
rbs = []
zbc = []
zbs = []
line = f.readline() #skip one line
line = f.readline() #skip one line
for i in range(num):
line = f.readline()
line_list = line.split()
n = int(line_list[0])
m = int(line_list[1])
if abs(m)>Mpol or abs(n)>Ntor:
continue
xm.append(m)
xn.append(n)
rbc.append(float(line_list[2]))
rbs.append(float(line_list[3]))
zbc.append(float(line_list[4]))
zbs.append(float(line_list[5]))
return cls(xm=np.array(xm), xn=np.array(xn)*nfp,
rbc=np.array(rbc), rbs=np.array(rbs),
zbc=np.array(zbc), zbs=np.array(zbs))
@classmethod
def read_spec_input(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the SPEC input file '*.sp'
Parameters:
filename -- string, path + name to the FOCUS input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
import FortranNamelist.namelist as nml
from misc import vmecMN
spec = nml.NamelistFile(filename)
# spec['physicslist'] =
Mpol = min(Mpol, spec['physicslist']['MPOL'])
Ntor = min(Ntor, spec['physicslist']['NTOR'])
xm, xn = vmecMN(Mpol, Ntor)
return
@classmethod
def read_spec_output(cls, spec_out, ns=-1):
"""initialize surface from the ns-th interface SPEC output
Parameters:
spec_out -- SPEC class, SPEC hdf5 results
ns -- integer, the index of SPEC interface (default: -1)
Returns:
fourier_surface class
"""
# check if spec_out is in correct format
#if not isinstance(spec_out, SPEC):
# raise TypeError("Invalid type of input data, should be SPEC type.")
# get required data
xm = spec_out.output.im
xn = spec_out.output.in1
rbc = spec_out.output.Rbc[ns,:]
zbs = spec_out.output.Zbs[ns,:]
if spec_out.input.physics.Istellsym:
# stellarator symmetry enforced
rbs = np.zeros_like(rbc)
zbc = np.zeros_like(rbc)
else:
rbs = spec_out.output.Rbs[ns,:]
zbc = spec_out.output.Zbc[ns,:]
return cls(xm=xm, xn=xn, rbc=rbc, rbs=rbs, zbc=zbc, zbs=zbs)
@classmethod
def read_vmec_output(cls, woutfile, ns=-1):
"""initialize surface from the ns-th interface SPEC output
Parameters:
woutfile -- string, path + name to the wout file from VMEC output
ns -- integer, the index of VMEC nested flux surfaces (default: -1)
Returns:
fourier_surface class
"""
import xarray as ncdata # read netcdf file
vmec = ncdata.open_dataset(woutfile)
xm = vmec['xm'].values
xn = vmec['xn'].values
rmnc = vmec['rmnc'].values
zmns = vmec['zmns'].values
rbc = rmnc[ns,:]
zbs = zmns[ns,:]
if vmec['lasym__logical__'].values:
# stellarator symmetry enforced
zmnc = vmec['zmnc'].values
rmns = vmec['rmns'].values
rbs = rmns[ns,:]
zbc = zmnc[ns,:]
else :
rbs = np.zeros_like(rbc)
zbc = np.zeros_like(rbc)
return cls(xm=xm, xn=xn, rbc=rbc, rbs=rbs, zbc=zbc, zbs=zbs)
@classmethod
def read_winding_surfce(cls, filename, Mpol=9999, Ntor=9999):
"""initialize surface from the NESCOIL format input file 'nescin.xxx'
Parameters:
filename -- string, path + name to the NESCOIL input boundary file
Mpol -- maximum truncated poloidal mode number (default: 9999)
          Ntor -- maximum truncated toroidal mode number (default: 9999)
Returns:
fourier_surface class
"""
with open(filename, 'r') as f:
line = ''
while "phip_edge" not in line:
line = f.readline()
line = f.readline()
nfp = int(line.split()[0])
#print "nfp:",nfp
line = ''
while "Current Surface" not in line:
line = f.readline()
line = f.readline()
line = f.readline()
#print "Number of Fourier modes in coil surface from nescin file: ",line
num = int(line)
xm = []
xn = []
rbc = []
rbs = []
zbc = []
zbs = []
line = f.readline() #skip one line
line = f.readline() #skip one line
for i in range(num):
line = f.readline()
line_list = line.split()
m = int(line_list[0])
n = int(line_list[1])
if abs(m)>Mpol or abs(n)>Ntor:
continue
xm.append(m)
xn.append(n)
rbc.append(float(line_list[2]))
zbs.append(float(line_list[3]))
rbs.append(float(line_list[4]))
zbc.append(float(line_list[5]))
# NESCOIL uses mu+nv, minus sign is added
return cls(xm=np.array(xm), xn=-np.array(xn)*nfp,
rbc=np.array(rbc), rbs=np.array(rbs),
zbc=np.array(zbc), zbs=np.array(zbs))
def rz(self, theta, zeta, normal=False):
""" get r,z position of list of (theta, zeta)
Parameters:
theta -- float array_like, poloidal angle
zeta -- float array_like, toroidal angle value
normal -- logical, calculate the normal vector or not (default: False)
Returns:
r, z -- float array_like
r, z, [rt, zt], [rz, zz] -- if normal
"""
assert len(np.atleast_1d(theta)) == len(np.atleast_1d(zeta)), "theta, zeta should be equal size"
# mt - nz (in matrix)
_mtnz = np.matmul( np.reshape(self.xm, (-1,1)), np.reshape(theta, (1,-1)) ) \
- np.matmul( np.reshape(self.xn, (-1,1)), np.reshape( zeta, (1,-1)) )
_cos = np.cos(_mtnz)
_sin = np.sin(_mtnz)
r = np.matmul( np.reshape(self.rbc, (1,-1)), _cos ) \
+ np.matmul( np.reshape(self.rbs, (1,-1)), _sin )
z = np.matmul( np.reshape(self.zbc, (1,-1)), _cos ) \
+ np.matmul( np.reshape(self.zbs, (1,-1)), _sin )
if not normal :
return (r.ravel(), z.ravel())
else:
rt = np.matmul( np.reshape(self.xm * self.rbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(self.xm * self.rbs, (1,-1)), _cos )
zt = np.matmul( np.reshape(self.xm * self.zbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(self.xm * self.zbs, (1,-1)), _cos )
rz = np.matmul( np.reshape(-self.xn * self.rbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(-self.xn * self.rbs, (1,-1)), _cos )
zz = np.matmul( np.reshape(-self.xn * self.zbc, (1,-1)), -_sin ) \
+ np.matmul( np.reshape(-self.xn * self.zbs, (1,-1)), _cos )
return (r.ravel(), z.ravel(), [rt.ravel(), zt.ravel()], [rz.ravel(), zz.ravel()])
def xyz(self, theta, zeta, normal=False):
""" get x,y,z position of list of (theta, zeta)
Parameters:
theta -- float array_like, poloidal angle
zeta -- float array_like, toroidal angle value
normal -- logical, calculate the normal vector or not (default: False)
Returns:
x, y, z -- float array_like
x, y, z, [nx, ny, nz] -- if normal
"""
data = self.rz(theta, zeta, normal)
r = data[0]
z = data[1]
_sin = np.sin(np.ravel(zeta))
_cos = np.cos(np.ravel(zeta))
if not normal:
return (r*_cos, r*_sin, z)
else:
_xt = data[2][0]*_cos # dx/dtheta
_yt = data[2][0]*_sin # dy/dtheta
_zt = data[2][1] # dz/dtheta
_xz = data[3][0]*_cos - r*_sin # dx/dzeta
_yz = data[3][0]*_sin + r*_cos # dy/dzeta
_zz = data[3][1] # dz/dzeta
# n = dr/dz x dr/dt
            n = np.cross(np.transpose([_xz, _yz, _zz]), np.transpose([_xt, _yt, _zt]))
            return (r*_cos, r*_sin, z, n)
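# Illustrative example: an axisymmetric torus with major radius 3 and minor radius 1,
# i.e. R = 3 + cos(theta), Z = sin(theta) (zero harmonics passed explicitly for rbs/zbc).
if __name__ == "__main__":
    torus = FourSurf(xm=[0, 1], xn=[0, 0], rbc=[3.0, 1.0], rbs=[0.0, 0.0],
                     zbc=[0.0, 0.0], zbs=[0.0, 1.0])
    r, z = torus.rz(theta=[0.0, np.pi], zeta=[0.0, 0.0])
    print(r, z)  # expect r = [4., 2.] and z = [0., 0.]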
import sys
from typing import Any
import numpy as np
class Index:
def __index__(self) -> int:
return 0
class SubClass(np.ndarray):
pass
def func(i: int, j: int, **kwargs: Any) -> SubClass:
return B
i8 = np.int64(1)
A = np.array([1])
B = A.view(SubClass).copy()
B_stack = np.array([[1], [1]]).view(SubClass)
C = [1]
if sys.version_info >= (3, 8):
np.ndarray(Index())
np.ndarray([Index()])
np.array(1, dtype=float)
np.array(1, copy=False)
np.array(1, order='F')
np.array(1, order=None)
np.array(1, subok=True)
np.array(1, ndmin=3)
np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
np.asarray(A)
np.asarray(B)
np.asarray(C)
np.asanyarray(A)
np.asanyarray(B)
np.asanyarray(B, dtype=int)
np.asanyarray(C)
np.ascontiguousarray(A)
np.ascontiguousarray(B)
np.ascontiguousarray(C)
np.asfortranarray(A)
np.asfortranarray(B)
np.asfortranarray(C)
np.require(A)
np.require(B)
np.require(B, dtype=int)
np.require(B, requirements=None)
np.require(B, requirements="E")
np.require(B, requirements=["ENSUREARRAY"])
np.require(B, requirements={"F", "E"})
np.require(B, requirements=["C", "OWNDATA"])
np.require(B, requirements="W")
np.require(B, requirements="A")
np.require(C)
np.linspace(0, 2)
np.linspace(0.5, [0, 1, 2])
np.linspace([0, 1, 2], 3)
np.linspace(0j, 2)
np.linspace(0, 2, num=10)
np.linspace(0, 2, endpoint=True)
np.linspace(0, 2, retstep=True)
np.linspace(0j, 2j, retstep=True)
np.linspace(0, 2, dtype=bool)
np.linspace([0, 1], [2, 3], axis=Index())
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=[1j, 2j], num=2)
np.geomspace(1, 2)
np.zeros_like(A)
np.zeros_like(C)
np.zeros_like(B)
np.zeros_like(B, dtype=np.int64)
np.ones_like(A)
np.ones_like(C)
np.ones_like(B)
np.ones_like(B, dtype=np.int64)
np.empty_like(A)
np.empty_like(C)
np.empty_like(B)
np.empty_like(B, dtype=np.int64)
np.full_like(A, i8)
np.full_like(C, i8)
np.full_like(B, i8)
np.full_like(B, i8, dtype=np.int64)
np.ones(1)
np.ones([1, 1, 1])
np.full(1, i8)
np.full([1, 1, 1], i8)
np.indices([1, 2, 3])
np.indices([1, 2, 3], sparse=True)
"""
Tests for the mgrit class
"""
import numpy as np
from pymgrit.core.mgrit import Mgrit
from pymgrit.heat.heat_1d import Heat1D
def rhs(x, t):
"""
Right-hand side of 1D heat equation example problem at a given space-time point (x,t)
:param x: spatial grid point
:param t: time point
:return: right-hand side of 1D heat equation example problem at point (x,t)
"""
return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))
def init_cond(x):
"""
Initial condition of 1D heat equation example,
u(x,0) = sin(pi*x)
:param x: spatial grid point
:return: initial condition of 1D heat equation example problem
"""
return np.sin(np.pi * x)
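# Note: with this right-hand side and initial condition (and a = 1), the exact solution
# of the continuous problem is u(x, t) = sin(pi*x)*cos(t), which the discrete MGRIT
# solution approximates.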
def test_split_into():
"""
Test the function split_into
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2,
nt=2 ** 2 + 1)
result = np.array([4, 3, 3])
mgrit = Mgrit(problem=[heat0], transfer=[], nested_iteration=False)
np.testing.assert_equal(result, mgrit.split_into(10, 3))
def test_split_points():
"""
    Test the function split_points
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2,
nt=2 ** 2 + 1)
result_proc0 = (4, 0)
result_proc1 = (3, 4)
result_proc2 = (3, 7)
mgrit = Mgrit(problem=[heat0], nested_iteration=False)
np.testing.assert_equal(result_proc0, mgrit.split_points(10, 3, 0))
np.testing.assert_equal(result_proc1, mgrit.split_points(10, 3, 1))
np.testing.assert_equal(result_proc2, mgrit.split_points(10, 3, 2))
def test_heat_equation_run():
"""
Test one run for the heat equation
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
heat1 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=17)
heat2 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=5)
problem = [heat0, heat1, heat2]
mgrit = Mgrit(problem=problem, cf_iter=1, nested_iteration=True, max_iter=2, random_init_guess=False)
res = mgrit.solve()
result_conv = np.array([0.00267692, 0.00018053])
np.testing.assert_almost_equal(result_conv, res['conv'])
def test_time_stepping():
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
mgrit = Mgrit(problem=[heat0], cf_iter=1, nested_iteration=True, max_iter=2, random_init_guess=False)
res = mgrit.solve()
result_conv = np.array([])
np.testing.assert_almost_equal(result_conv, res['conv'])
def test_setup_points_and_comm_info():
"""
Test for the function setup_points_and_comm_info
"""
heat0 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=65)
heat1 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=17)
heat2 = Heat1D(x_start=0, x_end=2, nx=5, a=1, rhs=rhs, init_cond=init_cond, t_start=0, t_stop=2, nt=5)
problem = [heat0, heat1, heat2]
mgrit = Mgrit(problem=problem, cf_iter=1, nested_iteration=True, max_iter=2)
size = 7
cpts = []
comm_front = []
comm_back = []
block_size_this_lvl = []
index_local = []
index_local_c = []
index_local_f = []
first_is_c_point = []
first_is_f_point = []
last_is_c_point = []
last_is_f_point = []
send_to = []
get_from = []
for i in range(size):
mgrit.comm_time_size = size
mgrit.comm_time_rank = i
mgrit.int_start = 0 # First time points of process interval
mgrit.int_stop = 0 # Last time points of process interval
mgrit.cpts = [] # C-points per process and level corresponding to complete time interval
mgrit.comm_front = [] # Communication inside F-relax per MGRIT level
mgrit.comm_back = [] # Communication inside F-relax per MGRIT level
mgrit.block_size_this_lvl = [] # Block size per process and level with ghost point
mgrit.index_local_c = [] # Local indices of C-Points
mgrit.index_local_f = [] # Local indices of F-Points
mgrit.index_local = [] # Local indices of all points
mgrit.first_is_f_point = [] # Communication after C-relax
mgrit.first_is_c_point = [] # Communication after F-relax
mgrit.last_is_f_point = [] # Communication after F-relax
mgrit.last_is_c_point = [] # Communication after C-relax
mgrit.send_to = []
mgrit.get_from = []
for lvl in range(mgrit.lvl_max):
mgrit.t.append(np.copy(mgrit.problem[lvl].t))
mgrit.setup_points_and_comm_info(lvl=lvl)
cpts.append(mgrit.cpts)
comm_front.append(mgrit.comm_front)
comm_back.append(mgrit.comm_back)
block_size_this_lvl.append(mgrit.block_size_this_lvl)
index_local.append(mgrit.index_local)
index_local_c.append(mgrit.index_local_c)
index_local_f.append(mgrit.index_local_f)
first_is_c_point.append(mgrit.first_is_c_point)
first_is_f_point.append(mgrit.first_is_f_point)
last_is_c_point.append(mgrit.last_is_c_point)
last_is_f_point.append(mgrit.last_is_f_point)
send_to.append(mgrit.send_to)
get_from.append(mgrit.get_from)
test_cpts = [[np.array([0, 4, 8]), np.array([0]), np.array([0])],
[np.array([12, 16]), np.array([4]), np.array([1])],
[np.array([20, 24, 28]), np.array([], dtype=int), np.array([], dtype=int)],
[np.array([32, 36]), np.array([8]), np.array([2])],
[np.array([40, 44]), np.array([], dtype=int), np.array([], dtype=int)],
[np.array([48, 52]), np.array([12]), np.array([3])],
[np.array([56, 60, 64]), np.array([16]), np.array([4])]]
test_comm_front = [[False, False, False],
[True, True, False],
[False, False, False],
[False, False, False],
[True, True, False],
[True, False, False],
[False, True, False]]
test_comm_back = [[True, True, False],
[False, False, False],
[False, False, False],
[True, True, False],
[True, False, False],
[False, True, False],
[False, False, False]]
test_block_size_this_lvl = [[10, 3, 1],
[11, 3, 2],
[10, 4, 0],
[10, 3, 2],
[10, 3, 0],
[10, 3, 2],
[10, 4, 2]]
test_index_local = [[np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([0, 1, 2]), np.array([0])],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), np.array([1, 2]), np.array([1])],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([1, 2, 3]), np.array([], dtype=int)],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([1, 2]), np.array([1])],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([1, 2]), np.array([], dtype=int)],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([1, 2]), np.array([1])],
[np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), np.array([1, 2, 3]), np.array([1])]]
test_index_local_f = [[np.array([9, 5, 6, 7, 1, 2, 3]), np.array([1, 2]), np.array([], dtype=float)],
[np.array([8, 9, 10, 4, 5, 6, 1, 2]), np.array([1]), np.array([], dtype=float)],
[np.array([6, 7, 8, 2, 3, 4]), np.array([1, 2, 3]), np.array([], dtype=float)],
[np.array([1, 2, 3, 9, 5, 6, 7]), np.array([2]), np.array([], dtype=float)],
[np.array([8, 9, 4, 5, 6, 1, 2]), np.array([1, 2]), np.array([], dtype=float)],
[np.array([7, 8, 9, 3, 4, 5, 1]), np.array([2]), np.array([], dtype=float)],
[np.array([6, 7, 8, 2, 3, 4]), np.array([1, 2]), np.array([], dtype=float)]]
test_index_local_c = [[np.array([0, 4, 8]), np.array([0]), np.array([0])],
[np.array([3, 7]), np.array([2]), np.array([1])],
[np.array([1, 5, 9]), np.array([], dtype=int), np.array([], dtype=int)],
[np.array([4, 8]), np.array([1]), np.array([1])],
[np.array([3, 7]), np.array([], dtype=int), np.array([], dtype=int)],
[np.array([2, 6]), np.array([1]), np.array([1])],
[np.array([1, 5, 9]), np.array([3]), np.array([1])]]
test_first_is_c_point = [[False, False, False], [False, False, False], [True, False, False], [False, True, False],
[False, False, False], [False, True, False], [True, False, False]]
test_first_is_f_point = [[False, False, False], [False, False, False], [False, True, False],
[True, False, False], [False, False, False], [False, False, False],
[False, False, False]]
test_last_is_f_point = [[False, False, False], [True, False, False], [False, True, False],
[False, False, False], [False, True, False], [True, False, False],
[False, False, False]]
test_last_is_c_point = [[False, False, False], [False, True, False], [True, False, False], [False, False, False],
[False, False, False], [False, False, False], [False, False, False]]
test_send_to = [[1, 1, 1], [2, 2, 3], [3, 3, -99], [4, 4, 5], [5, 5, -99], [6, 6, 6], [-99, -99, -99]]
test_get_from = [[-99, -99, -99], [0, 0, 0], [1, 1, -99], [2, 2, 1], [3, 3, -99], [4, 4, 3], [5, 5, 5]]
for i in range(size):
assert all([a == b for a, b in zip(first_is_c_point[i], test_first_is_c_point[i])])
assert all([a == b for a, b in zip(first_is_f_point[i], test_first_is_f_point[i])])
assert all([a == b for a, b in zip(last_is_f_point[i], test_last_is_f_point[i])])
assert all([a == b for a, b in zip(last_is_c_point[i], test_last_is_c_point[i])])
assert all([a == b for a, b in zip(comm_front[i], test_comm_front[i])])
assert all([a == b for a, b in zip(comm_back[i], test_comm_back[i])])
assert all([a == b for a, b in zip(block_size_this_lvl[i], test_block_size_this_lvl[i])])
assert all([a == b for a, b in zip(send_to[i], test_send_to[i])])
assert all([a == b for a, b in zip(get_from[i], test_get_from[i])])
[np.testing.assert_equal(a, b) for a, b in zip(cpts[i], test_cpts[i])]
[np.testing.assert_equal(a, b) for a, b in zip(index_local[i], test_index_local[i])]
[np.testing.assert_equal(a, b) for a, b in zip(index_local_c[i], test_index_local_c[i])]
        [np.testing.assert_equal(a, b) for a, b in zip(index_local_f[i], test_index_local_f[i])]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 18:08:01 2020
@author: <NAME>
Implementação do ajuste do modelo SEIIHURD com separação de grupos. Necessita
de mais verificações e funções para simplificar o input. Baseado nas classes
disponíveis no modelos.py
"""
import numpy as np
from functools import reduce
import scipy.integrate as spi
from scipy.optimize import least_squares
from platypus import NSGAII, Problem, Real
from pyswarms.single.global_best import GlobalBestPSO
import pyswarms as ps
from pyswarms.backend.topology import Star
from pyswarms.utils.plotters import plot_cost_history
from itertools import repeat
import multiprocessing as mp
import copy
import joblib
'''
Social contact matrices from
PREM, Kiesha; COOK, <NAME>.; <NAME>. Projecting social contact matrices in
152 countries using contact surveys and demographic data. PLoS computational
biology, v. 13, n. 9, p. e1005697, 2017.
'''
ages_Mu_min = 5 * np.arange(16)
Mu_house = np.array([[0.47868515, 0.50507561, 0.29848922, 0.15763748, 0.26276959,
0.40185462, 0.46855027, 0.42581354, 0.2150961 , 0.0856771 ,
0.08705463, 0.07551931, 0.05129175, 0.02344832, 0.00793644,
0.01072846],
[0.35580205, 0.77874482, 0.51392686, 0.21151069, 0.08597966,
0.28306027, 0.49982218, 0.52854893, 0.41220947, 0.15848728,
0.07491245, 0.07658339, 0.04772343, 0.02588962, 0.01125956,
0.01073152],
[0.25903114, 0.63488713, 1.36175618, 0.50016515, 0.11748191,
0.10264613, 0.24113458, 0.47274372, 0.54026417, 0.26708819,
0.11007723, 0.04406045, 0.02746409, 0.02825033, 0.02044872,
0.01214665],
[0.14223192, 0.24383932, 0.53761638, 1.05325205, 0.28778496,
0.10925453, 0.0651564 , 0.2432454 , 0.39011334, 0.41381277,
0.23194909, 0.07541471, 0.03428398, 0.02122257, 0.01033573,
0.00864859],
[0.27381886, 0.15430529, 0.16053062, 0.5104134 , 0.95175366,
0.3586594 , 0.09248672, 0.04774269, 0.15814197, 0.36581739,
0.25544811, 0.13338965, 0.03461345, 0.01062458, 0.00844199,
0.00868782],
[0.59409802, 0.26971847, 0.10669146, 0.18330524, 0.39561893,
0.81955947, 0.26376865, 0.06604084, 0.03824556, 0.11560004,
0.23218163, 0.15331788, 0.07336147, 0.02312255, 0.00412646,
0.01025778],
[0.63860889, 0.75760606, 0.43109156, 0.09913293, 0.13935789,
0.32056062, 0.65710277, 0.25488454, 0.1062129 , 0.0430932 ,
0.06880784, 0.09938458, 0.09010691, 0.02233902, 0.01155556,
0.00695246],
[0.56209348, 0.87334544, 0.75598244, 0.33199136, 0.07233271,
0.08674171, 0.20243583, 0.60062714, 0.17793601, 0.06307045,
0.04445926, 0.04082447, 0.06275133, 0.04051762, 0.01712777,
0.00598721],
[0.35751289, 0.66234582, 0.77180208, 0.54993616, 0.17368099,
0.07361914, 0.13016852, 0.19937327, 0.46551558, 0.15412263,
0.06123041, 0.0182514 , 0.04234381, 0.04312892, 0.01656267,
0.01175358],
[0.208131 , 0.41591452, 0.56510014, 0.67760241, 0.38146504,
0.14185001, 0.06160354, 0.12945701, 0.16470166, 0.41150841,
0.14596804, 0.04404807, 0.02395316, 0.01731295, 0.01469059,
0.02275339],
[0.30472548, 0.26744442, 0.41631962, 0.46516888, 0.41751365,
0.28520772, 0.13931619, 0.07682945, 0.11404965, 0.16122096,
0.33813266, 0.1349378 , 0.03755396, 0.01429426, 0.01356763,
0.02551792],
[0.52762004, 0.52787011, 0.33622117, 0.43037934, 0.36416323,
0.42655672, 0.33780201, 0.13492044, 0.0798784 , 0.15795568,
0.20367727, 0.33176385, 0.12256126, 0.05573807, 0.0124446 ,
0.02190564],
[0.53741472, 0.50750067, 0.3229994 , 0.30706704, 0.21340314,
0.27424513, 0.32838657, 0.26023515, 0.13222548, 0.07284901,
0.11950584, 0.16376401, 0.25560123, 0.09269703, 0.02451284,
0.00631762],
[0.37949376, 0.55324102, 0.47449156, 0.24796638, 0.19276924,
0.20675484, 0.3267867 , 0.39525729, 0.3070043 , 0.10088992,
0.10256839, 0.13016641, 0.1231421 , 0.24067708, 0.05475668,
0.01401368],
[0.16359554, 0.48536065, 0.40533723, 0.31542539, 0.06890518,
0.15670328, 0.12884062, 0.27912381, 0.25685832, 0.20143856,
0.12497647, 0.07565566, 0.10331686, 0.08830789, 0.15657321,
0.05744065],
[0.29555039, 0.39898035, 0.60257982, 0.5009724 , 0.13799378,
0.11716593, 0.14366306, 0.31602298, 0.34691652, 0.30960511,
0.31253708, 0.14557295, 0.06065554, 0.10654772, 0.06390924,
0.09827735]])
Mu_school = np.array([[3.21885854e-001, 4.31659966e-002, 7.88269419e-003,
8.09548363e-003, 5.35038146e-003, 2.18201974e-002,
4.01633514e-002, 2.99376002e-002, 1.40680283e-002,
1.66587853e-002, 9.47774696e-003, 7.41041622e-003,
1.28200661e-003, 7.79120405e-004, 8.23608272e-066,
6.37926405e-120],
[5.40133328e-002, 4.84870697e+000, 2.70046494e-001,
3.14778450e-002, 3.11206331e-002, 8.56826951e-002,
1.08251879e-001, 9.46101139e-002, 8.63528188e-002,
5.51141159e-002, 4.19385198e-002, 1.20958942e-002,
4.77242219e-003, 1.39787217e-003, 3.47452943e-004,
8.08973738e-039],
[4.56461982e-004, 1.04840235e+000, 6.09152459e+000,
1.98915822e-001, 1.99709921e-002, 6.68319525e-002,
6.58949586e-002, 9.70851505e-002, 9.54147078e-002,
6.70538232e-002, 4.24864096e-002, 1.98701346e-002,
5.11869429e-003, 7.27320438e-004, 4.93746124e-025,
1.82153965e-004],
[2.59613205e-003, 4.73315233e-002, 1.99337834e+000,
7.20040500e+000, 8.57326037e-002, 7.90668822e-002,
8.54208542e-002, 1.10816964e-001, 8.76955236e-002,
9.22975521e-002, 4.58035025e-002, 2.51130956e-002,
5.71391798e-003, 1.07818752e-003, 6.21174558e-033,
1.70710246e-070],
[7.19158720e-003, 2.48833195e-002, 9.89727235e-003,
8.76815025e-001, 4.33963352e-001, 5.05185217e-002,
3.30594492e-002, 3.81384107e-002, 2.34709676e-002,
2.67235372e-002, 1.32913985e-002, 9.00655556e-003,
6.94913059e-004, 1.25675951e-003, 1.77164197e-004,
1.21957619e-047],
[7.04119204e-003, 1.19412206e-001, 3.75016980e-002,
2.02193056e-001, 2.79822908e-001, 1.68610223e-001,
2.86939363e-002, 3.56961469e-002, 4.09234494e-002,
3.32290896e-002, 8.12074348e-003, 1.26152144e-002,
4.27869081e-003, 2.41737477e-003, 4.63116893e-004,
1.28597237e-003],
[1.41486320e-002, 3.86561429e-001, 2.55902236e-001,
1.69973534e-001, 4.98104010e-002, 8.98122446e-002,
7.95333394e-002, 5.19274611e-002, 5.46612930e-002,
2.64567137e-002, 2.03241595e-002, 2.96263220e-003,
5.42888613e-003, 4.47585970e-004, 1.65440335e-048,
3.11189454e-055],
[2.40945305e-002, 2.11030046e-001, 1.54767246e-001,
8.17929897e-002, 1.84061608e-002, 5.43009779e-002,
7.39351186e-002, 5.21677009e-002, 5.63267084e-002,
2.51807147e-002, 3.53972554e-003, 7.96646343e-003,
5.56929776e-004, 2.08530461e-003, 1.84428290e-123,
9.69555083e-067],
[7.81313905e-003, 1.14371898e-001, 9.09011945e-002,
3.80212104e-001, 8.54533192e-003, 2.62430162e-002,
2.51880009e-002, 3.22563508e-002, 6.73506045e-002,
2.24997143e-002, 2.39241043e-002, 6.50627191e-003,
5.50892674e-003, 4.78308850e-004, 4.81213215e-068,
2.40231425e-092],
[6.55265016e-002, 2.31163536e-001, 1.49970765e-001,
5.53563093e-001, 5.74032526e-003, 3.02865481e-002,
5.72506883e-002, 4.70559232e-002, 4.28736553e-002,
2.42614518e-002, 2.86665377e-002, 1.29570473e-002,
3.24362518e-003, 1.67930318e-003, 6.20916950e-134,
3.27297624e-072],
[1.72765646e-002, 3.43744913e-001, 4.30902785e-001,
4.74293073e-001, 5.39328187e-003, 1.44128740e-002,
3.95545363e-002, 3.73781860e-002, 4.56834488e-002,
5.92135906e-002, 2.91473801e-002, 1.54857502e-002,
4.53105390e-003, 8.87272668e-024, 1.23797452e-117,
5.64262349e-078],
[6.14363036e-002, 2.98367348e-001, 2.59092700e-001,
3.00800812e-001, 5.92454596e-003, 5.26458862e-002,
2.02188672e-002, 3.27897605e-002, 4.07753741e-002,
2.83422407e-002, 2.43657809e-002, 2.73993226e-002,
8.87990718e-003, 1.13279180e-031, 7.81960493e-004,
7.62467510e-004],
[3.63695643e-002, 5.96870355e-002, 3.05072624e-002,
1.45523978e-001, 1.26062984e-002, 1.69458169e-003,
1.55127292e-002, 4.22097670e-002, 9.21792425e-003,
1.42200652e-002, 1.10967529e-002, 5.77020348e-003,
2.04474044e-002, 1.11075734e-002, 4.42271199e-067,
2.12068625e-037],
[1.67937029e-003, 2.72971001e-002, 1.05886266e-002,
7.61087735e-032, 1.97191559e-003, 1.92885006e-003,
1.24343737e-002, 5.39297787e-003, 5.41684968e-003,
8.63502071e-003, 1.94554498e-003, 1.49082274e-002,
8.11781100e-003, 1.74395489e-002, 1.11239023e-002,
3.45693088e-126],
[1.28088348e-028, 5.11065200e-026, 1.93019797e-040,
7.60476035e-003, 2.63586947e-022, 1.69749024e-024,
1.25875005e-026, 7.62109877e-003, 7.84979948e-003,
2.11516023e-002, 3.52117832e-002, 2.14360383e-002,
7.73902109e-003, 8.01328325e-003, 7.91285055e-003,
2.13825814e-002],
[2.81655586e-094, 2.11305187e-002, 8.46562506e-042,
2.12592841e-002, 4.89802057e-036, 7.59232387e-003,
9.77247001e-069, 2.23108239e-060, 1.43715978e-048,
8.56015694e-060, 4.69469043e-042, 1.59822047e-046,
2.20978550e-083, 8.85861277e-107, 1.02042815e-080,
6.61413913e-113]])
Mu_work = np.array([[0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 8.20604524e-092, 1.20585150e-005,
3.16436834e-125],
[0.00000000e+000, 1.16840561e-003, 9.90713236e-072,
4.42646396e-059, 2.91874286e-006, 9.98773031e-003,
2.58779981e-002, 5.66104376e-003, 2.12699812e-002,
5.72117462e-003, 1.48212306e-003, 1.23926126e-003,
1.28212945e-056, 1.34955578e-005, 7.64591325e-079,
2.38392073e-065],
[0.00000000e+000, 2.56552144e-003, 1.12756182e-001,
2.40351143e-002, 2.62981485e-002, 7.56512432e-003,
6.19587609e-002, 1.73269871e-002, 5.87405128e-002,
3.26749742e-002, 1.24709193e-002, 2.93054408e-008,
3.71596993e-017, 2.79780317e-053, 4.95800770e-006,
3.77718083e-102],
[0.00000000e+000, 1.07213881e-002, 4.28390448e-002,
7.22769090e-001, 5.93479736e-001, 3.39341952e-001,
3.17013715e-001, 2.89168861e-001, 3.11143180e-001,
2.34889238e-001, 1.32953769e-001, 6.01944097e-002,
1.47306181e-002, 8.34699602e-006, 2.85972822e-006,
1.88926122e-031],
[0.00000000e+000, 9.14252587e-003, 5.74508682e-002,
4.00000235e-001, 7.93386618e-001, 7.55975146e-001,
6.32277283e-001, 6.83601459e-001, 4.98506972e-001,
3.82309992e-001, 2.81363576e-001, 1.23338103e-001,
4.15708021e-002, 9.86113407e-006, 1.32609387e-005,
3.74318048e-006],
[0.00000000e+000, 1.04243481e-002, 7.34587492e-002,
3.49556755e-001, 7.50680101e-001, 1.25683393e+000,
9.01245714e-001, 8.63446835e-001, 7.70443641e-001,
5.17237071e-001, 4.09810981e-001, 1.80645400e-001,
5.51284783e-002, 1.60674627e-005, 1.01182608e-005,
3.01442534e-006],
[0.00000000e+000, 1.65842404e-002, 8.34076781e-002,
1.89301935e-001, 5.21246906e-001, 8.54460001e-001,
1.12054931e+000, 9.64310078e-001, 8.34675180e-001,
6.52534012e-001, 3.79383514e-001, 2.11198205e-001,
5.17285688e-002, 1.63795563e-005, 4.10100851e-006,
3.49478980e-006],
[0.00000000e+000, 1.11666639e-002, 5.03319748e-002,
3.70510313e-001, 4.24294782e-001, 7.87535547e-001,
8.45085693e-001, 1.14590365e+000, 1.07673077e+000,
7.13492115e-001, 5.00740004e-001, 1.90102207e-001,
3.59740115e-002, 1.22988530e-005, 9.13512833e-006,
6.02097416e-006],
[0.00000000e+000, 6.07792440e-003, 5.49337607e-002,
2.23499535e-001, 4.82353827e-001, 7.52291991e-001,
8.89187601e-001, 9.33765370e-001, 1.10492283e+000,
8.50124391e-001, 5.88941528e-001, 1.94947085e-001,
5.09477228e-002, 1.43626161e-005, 1.02721567e-005,
1.29503893e-005],
[0.00000000e+000, 3.31622551e-003, 7.01829848e-002,
2.67512972e-001, 3.14796392e-001, 5.41516885e-001,
6.95769048e-001, 7.50620518e-001, 7.50038547e-001,
7.00954088e-001, 4.35197983e-001, 2.11283335e-001,
3.88576200e-002, 1.62810370e-005, 1.08243610e-005,
6.09172339e-006],
[0.00000000e+000, 4.39576425e-004, 7.17737968e-002,
1.89254612e-001, 2.47832532e-001, 5.16027731e-001,
6.02783971e-001, 6.15949277e-001, 8.05581107e-001,
7.44063535e-001, 5.44855374e-001, 2.52198706e-001,
4.39235685e-002, 1.18079721e-005, 1.18226645e-005,
1.01613165e-005],
[0.00000000e+000, 4.91737561e-003, 1.08686672e-001,
1.24987806e-001, 1.64110983e-001, 3.00118829e-001,
4.18159745e-001, 3.86897613e-001, 4.77718241e-001,
3.60854250e-001, 3.22466456e-001, 1.92516925e-001,
4.07209694e-002, 1.34978304e-005, 6.58739925e-006,
6.65716756e-006],
[0.00000000e+000, 6.35447018e-004, 3.96329620e-002,
1.83072502e-002, 7.04596701e-002, 1.24861117e-001,
1.37834574e-001, 1.59845720e-001, 1.66933479e-001,
1.56084857e-001, 1.14949158e-001, 8.46570798e-002,
1.50879843e-002, 2.03019580e-005, 8.26102156e-006,
1.48398182e-005],
[7.60299521e-006, 3.36326754e-006, 7.64855296e-006,
2.27621532e-005, 3.14933351e-005, 7.89308410e-005,
7.24212842e-005, 2.91748203e-005, 6.61873732e-005,
5.95693238e-005, 7.70713500e-005, 5.30687748e-005,
4.66030117e-005, 1.41633235e-005, 2.49066205e-005,
1.19109038e-005],
[5.78863840e-055, 7.88785149e-042, 2.54830412e-006,
2.60648191e-005, 1.68036205e-005, 2.12446739e-005,
3.57267603e-005, 4.02377033e-005, 3.56401935e-005,
3.09769252e-005, 2.13053382e-005, 4.49709414e-005,
2.61368373e-005, 1.68266203e-005, 1.66514322e-005,
2.60822813e-005],
[2.35721271e-141, 9.06871674e-097, 1.18637122e-089,
9.39934076e-022, 4.66000452e-005, 4.69664011e-005,
4.69316082e-005, 8.42184044e-005, 2.77788168e-005,
1.03294378e-005, 1.06803618e-005, 7.26341826e-075,
1.10073971e-065, 1.02831671e-005, 5.16902994e-049,
8.28040509e-043]])
Mu_other = np.array([[0.95537734, 0.46860132, 0.27110607, 0.19447667, 0.32135073,
0.48782072, 0.54963024, 0.42195593, 0.27152038, 0.17864251,
0.20155642, 0.16358271, 0.1040159 , 0.0874149 , 0.05129938,
0.02153823],
[0.51023519, 2.17757364, 0.9022516 , 0.24304235, 0.20119518,
0.39689588, 0.47242431, 0.46949918, 0.37741651, 0.16843746,
0.12590504, 0.12682331, 0.11282247, 0.08222718, 0.03648526,
0.02404257],
[0.18585796, 1.11958124, 4.47729443, 0.67959759, 0.43936317,
0.36934142, 0.41566744, 0.44467286, 0.48797422, 0.28795385,
0.17659191, 0.10674831, 0.07175567, 0.07249261, 0.04815305,
0.03697862],
[0.09854482, 0.3514869 , 1.84902386, 5.38491613, 1.27425161,
0.59242579, 0.36578735, 0.39181798, 0.38131832, 0.31501028,
0.13275648, 0.06408612, 0.04499218, 0.04000664, 0.02232326,
0.01322698],
[0.13674436, 0.1973461 , 0.33264088, 2.08016394, 3.28810184,
1.29198125, 0.74642201, 0.44357051, 0.32781391, 0.35511243,
0.20132011, 0.12961 , 0.04994553, 0.03748657, 0.03841073,
0.02700581],
[0.23495203, 0.13839031, 0.14085679, 0.5347385 , 1.46021275,
1.85222022, 1.02681162, 0.61513602, 0.39086271, 0.32871844,
0.25938947, 0.13520412, 0.05101963, 0.03714278, 0.02177751,
0.00979745],
[0.23139098, 0.18634831, 0.32002214, 0.2477269 , 0.64111274,
0.93691022, 1.14560725, 0.73176025, 0.43760432, 0.31057135,
0.29406937, 0.20632155, 0.09044896, 0.06448983, 0.03041877,
0.02522842],
[0.18786196, 0.25090485, 0.21366969, 0.15358412, 0.35761286,
0.62390736, 0.76125666, 0.82975354, 0.54980593, 0.32778339,
0.20858991, 0.1607099 , 0.13218526, 0.09042909, 0.04990491,
0.01762718],
[0.12220241, 0.17968132, 0.31826246, 0.19846971, 0.34823183,
0.41563737, 0.55930999, 0.54070187, 0.5573184 , 0.31526474,
0.20194048, 0.09234293, 0.08377534, 0.05819374, 0.0414762 ,
0.01563101],
[0.03429527, 0.06388018, 0.09407867, 0.17418896, 0.23404519,
0.28879108, 0.34528852, 0.34507961, 0.31461973, 0.29954426,
0.21759668, 0.09684718, 0.06596679, 0.04274337, 0.0356891 ,
0.02459849],
[0.05092152, 0.10829561, 0.13898902, 0.2005828 , 0.35807132,
0.45181815, 0.32281821, 0.28014803, 0.30125545, 0.31260137,
0.22923948, 0.17657382, 0.10276889, 0.05555467, 0.03430327,
0.02064256],
[0.06739051, 0.06795035, 0.0826437 , 0.09522087, 0.23309189,
0.39055444, 0.39458465, 0.29290532, 0.27204846, 0.17810118,
0.24399007, 0.22146653, 0.13732849, 0.07585801, 0.03938794,
0.0190908 ],
[0.04337917, 0.05375367, 0.05230119, 0.08066901, 0.16619572,
0.25423056, 0.25580913, 0.27430323, 0.22478799, 0.16909017,
0.14284879, 0.17211604, 0.14336033, 0.10344522, 0.06797049,
0.02546014],
[0.04080687, 0.06113728, 0.04392062, 0.04488748, 0.12808591,
0.19886058, 0.24542711, 0.19678011, 0.17800136, 0.13147441,
0.13564091, 0.14280335, 0.12969805, 0.11181631, 0.05550193,
0.02956066],
[0.01432324, 0.03441212, 0.05604694, 0.10154456, 0.09204 ,
0.13341443, 0.13396901, 0.16682638, 0.18562675, 0.1299677 ,
0.09922375, 0.09634331, 0.15184583, 0.13541738, 0.1169359 ,
0.03805293],
[0.01972631, 0.02274412, 0.03797545, 0.02036785, 0.04357298,
0.05783639, 0.10706321, 0.07688271, 0.06969759, 0.08029393,
0.05466604, 0.05129046, 0.04648653, 0.06132882, 0.05004289,
0.03030569]])
def generate_reduced_matrices(age_sep, Ni):
    '''
    Receives the age separation and populations and generates the average contact
    matrices; returns a (4, len(age_sep)+1, len(age_sep)+1) array with the 4 partial
    contact matrices: house, school, work and other.
    Ni is the population of each age group (16 five-year age groups).
    '''
nMat = len(age_sep) + 1
    Ms = np.empty((4, nMat, nMat))
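# --- Editor's sketch (not part of the original file) --------------------------
# The body of generate_reduced_matrices is truncated in this dump.  The helper
# below illustrates one common way to collapse a 16x16 five-year-age-group
# contact matrix C into the coarser groups implied by `age_sep`: contacted ages
# are summed within each new column group, and contacting ages are averaged
# with population weights.  The helper name, the interpretation of `age_sep` as
# split indices, and the weighting rule are assumptions for illustration only.
def _aggregate_contact_matrix(C, age_sep, Ni):
    Ni = np.asarray(Ni, dtype=float)
    bounds = [0] + list(age_sep) + [len(Ni)]
    nMat = len(bounds) - 1
    M = np.zeros((nMat, nMat))
    for a in range(nMat):
        rows = slice(bounds[a], bounds[a + 1])
        w = Ni[rows] / np.sum(Ni[rows])        # population weights of contactors
        for b in range(nMat):
            cols = slice(bounds[b], bounds[b + 1])
            # sum over contacted ages, population-weighted mean over contactors
            M[a, b] = np.sum(w * np.sum(C[rows, cols], axis=1))
    return M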
import os
import math
import numpy as np
import itertools as it
import pylab as plt
import seaborn as sns
import pandas as pd
import multiprocessing as mp
import tables
#from scipy.spatial.distance import euclidean
from numba import njit
from blechpy.utils.particles import HMMInfoParticle
from blechpy import load_dataset
from blechpy.dio import h5io
from blechpy.plotting import hmm_plot as hmmplt
from joblib import Parallel, delayed, Memory
from appdirs import user_cache_dir
cachedir = user_cache_dir('blechpy')
memory = Memory(cachedir, verbose=0)
TEST_PARAMS = {'n_cells': 10, 'n_states': 4, 'state_seq_length': 5,
'trial_time': 3.5, 'dt': 0.001, 'max_rate': 50, 'n_trials': 15,
'min_state_dur': 0.05, 'noise': 0.01, 'baseline_dur': 1}
FACTORIAL_LOOKUP = np.array([math.factorial(x) for x in range(20)])
@njit
def fast_factorial(x):
if x < len(FACTORIAL_LOOKUP):
return FACTORIAL_LOOKUP[x]
else:
y = 1
for i in range(1,x+1):
y = y*i
return y
@njit
def poisson(rate, n, dt):
    '''Gives the probability of each neuron's spike count assuming Poisson spiking
'''
tmp = np.power(rate*dt, n) / np.array([fast_factorial(x) for x in n])
tmp = tmp * np.exp(-rate*dt)
return tmp
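# Worked example (editor's note, values illustrative): with rate = np.array([10., 20.])
# spikes/s, counts n = np.array([1, 0]) and dt = 0.001 s, rate*dt = [0.01, 0.02], so
# poisson(rate, n, dt) returns the element-wise Poisson pmf
# [0.01*exp(-0.01), 1*exp(-0.02)], i.e. each neuron's spike-count probability per bin.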
@njit
def forward(spikes, dt, PI, A, B):
'''Run forward algorithm to compute alpha = P(Xt = i| o1...ot, pi)
Gives the probabilities of being in a specific state at each time point
given the past observations and initial probabilities
Parameters
----------
spikes : np.array
N x T matrix of spike counts with each entry ((i,j)) holding the # of
        spikes from neuron i in timebin j
    nStates : int, # of hidden states predicted to have generated the spikes
dt : float, timebin in seconds (i.e. 0.001)
PI : np.array
nStates x 1 vector of initial state probabilities
A : np.array
nStates x nStates state transmission matrix with each entry ((i,j))
giving the probability of transitioning from state i to state j
B : np.array
        N x nStates rate matrix. Each entry ((i,j)) gives the predicted rate
of neuron i in state j
Returns
-------
alpha : np.array
nStates x T matrix of forward probabilites. Each entry (i,j) gives
P(Xt = i | o1,...,oj, pi)
norms : np.array
1 x T vector of norm used to normalize alpha to be a probability
distribution and also to scale the outputs of the backward algorithm.
norms(t) = sum(alpha(:,t))
'''
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
    # For each state, use the initial state distribution and spike counts
# to initialize alpha(:,1)
row = np.array([PI[i] * np.prod(poisson(B[:,i], spikes[:,0], dt))
for i in range(nStates)])
alpha = np.zeros((nStates, nTimeSteps))
norms = [np.sum(row)]
alpha[:, 0] = row/norms[0]
for t in range(1, nTimeSteps):
tmp = np.array([np.prod(poisson(B[:, s], spikes[:, t], dt)) *
np.sum(alpha[:, t-1] * A[:,s])
for s in range(nStates)])
tmp_norm = np.sum(tmp)
norms.append(tmp_norm)
tmp = tmp / tmp_norm
alpha[:, t] = tmp
return alpha, norms
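# Editor's sketch (not part of the original module): minimal shapes and argument
# order for calling `forward`.  All values below are made up for illustration.
def _forward_usage_sketch():
    nCells, nStates, nTime, dt = 3, 2, 100, 0.001
    spikes = np.random.poisson(0.05, (nCells, nTime)).astype('int32')  # fake counts
    PI = np.ones(nStates) / nStates                  # uniform initial distribution
    A = np.array([[0.99, 0.01], [0.01, 0.99]])       # sticky transition matrix
    B = np.random.rand(nCells, nStates) * 20         # firing rates in Hz
    alpha, norms = forward(spikes, dt, PI, A, B)
    return alpha, norms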
@njit
def backward(spikes, dt, A, B, norms):
''' Runs the backward algorithm to compute beta = P(ot+1...oT | Xt=s)
Computes the probability of observing all future observations given the
current state at each time point
    Parameters
    ----------
    spikes : np.array, N x T matrix of spike counts
    nStates : int, # of hidden states predicted
    dt : float, timebin size in seconds
    A : np.array, nStates x nStates matrix of transition probabilities
    B : np.array, N x nStates matrix of estimated spike rates for each neuron
    norms : 1 x T vector of normalization factors returned by `forward`
Returns
-------
beta : np.array, nStates x T matrix of backward probabilities
'''
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
beta = np.zeros((nStates, nTimeSteps))
beta[:, -1] = 1 # Initialize final beta to 1 for all states
tStep = list(range(nTimeSteps-1))
tStep.reverse()
for t in tStep:
for s in range(nStates):
beta[s,t] = np.sum((beta[:, t+1] * A[s,:]) *
np.prod(poisson(B[:, s], spikes[:, t+1], dt)))
beta[:, t] = beta[:, t] / norms[t+1]
return beta
@njit
def baum_welch(spikes, dt, A, B, alpha, beta):
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
gamma = np.zeros((nStates, nTimeSteps))
epsilons = np.zeros((nStates, nStates, nTimeSteps-1))
for t in range(nTimeSteps):
if t < nTimeSteps-1:
gamma[:, t] = (alpha[:, t] * beta[:, t]) / np.sum(alpha[:,t] * beta[:,t])
epsilonNumerator = np.zeros((nStates, nStates))
for si in range(nStates):
for sj in range(nStates):
probs = np.prod(poisson(B[:,sj], spikes[:, t+1], dt))
epsilonNumerator[si, sj] = (alpha[si, t]*A[si, sj]*
beta[sj, t]*probs)
epsilons[:, :, t] = epsilonNumerator / np.sum(epsilonNumerator)
return gamma, epsilons
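# Editor's sketch (not part of the original module): how forward, backward and
# baum_welch combine into one EM (Baum-Welch) iteration for this Poisson HMM.
# The update formulas below are the textbook ones; the fitting loop actually
# used elsewhere in this package may differ in detail.
def _em_iteration_sketch(spikes, dt, PI, A, B):
    alpha, norms = forward(spikes, dt, PI, A, B)
    beta = backward(spikes, dt, A, B, norms)
    gamma, epsilons = baum_welch(spikes, dt, A, B, alpha, beta)
    PI_new = gamma[:, 0]
    # expected transitions out of each state normalize the transition counts
    A_new = np.sum(epsilons, axis=2) / np.sum(gamma[:, :-1], axis=1).reshape(-1, 1)
    # expected spike count per state divided by expected time spent in that state
    B_new = (spikes @ gamma.T) / (np.sum(gamma, axis=1) * dt)
    return PI_new, A_new, B_new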
def isNotConverged(oldPI, oldA, oldB, PI, A, B, thresh=1e-4):
dPI = np.sqrt(np.sum(np.power(oldPI - PI, 2)))
dA = np.sqrt(np.sum(np.power(oldA - A, 2)))
dB = np.sqrt(np.sum(np.power(oldB - B, 2)))
print('dPI = %f, dA = %f, dB = %f' % (dPI, dA, dB))
if all([x < thresh for x in [dPI, dA, dB]]):
return False
else:
return True
def poisson_viterbi(spikes, dt, PI, A, B):
'''
Parameters
----------
spikes : np.array, Neuron X Time matrix of spike counts
PI : np.array, nStates x 1 vector of initial state probabilities
    A : np.array, nStates X nStates matrix of state transition probabilities
B : np.array, Neuron X States matrix of estimated firing rates
dt : float, time step size in seconds
Returns
-------
bestPath : np.array
1 x Time vector of states representing the most likely hidden state
sequence
maxPathLogProb : float
Log probability of the most likely state sequence
T1 : np.array
State X Time matrix where each entry (i,j) gives the log probability of
        the most likely path so far ending in state i that generates
observations o1,..., oj
T2: np.array
State X Time matrix of back pointers where each entry (i,j) gives the
state x(j-1) on the most likely path so far ending in state i
'''
if A.shape[0] != A.shape[1]:
raise ValueError('Transition matrix is not square')
nStates = A.shape[0]
nCells, nTimeSteps = spikes.shape
T1 = np.zeros((nStates, nTimeSteps))
T2 = np.zeros((nStates, nTimeSteps))
    T1[:,0] = np.array([np.log(PI[i]) +
                        np.log(np.prod(poisson(B[:,i], spikes[:, 0], dt)))
for i in range(nStates)])
for t, s in it.product(range(1,nTimeSteps), range(nStates)):
probs = np.log(np.prod(poisson(B[:, s], spikes[:, t], dt)))
vec2 = T1[:, t-1] + np.log(A[:,s])
vec1 = vec2 + probs
T1[s, t] = np.max(vec1)
idx = np.argmax(vec2)
T2[s, t] = idx
bestPathEndState = np.argmax(T1[:, -1])
    maxPathLogProb = T1[bestPathEndState, -1]
bestPath = np.zeros((nTimeSteps,))
bestPath[-1] = bestPathEndState
tStep = list(range(nTimeSteps-1))
tStep.reverse()
for t in tStep:
bestPath[t] = T2[int(bestPath[t+1]), t+1]
return bestPath, maxPathLogProb, T1, T2
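# Editor's sketch (not part of the original module): decoding with
# poisson_viterbi once PI, A and B have been estimated (e.g. by EM as above).
def _viterbi_usage_sketch(spikes, dt, PI, A, B):
    bestPath, logProb, T1, T2 = poisson_viterbi(spikes, dt, PI, A, B)
    change_points = np.where(np.diff(bestPath) != 0)[0] + 1  # bins where the state switches
    return bestPath.astype(int), logProb, change_points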
class TestData(object):
def __init__(self, params=None):
if params is None:
params = TEST_PARAMS.copy()
param_str = '\t'+'\n\t'.join(repr(params)[1:-1].split(', '))
print('Using default parameters:\n%s' % param_str)
self.params = params.copy()
self.generate()
def generate(self, params=None):
print('-'*80)
print('Simulating Data')
print('-'*80)
if params is not None:
self.params.update(params)
params = self.params
param_str = '\t'+'\n\t'.join(repr(params)[1:-1].split(', '))
print('Parameters:\n%s' % param_str)
self._generate_ground_truth()
self._generate_spike_trains()
def _generate_ground_truth(self):
print('Generating ground truth state sequence...')
params = self.params
nStates = params['n_states']
seqLen = params['state_seq_length']
minSeqDur = params['min_state_dur']
baseline_dur = params['baseline_dur']
maxFR = params['max_rate']
nCells = params['n_cells']
trialTime = params['trial_time']
nTrials = params['n_trials']
dt = params['dt']
nTimeSteps = int(trialTime/dt)
T = trialTime
# Figure out a random state sequence and state durations
stateSeq = np.random.randint(0, nStates, seqLen)
stateSeq = np.array([0, *np.random.randint(0,nStates, seqLen-1)])
stateDurs = np.zeros((nTrials, seqLen))
for i in range(nTrials):
tmp = np.abs(np.random.rand(seqLen-1))
tmp = tmp * ((trialTime - baseline_dur) / np.sum(tmp))
stateDurs[i, :] = np.array([baseline_dur, *tmp])
# Make vector of state at each time point
stateVec = np.zeros((nTrials, nTimeSteps))
for trial in range(nTrials):
t0 = 0
for state, dur in zip(stateSeq, stateDurs[trial]):
tn = int(dur/dt)
stateVec[trial, t0:t0+tn] = state
t0 += tn
# Determine firing rates per neuron per state
# For each neuron generate a mean firing rate and then draw state
# firing rates from a normal distribution around that with 10Hz
# variance
mean_rates = np.random.rand(nCells, 1) * maxFR
stateRates = np.zeros((nCells, nStates))
for i, r in enumerate(mean_rates):
stateRates[i, :] = np.array([r, *np.abs(np.random.normal(r, .5*r, nStates-1))])
self.ground_truth = {'state_sequence': stateSeq,
'state_durations': stateDurs,
'firing_rates': stateRates,
'state_vectors': stateVec}
def _generate_spike_trains(self):
print('Generating new spike trains...')
params = self.params
nCells = params['n_cells']
trialTime = params['trial_time']
dt = params['dt']
nTrials = params['n_trials']
noise = params['noise']
nTimeSteps = int(trialTime/dt)
stateRates = self.ground_truth['firing_rates']
stateVec = self.ground_truth['state_vectors']
# Make spike arrays
# Trial x Neuron x Time
random_nums = np.abs(np.random.rand(nTrials, nCells, nTimeSteps))
rate_arr = np.zeros((nTrials, nCells, nTimeSteps))
for trial, cell, t in it.product(range(nTrials), range(nCells), range(nTimeSteps)):
state = int(stateVec[trial, t])
mean_rate = stateRates[cell, state]
# draw noisy rates from normal distrib with mean rate from ground
# truth and width as noise*mean_rate
r = np.random.normal(mean_rate, mean_rate*noise)
rate_arr[trial, cell, t] = r
spikes = (random_nums <= rate_arr *dt).astype('int')
self.spike_trains = spikes
def get_spike_trains(self):
if not hasattr(self, 'spike_trains'):
self._generate_spike_trains()
return self.spike_trains
def get_ground_truth(self):
if not hasattr(self, 'ground_truth'):
self._generate_ground_truth()
return self.ground_truth
def plot_state_rates(self, ax=None):
fig, ax = plot_state_rates(self.ground_truth['firing_rates'], ax=ax)
return fig, ax
def plot_state_raster(self, ax=None):
fig, ax = plot_state_raster(self.spike_trains,
self.ground_truth['state_vectors'],
self.params['dt'], ax=ax)
return fig, ax
class PoissonHMM(object):
'''Poisson implementation of Hidden Markov Model for fitting spike data
from a neuronal population
Author: <NAME>
    Adapted from code by <NAME>
'''
def __init__(self, n_predicted_states, spikes, dt,
max_history=500, cost_window=0.25, set_data=None):
if len(spikes.shape) == 2:
spikes = np.array([spikes])
self.data = spikes.astype('int32')
self.dt = dt
self._rate_data = None
self.n_states = n_predicted_states
self._cost_window = cost_window
self._max_history = max_history
self.cost = None
self.BIC = None
self.best_sequences = None
self.max_log_prob = None
self._rate_data = None
self.history = None
self._compute_data_rate_array()
if set_data is None:
self.randomize()
else:
self.fitted = set_data['fitted']
self.initial_distribution = set_data['initial_distribution']
self.transition = set_data['transition']
self.emission = set_data['emission']
self.iteration = 0
self._update_cost()
def randomize(self):
nStates = self.n_states
spikes = self.data
dt = self.dt
n_trials, n_cells, n_steps = spikes.shape
total_time = n_steps * dt
# Initialize transition matrix with high stay probability
print('Randomizing')
diag = np.abs(np.random.normal(.99, .01, nStates))
A = np.abs(np.random.normal(0.01/(nStates-1), 0.01, (nStates, nStates)))
for i in range(nStates):
A[i, i] = diag[i]
A[i,:] = A[i,:] / np.sum(A[i,:])
# Initialize rate matrix ("Emission" matrix)
spike_counts = np.sum(spikes, axis=2) / total_time
        mean_rates = np.mean(spike_counts, axis=0)
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import assert_equal, assert_, run_module_suite
import unittest
from qutip import *
import qutip.settings as qset
if qset.has_openmp:
from qutip.cy.openmp.benchmark import _spmvpy, _spmvpy_openmp
@unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.')
def test_openmp_spmv():
"OPENMP : spmvpy_openmp == spmvpy"
for k in range(100):
L = rand_herm(10,0.25).data
vec = rand_ket(L.shape[0],0.25).full().ravel()
out = np.zeros_like(vec)
out_openmp = np.zeros_like(vec)
_spmvpy(L.data, L.indices, L.indptr, vec, 1, out)
_spmvpy_openmp(L.data, L.indices, L.indptr, vec, 1, out_openmp, 2)
assert_(np.allclose(out, out_openmp, 1e-15))
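    # Editor's note: both kernels accumulate the scaled sparse matrix-vector
    # product into `out`, which is why the output buffers are zero-initialised
    # above; the assertion then checks that the serial and OpenMP results agree
    # to machine precision.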
@unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.')
def test_openmp_mesolve():
"OPENMP : mesolve"
N = 100
wc = 1.0 * 2 * np.pi # cavity frequency
wa = 1.0 * 2 * np.pi # atom frequency
g = 0.05 * 2 * np.pi # coupling strength
kappa = 0.005 # cavity dissipation rate
gamma = 0.05 # atom dissipation rate
n_th_a = 1 # temperature in frequency units
use_rwa = 0
# operators
a = tensor(destroy(N), qeye(2))
sm = tensor(qeye(N), destroy(2))
# Hamiltonian
if use_rwa:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() * sm + a * sm.dag())
else:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() + a) * (sm + sm.dag())
c_op_list = []
rate = kappa * (1 + n_th_a)
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
        c_op_list.append(np.sqrt(rate) * a.dag())  # thermal excitation of the cavity
# Copyright (c) 2016-2021 The Regents of the University of Michigan
# Part of fresnel, released under the BSD 3-Clause License.
"""Test the ConvexPolyhedron geometry."""
import fresnel
import numpy
from collections import namedtuple
import PIL
import conftest
import pytest
import math
import itertools
import os
import pathlib
dir_path = pathlib.Path(os.path.realpath(__file__)).parent
def scene_eight_polyhedra(device):
"""Create a test scene with eight polyhedra."""
scene = fresnel.Scene(device, lights=conftest.test_lights())
# place eight polyhedra
position = []
for k in range(2):
for i in range(2):
for j in range(2):
position.append([2.5 * i, 2.5 * j, 2.5 * k])
# create the polyhedron faces
origins = []
normals = []
colors = []
for v in [-1, 1]:
origins.append([v, 0, 0])
normals.append([v, 0, 0])
origins.append([0, v, 0])
normals.append([0, v, 0])
origins.append([0, 0, v])
normals.append([0, 0, v])
colors.append([178 / 255, 223 / 255, 138 / 255])
colors.append([178 / 255, 223 / 255, 138 / 255])
colors.append([178 / 255, 223 / 255, 138 / 255])
for x in [-1, 1]:
for y in [-1, 1]:
for z in [-1, 1]:
normals.append([x, y, z])
origins.append([x * 0.75, y * 0.75, z * 0.75])
colors.append([166 / 255, 206 / 255, 227 / 255])
poly_info = {
'face_normal': normals,
'face_origin': origins,
'radius': math.sqrt(3),
'face_color': fresnel.color.linear(colors)
}
geometry = fresnel.geometry.ConvexPolyhedron(scene,
poly_info,
position=position)
geometry.material = \
fresnel.material.Material(color=fresnel.color.linear([1.0, 0, 0]),
roughness=0.8,
specular=0.5,
primitive_color_mix=0.0)
geometry.orientation[:] = [1, 0, 0, 0]
scene.camera = fresnel.camera.Orthographic(position=(20, 20, 20),
look_at=(0, 0, 0),
up=(0, 1, 0),
height=7)
return scene
@pytest.fixture(scope='function')
def scene_eight_polyhedra_(device_):
"""Pytest fixture to create a test scene."""
return scene_eight_polyhedra(device_)
def test_render(scene_eight_polyhedra_, generate=False):
"""Test that convex polyhedra render properly."""
buf_proxy = fresnel.preview(scene_eight_polyhedra_,
w=150,
h=100,
anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_geometry_convex_polyhedron.test_render.png',
'wb'), 'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:], dir_path / 'reference'
/ 'test_geometry_convex_polyhedron.test_render.png')
def test_outline(scene_eight_polyhedra_, generate=False):
"""Test that face outlines render properly."""
geometry = scene_eight_polyhedra_.geometry[0]
geometry.outline_width = 0.1
buf_proxy = fresnel.preview(scene_eight_polyhedra_,
w=150,
h=100,
anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_geometry_convex_polyhedron.test_outline.png',
'wb'), 'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:], dir_path / 'reference'
/ 'test_geometry_convex_polyhedron.test_outline.png')
def test_face_color(scene_eight_polyhedra_, generate=False):
"""Test that faces can be colored individually."""
buf_proxy = fresnel.preview(scene_eight_polyhedra_,
w=150,
h=100,
anti_alias=False)
geometry = scene_eight_polyhedra_.geometry[0]
geometry.color_by_face = 1.0
geometry.material.primitive_color_mix = 1.0
buf_proxy = fresnel.preview(scene_eight_polyhedra_,
w=150,
h=100,
anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_geometry_convex_polyhedron.test_face_color.png',
'wb'), 'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:], dir_path / 'reference'
/ 'test_geometry_convex_polyhedron.test_face_color.png')
def test_convert_cube():
"""Sanity checks on converting vertices to origins and normals."""
pms = [+1, -1]
cube_verts = numpy.array([x for x in itertools.product(pms, repeat=3)])
poly_info = fresnel.util.convex_polyhedron_from_vertices(cube_verts)
assert poly_info['face_origin'].shape[0] == 6
assert poly_info['face_normal'].shape[0] == 6
for f in poly_info['face_sides']:
assert f == 4 # should all be squares
assert poly_info['radius'] == numpy.sqrt(3)
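# Worked check (editor's note): for the +/-1 cube the circumscribing sphere reaches
# the corners, so its radius is sqrt(1**2 + 1**2 + 1**2) = sqrt(3), which is exactly
# the value asserted above.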
def test_face_merge_cube():
"""Add a point into the middle and make sure no new faces are created."""
pms = [+1, -1]
cube_verts = numpy.array([x for x in itertools.product(pms, repeat=3)])
    cube_verts = numpy.concatenate((cube_verts, [[0.5, 0.5, 1.0]]))
# import modules
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# create the arrays
bd_1975 = np.array([
8.4 , 8.8 , 8.4 , 8. , 7.9 , 8.9 , 8.6 , 8.5 , 8.9 ,
9.1 , 8.6 , 9.8 , 8.2 , 9. , 9.7 , 8.6 , 8.2 , 9. ,
8.4 , 8.6 , 8.9 , 9.1 , 8.3 , 8.7 , 9.6 , 8.5 , 9.1 ,
9. , 9.2 , 9.9 , 8.6 , 9.2 , 8.4 , 8.9 , 8.5 , 10.4 ,
9.6 , 9.1 , 9.3 , 9.3 , 8.8 , 8.3 , 8.8 , 9.1 , 10.1 ,
8.9 , 9.2 , 8.5 , 10.2 , 10.1 , 9.2 , 9.7 , 9.1 , 8.5 ,
8.2 , 9. , 9.3 , 8. , 9.1 , 8.1 , 8.3 , 8.7 , 8.8 ,
8.6 , 8.7 , 8. , 8.8 , 9. , 9.1 , 9.74, 9.1 , 9.8 ,
10.4 , 8.3 , 9.44, 9.04, 9. , 9.05, 9.65, 9.45, 8.65,
9.45, 9.45, 9.05, 8.75, 9.45, 8.35
])
bd_2012 = np.array([
9.4 , 8.9 , 9.5 , 11. , 8.7 , 8.4 , 9.1 , 8.7 , 10.2 ,
9.6 , 8.85, 8.8 , 9.5 , 9.2 , 9. , 9.8 , 9.3 , 9. ,
10.2 , 7.7 , 9. , 9.5 , 9.4 , 8. , 8.9 , 9.4 , 9.5 ,
8. , 10. , 8.95, 8.2 , 8.8 , 9.2 , 9.4 , 9.5 , 8.1 ,
9.5 , 8.4 , 9.3 , 9.3 , 9.6 , 9.2 , 10. , 8.9 , 10.5 ,
8.9 , 8.6 , 8.8 , 9.15, 9.5 , 9.1 , 10.2 , 8.4 , 10. ,
10.2 , 9.3 , 10.8 , 8.3 , 7.8 , 9.8 , 7.9 , 8.9 , 7.7 ,
8.9 , 9.4 , 9.4 , 8.5 , 8.5 , 9.6 , 10.2 , 8.8 , 9.5 ,
9.3 , 9. , 9.2 , 8.7 , 9. , 9.1 , 8.7 , 9.4 , 9.8 ,
8.6 , 10.6 , 9. , 9.5 , 8.1 , 9.3 , 9.6 , 8.5 , 8.2 ,
8. , 9.5 , 9.7 , 9.9 , 9.1 , 9.5 , 9.8 , 8.4 , 8.3 ,
9.6 , 9.4 , 10. , 8.9 , 9.1 , 9.8 , 9.3 , 9.9 , 8.9 ,
8.5 , 10.6 , 9.3 , 8.9 , 8.9 , 9.7 , 9.8 , 10.5 , 8.4 ,
10. , 9. , 8.7 , 8.8 , 8.4 , 9.3 , 9.8 , 8.9 , 9.8 ,
9.1 ])
# define ecdf function
def ecdf(data):
"""Compute ECDF for a one-dimensional array of measurements."""
# number of points
n = len(data)
# x-data
x = np.sort(data)
# y-data
y = np.arange(1, n+1) / n
return x, y
# Compute ECDFs
x_1975, y_1975 = ecdf(bd_1975)
x_2012, y_2012 = ecdf(bd_2012)
# Plot the ECDFs
_ = plt.plot(x_1975, y_1975, marker='.', linestyle='none')
_ = plt.plot(x_2012, y_2012, marker='.', linestyle='none')
# Set margins
plt.margins(0.02)
# Add axis labels and legend
_ = plt.xlabel('beak depth (mm)')
_ = plt.ylabel('ECDF')
_ = plt.legend(('1975', '2012'), loc='lower right')
plt.show()
# define function to create bootstrap replicates for 1d data
def bootstrap_replicate_1d(data, func):
return func(np.random.choice(data, size=len(data)))
# define bootstrap function
def draw_bs_reps(data, func, size=1):
"""Draw bootstrap replicates."""
# Initialize array of replicates: bs_replicates
bs_replicates = np.empty(size)
# Generate replicates
for i in range(size):
bs_replicates[i] = bootstrap_replicate_1d(data, func)
return bs_replicates
# compute the difference of the sample means
mean_diff = (np.mean(bd_2012)) - (np.mean(bd_1975))
# Get bootstrap replicates of means
bs_replicates_1975 = draw_bs_reps(bd_1975, np.mean, 10000)
bs_replicates_2012 = draw_bs_reps(bd_2012, np.mean, 10000)
# Compute samples of difference of means: bs_diff_replicates
bs_diff_replicates = bs_replicates_2012 - bs_replicates_1975
# Compute 95% confidence interval: conf_int
conf_int = np.percentile(bs_diff_replicates, [2.5 , 97.5])
# Print the results
print('difference of means =', mean_diff, 'mm')
print('95% confidence interval =', conf_int, 'mm')
# Compute mean of combined data set: combined_mean
combined_mean = np.mean(np.concatenate((bd_1975, bd_2012)))
# Shift the samples
bd_1975_shifted = bd_1975 - np.mean(bd_1975) + combined_mean
bd_2012_shifted = bd_2012 - np.mean(bd_2012) + combined_mean
# Get bootstrap replicates of shifted data sets
bs_replicates_1975 = draw_bs_reps(bd_1975_shifted, np.mean, 10000)
bs_replicates_2012 = draw_bs_reps(bd_2012_shifted, np.mean, 10000)
# Compute replicates of difference of means: bs_diff_replicates
bs_diff_replicates = bs_replicates_2012 - bs_replicates_1975
# Compute the p-value
p = np.sum(bs_diff_replicates >= mean_diff) / len(bs_diff_replicates)
# Print p-value
print('p =', p)
print('the small p-value is strong evidence that beak depth changed between 1975 and 2012')
# create arrays for beak lengths
bl_1975 = np.array([
13.9 , 14. , 12.9 , 13.5 , 12.9 , 14.6 , 13. , 14.2 , 14. ,
14.2 , 13.1 , 15.1 , 13.5 , 14.4 , 14.9 , 12.9 , 13. , 14.9 ,
14. , 13.8 , 13. , 14.75, 13.7 , 13.8 , 14. , 14.6 , 15.2 ,
13.5 , 15.1 , 15. , 12.8 , 14.9 , 15.3 , 13.4 , 14.2 , 15.1 ,
15.1 , 14. , 13.6 , 14. , 14. , 13.9 , 14. , 14.9 , 15.6 ,
13.8 , 14.4 , 12.8 , 14.2 , 13.4 , 14. , 14.8 , 14.2 , 13.5 ,
13.4 , 14.6 , 13.5 , 13.7 , 13.9 , 13.1 , 13.4 , 13.8 , 13.6 ,
14. , 13.5 , 12.8 , 14. , 13.4 , 14.9 , 15.54, 14.63, 14.73,
15.73, 14.83, 15.94, 15.14, 14.23, 14.15, 14.35, 14.95, 13.95,
14.05, 14.55, 14.05, 14.45, 15.05, 13.25 ])
bl_2012 = np.array([
14.3 , 12.5 , 13.7 , 13.8 , 12. , 13. , 13. , 13.6 , 12.8 ,
13.6 , 12.95, 13.1 , 13.4 , 13.9 , 12.3 , 14. , 12.5 , 12.3 ,
13.9 , 13.1 , 12.5 , 13.9 , 13.7 , 12. , 14.4 , 13.5 , 13.8 ,
13. , 14.9 , 12.5 , 12.3 , 12.8 , 13.4 , 13.8 , 13.5 , 13.5 ,
13.4 , 12.3 , 14.35, 13.2 , 13.8 , 14.6 , 14.3 , 13.8 , 13.6 ,
12.9 , 13. , 13.5 , 13.2 , 13.7 , 13.1 , 13.2 , 12.6 , 13. ,
13.9 , 13.2 , 15. , 13.37, 11.4 , 13.8 , 13. , 13. , 13.1 ,
12.8 , 13.3 , 13.5 , 12.4 , 13.1 , 14. , 13.5 , 11.8 , 13.7 ,
13.2 , 12.2 , 13. , 13.1 , 14.7 , 13.7 , 13.5 , 13.3 , 14.1 ,
12.5 , 13.7 , 14.6 , 14.1 , 12.9 , 13.9 , 13.4 , 13. , 12.7 ,
12.1 , 14. , 14.9 , 13.9 , 12.9 , 14.6 , 14. , 13. , 12.7 ,
14. , 14.1 , 14.1 , 13. , 13.5 , 13.4 , 13.9 , 13.1 , 12.9 ,
14. , 14. , 14.1 , 14.7 , 13.4 , 13.8 , 13.4 , 13.8 , 12.4 ,
14.1 , 12.9 , 13.9 , 14.3 , 13.2 , 14.2 , 13. , 14.6 , 13.1 ,
15.2 ])
# let us try to figure out if the beaks got longer
# Make scatter plot of 1975 data
_ = plt.plot(bl_1975, bd_1975, marker='.',
linestyle='None', color='blue', alpha=0.5)
# Make scatter plot of 2012 data
_ = plt.plot(bl_2012, bd_2012, marker='.',
linestyle='None', color='red', alpha=0.5)
# Label axes and make legend
_ = plt.xlabel('beak length (mm)')
_ = plt.ylabel('beak depth (mm)')
_ = plt.legend(('1975', '2012'), loc='upper left')
# Show the plot
#plt.show()
# define a function that draws boostraps with linear regreesion
def draw_bs_pairs_linreg(x, y, size=1):
"""Perform pairs bootstrap for linear regression."""
# Set up array of indices to sample from: inds
inds = np.arange(0, len(x))
# Initialize replicates: bs_slope_reps, bs_intercept_reps
bs_slope_reps = np.empty(size)
bs_intercept_reps = np.empty(size)
# Generate replicates
for i in range(size):
bs_inds = np.random.choice(inds, size=len(inds))
bs_x, bs_y = x[bs_inds], y[bs_inds]
bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x , bs_y, 1)
return bs_slope_reps, bs_intercept_reps
# Compute the linear regressions
slope_1975, intercept_1975 = np.polyfit(bl_1975, bd_1975, 1)
slope_2012, intercept_2012 = np.polyfit(bl_2012, bd_2012, 1)
# Perform pairs bootstrap for the linear regressions
bs_slope_reps_1975, bs_intercept_reps_1975 = \
draw_bs_pairs_linreg(bl_1975, bd_1975, 1000)
bs_slope_reps_2012, bs_intercept_reps_2012 = \
draw_bs_pairs_linreg(bl_2012, bd_2012, 1000)
# Compute confidence intervals of slopes
slope_conf_int_1975 = np.percentile(bs_slope_reps_1975, [2.5, 97.5])
slope_conf_int_2012 = np.percentile(bs_slope_reps_2012, [2.5, 97.5])
# compute confidence intervals of intercepts
intercept_conf_int_1975 = np.percentile(bs_intercept_reps_1975, [2.5, 97.5])
intercept_conf_int_2012 = np.percentile(bs_intercept_reps_2012, [2.5, 97.5])
# Print the results
print('1975: slope =', slope_1975,
'conf int =', slope_conf_int_1975)
print('1975: intercept =', intercept_1975,
'conf int =', intercept_conf_int_1975)
print('2012: slope =', slope_2012,
'conf int =', slope_conf_int_2012)
print('2012: intercept =', intercept_2012,
'conf int =', intercept_conf_int_2012)
print('looks like they have the same slope but different intercepts')
# make a plot
# Make scatter plot of 1975 data
_ = plt.plot(bl_1975, bd_1975, marker='.',
linestyle='none', color='blue', alpha=0.5)
# Make scatter plot of 2012 data
_ = plt.plot(bl_2012, bd_2012, marker='.',
linestyle='none', color='red', alpha=0.5)
# Label axes and make legend
_ = plt.xlabel('beak length (mm)')
_ = plt.ylabel('beak depth (mm)')
_ = plt.legend(('1975', '2012'), loc='upper left')
# Generate x-values for bootstrap lines: x
x = np.array([10, 17])
# Plot the bootstrap lines
for i in range(100):
plt.plot(x, bs_slope_reps_1975[i] * x + bs_intercept_reps_1975[i] ,
linewidth=0.5, alpha=0.2, color='blue')
plt.plot(x, bs_slope_reps_2012[i] * x + bs_intercept_reps_2012[i],
linewidth=0.5, alpha=0.2, color='red')
# Draw the plot again
plt.show()
""" The linear regressions showed interesting information about the beak geometry.
The slope was the same in 1975 and 2012, suggesting that for every millimeter gained in beak length,
the birds gained about half a millimeter in depth in both years.
However, if you are interested in the shape of the beak,
you want to compare the ratio of beak length to beak depth.
Let's make that comparison. """
# Compute length-to-depth ratios
ratio_1975 = bl_1975 / bd_1975
ratio_2012 = bl_2012 / bd_2012
# Compute means
mean_ratio_1975 = np.mean(ratio_1975)
mean_ratio_2012 = np.mean(ratio_2012)
# Generate bootstrap replicates of the means
bs_replicates_1975 = draw_bs_reps(ratio_1975, np.mean, 10000)
bs_replicates_2012 = draw_bs_reps(ratio_2012, np.mean, 10000)
# Compute the 99% confidence intervals
conf_int_1975 = np.percentile(bs_replicates_1975, [0.5 , 99.5])
conf_int_2012 = np.percentile(bs_replicates_2012, [0.5, 99.5])
# Print the results
print('1975: mean ratio =', mean_ratio_1975,
'conf int =', conf_int_1975)
print('2012: mean ratio =', mean_ratio_2012,
'conf int =', conf_int_2012)
# heritability
# why do these birds' beak keep getting bigger?
# could it be something to do with cross mating with another speciecs?
bd_parent_scandens = np.array([
8.3318, 8.4035, 8.5317, 8.7202, 8.7089, 8.7541, 8.773 ,
8.8107, 8.7919, 8.8069, 8.6523, 8.6146, 8.6938, 8.7127,
8.7466, 8.7504, 8.7805, 8.7428, 8.7164, 8.8032, 8.8258,
8.856 , 8.9012, 8.9125, 8.8635, 8.8258, 8.8522, 8.8974,
8.9427, 8.9879, 8.9615, 8.9238, 8.9351, 9.0143, 9.0558,
9.0596, 8.9917, 8.905 , 8.9314, 8.9465, 8.9879, 8.9804,
9.0219, 9.052 , 9.0407, 9.0407, 8.9955, 8.9992, 8.9992,
9.0747, 9.0747, 9.5385, 9.4781, 9.4517, 9.3537, 9.2707,
9.1199, 9.1689, 9.1425, 9.135 , 9.1011, 9.1727, 9.2217,
9.2255, 9.2821, 9.3235, 9.3198, 9.3198, 9.3198, 9.3273,
9.3725, 9.3989, 9.4253, 9.4593, 9.4442, 9.4291, 9.2632,
9.2293, 9.1878, 9.1425, 9.1275, 9.1802, 9.1765, 9.2481,
9.2481, 9.1991, 9.1689, 9.1765, 9.2406, 9.3198, 9.3235,
9.1991, 9.2971, 9.2443, 9.316 , 9.2934, 9.3914, 9.3989,
9.5121, 9.6176, 9.5535, 9.4668, 9.3725, 9.3348, 9.3763,
9.3839, 9.4216, 9.4065, 9.3348, 9.4442, 9.4367, 9.5083,
9.448 , 9.4781, 9.595 , 9.6101, 9.5686, 9.6365, 9.7119,
9.8213, 9.825 , 9.7609, 9.6516, 9.5988, 9.546 , 9.6516,
    9.7572, 9.8854, 10.0023, 9.3914])  # average beak depth of the two parents, species G. scandens
bd_offspring_scandens = np.array([
8.419 , 9.2468, 8.1532, 8.0089, 8.2215, 8.3734, 8.5025,
8.6392, 8.7684, 8.8139, 8.7911, 8.9051, 8.9203, 8.8747,
8.943 , 9.0038, 8.981 , 9.0949, 9.2696, 9.1633, 9.1785,
9.1937, 9.2772, 9.0722, 8.9658, 8.9658, 8.5025, 8.4949,
8.4949, 8.5633, 8.6013, 8.6468, 8.1532, 8.3734, 8.662 ,
8.6924, 8.7456, 8.8367, 8.8595, 8.9658, 8.9582, 8.8671,
8.8671, 8.943 , 9.0646, 9.1405, 9.2089, 9.2848, 9.3759,
9.4899, 9.4519, 8.1228, 8.2595, 8.3127, 8.4949, 8.6013,
8.4646, 8.5329, 8.7532, 8.8823, 9.0342, 8.6392, 8.6772,
8.6316, 8.7532, 8.8291, 8.8975, 8.9734, 9.0494, 9.1253,
9.1253, 9.1253, 9.1785, 9.2848, 9.4595, 9.3608, 9.2089,
9.2544, 9.3684, 9.3684, 9.2316, 9.1709, 9.2316, 9.0342,
8.8899, 8.8291, 8.981 , 8.8975, 10.4089, 10.1886, 9.7633,
9.7329, 9.6114, 9.5051, 9.5127, 9.3684, 9.6266, 9.5354,
10.0215, 10.0215, 9.6266, 9.6038, 9.4063, 9.2316, 9.338 ,
9.262 , 9.262 , 9.4063, 9.4367, 9.0342, 8.943 , 8.9203,
8.7835, 8.7835, 9.057 , 8.9354, 8.8975, 8.8139, 8.8671,
9.0873, 9.2848, 9.2392, 9.2924, 9.4063, 9.3152, 9.4899,
9.5962, 9.6873, 9.5203, 9.6646]) # average beak depth of the offspring of respective parents
bd_parent_fortis = np.array([
10.1 , 9.55 , 9.4 , 10.25 , 10.125, 9.7 , 9.05 , 7.4 ,
9. , 8.65 , 9.625, 9.9 , 9.55 , 9.05 , 8.35 , 10.1 ,
10.1 , 9.9 , 10.225, 10. , 10.55 , 10.45 , 9.2 , 10.2 ,
8.95 , 10.05 , 10.2 , 9.5 , 9.925, 9.95 , 10.05 , 8.75 ,
9.2 , 10.15 , 9.8 , 10.7 , 10.5 , 9.55 , 10.55 , 10.475,
8.65 , 10.7 , 9.1 , 9.4 , 10.3 , 9.65 , 9.5 , 9.7 ,
10.525, 9.95 , 10.1 , 9.75 , 10.05 , 9.9 , 10. , 9.1 ,
9.45 , 9.25 , 9.5 , 10. , 10.525, 9.9 , 10.4 , 8.95 ,
9.4 , 10.95 , 10.75 , 10.1 , 8.05 , 9.1 , 9.55 , 9.05 ,
10.2 , 10. , 10.55 , 10.75 , 8.175, 9.7 , 8.8 , 10.75 ,
9.3 , 9.7 , 9.6 , 9.75 , 9.6 , 10.45 , 11. , 10.85 ,
10.15 , 10.35 , 10.4 , 9.95 , 9.1 , 10.1 , 9.85 , 9.625,
9.475, 9. , 9.25 , 9.1 , 9.25 , 9.2 , 9.95 , 8.65 ,
9.8 , 9.4 , 9. , 8.55 , 8.75 , 9.65 , 8.95 , 9.15 ,
9.85 , 10.225, 9.825, 10. , 9.425, 10.4 , 9.875, 8.95 ,
8.9 , 9.35 , 10.425, 10. , 10.175, 9.875, 9.875, 9.15 ,
9.45 , 9.025, 9.7 , 9.7 , 10.05 , 10.3 , 9.6 , 10. ,
9.8 , 10.05 , 8.75 , 10.55 , 9.7 , 10. , 9.85 , 9.8 ,
9.175, 9.65 , 9.55 , 9.9 , 11.55 , 11.3 , 10.4 , 10.8 ,
9.8 , 10.45 , 10. , 10.75 , 9.35 , 10.75 , 9.175, 9.65 ,
8.8 , 10.55 , 10.675, 9.95 , 9.55 , 8.825, 9.7 , 9.85 ,
9.8 , 9.55 , 9.275, 10.325, 9.15 , 9.35 , 9.15 , 9.65 ,
10.575, 9.975, 9.55 , 9.2 , 9.925, 9.2 , 9.3 , 8.775,
9.325, 9.175, 9.325, 8.975, 9.7 , 9.5 , 10.225, 10.025,
8.2 , 8.2 , 9.55 , 9.05 , 9.6 , 9.6 , 10.15 , 9.875,
10.485, 11.485, 10.985, 9.7 , 9.65 , 9.35 , 10.05 , 10.1 ,
9.9 , 8.95 , 9.3 , 9.95 , 9.45 , 9.5 , 8.45 , 8.8 ,
8.525, 9.375, 10.2 , 7.625, 8.375, 9.25 , 9.4 , 10.55 ,
8.9 , 8.8 , 9. , 8.575, 8.575, 9.6 , 9.375, 9.6 ,
9.95 , 9.6 , 10.2 , 9.85 , 9.625, 9.025, 10.375, 10.25 ,
9.3 , 9.5 , 9.55 , 8.55 , 9.05 , 9.9 , 9.8 , 9.75 ,
10.25 , 9.1 , 9.65 , 10.3 , 8.9 , 9.95 , 9.5 , 9.775,
9.425, 7.75 , 7.55 , 9.1 , 9.6 , 9.575, 8.95 , 9.65 ,
9.65 , 9.65 , 9.525, 9.85 , 9.05 , 9.3 , 8.9 , 9.45 ,
10. , 9.85 , 9.25 , 10.1 , 9.125, 9.65 , 9.1 , 8.05 ,
7.4 , 8.85 , 9.075, 9. , 9.7 , 8.7 , 9.45 , 9.7 ,
8.35 , 8.85 , 9.7 , 9.45 , 10.3 , 10. , 10.45 , 9.45 ,
8.5 , 8.3 , 10. , 9.225, 9.75 , 9.15 , 9.55 , 9. ,
9.275, 9.35 , 8.95 , 9.875, 8.45 , 8.6 , 9.7 , 8.55 ,
9.05 , 9.6 , 8.65 , 9.2 , 8.95 , 9.6 , 9.15 , 9.4 ,
8.95 , 9.95 , 10.55 , 9.7 , 8.85 , 8.8 , 10. , 9.05 ,
8.2 , 8.1 , 7.25 , 8.3 , 9.15 , 8.6 , 9.5 , 8.05 ,
9.425, 9.3 , 9.8 , 9.3 , 9.85 , 9.5 , 8.65 , 9.825,
9. , 10.45 , 9.1 , 9.55 , 9.05 , 10. , 9.35 , 8.375,
8.3 , 8.8 , 10.1 , 9.5 , 9.75 , 10.1 , 9.575, 9.425,
9.65 , 8.725, 9.025, 8.5 , 8.95 , 9.3 , 8.85 , 8.95 ,
9.8 , 9.5 , 8.65 , 9.1 , 9.4 , 8.475, 9.35 , 7.95 ,
9.35 , 8.575, 9.05 , 8.175, 9.85 , 7.85 , 9.85 , 10.1 ,
9.35 , 8.85 , 8.75 , 9.625, 9.25 , 9.55 , 10.325, 8.55 ,
9.675, 9.15 , 9. , 9.65 , 8.6 , 8.8 , 9. , 9.95 ,
8.4 , 9.35 , 10.3 , 9.05 , 9.975, 9.975, 8.65 , 8.725,
8.2 , 7.85 , 8.775, 8.5 , 9.4 ]) #
bd_offspring_fortis = np.array([
10.7 , 9.78, 9.48, 9.6 , 10.27, 9.5 , 9. , 7.46, 7.65,
8.63, 9.81, 9.4 , 9.48, 8.75, 7.6 , 10. , 10.09, 9.74,
9.64, 8.49, 10.15, 10.28, 9.2 , 10.01, 9.03, 9.94, 10.5 ,
9.7 , 10.02, 10.04, 9.43, 8.1 , 9.5 , 9.9 , 9.48, 10.18,
10.16, 9.08, 10.39, 9.9 , 8.4 , 10.6 , 8.75, 9.46, 9.6 ,
9.6 , 9.95, 10.05, 10.16, 10.1 , 9.83, 9.46, 9.7 , 9.82,
10.34, 8.02, 9.65, 9.87, 9. , 11.14, 9.25, 8.14, 10.23,
8.7 , 9.8 , 10.54, 11.19, 9.85, 8.1 , 9.3 , 9.34, 9.19,
9.52, 9.36, 8.8 , 8.6 , 8. , 8.5 , 8.3 , 10.38, 8.54,
8.94, 10. , 9.76, 9.45, 9.89, 10.9 , 9.91, 9.39, 9.86,
9.74, 9.9 , 9.09, 9.69, 10.24, 8.9 , 9.67, 8.93, 9.3 ,
8.67, 9.15, 9.23, 9.59, 9.03, 9.58, 8.97, 8.57, 8.47,
8.71, 9.21, 9.13, 8.5 , 9.58, 9.21, 9.6 , 9.32, 8.7 ,
10.46, 9.29, 9.24, 9.45, 9.35, 10.19, 9.91, 9.18, 9.89,
9.6 , 10.3 , 9.45, 8.79, 9.2 , 8.8 , 9.69, 10.61, 9.6 ,
9.9 , 9.26, 10.2 , 8.79, 9.28, 8.83, 9.76, 10.2 , 9.43,
9.4 , 9.9 , 9.5 , 8.95, 9.98, 9.72, 9.86, 11.1 , 9.14,
10.49, 9.75, 10.35, 9.73, 9.83, 8.69, 9.58, 8.42, 9.25,
10.12, 9.31, 9.99, 8.59, 8.74, 8.79, 9.6 , 9.52, 8.93,
10.23, 9.35, 9.35, 9.09, 9.04, 9.75, 10.5 , 9.09, 9.05,
9.54, 9.3 , 9.06, 8.7 , 9.32, 8.4 , 8.67, 8.6 , 9.53,
9.77, 9.65, 9.43, 8.35, 8.26, 9.5 , 8.6 , 9.57, 9.14,
10.79, 8.91, 9.93, 10.7 , 9.3 , 9.93, 9.51, 9.44, 10.05,
10.13, 9.24, 8.21, 8.9 , 9.34, 8.77, 9.4 , 8.82, 8.83,
8.6 , 9.5 , 10.2 , 8.09, 9.07, 9.29, 9.1 , 10.19, 9.25,
8.98, 9.02, 8.6 , 8.25, 8.7 , 9.9 , 9.65, 9.45, 9.38,
10.4 , 9.96, 9.46, 8.26, 10.05, 8.92, 9.5 , 9.43, 8.97,
8.44, 8.92, 10.3 , 8.4 , 9.37, 9.91, 10. , 9.21, 9.95,
8.84, 9.82, 9.5 , 10.29, 8.4 , 8.31, 9.29, 8.86, 9.4 ,
9.62, 8.62, 8.3 , 9.8 , 8.48, 9.61, 9.5 , 9.37, 8.74,
9.31, 9.5 , 9.49, 9.74, 9.2 , 9.24, 9.7 , 9.64, 9.2 ,
7.5 , 7.5 , 8.7 , 8.31, 9. , 9.74, 9.31, 10.5 , 9.3 ,
8.12, 9.34, 9.72, 9. , 9.65, 9.9 , 10. , 10.1 , 8. ,
9.07, 9.75, 9.33, 8.11, 9.36, 9.74, 9.9 , 9.23, 9.7 ,
8.2 , 9.35, 9.49, 9.34, 8.87, 9.03, 9.07, 9.43, 8.2 ,
9.19, 9. , 9.2 , 9.06, 9.81, 8.89, 9.4 , 10.45, 9.64,
9.03, 8.71, 9.91, 8.33, 8.2 , 7.83, 7.14, 8.91, 9.18,
8.8 , 9.9 , 7.73, 9.25, 8.7 , 9.5 , 9.3 , 9.05, 10.18,
8.85, 9.24, 9.15, 9.98, 8.77, 9.8 , 8.65, 10. , 8.81,
8.01, 7.9 , 9.41, 10.18, 9.55, 9.08, 8.4 , 9.75, 8.9 ,
9.07, 9.35, 8.9 , 8.19, 8.65, 9.19, 8.9 , 9.28, 10.58,
9. , 9.4 , 8.91, 9.93, 10. , 9.37, 7.4 , 9. , 8.8 ,
9.18, 8.3 , 10.08, 7.9 , 9.96, 10.4 , 9.65, 8.8 , 8.65,
9.7 , 9.23, 9.43, 9.93, 8.47, 9.55, 9.28, 8.85, 8.9 ,
8.75, 8.63, 9. , 9.43, 8.28, 9.23, 10.4 , 9. , 9.8 ,
9.77, 8.97, 8.37, 7.7 , 7.9 , 9.5 , 8.2 , 8.8 ])
# Make scatter plots
_ = plt.plot(bd_parent_fortis, bd_offspring_fortis,
marker='.', linestyle='none', color='blue', alpha=0.5)
_ = plt.plot(bd_parent_scandens, bd_offspring_scandens,
marker='.', linestyle='none', color='red', alpha=0.5)
# Label axes
_ = plt.xlabel('parental beak depth (mm)')
_ = plt.ylabel('offspring beak depth (mm)')
# Add legend
_ = plt.legend(('G. fortis', 'G. scandens'), loc='lower right')
# Show plot
plt.show()
"""In an effort to quantify the correlation between offspring and parent beak depths,
we would like to compute statistics, such as the Pearson correlation coefficient,
between parents and offspring.
To get confidence intervals on this, we need to do a pairs bootstrap."""
# define a function to perfom pairs boostrap for a statistic
def draw_bs_pairs(x, y, func, size=1):
"""Perform pairs bootstrap for a single statistic."""
# Set up array of indices to sample from: inds
inds = np.arange(len(x))
# Initialize replicates: bs_replicates
bs_replicates = np.empty(size)
# Generate replicates
for i in range(size):
bs_inds = np.random.choice(inds, len(inds))
bs_x, bs_y = x[bs_inds], y[bs_inds]
bs_replicates[i] = func(bs_x, bs_y)
return bs_replicates
# define a function that computes pearson correlation
def pearson_r(x, y):
"""Compute Pearson correlation coefficient between two arrays."""
# Compute correlation matrix: corr_mat
corr_mat = np.corrcoef(x,y)
# Return entry [0,1]
return corr_mat[0,1]
# Compute the Pearson correlation coefficients
r_scandens = pearson_r(bd_parent_scandens, bd_offspring_scandens)
r_fortis = pearson_r(bd_parent_fortis , bd_offspring_fortis)
# Acquire 1000 bootstrap replicates of Pearson r
bs_replicates_scandens = draw_bs_pairs(bd_parent_scandens, bd_offspring_scandens, pearson_r, 1000)
bs_replicates_fortis = draw_bs_pairs(bd_parent_fortis, bd_offspring_fortis, pearson_r, 1000)
# Compute 95% confidence intervals
conf_int_scandens = np.percentile(bs_replicates_scandens,[2.5, 97.5])
conf_int_fortis = np.percentile(bs_replicates_fortis, [2.5, 97.5])
# Print results
print('G. scandens:', r_scandens, conf_int_scandens)
print('G. fortis:', r_fortis, conf_int_fortis)
""" Remember that the Pearson correlation coefficient is the ratio of the covariance to the
geometric mean of the variances of the two data sets.
This is a measure of the correlation between parents and offspring,
but might not be the best estimate of heritability.
If you stop and think, it makes more sense to define heritability as
the ratio of the covariance between parent and offspring to the variance of the parents alone.
"""
# define a heritability function
def heritability(parents, offspring):
"""Compute the heritability from parent and offspring samples."""
covariance_matrix = np.cov(parents, offspring)
return covariance_matrix[0,1] / covariance_matrix[0,0]
# Compute the heritability
heritability_scandens = heritability(bd_parent_scandens, bd_offspring_scandens)
heritability_fortis = heritability(bd_parent_fortis, bd_offspring_fortis)
# Acquire 1000 bootstrap replicates of heritability
replicates_scandens = draw_bs_pairs(
bd_parent_scandens, bd_offspring_scandens, heritability, size=1000)
replicates_fortis = draw_bs_pairs(
bd_parent_fortis, bd_offspring_fortis, heritability, size=1000)
# Compute 95% confidence intervals
conf_int_scandens = np.percentile(replicates_scandens, [2.5, 97.5])
conf_int_fortis = np.percentile(replicates_fortis, [2.5, 97.5])
# Print results
print('G. scandens:', heritability_scandens, conf_int_scandens)
print('G. fortis:', heritability_fortis, conf_int_fortis)
""" Is beak depth heritable at all in G. scandens?
The heritability of beak depth in G. scandens seems low.
It could be that this observed heritability was just achieved by chance and beak depth is
actually not heritable in the species. Let us test this hypothesis with a pairs permutation test. """
# Initialize array of replicates: perm_replicates
perm_replicates = np.empty(10000)
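# Editor's sketch: the original script is truncated at this point.  One way to
# finish the pairs permutation test described above is to scramble the parent
# values (destroying any parent/offspring pairing), recompute the heritability
# for each scrambled sample, and compare with the observed value.  The helper
# name and details below are illustrative, not the author's original code.
def _heritability_permutation_pvalue(parents, offspring, observed, size=10000):
    reps = np.empty(size)
    for i in range(size):
        shuffled = np.random.permutation(parents)
        reps[i] = heritability(shuffled, offspring)
    return np.sum(reps >= observed) / len(reps)
# e.g. p_scandens = _heritability_permutation_pvalue(
#          bd_parent_scandens, bd_offspring_scandens, heritability_scandens)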
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" LSCE, LSCF, LSFD are modified from OpenModal
https://github.com/openmodal/
Copyright (C) 2014-2017 <NAME>, <NAME>, <NAME>, <NAME>
(in alphabetic order)
The rest is by
<NAME> <<EMAIL>>
"""
import numpy as np
from numpy.fft import irfft
from scipy.linalg import lstsq, toeplitz, eig, inv, norm, solve
from collections import defaultdict
from .common import window
def lsce(frf, f, low_lim, nmax, fs, additional_timepoints=0):
"""Compute poles(natural frequencies and damping) from FRFs.
The Least-Squares Complex Exponential method (LSCE), introduced in [1]_, is
the extension of the Complex Exponential method (CE) to a global procedure.
It is therefore a SIMO method, processing simultaneously several IRFs
obtained by exciting a structure at one single point and measuring the
responses at several locations. With such a procedure, a consistent set of
global parameters (natural frequencies and damping factors) is obtained,
thus overcoming the variations obtained in the results for those parameters
when applying the CE method on different IRFs.
The output from LSCE is used by LSFD to compute mode shapes.
Parameters
----------
frf: ndarray
frequency response function array - receptance
f: float
starting frequency
low_lim: float
lower limit of the frf/f
nmax: int
the maximal order of the polynomial
fs: float
time sampling interval
additional_timepoints: float, default 0
normed additional time points (default is 0% added time points, max. is
1, all time points (100%) taken into computation)
Returns
-------
srlist: list
list of complex eigenfrequencies
References
-----------
[1] <NAME>., <NAME>. <NAME>.,
"Parameter Estimation Techniques For Modal Analysis"
SAE Technical Paper Series, No. 790221, 1979
[2] <NAME> .; Modal Testing: Theory, practice and application,
second edition. Reasearch Studies Press, John Wiley & Sons, 2000.
[3] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Theoretical
and Experimental Modal Analysis. Reasearch Studio Press
Ltd., 1997.
[4] <NAME>., <NAME>., Experimental Modal Analysis,
http://www.ltas-vis.ulg.ac.be/cmsms/uploads/File/Mvibr_notes.pdf
"""
# number of outputs, length of receptance
no, l = frf.shape
# number of DFT frequencies (nf >> n)
nf = 2*(l-low_lim-1)
# Impulse response function, ie. h = IFFT(H)
irf = np.fft.irfft(frf[:, low_lim:], n=nf, axis=-1)
sr_list = []
nf2 = irf.shape[1]
for n in range(1, nmax+1):
# number of time points for computation
nt = int(2*n + additional_timepoints*(nf2 - 4*n))
# setup equation system.
# [h]: time-response matrix, hh: {h'} vector, size (2N)x1
h = np.zeros((nt*no, 2*n))
hh = np.zeros(nt*no)
for j in range(nt):
for k in range(no):
h[j+k*2*n, :] = irf[k, j:j+2*n]
hh[j+k*2*n] = irf[k, (2*n)+j]
# the computation of the autoregressive coefficients matrix
beta = lstsq(h, -hh)[0]
sr = np.roots(np.append(beta, 1)[::-1]) # the roots of the polynomial
sr = (np.log(sr)*fs).astype(complex) # the complex natural frequency
sr += 2*np.pi*f*1j # for f_min different than 0 Hz
        # sort the eigenvalues (ndarray.sort() sorts in place and returns None)
        sr_list.append(np.sort(sr))
return sr_list
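# Editor's sketch (not part of the original module): minimal call pattern for
# lsce.  The FRF shape, frequency band and maximum model order used here are
# illustrative assumptions.
def _lsce_usage_sketch(frf, freq, fs, nmax=20):
    # frf: (n_outputs, n_freq) receptance measured at the frequencies in `freq`
    sr_list = lsce(frf, f=freq[0], low_lim=0, nmax=nmax, fs=fs)
    # sr_list[k] holds the complex poles of the model of order k+1
    return sr_list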
def lsce_reconstruction(n, f, sr, vr, irf, two_sided_frf=False):
"""Reconstruction of the least-squares complex exponential (CE) method.
:param n: number of degrees of freedom
:param f: frequency vector [Hz]
:param sr: the complex natural frequency
:param vr: the roots of the polynomial
:param irf: impulse response function vector
:return: residues and reconstructed FRFs
"""
dt = 1/(len(f)*(f[1]-f[0]))
if two_sided_frf is False:
dt /= 2
# no: number of outputs
no, l = irf.shape
v = np.zeros((2*n, 2*n), dtype=complex)
    for m in range(0, 2*n):
        for k in range(0, 2*n):
            v[k, m] = vr[m]**k
# {h''} vector
hhh = np.zeros((2*n*no))
for j in range(0, 2*n):
for k in range(no):
hhh[j+k*2*n] = irf[k, j]
a = np.zeros((no, 2*n), dtype=complex)
for i in range(no):
# the computation of residues
a[i, :] = np.linalg.solve(v, -hhh[i*2*n:(i+1)*2*n])
# reconstructed irf
h = np.zeros(np.shape(irf))
for i in range(no):
for jk in range(l):
h[i, jk] = np.real(np.sum(a[i,:]*np.exp(sr*jk*dt)))
return a, h
def lsfd(lambdak, f, frf):
"""LSFD (Least-Squares Frequency domain) method
Determine the residues and mode shapes from complex natural frquencies and
the measured frequency response functions.
Parameters
----------
lambdak: ndarray
a vector of selected complex natural frequencies
f: ndarray
frequency vector
frf: ndarray
frequency response functions
Returns
-------
h, a, lr, ur
reconstructed FRF, modal constant(residue), lower residual,
upper residual
"""
ni = frf.shape[0] # number of references
no = frf.shape[1] # number of responses
n = frf.shape[2] # length of frequency vector
nmodes = lambdak.shape[0] # number of modes
omega = 2 * np.pi * f # angular frequency
# Factors in the freqeuncy response function
b = 1 / np.subtract.outer(1j * omega, lambdak).T
c = 1 / np.subtract.outer(1j * omega, np.conj(lambdak)).T
# Separate complex data to real and imaginary part
hr = frf.real
hi = frf.imag
br = b.real
bi = b.imag
cr = c.real
ci = c.imag
# Stack the data together in order to obtain 2D matrix
hri = np.dstack((hr, hi))
bri = np.hstack((br+cr, bi+ci))
cri = np.hstack((-bi+ci, br-cr))
ur_multiplyer = np.ones(n)
ur_zeros = np.zeros(n)
lr_multiplyer = -1/(omega**2)
urr = np.hstack((ur_multiplyer, ur_zeros))
uri = np.hstack((ur_zeros, ur_multiplyer))
lrr = np.hstack((lr_multiplyer, ur_zeros))
lri = np.hstack((ur_zeros, lr_multiplyer))
bcri = np.vstack((bri, cri, urr, uri, lrr, lri))
# Reshape 3D array to 2D for least squares coputation
hri = hri.reshape(ni*no, 2*n)
# Compute the modal constants (residuals) and upper and lower residuals
uv = lstsq(bcri.T,hri.T)[0]
# Reshape 2D results to 3D
uv = uv.T.reshape(ni, no, 2*nmodes+4)
u = uv[:, :, :nmodes]
v = uv[:, :, nmodes:-4]
urr = uv[:, :, -4]
uri = uv[:, :, -3]
lrr = uv[:, :, -2]
lri = uv[:, :, -1]
a = u + 1j*v # Modal constant (residue)
ur = urr + 1j*uri # Upper residual
lr = lrr + 1j*lri # Lower residual
# Reconstructed FRF matrix
h = uv @ bcri
h = h[:,:,:n] + 1j*h[:,:,n:]
return h, a, lr, ur
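# Editor's sketch (not part of the original module): a typical identification
# pipeline couples a pole estimator (lsce above or lscf below) with lsfd.  In
# practice `lambdak` would be the poles picked from a stabilization diagram;
# here it is simply passed in.
def _pole_residue_pipeline_sketch(frf_3d, f, lambdak):
    # frf_3d: (n_inputs, n_outputs, n_freq) receptance, lambdak: selected poles
    h_rec, residues, lower_res, upper_res = lsfd(lambdak, f, frf_3d)
    return h_rec, residues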
def lscf(frf, low_lim, n, fs):
"""LSCF - Least-Squares Complex frequency domain method
The LSCF method is an frequency-domain Linear Least Squares estimator
optimized for modal parameter estimation. The choice of the most important
algorithm characteristics is based on the results in [1] (Section 5.3.3.)
and can be summarized as:
- Formulation: the normal equations [1]_
(Eq. 5.26: [sum(Tk - Sk.H * Rk^-1 * Sk)]*ThetaA=D*ThetaA = 0) are
constructed for the common denominator discrete-time model in the Z-domain.
Consequently, by looping over the outputs and inputs, the submatrices Rk,
Sk, and Tk are formulated through the use of the FFT algorithm as Toeplitz
structured (n+1) square matrices. Using complex coefficients, the FRF data
within the frequency band of interest (FRF-zoom) is projected in the
Z-domain in the interval of [0, 2*pi] in order to improve numerical
conditioning. (In the case that real coefficients are used, the data is
projected in the interval of [0, pi].) The projecting on an interval that
does not completely describe the unity circle, say [0, alpha*2*pi] where
alpha is typically 0.9-0.95. Deliberately over-modeling is best applied to
cope with discontinuities. This is justified by the use of a discrete time
model in the Z-domain, which is much more robust for a high order of the
transfer function polynomials.
- Solver: the normal equations can be solved for the denominator
coefficients ThetaA by computing the Least-Squares (LS) or mixed
Total-Least-Squares (TLS) solution. The inverse of the square matrix D for
the LS solution is computed by means of a pseudo inverse operation for
reasons of numerical stability, while the mixed LS-TLS solution is computed
using an SVD (Singular Value Decomposition).
Parameters
----------
frf: ndarray
frequency response function - receptance
low_lim:
lower limit of the frf
n: int
the order of the polynomial
fs: float
time sampling interval
Returns
-------
srlist: list
list of complex eigenfrequencies
References
----------
[1] <NAME>., Frequency-domain System Identification for Modal
Analysis, Ph. D. thesis, Mechanical Engineering Dept. (WERK), Vrije
Universiteit Brussel, Brussel, (Belgium), May 2002,
(http://mech.vub.ac.be/avrg/PhD/thesis_PV_web.pdf)
[2] <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., Stabilization Charts and Uncertainty Bounds For Frequency-Domain
Linear Least Squares Estimators, Vrije Universiteit Brussel(VUB),
Mechanical Engineering Dept. (WERK), Acoustic and Vibration Research
Group (AVRG), Pleinlaan 2, B-1050 Brussels, Belgium, e-mail:
<EMAIL>, url:
(http://sem-proceedings.com/21i/sem.org-IMAC-XXI-Conf-s02p01
-Stabilization-Charts-Uncertainty-Bounds-Frequency-Domain-
Linear-Least.pdf)
[3] <NAME>, <NAME>, <NAME>, <NAME>, B.
Peeters, A Poly-Reference Implementation of the Least-Squares Complex
Frequency-Domain Estimator, Vrije Universiteit Brussel, LMS
International
"""
# the poles should be complex conjugate, thus expect even polynomial order
n *= 2
# nr: (number of inputs) * (number of outputs), l: length of receptance
nr, l = frf.shape
# number of DFT frequencies (nf >> n)
nf = 2*(l-1)
indices_s = np.arange(-n, n+1)
indices_t = np.arange(n+1)
# Selection of the weighting function
# Least-Squares (LS) Formulation based on Normal Matrix
sk = -irfft_adjusted_lower_limit(frf, low_lim, indices_s)
t = irfft_adjusted_lower_limit(frf.real**2 + frf.imag**2,
low_lim, indices_t)
r = -(irfft(np.ones(low_lim), n=nf))[indices_t]*nf
r[0] += nf
s = []
for i in range(nr):
s.append(toeplitz(sk[i, n:], sk[i, :n+1][::-1]))
t = toeplitz(np.sum(t[:, :n+1], axis=0))
r = toeplitz(r)
sr_list = []
for j in range(2, n+1, 2):
d = 0
for i in range(nr):
rinv = inv(r[:j+1, :j+1])
snew = s[i][:j+1, :j+1]
# sum
d -= (snew[:j+1, :j+1].T @ rinv) @ snew[:j+1, :j+1]
d += t[:j+1, :j+1]
a0an1 = solve(-d[0:j, 0:j], d[0:j, j])
# the numerator coefficients
sr = np.roots(np.append(a0an1, 1)[::-1])
# Z-domain (for discrete-time domain model)
sr = -np.log(sr) * fs
        sr_list.append(np.sort(sr))  # ndarray.sort() would return None, so sort via np.sort
return sr_list
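# Editor's sketch (not part of the original module): calling lscf and turning
# the poles of the highest-order model into natural frequencies and damping
# ratios; remove_redundant below can then drop the conjugate duplicates.
def _lscf_usage_sketch(frf, low_lim, fs, max_order=20):
    sr_list = lscf(frf, low_lim, max_order, fs)
    sr = sr_list[-1]
    wn = np.abs(sr)                    # natural frequencies (rad/s)
    zeta = -np.real(sr) / np.abs(sr)   # damping ratios
    return wn, zeta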
def remove_redundant(omega, xi, prec=1e-3):
"""Remove the redundant values of frequency and damping vectors
(due to the complex conjugate eigenvalues)
Input:
    omega - eigenfrequencies vector
    xi - damping ratios vector
    prec - absolute precision in order to distinguish between two values
"""
N = len(omega)
test_omega = np.zeros((N,N), dtype=int)
for i in range(1,N):
for j in range(0,i):
if np.abs((omega[i] - omega[j])) < prec:
test_omega[i,j] = 1
else:
test_omega[i,j] = 0
test = np.zeros(N, dtype=int)
for i in range(0,N):
test[i] = np.sum(test_omega[i,:])
omega_mod = omega[np.where(test < 1)]
xi_mod = xi[np.where(test < 1)]
return omega_mod, xi_mod
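# Example (editor's note): complex-conjugate poles produce duplicated values,
# e.g. omega = np.array([10.0, 10.0, 25.1, 25.1]) with xi = np.array([0.01,
# 0.01, 0.02, 0.02]); remove_redundant(omega, xi) keeps one entry of each pair,
# returning (array([10. , 25.1]), array([0.01, 0.02])).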
def irfft_adjusted_lower_limit(x, low_lim, indices):
"""
Compute the ifft of real matrix x with adjusted summation limits:
y(j) = sum[k=-n-2, ... , -low_lim-1, low_lim, low_lim+1, ... n-2,
n-1] x[k] * exp(sqrt(-1)*j*k* 2*pi/n),
j =-n-2, ..., -low_limit-1, low_limit, low_limit+1, ... n-2, n-1
:param x: Single-sided real array to Fourier transform.
:param low_lim: lower limit index of the array x.
:param indices: list of indices of interest
    :return: Fourier transformed two-sided array x with adjusted lower limit.
"""
nf = 2 * (x.shape[1] - 1)
a = (irfft(x, n=nf)[:, indices]) * nf
b = (irfft(x[:, :low_lim], n=nf)[:, indices]) * nf
return a - b
def stabilization(sd, fmin=0, fmax=np.inf, tol_freq=1, tol_damping=5,
tol_mode=0.98, macchoice='complex'):
"""Calculate stabilization of modal parameters for increasing model order.
Used for plotting stabilization diagram
Parameters
----------
sd: dict with keys {'wn', 'zeta', 'realmode'/'cpxmode', 'stable'}
dict of dicts having modal parameters for each model order.
fmin: float, default 0
Minimum frequency to consider
fmax: float, default np.inf
Maximum frequency to consider
tol_freq: float, default 1
Tolerance for frequency in %, lower is better. Between [0, 100]
tol_damping: float, default 5
Tolerance for damping in %, lower is better. Between [0, 100]
    tol_mode: float, default 0.98
Tolerance for mode shape, higher is better. Between [0, 1]
macchoice: str, {'complex', 'real', 'None'}
Method for comparing mode shapes. 'None' for no comparison.
Returns
-------
SDout: two nested defaultdicts.
        First key is model order, second key is
modal property: {stab, freq, zeta, mode} = {True, False}
"""
# Initialize SDout as 2 nested defaultdict
SDout = defaultdict(lambda: defaultdict(list))
# loop over model orders except the last.
for n, nnext in window(sd, 2):
val = sd[n]
# is A stable?
SDout[n]['a_stable'].append(val['stable'])
# loop over frequencies for current model order
for ifr, natfreq in enumerate(val['wn']):
if natfreq < fmin or natfreq > fmax:
continue
SDout[n]['freq'].append(natfreq)
# compare with frequencies from one model order higher.
nfreq = sd[nnext]['wn']
tol_low = (1 - tol_freq / 100) * natfreq
tol_high = (1 + tol_freq / 100) * natfreq
ifreqS, = np.where((nfreq >= tol_low) & (nfreq <= tol_high))
if ifreqS.size == 0: # ifreqS is empty
# the current natfreq is not stabilized
SDout[n]['stab'].append(False)
SDout[n]['zeta'].append(False)
SDout[n]['mode'].append(False)
else:
# Stabilized in natfreq
SDout[n]['stab'].append(True)
# Only in very rare cases, ie multiple natfreqs are very
# close, is len(ifreqS) != 1
for ii in ifreqS:
nep = sd[nnext]['zeta'][ii]
ep = val['zeta'][ifr]
tol_low = (1 - tol_damping / 100) * ep
tol_high = (1 + tol_damping / 100) * ep
iepS, = np.where((nep >= tol_low) & (nep <= tol_high))
if iepS.size == 0:
SDout[n]['zeta'].append(False)
else:
SDout[n]['zeta'].append(True)
if macchoice == 'complex':
m1 = val['cpxmode'][ifr]
m2 = sd[nnext]['cpxmode'][ifreqS]
MAC = ModalACX(m1, m2)
elif macchoice == 'real':
m1 = sd[n]['realmode'][ifr]
m2 = sd[nnext]['realmode'][ifreqS]
MAC = ModalAC(m1, m2)
else:
MAC = 0
if np.max(MAC) >= tol_mode:
SDout[n]['mode'].append(True)
else:
SDout[n]['mode'].append(False)
return SDout
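# Editor's sketch (not part of the original module): typical use of
# `stabilization`.  The structure of `sd` follows the docstring above; the
# tolerances shown are simply the defaults.
def _stabilization_usage_sketch(sd, order):
    SDout = stabilization(sd, fmin=0, fmax=500, tol_freq=1, tol_damping=5,
                          tol_mode=0.98, macchoice='complex')
    # frequencies at `order` that are stable in frequency, damping and mode shape
    stable = [f for f, s, z, m in zip(SDout[order]['freq'], SDout[order]['stab'],
                                      SDout[order]['zeta'], SDout[order]['mode'])
              if s and z and m]
    return stable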
def frf_mkc(M, K, fmin, fmax, fres, C=None, idof=None, odof=None):
"""Compute the frequency response for a FEM model, given a range of
frequencies.
Parameters
----------
M: array
Mass matrix
K: array
Stiffness matrix
C: array, optional
Damping matrix
fmin: float
Minimum frequency used
fmax: float
Maximum frequency used
fres: float
Frequency resolution
idof: array[int], default None
Array of in dofs/modes to use. If None, use all.
odof: array[int], default None
Array of out dofs/modes to use. If None, use all.
Returns
-------
freq: ndarray
The frequencies where H is calculated.
H: ndarray, [idof, odof, len(freq)]
The transfer function. H[0,0] gives H1 for DOF1, etc.
Examples
--------
>>> M = np.array([[1, 0],
... [0, 1]])
>>> K = np.array([[2, -1],
... [-1, 6]])
>>> C = np.array([[0.3, -0.02],
... [-0.02, 0.1]])
    >>> freq, H = frf_mkc(M, K, fmin=0, fmax=10, fres=0.1, C=C)
"""
n, n = M.shape
if C is None:
C = np.zeros(M.shape)
# in/out DOFs to use
if idof is None:
idof = np.arange(n)
if odof is None:
odof = np.arange(n)
n1 = len(idof)
n2 = len(odof)
# Create state space system, A, B, C, D. D=0
Z = np.zeros((n, n))
I = np.eye(n)
A = np.vstack((
np.hstack((Z, I)),
np.hstack((-solve(M, K, assume_a='pos'),
-solve(M, C, assume_a='pos')))))
B = np.vstack((Z, inv(M)))
    C = np.hstack((I, Z))
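# Illustrative, standalone sketch (not the original implementation): the
# receptance FRF of an (M, C, K) system can equivalently be evaluated directly
# in the frequency domain as H(w) = (K - w^2 M + i*w*C)^-1, without going
# through the state-space form built above.
def _example_frf_direct(M, K, C, fmin=0.0, fmax=5.0, fres=0.01):
    import numpy as np
    freq = np.arange(fmin, fmax + fres, fres)
    n = M.shape[0]
    H = np.empty((n, n, len(freq)), dtype=complex)
    for k, f in enumerate(freq):
        w = 2 * np.pi * f
        H[..., k] = np.linalg.inv(K - w**2 * M + 1j * w * C)
    return freq, H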
# Written by <NAME>
#
# Based on:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import numpy as np
import pycocotools.mask as mask_util
from torch.autograd import Variable
import torch
import ipdb
import math
from core.config import cfg
from utils.timer import Timer
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils.fpn as fpn_utils
import utils.image as image_utils
import utils.keypoints as keypoint_utils
from roi_data.hoi_data import get_hoi_blob_names
from roi_data.hoi_data_union import get_hoi_union_blob_names, generate_union_mask, generate_joints_heatmap
from roi_data.hoi_data_union import generate_pose_configmap, generate_part_box_from_kp, generate_part_box_from_kp17
from datasets import json_dataset
import torch.nn.functional as F
import time
def im_detect_all(model, im, box_proposals=None, timers=None, entry=None):
"""Process the outputs of model for testing
    Args:
        model: the network module
        im: BGR image to test (numpy.ndarray)
        box_proposals: optional precomputed box proposals
        timers: dict of Timers recording the cost of the different steps
        entry: optional dataset entry with precomputed annotations
    """
if timers is None:
timers = defaultdict(Timer)
timers['im_detect_bbox'].tic()
if cfg.TEST.BBOX_AUG.ENABLED:
        # boxes are in the original image size
scores, boxes, im_scale, blob_conv = im_detect_bbox_aug(
model, im, box_proposals)
else:
scores, boxes, im_scale, blob_conv = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals)
timers['im_detect_bbox'].toc()
im_info = np.array([im.shape[:2]+(im_scale[0],)])
# score and boxes are from the whole image after score thresholding and nms
# (they are not separated by class) (numpy.ndarray)
# cls_boxes boxes and scores are separated by class and in the format used
# for evaluating results
timers['misc_bbox'].tic()
scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
timers['misc_bbox'].toc()
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes, im_scale, blob_conv)
else:
masks = im_detect_mask(model, im_scale, boxes, blob_conv)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(cls_boxes, masks, boxes, im.shape[0], im.shape[1])
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes, blob_conv)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
vcoco_heatmaps = None
if cfg.MODEL.VCOCO_ON:
if cfg.VCOCO.KEYPOINTS_ON:
# ipdb.set_trace()
vcoco_heatmaps, vcoco_heatmaps_np = im_detect_keypoints_vcoco(model, im_scale[0], cls_boxes[1][:, :4], blob_conv)
vcoco_cls_keyps = keypoint_results_vcoco(cls_boxes, vcoco_heatmaps_np)
else:
vcoco_cls_keyps = None
hoi_res = im_detect_hoi_union(model, boxes, scores, cls_boxes[1].shape[0],
im_info, blob_conv, entry,
vcoco_heatmaps)
else:
hoi_res = None
vcoco_cls_keyps = None
return cls_boxes, cls_segms, cls_keyps, hoi_res, vcoco_cls_keyps
def im_detect_all_precomp_box(model, im, timers=None, entry=None, mode='val', category_id_to_contiguous_id=None):
"""Process the outputs of model for testing
    Args:
        model: the network module
        im: BGR image to test (numpy.ndarray)
        timers: dict of Timers recording the cost of the different steps
        entry: dataset entry carrying the precomputed boxes, scores and keypoints
        mode: evaluation mode flag ('val' by default), passed through to the HOI head
        category_id_to_contiguous_id: mapping from dataset category ids to
            contiguous class indices
    """
if timers is None:
timers = defaultdict(Timer)
blob_conv, im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
im_info = np.array([im.shape[:2] + (im_scale[0],)])
scores, boxes, cates, cls_boxes = im_detect_bbox_precomp_box(entry, category_id_to_contiguous_id)
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes, im_scale, blob_conv)
else:
masks = im_detect_mask(model, im_scale, boxes, blob_conv)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(cls_boxes, masks, boxes, im.shape[0], im.shape[1])
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes, blob_conv)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
vcoco_heatmaps = None
vcoco_cls_keyps = None
loss = None
if cfg.MODEL.VCOCO_ON:
hoi_res, loss = im_detect_hoi_union(model, boxes, scores, cates, cls_boxes[1].shape[0],
im_info, blob_conv, entry, mode,
vcoco_heatmaps)
else:
hoi_res = None
vcoco_cls_keyps = None
return cls_boxes, cls_segms, cls_keyps, hoi_res, vcoco_cls_keyps, loss
def im_conv_body_only(model, im, target_scale, target_max_size):
inputs, im_scale = _get_blobs(im, None, target_scale, target_max_size)
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = Variable(torch.from_numpy(inputs['data']), volatile=True).cuda()
else:
inputs['data'] = torch.from_numpy(inputs['data']).cuda()
inputs.pop('im_info')
blob_conv = model.module.convbody_net(**inputs)
return blob_conv, im_scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
"""Prepare the bbox for testing"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']), volatile=True)]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']), volatile=True)]
else:
inputs['data'] = [torch.from_numpy(inputs['data'])]
inputs['im_info'] = [torch.from_numpy(inputs['im_info'])]
time1 = time.time()
return_dict = model(**inputs)
time2 = time.time()
print('model_time:', time2-time1)
if cfg.MODEL.FASTER_RCNN:
rois = return_dict['rois'].data.cpu().numpy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scale
# cls prob (activations after softmax)
scores = return_dict['cls_score'].data.cpu().numpy().squeeze()
# In case there is 1 proposal
scores = scores.reshape([-1, scores.shape[-1]])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = return_dict['bbox_pred'].data.cpu().numpy().squeeze()
# In case there is 1 proposal
box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
            # (legacy) Optionally normalize targets by a precomputed mean and stdev.
            # box_deltas is a numpy array at this point, so reshape is used here
            # (ndarray.view does not accept a shape argument).
            box_deltas = box_deltas.reshape(-1, 4) * cfg.TRAIN.BBOX_NORMALIZE_STDS \
                         + cfg.TRAIN.BBOX_NORMALIZE_MEANS
pred_boxes = box_utils.bbox_transform(boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS)
pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, im_scale, return_dict['blob_conv']
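# Illustrative sketch (not from the original code) of the DEDUP_BOXES trick
# used above: quantized ROI coordinates are dotted with powers of 1e3 to form
# one scalar hash per box, so np.unique can drop duplicate proposals and
# inv_index can later map the per-unique-box outputs back to all proposals.
def _example_dedup_boxes(dedup_res=1.0 / 16):
    import numpy as np
    rois = np.array([[0., 10., 10., 50., 50.],
                     [0., 10., 10., 50., 50.],       # exact duplicate of row 0
                     [0., 20., 15., 60., 55.]])
    v = np.array([1, 1e3, 1e6, 1e9, 1e12])
    hashes = np.round(rois * dedup_res).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    unique_rois = rois[index]                        # two boxes survive
    restored = unique_rois[inv_index]                # back to three rows
    return unique_rois, restored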
def im_detect_bbox_precomp_box(entry, category_id_to_contiguous_id):
"""Prepare the bbox for testing"""
    # boxes are in original image coordinates
pred_boxes = entry['precomp_boxes']
scores = entry['precomp_score']
cates = entry['precomp_cate'].astype(np.int32)
contiguous_cate = list()
for cls in cates:
# ipdb.set_trace()
if category_id_to_contiguous_id.get(cls) is None:
contiguous_cate.append(80)
else:
contiguous_cate.append(category_id_to_contiguous_id[cls])
cates = np.array(contiguous_cate, dtype=cates.dtype)
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
box_sc = np.concatenate([pred_boxes, scores[:, None]], 1)
unique_cates = np.unique(cates)
for c in unique_cates:
if category_id_to_contiguous_id.get(c) is not None:
inds = np.where(cates == c)
cls_boxes[category_id_to_contiguous_id[c]] = box_sc[inds]
if len(cls_boxes[1]) == 0:
cls_boxes[1] = np.empty((0,5), dtype=np.float32)
return scores, pred_boxes, cates, cls_boxes
def im_detect_bbox_aug(model, im, box_proposals=None):
"""Performs bbox detection with test-time augmentations.
Function signature is the same as for im_detect_bbox.
"""
assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
'Coord heuristic must be union whenever score heuristic is union'
assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Score heuristic must be union whenever coord heuristic is union'
assert not cfg.MODEL.FASTER_RCNN or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Union heuristic must be used to combine Faster RCNN predictions'
# Collect detections computed under different transformations
scores_ts = []
boxes_ts = []
def add_preds_t(scores_t, boxes_t):
scores_ts.append(scores_t)
boxes_ts.append(boxes_t)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals
)
add_preds_t(scores_hf, boxes_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
scores_scl, boxes_scl = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals
)
add_preds_t(scores_scl, boxes_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals, hflip=True
)
add_preds_t(scores_scl_hf, boxes_scl_hf)
# Perform detection at different aspect ratios
for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals
)
add_preds_t(scores_ar, boxes_ar)
if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals, hflip=True
)
add_preds_t(scores_ar_hf, boxes_ar_hf)
# Compute detections for the original image (identity transform) last to
# ensure that the Caffe2 workspace is populated with blobs corresponding
# to the original image on return (postcondition of im_detect_bbox)
scores_i, boxes_i, im_scale_i, blob_conv_i = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
add_preds_t(scores_i, boxes_i)
# Combine the predicted scores
if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
scores_c = scores_i
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
scores_c = np.mean(scores_ts, axis=0)
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
scores_c = np.vstack(scores_ts)
else:
raise NotImplementedError(
'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
)
# Combine the predicted boxes
if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
boxes_c = boxes_i
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
boxes_c = np.mean(boxes_ts, axis=0)
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
boxes_c = np.vstack(boxes_ts)
else:
raise NotImplementedError(
'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
)
return scores_c, boxes_c, im_scale_i, blob_conv_i
def im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=None):
"""Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
# Compute predictions on the flipped image
im_hf = im[:, ::-1, :]
im_width = im.shape[1]
if not cfg.MODEL.FASTER_RCNN:
box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width)
else:
box_proposals_hf = None
scores_hf, boxes_hf, im_scale, _ = im_detect_bbox(
model, im_hf, target_scale, target_max_size, boxes=box_proposals_hf
)
# Invert the detections computed on the flipped image
boxes_inv = box_utils.flip_boxes(boxes_hf, im_width)
return scores_hf, boxes_inv, im_scale
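# Illustrative, standalone sketch of horizontal box flipping (the code above
# uses box_utils.flip_boxes; the formula below is the usual Detectron-style
# convention): for an image of width W, a box [x1, y1, x2, y2] maps to
# [W - 1 - x2, y1, W - 1 - x1, y2]. The mapping is its own inverse, which is
# why the detections computed on im_hf can simply be flipped back.
def _example_flip_boxes(width=640):
    import numpy as np
    boxes = np.array([[10., 20., 110., 220.],
                      [300., 50., 400., 150.]])

    def flip(b, w):
        out = b.copy()
        out[:, 0] = w - 1 - b[:, 2]
        out[:, 2] = w - 1 - b[:, 0]
        return out

    assert np.allclose(flip(flip(boxes, width), width), boxes)   # involution
    return flip(boxes, width)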
def im_detect_bbox_scale(
model, im, target_scale, target_max_size, box_proposals=None, hflip=False):
"""Computes bbox detections at the given scale.
Returns predictions in the original image space.
"""
if hflip:
scores_scl, boxes_scl, _ = im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=box_proposals
)
else:
scores_scl, boxes_scl, _, _ = im_detect_bbox(
model, im, target_scale, target_max_size, boxes=box_proposals
)
return scores_scl, boxes_scl
def im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals=None, hflip=False):
"""Computes bbox detections at the given width-relative aspect ratio.
Returns predictions in the original image space.
"""
# Compute predictions on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
if not cfg.MODEL.FASTER_RCNN:
box_proposals_ar = box_utils.aspect_ratio(box_proposals, aspect_ratio)
else:
box_proposals_ar = None
if hflip:
scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals_ar
)
else:
scores_ar, boxes_ar, _, _ = im_detect_bbox(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=box_proposals_ar
)
# Invert the detected boxes
boxes_inv = box_utils.aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
return scores_ar, boxes_inv
def im_detect_mask(model, im_scale, boxes, blob_conv):
"""Infer instance segmentation masks. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
blob_conv (Variable): base features from the backbone network.
Returns:
pred_masks (ndarray): R x K x M x M array of class specific soft masks
output by the network (must be processed by segm_results to convert
into hard masks in the original image coordinate space)
"""
M = cfg.MRCNN.RESOLUTION
if boxes.shape[0] == 0:
pred_masks = np.zeros((0, M, M), np.float32)
return pred_masks
inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'mask_rois')
pred_masks = model.module.mask_net(blob_conv, inputs)
pred_masks = pred_masks.data.cpu().numpy().squeeze()
if cfg.MRCNN.CLS_SPECIFIC_MASK:
pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
else:
pred_masks = pred_masks.reshape([-1, 1, M, M])
return pred_masks
def im_detect_mask_aug(model, im, boxes, im_scale, blob_conv):
"""Performs mask detection with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
im_scale (list): image blob scales as returned by im_detect_bbox
blob_conv (Tensor): base features from the backbone network.
Returns:
masks (ndarray): R x K x M x M array of class specific soft masks
"""
assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
# Collect masks computed under different transformations
masks_ts = []
# Compute masks for the original image (identity transform)
masks_i = im_detect_mask(model, im_scale, boxes, blob_conv)
masks_ts.append(masks_i)
# Perform mask detection on the horizontally flipped image
if cfg.TEST.MASK_AUG.H_FLIP:
masks_hf = im_detect_mask_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
masks_ts.append(masks_hf)
# Compute detections at different scales
for scale in cfg.TEST.MASK_AUG.SCALES:
max_size = cfg.TEST.MASK_AUG.MAX_SIZE
masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
masks_ts.append(masks_scl)
if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
masks_scl_hf = im_detect_mask_scale(
model, im, scale, max_size, boxes, hflip=True
)
masks_ts.append(masks_scl_hf)
# Compute masks at different aspect ratios
for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
masks_ts.append(masks_ar)
if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
masks_ar_hf = im_detect_mask_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
masks_ts.append(masks_ar_hf)
# Combine the predicted soft masks
if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
masks_c = np.mean(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
masks_c = np.amax(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
def logit(y):
return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
logit_masks = [logit(y) for y in masks_ts]
logit_masks = np.mean(logit_masks, axis=0)
masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
)
return masks_c
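# Illustrative sketch (not from the original code) of the 'LOGIT_AVG'
# heuristic above: soft masks are averaged in logit space and mapped back
# through a sigmoid, which weights confident predictions more strongly than a
# plain arithmetic mean of the probabilities.
def _example_logit_average():
    import numpy as np
    p1 = np.array([[0.9, 0.2], [0.6, 0.5]])
    p2 = np.array([[0.8, 0.4], [0.7, 0.5]])

    def logit(y):
        return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))

    mean_logit = np.mean([logit(p1), logit(p2)], axis=0)
    combined = 1.0 / (1.0 + np.exp(-mean_logit))
    return combined       # combined[0, 0] ~= 0.857 vs 0.85 for a plain average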
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
"""Performs mask detection on the horizontally flipped image.
Function signature is the same as for im_detect_mask_aug.
"""
# Compute the masks for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils.flip_boxes(boxes, im.shape[1])
blob_conv, im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
masks_hf = im_detect_mask(model, im_scale, boxes_hf, blob_conv)
# Invert the predicted soft masks
masks_inv = masks_hf[:, :, :, ::-1]
return masks_inv
def im_detect_mask_scale(
model, im, target_scale, target_max_size, boxes, hflip=False):
"""Computes masks at the given scale."""
if hflip:
masks_scl = im_detect_mask_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
blob_conv, im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
masks_scl = im_detect_mask(model, im_scale, boxes, blob_conv)
return masks_scl
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
"""Computes mask detections at the given width-relative aspect ratio."""
# Perform mask detection on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)
if hflip:
masks_ar = im_detect_mask_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
blob_conv, im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
masks_ar = im_detect_mask(model, im_scale, boxes_ar, blob_conv)
return masks_ar
def im_detect_keypoints_vcoco(model, im_scale, human_boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if human_boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return None, pred_heatmaps
# project boxes to re-sized image size
human_boxes = np.hstack((np.zeros((human_boxes.shape[0], 1), dtype=human_boxes.dtype),
human_boxes * im_scale))
inputs = {'human_boxes': human_boxes}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'human_boxes')
pred_heatmaps = model.module.vcoco_keypoint_net(blob_conv, inputs)
np_pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
    # In case there is only one box (squeeze dropped the leading axis)
if np_pred_heatmaps.ndim == 3:
np_pred_heatmaps = np.expand_dims(np_pred_heatmaps, axis=0)
return pred_heatmaps, np_pred_heatmaps
def keypoint_results_vcoco(cls_boxes, pred_heatmaps):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils.get_person_class_index()
xy_preds = keypoint_utils.heatmaps_to_keypoints(pred_heatmaps, cls_boxes[person_idx])
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils.nms_oks(xy_preds, cls_boxes[person_idx], 0.3)
xy_preds = xy_preds[keep, :, :]
# ref_boxes = ref_boxes[keep, :]
# pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def im_detect_keypoints(model, im_scale, boxes, blob_conv):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scale (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'keypoint_rois')
pred_heatmaps = model.module.keypoint_net(blob_conv, inputs)
pred_heatmaps = pred_heatmaps.data.cpu().numpy().squeeze()
    # In case there is only one box (squeeze dropped the leading axis)
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
def im_detect_keypoints_aug(model, im, boxes, im_scale, blob_conv):
"""Computes keypoint predictions with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
im_scale (list): image blob scales as returned by im_detect_bbox
blob_conv (Tensor): base features from the backbone network.
Returns:
heatmaps (ndarray): R x J x M x M array of keypoint location logits
"""
# Collect heatmaps predicted under different transformations
heatmaps_ts = []
# Tag predictions computed under downscaling and upscaling transformations
ds_ts = []
us_ts = []
def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
heatmaps_ts.append(heatmaps_t)
ds_ts.append(ds_t)
us_ts.append(us_t)
# Compute the heatmaps for the original image (identity transform)
heatmaps_i = im_detect_keypoints(model, im_scale, boxes, blob_conv)
add_heatmaps_t(heatmaps_i)
# Perform keypoints detection on the horizontally flipped image
if cfg.TEST.KPS_AUG.H_FLIP:
heatmaps_hf = im_detect_keypoints_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_hf)
# Compute detections at different scales
for scale in cfg.TEST.KPS_AUG.SCALES:
ds_scl = scale < cfg.TEST.SCALE
us_scl = scale > cfg.TEST.SCALE
heatmaps_scl = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
heatmaps_scl_hf = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
)
add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
# Compute keypoints at different aspect ratios
for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
heatmaps_ar = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes
)
add_heatmaps_t(heatmaps_ar)
if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
add_heatmaps_t(heatmaps_ar_hf)
# Select the heuristic function for combining the heatmaps
if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
np_f = np.mean
elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
np_f = np.amax
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
)
def heur_f(hms_ts):
return np_f(hms_ts, axis=0)
# Combine the heatmaps
if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
heatmaps_c = combine_heatmaps_size_dep(
heatmaps_ts, ds_ts, us_ts, boxes, heur_f
)
else:
heatmaps_c = heur_f(heatmaps_ts)
return heatmaps_c
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
"""Computes keypoint predictions on the horizontally flipped image.
Function signature is the same as for im_detect_keypoints_aug.
"""
# Compute keypoints for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils.flip_boxes(boxes, im.shape[1])
blob_conv, im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
heatmaps_hf = im_detect_keypoints(model, im_scale, boxes_hf, blob_conv)
# Invert the predicted keypoints
heatmaps_inv = keypoint_utils.flip_heatmaps(heatmaps_hf)
return heatmaps_inv
def im_detect_keypoints_scale(
model, im, target_scale, target_max_size, boxes, hflip=False):
"""Computes keypoint predictions at the given scale."""
if hflip:
heatmaps_scl = im_detect_keypoints_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
blob_conv, im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
heatmaps_scl = im_detect_keypoints(model, im_scale, boxes, blob_conv)
return heatmaps_scl
def im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=False):
"""Detects keypoints at the given width-relative aspect ratio."""
    # Perform keypoint detection on the transformed image
im_ar = image_utils.aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils.aspect_ratio(boxes, aspect_ratio)
if hflip:
heatmaps_ar = im_detect_keypoints_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
blob_conv, im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
heatmaps_ar = im_detect_keypoints(model, im_scale, boxes_ar, blob_conv)
return heatmaps_ar
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
"""Combines heatmaps while taking object sizes into account."""
assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
'All sets of hms must be tagged with downscaling and upscaling flags'
# Classify objects into small+medium and large based on their box areas
areas = box_utils.boxes_area(boxes)
sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH
l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH
# Combine heatmaps computed under different transformations for each object
hms_c = np.zeros_like(hms_ts[0])
for i in range(hms_c.shape[0]):
hms_to_combine = []
for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):
# Discard downscaling predictions for small and medium objects
if sm_objs[i] and ds_t:
continue
# Discard upscaling predictions for large objects
if l_objs[i] and us_t:
continue
hms_to_combine.append(hms_t[i])
hms_c[i] = heur_f(hms_to_combine)
return hms_c
def box_results_with_nms_and_limit(scores, boxes): # NOTE: support single-batch
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
    `scores` has shape (#detections, #classes), where each row represents a list
    of object detection confidence scores for each of the object classes in the
    dataset (including the background class). `scores[i, j]` corresponds to the
    box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(np.float32, copy=False)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils.soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.05,
# score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils.nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils.box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -1] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-1]
scores = im_results[:, -1]
return scores, boxes, cls_boxes
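# Illustrative sketch (not from the original code) of the box/score layout
# documented above: row i of `boxes` holds one 4-vector per class, and
# scores[i, j] belongs to boxes[i, j*4:(j+1)*4]; class j is sliced out exactly
# as in box_results_with_nms_and_limit, skipping the background class 0.
def _example_class_slicing(num_classes=3, score_thresh=0.05):
    import numpy as np
    rng = np.random.default_rng(0)
    scores = rng.random((5, num_classes))            # 5 detections
    boxes = rng.random((5, 4 * num_classes)) * 100.0

    per_class = []
    for j in range(1, num_classes):                  # skip background class 0
        inds = np.where(scores[:, j] > score_thresh)[0]
        scores_j = scores[inds, j]
        boxes_j = boxes[inds, j * 4:(j + 1) * 4]
        dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(np.float32)
        per_class.append(dets_j)                     # (n_j, 5): x1, y1, x2, y2, score
    return per_class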
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w):
num_classes = cfg.MODEL.NUM_CLASSES
cls_segms = [[] for _ in range(num_classes)]
mask_ind = 0
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
M = cfg.MRCNN.RESOLUTION
scale = (M + 2.0) / M
ref_boxes = box_utils.expand_boxes(ref_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
# skip j = 0, because it's the background class
for j in range(1, num_classes):
segms = []
for _ in range(cls_boxes[j].shape[0]):
if cfg.MRCNN.CLS_SPECIFIC_MASK:
padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
else:
padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
ref_box = ref_boxes[mask_ind, :]
w = (ref_box[2] - ref_box[0] + 1)
h = (ref_box[3] - ref_box[1] + 1)
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, im_w)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]), (x_0 - ref_box[0]):(x_1 - ref_box[0])]
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(np.array(im_mask[:, :, np.newaxis], order='F'))[0]
# For dumping to json, need to decode the byte string.
# https://github.com/cocodataset/cocoapi/issues/70
rle['counts'] = rle['counts'].decode('ascii')
segms.append(rle)
mask_ind += 1
cls_segms[j] = segms
assert mask_ind == masks.shape[0]
return cls_segms
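# Illustrative sketch (not from the original code) of the padding trick used in
# segm_results above: the M x M soft mask is zero-padded by one pixel and the
# reference box is expanded around its centre by the matching factor
# (M + 2) / M, so cv2.resize does not replicate border values into the object
# ("top hat" artifacts).
def _example_mask_padding(M=28):
    import numpy as np
    scale = (M + 2.0) / M                        # expansion factor for the box
    box = np.array([10.0, 20.0, 40.0, 60.0])     # x1, y1, x2, y2
    x_c, y_c = (box[0] + box[2]) * 0.5, (box[1] + box[3]) * 0.5
    w_half = (box[2] - box[0]) * 0.5 * scale
    h_half = (box[3] - box[1]) * 0.5 * scale
    expanded_box = np.array([x_c - w_half, y_c - h_half, x_c + w_half, y_c + h_half])

    mask = np.random.rand(M, M).astype(np.float32)
    padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
    padded_mask[1:-1, 1:-1] = mask               # one-pixel zero border
    return expanded_box, padded_mask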
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils.get_person_class_index()
xy_preds = keypoint_utils.heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils.nms_oks(xy_preds, ref_boxes, 0.3)
xy_preds = xy_preds[keep, :, :]
ref_boxes = ref_boxes[keep, :]
pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
    # np.float / np.int were removed in NumPy 1.24; use explicit dtypes instead
    rois = im_rois.astype(np.float64, copy=False) * scales
    levels = np.zeros((im_rois.shape[0], 1), dtype=np.int_)
return rois, levels
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
# -------------------------- HOI ----------------------------
def im_detect_hoi(model, boxes, scores, human_count, im_info, blob_conv, entry=None, vcoco_heatmaps=None):
hoi_blob_in = get_hoi_blob_names(is_training=False)
# im_info.shape = (1, 3) h, w, scale
im_scale = im_info[0, 2]
# project boxes to re-sized image size
hoi_blob_in['boxes'] = np.hstack((np.zeros((boxes.shape[0], 1), dtype=boxes.dtype),
boxes * im_scale))
hoi_blob_in['scores'] = scores
human_index = np.arange(boxes.shape[0])[:human_count]
object_index = np.arange(boxes.shape[0])[human_count:]
interaction_human_inds, interaction_target_object_inds \
= np.repeat(human_index, object_index.size), np.tile(object_index - human_count, human_index.size)
hoi_blob_in['human_index'] = human_index
hoi_blob_in['target_object_index'] = object_index
hoi_blob_in['interaction_human_inds'] = interaction_human_inds
hoi_blob_in['interaction_target_object_inds'] = interaction_target_object_inds
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(hoi_blob_in, 'boxes')
    # if no human box is detected, skip hoi_head and just return nan
if human_index.size > 0:
hoi_blob_out = model.module.hoi_net(blob_conv, hoi_blob_in, im_info, vcoco_heatmaps)
# ipdb.set_trace()
# if entry:
# test_hoi_fill_hoi_blob_from_gt(hoi_blob_out, entry, im_scale)
hoi_res = hoi_res_gather(hoi_blob_out, im_scale, entry)
else:
# ToDo: any problem here?
hoi_res = dict(
agents=np.full((1, 4 + cfg.VCOCO.NUM_ACTION_CLASSES), np.nan),
roles=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
)
return hoi_res
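# Illustrative sketch (not from the original code) of how the interaction
# index pairs above are built: np.repeat over the human indices combined with
# np.tile over the object indices enumerates the full Cartesian product of
# (human, object) pairs in row-major order.
def _example_interaction_pairs():
    import numpy as np
    human_index = np.array([0, 1])          # two detected humans
    object_index = np.array([0, 1, 2])      # three candidate target objects
    h_inds = np.repeat(human_index, object_index.size)    # [0 0 0 1 1 1]
    o_inds = np.tile(object_index, human_index.size)      # [0 1 2 0 1 2]
    return list(zip(h_inds.tolist(), o_inds.tolist()))    # all six pairs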
def hoi_res_gather(hoi_blob, im_scale, entry=None):
'''
Convert predicted score and location to triplets
:param hoi_blob:
:param im_scale:
:param entry:
:return:
'''
# ToDo: modify comments
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
human_action_score = F.sigmoid(hoi_blob['human_action_score']).cpu().numpy()
human_action_bbox_pred = hoi_blob['human_action_bbox_pred'].cpu().numpy()
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy()
human_score = hoi_blob['scores'][hoi_blob['human_index']]
object_score = hoi_blob['scores'][hoi_blob['target_object_index']]
# scale to original image size when testing
boxes = hoi_blob['boxes'][:, 1:] / im_scale
    # For actions that don't interact with an object, action_score is s_h * s^a_h
    # For triplets (interacting with objects), action_score is s_h * s_o * s^a_h * g^a_h,o
    # we use a mask to choose the appropriate score
action_mask = np.array(cfg.VCOCO.ACTION_MASK)
triplet_action_mask = np.tile(action_mask.transpose((1, 0)), (human_action_score.shape[0], 1, 1))
    # For actions that do not interact with any object (e.g., smile, run),
    # we rely on s^a_h and the interaction output s^a_h,o is not used.
human_action_pair_score = human_score[:, np.newaxis] * human_action_score
# in case there is no role-objects
if hoi_blob['target_object_index'].size > 0:
# transform from (human num, object num, action_num) to
# (human_num*action_num*num_target_object_types, object_num)
interaction_action_score = \
interaction_action_score.reshape(human_score.size, object_score.size, -1).transpose(0, 2, 1)
interaction_action_score = np.repeat(interaction_action_score, num_target_object_types, axis=1
).reshape(-1, object_score.size)
# get target localization term g^a_h,o
target_localization_term = target_localization(boxes, hoi_blob['human_index'],
hoi_blob['target_object_index'], human_action_bbox_pred)
# find the object box that maximizes S^a_h,o
# `for each human / action pair we find the object box that maximizes S_h_o^a`
object_action_score = object_score * interaction_action_score * target_localization_term
choosed_object_inds = np.argmax(object_action_score, axis=-1)
# choose corresponding target_localization_term
target_localization_term = target_localization_term[np.arange(choosed_object_inds.size), choosed_object_inds]
# ToDo: choose top-50
# triplet score S^a_h,o
triplet_action_score = \
np.repeat(human_score, num_action_classes * num_target_object_types) * \
object_score[choosed_object_inds] * \
np.repeat(human_action_score, num_target_object_types, axis=1).ravel() * \
target_localization_term
# transform to (human_num, action_num, num_target_object_types)
triplet_action_score = triplet_action_score.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
# ToDo: thresh
# triplet_action_score[triplet_action_mask <= cfg.TEST.SCORE_THRESH] = np.nan
if entry:
# assert triplet_action_score.shape == entry['gt_role_id'][hoi_blob['human_index']].shape
for i in range(len(triplet_action_score.shape)):
pass
# assert np.all(np.where(triplet_action_score > 0.9)[i] ==
# np.where(entry['gt_role_id'][hoi_blob['human_index']] > -1)[i])
# choose appropriate score
# ToDo: any problem here?
# As not every action that defined interacts with objects will have
# corresponding objects in one image, and triplet_action_score always
# have a object box, should I set a thresh or some method to choose
# score between human_action_pair_score and triplet score???
# OR wrong result will be excluded when calculate AP??
# action_score = np.zeros(human_action_score.shape)
# action_score[human_action_mask == 0] = human_action_pair_score[human_action_mask == 0]
# action_score[human_action_mask == 1] = np.amax(triplet_action_score, axis=-1)[human_action_mask == 1]
# set triplet action score don't interact with object to zero
# triplet_action_score[triplet_action_mask == 0] = np.nan
triplet_action_score[triplet_action_mask == 0] = -1
top_k_value = triplet_action_score.flatten()[
np.argpartition(triplet_action_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
triplet_action_score[triplet_action_score <= top_k_value] = np.nan
# get corresponding box of role-objects
choosed_object_inds = choosed_object_inds.reshape(human_action_score.shape[0], num_action_classes,
num_target_object_types)
choosed_objects = boxes[hoi_blob['target_object_index']][choosed_object_inds]
else:
# if there is no object predicted, triplet action score won't used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
action_score = human_action_pair_score
# ToDo: threshold
# action_score[action_score <= cfg.TEST.SCORE_THRESH] = np.nan
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
agents = np.hstack((boxes[hoi_blob['human_index']], action_score))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)
roles = np.stack([roles[:, :, i, :].reshape(-1, num_action_classes * 5) for i in range(num_target_object_types)], axis=-1)
return_dict = dict(
# image_id=i
agents=agents,
roles=roles
)
return return_dict
def target_localization(boxes, human_index, object_index, target_location):
"""
    Target localization term from the paper, g^a_{h,o}.
    Measures the compatibility between the human-object relative location and
    the target location predicted by the hoi-head.
:param boxes:
:param human_index:
:param object_index:
:param target_location:
:return:
"""
human_boxes = boxes[human_index]
object_boxes = boxes[object_index]
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
# relative location between every human box and object box
# ToDo: add cfg.MODEL.BBOX_REG_WEIGHTS
relative_location = box_utils.bbox_transform_inv(
np.repeat(human_boxes, object_boxes.shape[0], axis=0),
np.tile(object_boxes, (human_boxes.shape[0], 1))
).reshape(human_boxes.shape[0], object_boxes.shape[0], 4)
# reshape target location same shape as relative location
target_location = target_location.reshape(-1, num_action_classes * num_target_object_types, 4)
# tile to human_num * (num_action_classes * num_target_object_types * object_num) * 4
relative_location, target_location = \
np.tile(relative_location, (1, num_action_classes * num_target_object_types, 1)), \
np.repeat(target_location, relative_location.shape[1], axis=1)
compatibility = np.sum(np.square((relative_location - target_location)), axis=-1)
# It seems the paper make a mistake here
compatibility = np.exp(-compatibility / (2 * cfg.VCOCO.TARGET_SIGMA ** 2))
# reshape to (human_num * num_action_classes * num_target_object_types, object_num)
compatibility = compatibility.reshape(human_index.size * num_action_classes * num_target_object_types,
object_index.size)
return compatibility
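# Illustrative sketch (not from the original code) of the compatibility term
# computed above: with delta = encoded human->object relative location and
# target = the location predicted by the hoi-head, the term is a Gaussian
# exp(-||delta - target||^2 / (2 * sigma^2)), so it approaches 1 when the
# detected object sits where the action head expects it. sigma stands in for
# cfg.VCOCO.TARGET_SIGMA; 0.3 is just an example value.
def _example_target_compatibility(sigma=0.3):
    import numpy as np
    delta = np.array([0.50, -0.10, 0.00, 0.20])    # observed relative location
    target = np.array([0.45, -0.05, 0.05, 0.15])   # predicted target location
    dist2 = np.sum(np.square(delta - target))
    return np.exp(-dist2 / (2 * sigma ** 2))       # ~0.95 for these numbers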
# ------------------test interact net code ------------------
# ToDo: will be cleaned
def test_hoi_fill_hoi_blob_from_gt(hoi_blob, entry, im_scale):
"""['boxes', 'human_index', 'target_object_index', 'interaction_human_inds',
'interaction_target_object_inds', 'interaction_batch_idx', 'human_action_labels',
'human_action_targets', 'action_target_weights', 'interaction_action_labels',
'boxes_fpn2', 'boxes_fpn3', 'boxes_fpn4', 'boxes_fpn5', 'boxes_idx_restore_int32',
'human_action_score', 'human_action_bbox_pred', 'interaction_action_score']"""
hoi_blob['boxes'] = np.hstack((np.zeros((entry['boxes'].shape[0], 1), dtype=hoi_blob['boxes'].dtype),
entry['boxes'])) * im_scale
hoi_blob['scores'] = np.ones(entry['boxes'].shape[0])
human_index = np.where(entry['gt_actions'][:, 0] > -1)[0]
# all object could be target object
target_object_index = np.arange(entry['boxes'].shape[0], dtype=human_index.dtype)
interaction_human_inds, interaction_target_object_inds \
= np.repeat(np.arange(human_index.size), target_object_index.size), \
np.tile(np.arange(target_object_index.size), human_index.size)
hoi_blob['human_index'] = human_index
hoi_blob['target_object_index'] = target_object_index
hoi_blob['interaction_human_inds'] = interaction_human_inds
hoi_blob['interaction_target_object_inds'] = interaction_target_object_inds
human_action_score = entry['gt_actions'][human_index]
hoi_blob['human_action_score'] = torch.from_numpy(human_action_score).cuda()
action_label_mat = generate_action_mat(entry['gt_role_id'])
triplet_label = action_label_mat[human_index[interaction_human_inds],
target_object_index[interaction_target_object_inds]]
hoi_blob['interaction_action_score'] = torch.from_numpy(triplet_label).cuda()
human_action_bbox_pred, _ = \
_compute_action_targets(entry['boxes'][human_index], entry['boxes'],
entry['gt_role_id'][human_index])
hoi_blob['human_action_bbox_pred'] = torch.from_numpy(human_action_bbox_pred).cuda()
def generate_action_mat(gt_role_id):
'''
Generate a matrix to store action triplet
:param gt_role_id:
:return: action_mat, row is person id, column is role-object id,
third axis is action id
'''
mat = np.zeros((gt_role_id.shape[0], gt_role_id.shape[0], cfg.VCOCO.NUM_ACTION_CLASSES, gt_role_id.shape[-1]), dtype=np.float32)
obj_ids = gt_role_id[np.where(gt_role_id > -1)]
human_ids, action_cls, role_cls = np.where(gt_role_id > -1)
assert role_cls.size == human_ids.size == action_cls.size == obj_ids.size
mat[human_ids, obj_ids, action_cls, role_cls] = 1
return mat
def _compute_action_targets(person_rois, gt_boxes, role_ids):
'''
Compute action targets
:param person_rois: rois assigned to gt acting-human, n * 4
:param gt_boxes: all gt boxes in one image
:param role_ids: person_rois_num * action_cls_num * num_target_object_types, store person rois corresponding role object ids
:return:
'''
assert person_rois.shape[0] == role_ids.shape[0]
# should use cfg.MODEL.BBOX_REG_WEIGHTS?
# calculate targets between every person rois and every gt_boxes
targets = box_utils.bbox_transform_inv(np.repeat(person_rois, gt_boxes.shape[0], axis=0),
np.tile(gt_boxes, (person_rois.shape[0], 1)),
(1., 1., 1., 1.)).reshape(person_rois.shape[0], gt_boxes.shape[0], -1)
# human action targets is (person_num: 16, action_num: 26, role_cls: 2, relative_location: 4)
# don't use np.inf, so that actions without target_objects could kept
human_action_targets = np.zeros((role_ids.shape[0], role_ids.shape[1],
role_ids.shape[2], 4), dtype=np.float32)
action_target_weights = np.zeros_like(human_action_targets, dtype=np.float32)
# get action targets relative location
human_action_targets[np.where(role_ids > -1)] = \
targets[np.where(role_ids > -1)[0], role_ids[np.where(role_ids > -1)].astype(int)]
action_target_weights[np.where(role_ids > -1)] = 1.
return human_action_targets.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4), \
action_target_weights.reshape(-1, cfg.VCOCO.NUM_ACTION_CLASSES * 2 * 4)
# ------------------------------- HOI union ------------------------------------
def im_detect_hoi_union(model, boxes, scores, cates, human_count, im_info, blob_conv, entry=None, mode='val', vcoco_heatmaps=None):
loss = dict(
interaction_action_loss=None,
interaction_action_accuray_cls=None)
hoi_blob_in = get_hoi_union_blob_names(is_training=False)
# im_info.shape = (1, 3)
im_scale = im_info[0, 2]
# project boxes to re-sized image size
scaled_boxes = np.hstack((np.zeros((boxes.shape[0], 1), dtype=boxes.dtype),
boxes * im_scale))
# ToDo: choose top 16 human boxes, top 64 target boxes??
# ToDo: lower nms thresh, triplet nms
human_inds = np.where(cates == 1)[0]
human_boxes = scaled_boxes[human_inds]
human_scores = scores[human_inds]
# human_boxes = scaled_boxes[:human_count]
# human_scores = scores[:human_count]
# keep_human_inds = np.where(human_scores >= cfg.VCOCO.TEST_HUMAN_SCORE_THRESH)[0][:16] # ToDo:
keep_human_inds = np.where(human_scores >= cfg.VCOCO.TEST_HUMAN_SCORE_THRESH)[0]
human_boxes = human_boxes[keep_human_inds]
human_scores = human_scores[keep_human_inds]
# select target objects boxes, all boxes are used as targets, including human
# ToDo: try different targets number
# keep_target_inds = np.where(scores >= cfg.VCOCO.TEST_TARGET_OBJECT_SCORE_THRESH)[0][:64]
keep_target_inds = np.where(scores >= cfg.VCOCO.TEST_TARGET_OBJECT_SCORE_THRESH)[0]
target_boxes = scaled_boxes[keep_target_inds]
target_scores = scores[keep_target_inds]
target_classes = cates[keep_target_inds]
interaction_human_inds, interaction_object_inds, union_boxes, spatial_info =\
generate_triplets(human_boxes, target_boxes)
target_cls_mat = np.zeros((target_boxes.shape[0], cfg.MODEL.NUM_CLASSES)).astype(np.float32)
target_cls_mat[:, target_classes] = 1.0
hoi_blob_in['human_boxes'] = human_boxes
hoi_blob_in['object_boxes'] = target_boxes
hoi_blob_in['object_classes'] = target_cls_mat
hoi_blob_in['union_boxes'] = union_boxes
hoi_blob_in['human_scores'] = human_scores
hoi_blob_in['object_scores'] = target_scores
hoi_blob_in['spatial_info'] = spatial_info
hoi_blob_in['interaction_human_inds'] = interaction_human_inds
hoi_blob_in['interaction_object_inds'] = interaction_object_inds
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(hoi_blob_in, 'human_boxes')
_add_multilevel_rois_for_test(hoi_blob_in, 'object_boxes')
_add_multilevel_rois_for_test(hoi_blob_in, 'union_boxes')
else:
blob_conv = blob_conv[-1]
    # if no human box is detected, skip hoi_head and just return nan
if human_boxes.size > 0:
rois_keypoints = entry['precomp_keypoints']
human_keypoints = rois_keypoints[human_inds[keep_human_inds]]
union_kps, part_boxes, flag = get_pred_keypoints(human_boxes, human_keypoints, interaction_human_inds, im_scale)
vcoco_heatmaps, union_mask, rescale_kps = generate_joints_heatmap(union_kps, union_boxes[:, 1:]/im_scale,
human_boxes[interaction_human_inds, 1:]/im_scale,
target_boxes[interaction_object_inds, 1:]/im_scale)
pose_configmap = generate_pose_configmap(union_kps, union_boxes[:, 1:]/im_scale,
human_boxes[interaction_human_inds, 1:]/im_scale,
target_boxes[interaction_object_inds, 1:]/im_scale)
hoi_blob_in['union_mask'] = union_mask
hoi_blob_in['rescale_kps'] = rescale_kps
hoi_blob_in['part_boxes'] = part_boxes
hoi_blob_in['flag'] = flag
hoi_blob_in['poseconfig'] = pose_configmap
# # Testing. Replace pred action with gt action
if cfg.DEBUG_TEST_WITH_GT and cfg.DEBUG_TEST_GT_ACTION and entry is not None:
hoi_blob_out = test_det_bbox_gt_action(hoi_blob_in, entry, im_info)
else:
hoi_blob_out = model.module.hoi_net(blob_conv, hoi_blob_in, im_info, vcoco_heatmaps)
affinity_mat = None
if entry.get('affinity_mat') is not None:
affinity_mat = entry['affinity_mat']
affinity_mat = affinity_mat[human_inds[keep_human_inds]][:, keep_target_inds]
hoi_res, interaction_affinity_score = hoi_union_res_gather(hoi_blob_out, im_scale, affinity_mat, entry)
human_action_labels, interaction_action_labels, interaction_affinity_label, \
total_action_num, recall_action_num, total_affinity_num, recall_affinity_num = \
get_gt_labels(entry, human_boxes, target_boxes, interaction_human_inds.shape[0], im_scale)
hoi_blob_out['human_action_labels'] = human_action_labels
hoi_blob_out['interaction_action_labels'] = interaction_action_labels
hoi_blob_out['interaction_affinity'] = interaction_affinity_label
interaction_action_loss, interaction_affinity_loss, \
interaction_action_accuray_cls, interaction_affinity_cls = model.module.HOI_Head.loss(hoi_blob_out)
loss = dict(
interaction_action_loss=float(interaction_action_loss.cpu()),
interaction_action_accuray_cls=float(interaction_action_accuray_cls.cpu()),
interaction_affinity_loss=float(interaction_affinity_loss),
interaction_affinity_cls=float(interaction_affinity_cls),
interaction_affinity_label=interaction_affinity_label,
interaction_affinity_score=interaction_affinity_score,
total_action_num = total_action_num,
total_affinity_num = total_affinity_num,
recall_action_num = recall_action_num,
recall_affinity_num = recall_affinity_num)
else:
# ToDo: any problem here?
hoi_res = dict(
agents=np.full((1, 4 + cfg.VCOCO.NUM_ACTION_CLASSES), np.nan),
roles=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
roles1=np.full((1, 5 * cfg.VCOCO.NUM_ACTION_CLASSES, cfg.VCOCO.NUM_TARGET_OBJECT_TYPES), np.nan),
)
return hoi_res, loss
def get_maxAgent(agents_i, separated_parts_o):
'''
given agents_i, choosed best agents by pred score
N x (4+26) x 3
'''
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
#num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
agents_i = agents_i.reshape((-1, 4 + num_action_classes, separated_parts_o))
boxes = agents_i[:,:4, 0]
scores = agents_i[:, 4:, :] # N x 26 x o
choose_id = np.argmax(scores, axis=-1) # N x 26
choose_id_ = choose_id.reshape(-1) #
scores_ = scores.reshape((-1, separated_parts_o)) #
assert scores_.shape[0] == len(choose_id_)
choosed_score = scores_[np.arange(len(choose_id_)), choose_id_]
choosed_score = choosed_score.reshape(scores.shape[:-1])
return np.hstack((boxes, choosed_score)) # N x 30
def get_maxRole(roles_i, separated_parts_o):
'''
    Given roles_i, choose for each action/role the best prediction across the
    o separated parts by predicted score.
'''
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
roles_i = roles_i.reshape((-1, num_action_classes, 5, num_target_object_types, separated_parts_o))
role_score = roles_i[:,:,-1:] # N x 26 x 1 x 2 x o
choose_id = np.argmax(role_score, axis=-1) # N x 26 x 1 x 2
choose_id = np.tile(choose_id, (1,1,5,1)) # N x 26 x 5 x 2
choose_id_ = choose_id.reshape(-1)
roles_i_ = roles_i.reshape((-1, separated_parts_o))
assert roles_i_.shape[0] == len(choose_id_)
outs = roles_i_[np.arange(len(choose_id_)), choose_id_] # N
return outs.reshape((roles_i.shape[0], num_action_classes*5, num_target_object_types))
def hoi_union_res_gather(hoi_blob, im_scale, interaction_affinity_score=None, entry=None):
'''
Convert predicted score and location to triplets
:param hoi_blob:
:param im_scale:
:param entry:
:return:
'''
# ToDo: modify comments
num_action_classes = cfg.VCOCO.NUM_ACTION_CLASSES
num_target_object_types = cfg.VCOCO.NUM_TARGET_OBJECT_TYPES
# (1) interaction_affinity_score
interaction_affinity_score = F.sigmoid(hoi_blob['interaction_affinity_score']).cpu().numpy() ##N*1
# interaction_affinity_score = 1 / (np.exp(-interaction_affinity_score)+1)
# (2) interaction_action_score
interaction_action_score = F.sigmoid(hoi_blob['interaction_action_score']).cpu().numpy() ## N*24
## combine interaction_action_score and interaction_affinity_score (1+3)
interaction_action_score1 = interaction_action_score * interaction_affinity_score
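# interaction_action_score1 weights every action score by the predicted binary
# interactiveness (affinity); both the raw and the affinity-weighted variants are kept
# in interaction_score_list below so the two scoring schemes can be compared.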
human_score = hoi_blob['human_scores']
object_score = hoi_blob['object_scores']
## optionally apply LIS to human_score and object_score (kept disabled here)
# human_score = LIS(human_score)
# object_score = LIS(object_score)
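# Note (descriptive only): LIS here refers to a Low-grade Instance Suppressive function,
# commonly a logistic-shaped rescaling such as LIS(s) = T / (1 + exp(k - w*s)) that
# suppresses low-confidence detection scores. The exact form and constants used by this
# codebase are defined elsewhere, so this is only an assumption about its intent.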
# scale to original image size when testing
human_boxes = hoi_blob['human_boxes'][:, 1:] / im_scale
object_boxes = hoi_blob['object_boxes'][:, 1:] / im_scale
# use the action mask to pick out the valid (action, role-object-type) score slots
action_mask = np.array(cfg.VCOCO.ACTION_MASK).T
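# After the transpose, action_mask has shape (num_action_classes, num_target_object_types);
# its non-zero entries mark which role-object slots exist for each action and correspond
# one-to-one to the columns of the compact interaction score, which is scattered into a
# dense grid further below via np.where(action_mask > 0).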
# triplet_action_mask = np.tile(action_mask, (human_action_score.shape[0], 1, 1))
# For actions that do not interact with any object (e.g., smile, run),
# we rely on s^a_h and the interaction output s^a_h_o is not used,
# human_action_pair_score = human_score[:, np.newaxis] * human_action_score
# ToDo: try just use human score as human action pair score
# we can get better results for `pred bbox and gt action`
# human_action_pair_score = human_score[:, np.newaxis]
interaction_score_list = [interaction_action_score, interaction_action_score1]
role_list = []
for inter_idx in range((len(interaction_score_list))):
# in case there are no role-objects
if hoi_blob['object_boxes'].size > 0:
# triplets score
triplet_action_score = interaction_score_list[inter_idx] * \
human_score[hoi_blob['interaction_human_inds']][:, np.newaxis] * \
object_score[hoi_blob['interaction_object_inds']][:, np.newaxis]
# ToDo: try just use interaction_action_score, better results for `pred bbox and gt action`
# triplet_action_score = interaction_action_score
# scatter the compact per-pair scores into a dense
# (pair_num, num_action_classes, num_target_object_types) grid,
# then reshape to (human_num, object_num, num_action_classes, num_target_object_types)
triplet_action_score_tmp = np.zeros(
(triplet_action_score.shape[0], num_action_classes, num_target_object_types),
dtype=triplet_action_score.dtype)
triplet_action_score_tmp[:, np.where(action_mask > 0)[0], np.where(action_mask > 0)[1]] = \
triplet_action_score
triplet_action_score = triplet_action_score_tmp
# interaction_action_score = interaction_action_score_tmp.reshape(human_score.size, object_score.size, -1)
# interaction_action_score = interaction_action_score.transpose(0, 2, 1).reshape(-1, object_score.size)
triplet_action_score = triplet_action_score.reshape(human_score.size, object_score.size,
num_action_classes, num_target_object_types)
"""********** remove misgrouping case before hard nms ************"""
# triplet_action_score_mask = remove_mis_group(hoi_blob, entry, im_scale)
# if triplet_action_score_mask is not None:
# # ipdb.set_trace()
# triplet_action_score = triplet_action_score * triplet_action_score_mask[:,:,None,None]
# ToDo: one person one action one object
# ToDo: or one pair three action
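# For every (human, action, role-type) slot, keep only the best-scoring object and its
# score; after the reshape above, axis=1 is the object axis.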
choosed_object_inds = np.argmax(triplet_action_score, axis=1)
triplet_action_score = np.max(triplet_action_score, axis=1)
# triplet_action_score[triplet_action_score < 0.3] = -1
# triplet_action_score[triplet_action_mask == 0] = -1
"""********* keep top k value **********"""
# top_k_value = triplet_action_score.flatten()[
# np.argpartition(triplet_action_score, -cfg.VCOCO.KEEP_TOP_NUM, axis=None)[-cfg.VCOCO.KEEP_TOP_NUM]]
# triplet_action_score[triplet_action_score <= top_k_value] = np.nan
choosed_objects = object_boxes[choosed_object_inds]
else:
# if no object is predicted, the triplet action score won't be used
triplet_action_score = np.full((1, num_action_classes, num_target_object_types), np.nan)
choosed_objects = np.zeros((1, num_action_classes, num_target_object_types, 4))
# action_score = human_action_pair_score
# keep consistent with v-coco eval code
# agents: box coordinates + 26 action score.
# roles: 26 * (role object coordinates + role-action score) * num_target_object_types
# agents = np.hstack((human_boxes, action_score))
agents = np.hstack((human_boxes, np.zeros((human_boxes.shape[0], num_action_classes))))
roles = np.concatenate((choosed_objects, triplet_action_score[..., np.newaxis]), axis=-1)