| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
import numpy as np
import pandas as pd
from rbergomi.rbergomi_utils import *
class rBergomi(object):
"""
Class for generating paths of the rBergomi model.
Integral equations for reference:
Y(t) := sqrt(2a + 1) * int_0^t (t - u)^a dW(u)
V(t) := xi * exp(eta * Y(t) - 0.5 * eta^2 * t^(2a + 1))
S(t) := S0 * exp( int_0^t sqrt(V(u)) dB(u) - 0.5 * int_0^t V(u) du )
"""
def __init__(self, n=256, N=1024, T=1.0):
"""
Constructor for class.
"""
# Basic assignments.
self.n = n # Steps per year
self.N = N # Paths
self.T = T # Maturity
self.dt = 1.0/n # Step size
self.s = int(n*T) # Steps
self.t = np.linspace(0,T,1+self.s)[np.newaxis,:] # Time grid
def dW(self, α=0.4, β=-0.4, seed=0):
"""
.
"""
self.α = α
self.β = β
s = self.s
# Store required covariance matrices
cov1 = cov(α, self.n)
cov2 = cov(β, self.n)
chol1 = np.linalg.cholesky(cov1)[np.newaxis,np.newaxis,:,:]
chol2 = np.linalg.cholesky(cov2)[np.newaxis,np.newaxis,:,:]
# fn = 'sobol/'+str(seed)+'-'+str(self.N)+'-'+str(4*s)+'.csv'
# random_numbers = np.array(pd.read_csv(fn))
## SHOULD BE OUTSIDE CALIBRATION ROUTINE
np.random.seed(seed)
random_numbers = np.random.normal(size=(self.N,4*s))
# Obviously generalise
dB11 = random_numbers[:,0*s:1*s]
dB12 = random_numbers[:,1*s:2*s]
dB21 = random_numbers[:,2*s:3*s]
dB22 = random_numbers[:,3*s:4*s]
# Prepare for operations
dB1 = np.zeros((self.N,s,2,1))
dB2 = np.zeros((self.N,s,2,1))
dB1[:,:,0,0] = dB11
dB1[:,:,1,0] = dB12
dB2[:,:,0,0] = dB21
dB2[:,:,1,0] = dB22
# Finally, correlate in C-layer
dW1 = np.squeeze(np.matmul(chol1,dB1))
dW2 = np.squeeze(np.matmul(chol2,dB2))
dW = np.zeros((self.N,s,2,2))
dW[:,:,:,0] = dW1
dW[:,:,:,1] = dW2
return dW
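# Illustrative usage (a hedged sketch, not part of the original module):
#   rb = rBergomi(n=256, N=1024, T=1.0)
#   dW = rb.dW(α=0.4, β=-0.4, seed=0)   # correlated increments, shape (N, s, 2, 2)
#   Y = rb.Y(dW, α=0.4)                 # Volterra process on the time grid, shape (N, 1 + s)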
# Should promote this for two dimensions given α, β use
def Y(self, dW, α):
"""
Constructs Volterra process from appropriately
correlated 2d Brownian increments.
"""
Y1 = np.zeros((self.N, 1 + self.s)) # Exact integral
Y2 = np.zeros((self.N, 1 + self.s)) # Riemann sum
# Construct Y1 through exact integral
# for i in range(1 + self.s):
# Use np.cumsum here? - must time this
# for i in np.arange(1, 1 + self.s, 1): # See (3.6)
# Y1[:,i] += dW[:,i-1,1] # Assumes kappa = 1
# Construct Y1 through exact integral
Y1[:,1:1+self.s] = dW[:,:self.s,1] # Assumes kappa = 1
# Construct arrays for convolution
Γ = np.zeros(1 + self.s)
import os
import sys
import unittest
import numpy as np
from QGrain.algorithms import *
from QGrain.models.FittingResult import *
class TestComponentFittingResult(unittest.TestCase):
def setUp(self):
self.real_x = np.linspace(-10, 10, 2001)
self.fitting_space_x = self.real_x + 1
self.params = np.array([0, 2], dtype=np.float64)
self.fraction = 1.0
self.algorithm_data = AlgorithmData(DistributionType.Normal, 1)
def tearDown(self):
self.real_x = None
self.fitting_space_x = None
self.params = None
self.fraction = None
self.algorithm_data = None
def gen_by_defaluts(self):
component_result = ComponentFittingResult(
self.real_x, self.fitting_space_x,
self.algorithm_data,
self.params, self.fraction)
return component_result
def test_valid(self):
component_result = self.gen_by_defaluts()
self.assertFalse(component_result.has_nan)
def test_has_attrs(self):
component_result = self.gen_by_defaluts()
component_result.component_y
component_result.params
component_result.fraction
component_result.mean
component_result.median
component_result.mode
component_result.variance
component_result.standard_deviation
component_result.skewness
component_result.kurtosis
def test_read_only(self):
component_result = self.gen_by_defaluts()
with self.assertRaises(AttributeError):
component_result.component_y = None
with self.assertRaises(AttributeError):
component_result.params = None
with self.assertRaises(AttributeError):
component_result.fraction = 0.0
with self.assertRaises(AttributeError):
component_result.mean = 0.0
with self.assertRaises(AttributeError):
component_result.median = 0.0
with self.assertRaises(AttributeError):
component_result.mode = 0.0
with self.assertRaises(AttributeError):
component_result.variance = 0.0
with self.assertRaises(AttributeError):
component_result.standard_deviation = 0.0
with self.assertRaises(AttributeError):
component_result.skewness = 0.0
with self.assertRaises(AttributeError):
component_result.kurtosis = 0.0
def test_fraction_invalid(self):
# self.fraction = -1.0
# with self.assertRaises(AssertionError):
# component_result = self.gen_by_defaluts()
self.fraction = None
with self.assertRaises(AssertionError):
component_result = self.gen_by_defaluts()
self.fraction = "0.5"
with self.assertRaises(AssertionError):
component_result = self.gen_by_defaluts()
def test_param_invalid(self):
self.params[0] = np.nan
component_result = self.gen_by_defaluts()
self.assertTrue(component_result.has_nan)
self.assertTrue(np.isnan(component_result.fraction))
self.assertTrue(np.all(np.isnan(component_result.component_y)))
self.assertTrue(np.isnan(component_result.mean))
self.assertTrue(np.isnan(component_result.median))
self.assertTrue(np.isnan(component_result.mode))
# Tools to study and correct for trends in spectroscopic success rate (ssr)
# Initial LRG model fitting taken from <NAME> notebook
import sys, os, glob, time, warnings, gc
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack, join
import fitsio
from scipy.optimize import curve_fit, minimize
import LSS.common_tools as common
elgcol = ['SUBSET','EBV','PRIORITY','TARGETID','OII_FLUX','OII_FLUX_IVAR','ELG_LOP','ELG_VLO','TSNR2_ELG','TSNR2_LRG','PHOTSYS','MASKBITS','FIBERFLUX_G','FIBERFLUX_R','FIBERFLUX_Z','COADD_FIBERSTATUS','Z','ZWARN','DELTACHI2']
def ELG_goodobs(data,fbs_col='COADD_FIBERSTATUS'):#,dt_col='DESI_TARGET'):
mask = data[fbs_col]==0
print(fbs_col,np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove "no data" fibers
mask &= data['ZWARN'] & 2**9==0
print('& No data', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Apply imaging mask
#mask &= data['lrg_mask']==0
#print('& LRG imaging mask', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
data['q'] = ELG_goodz(data)#data['ZWARN']==0
print('failure rate is '+str(np.sum(~data['q'])/len(data)))
return data
def ELG_goodz(data,zcol='Z'):
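# o2c combines the significance of the [OII] flux (flux times sqrt of its inverse variance)
# with DELTACHI2; redshifts with o2c > 0.9 are treated as good.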
o2c = np.log10(data['OII_FLUX'] * np.sqrt(data['OII_FLUX_IVAR']))+0.2*np.log10(data['DELTACHI2'])
sel = o2c > 0.9
return sel
def LRG_goodobs(data,fbs_col='COADD_FIBERSTATUS',dt_col='DESI_TARGET'):
mask = data[fbs_col]==0
print(fbs_col,np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove "no data" fibers
mask &= data['ZWARN'] & 2**9==0
print('& No data', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Apply LRG mask
#mask &= data['lrg_mask']==0
#print('& LRG imaging mask', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
# Remove QSO targets
mask &= data[dt_col] & 2**2 ==0
print('& Remove QSO targets', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
data = data[mask]
data['q'] = LRG_goodz(data)#data['ZWARN']==0
#data['q'] &= data['Z']<1.5
#data['q'] &= data['DELTACHI2']>15
print('failure rate is '+str(np.sum(~data['q'])/len(data)))
return data
def LRG_goodz(data,zcol='Z'):
sel = data['ZWARN']==0
sel &= data[zcol]<1.5
sel &= data['DELTACHI2']>15
return sel
def get_ELG_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
data = data[sel]
print(len(data))
data['q'] = data['o2c'] > 0.9
cats.append(data)
if len(cats) == 1:
cat = cats[0]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_BGS_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
data = data[sel]
print(len(data))
gz = data['ZWARN'] == 0
gz &= data['DELTACHI2'] > 40
data['q'] = gz
cats.append(data)
if len(cats) == 1:
cat = cats[0]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['EFFTIME_BGS'] = 12.15/89.8 * cat['TSNR2_BGS']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_QSO_data_full(tracer,surveys=['DA02'],versions=['test'],specrels=['guadalupe']):
cats = []
for sur,ver,sr in zip(surveys,versions,specrels):
dir = '/global/cfs/cdirs/desi/survey/catalogs/'+sur+'/LSS/'+sr+'/LSScats/'+ver+'/'
tfn = tracer
if sur == 'DA02':
tfn+='zdone'
fn = dir+tfn+'_full.dat.fits'
data = Table(fitsio.read(fn))
print(len(data))
sel = data['ZWARN'] != 999999
sel &= data['SPECTYPE'] != 'STAR'
data = data[sel]
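# Keep only rows with a finite Z_not4clus: NaN (and inf) fail the *0 == 0 test,
# and the sentinel values 999999 and 1e20 are removed just below.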
wz = data['Z_not4clus']*0 == 0
wz &= data['Z_not4clus'] != 999999
wz &= data['Z_not4clus'] != 1.e20
print(len(data),len(wz),np.sum(wz))
data['q'] = wz
cats.append(data)
if len(cats) == 1:
cat = cats[0]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_QSO'] = 8.60/0.255 * cat['TSNR2_QSO']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_ELG_data(specrel='fuji',tr='ELG_LOP',maskbits=[1,11,12,13],notqso=True):
maintids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/'+tr+'targetsDR9v1.1.1.fits',columns=['TARGETID','DESI_TARGET','MASKBITS','NOBS_G','NOBS_R','NOBS_Z'])
maintids = common.cutphotmask(maintids,maskbits)
elgcatdir = '/global/cfs/cdirs/desi/users/raichoor/spectro/'+specrel
sv3 = fitsio.read(elgcatdir+'/sv3-elg-fuji-tiles.fits',columns=elgcol)
st = []
for i in range(0,len(sv3)):
st.append(sv3['SUBSET'][i][:4])
st = np.array(st)
wg = st == "thru"
sv3 = sv3[wg]
if tr != 'ELG':
print('cutting SV3 to main '+tr)
sel = sv3[tr] == True
print('length before is '+str(len(sv3)))
sv3 = sv3[sel]
print('length after is '+str(len(sv3)))
sel = sv3['PRIORITY'] > 10000
sv3 = sv3[sel]
print('length after cutting to priority > 10000 '+str(len(sv3)))
sv3 = ELG_goodobs(Table(sv3))
sv3 = join(sv3,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(sv3)))
elgcatdirg = '/global/cfs/cdirs/desi/users/raichoor/spectro/guadalupe'
main = fitsio.read(elgcatdirg+'/main-elg-guadalupe-tiles.fits',columns=elgcol)
st = []
for i in range(0,len(main)):
st.append(main['SUBSET'][i][:4])
st = np.array(st)
wg = st == "thru"
main = main[wg]
if tr != 'ELG':
print('cutting main to main '+tr)
sel = main[tr] == True
print('length before is '+str(len(main)))
main = main[sel]
print('length after is '+str(len(main)))
main = ELG_goodobs(Table(main))
main = join(main,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(main)))
sv1 = fitsio.read(elgcatdir+'/sv1-elg-fuji-tiles.fits',columns=elgcol)
if tr != 'ELG':
print('cutting SV1 to main '+tr)
sel = sv1[tr] == True
print('length before is '+str(len(sv1)))
sv1 = sv1[sel]
print('length after is '+str(len(sv1)))
sv1 = ELG_goodobs(Table(sv1))
sv1 = join(sv1,maintids,keys=['TARGETID'])
print('length after join to main targets to get DESI_TARGET and cut on maskbits values '+str(len(sv1)))
#cat = vstack([sv1, sv3, main], join_type='inner')
#cat = vstack([sv1, main], join_type='inner')
cat = main
print(len(cat))
if notqso:
# Remove QSO targets
mask = cat['DESI_TARGET'] & 2**2 ==0
print(' Remove QSO targets', np.sum(mask), np.sum(~mask), np.sum(~mask)/len(mask))
cat = cat[mask]
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
gextc = 3.214
cat['gfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_G']) - gextc * cat['EBV']
cat['FIBERFLUX_G_EC'] = cat['FIBERFLUX_G']*10**(0.4*gextc*cat['EBV'])
rextc = 2.165
cat['rfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_R']) - rextc * cat['EBV']
cat['FIBERFLUX_R_EC'] = cat['FIBERFLUX_R']*10**(0.4*rextc*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def get_LRG_data(specrel='fuji'):
maintids = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/LRGtargetsDR9v1.1.1.fits',columns=['TARGETID','lrg_mask'])
sel = maintids['lrg_mask'] == 0
maintids = maintids[sel]
zcatdir = '/global/cfs/cdirs/desi/spectro/redux/'+specrel+'/zcatalog/'
perexpall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-perexp.fits'))
sel = np.isin(perexpall['TARGETID'],maintids['TARGETID'])
perexplrg = perexpall[sel]
del perexpall
perexplrg = LRG_goodobs(perexplrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_1xall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-1x_depth.fits'))
sel = np.isin(cat_1xall['TARGETID'],maintids['TARGETID'])
cat_1xlrg = cat_1xall[sel]
del cat_1xall
cat_1xlrg = LRG_goodobs(cat_1xlrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_deepall = Table(fitsio.read(zcatdir+'ztile-sv1-dark-cumulative.fits'))
sel = np.isin(cat_deepall['TARGETID'],maintids['TARGETID'])
cat_deeplrg = cat_deepall[sel]
del cat_deepall
cat_deeplrg = LRG_goodobs(cat_deeplrg,'COADD_FIBERSTATUS','SV1_DESI_TARGET')
cat_sv3all = Table(fitsio.read(zcatdir+'ztile-sv3-dark-cumulative.fits'))
sel = np.isin(cat_sv3all['TARGETID'],maintids['TARGETID'])
sel &= cat_sv3all['PRIORITY'] == 103200 #we don't want to include the failed repeats in the statistics
cat_sv3lrg = cat_sv3all[sel]
del cat_sv3all
cat_sv3lrg = LRG_goodobs(cat_sv3lrg,'COADD_FIBERSTATUS','SV3_DESI_TARGET')
if specrel == 'fuji':
specrelmain = 'guadalupe'
zcatdirm = '/global/cfs/cdirs/desi/spectro/redux/'+specrelmain+'/zcatalog/'
cat_mainall = Table(fitsio.read(zcatdirm+'ztile-main-dark-cumulative.fits'))
sel = np.isin(cat_mainall['TARGETID'],maintids['TARGETID'])
cat_mainlrg = cat_mainall[sel]
del cat_mainall
cat_mainlrg = LRG_goodobs(cat_mainlrg,'COADD_FIBERSTATUS','DESI_TARGET')
cat = vstack([perexplrg, cat_1xlrg, cat_mainlrg, cat_deeplrg, cat_sv3lrg], join_type='inner')
print(len(cat))
cat['EFFTIME_ELG'] = 8.60 * cat['TSNR2_ELG']
cat['EFFTIME_LRG'] = 12.15 * cat['TSNR2_LRG']
cat['zfibermag'] = 22.5 - 2.5*np.log10(cat['FIBERFLUX_Z']) - 1.211 * cat['EBV']
cat['FIBERFLUX_Z_EC'] = cat['FIBERFLUX_Z']*10**(0.4*1.211*cat['EBV'])
cat['qf'] = np.array(cat['q'], dtype=float)
return cat
def fit_cons(dl,el,minv=0,step=0.01):
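# Brute-force fit of a constant c to the points dl with errors el: step c upward from minv
# while the chi^2 = sum((dl - c)^2 / el^2) keeps decreasing, then return the last (smallest)
# cost together with the current c (note c ends one step past the minimizer, which is held in oc).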
c = minv
newcost = np.sum((dl-c)**2./el**2.)
oldcost = newcost + 1
while newcost < oldcost:
oc = c
oldcost = newcost
c += step
newcost = np.sum((dl-c)**2./el**2.)
return oldcost,c
class LRG_ssr:
def __init__(self,specrel='fuji',efftime_min=500,efftime_max=2000):
self.cat = get_LRG_data(specrel)
mask = self.cat['EFFTIME_LRG']>efftime_min
mask &= self.cat['EFFTIME_LRG']<efftime_max
self.cat = self.cat[mask]
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_Z_EC'], self.cat['EFFTIME_LRG'], *params)
return self.cost(q_predict)
def failure_rate(self,flux, efftime, a, b, c):
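# Failure probability modeled as exp(-(S/N + a)/b) + c/flux, clipped to [0, 1],
# with the signal-to-noise proxy S/N = fiber flux * sqrt(effective time).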
sn = flux * np.sqrt(efftime)
return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
def add_modpre(self,data):
res = minimize(self.wrapper, [0, 10., 0.01], bounds=((-200, 200), (0, 100), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars)
dflux = data['FIBERFLUX_Z']*10**(0.4*1.211*data['EBV'])#data['FIBERFLUX_Z_EC']
deff = 12.15 * data['TSNR2_LRG']#data['EFFTIME_LRG']
data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
return data
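# Illustrative usage (a hedged sketch, not part of the original module):
#   ssr = LRG_ssr(specrel='fuji')      # loads the LRG repeats sample and trims it in EFFTIME_LRG
#   data = ssr.add_modpre(data)        # fits the failure-rate model and adds 'mod_success_rate'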
class BGS_ssr:
def __init__(self,specrel='fuji',efftime_min=100,efftime_max=300):
self.cat = get_BGS_data_full('BGS_BRIGHT')
mask = self.cat['EFFTIME_BGS']>efftime_min
mask &= self.cat['EFFTIME_BGS']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_BGS'])
hf,_ = np.histogram(self.cat['EFFTIME_BGS'][~self.selgz])
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_R_EC'], self.cat['EFFTIME_BGS'], *params)
return self.cost(q_predict)
def failure_rate(self,flux, efftime, a, b, c):
sn = flux * np.sqrt(efftime)
return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, .5)
def add_modpre(self,data):
res = minimize(self.wrapper, [0, 10., 0.01], bounds=((-200, 200), (0, 100), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper(pars))
dflux = data['FIBERFLUX_R']*10**(0.4*2.165*data['EBV'])#data['FIBERFLUX_Z_EC']
deff = 12.15/89.8 * data['TSNR2_BGS']#data['EFFTIME_LRG']
data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
#print(len(data),np.sum(data['mod_success_rate']))
ha,_ = np.histogram(deff,bins=self.bine)
gz = data['ZWARN'] == 0
gz &= data['DELTACHI2'] > 40
hf,_ = np.histogram(deff[gz],weights=1/data[gz]['mod_success_rate'],bins=self.bine)
plt.errorbar(self.bc,1.-self.nzf,self.nzfe,fmt='ko')
plt.errorbar(self.bc,hf/ha,self.nzfe,fmt='rd')
plt.show()
return data
class ELG_ssr:
def __init__(self,specrel='fuji',efftime_min=450,efftime_max=1500):
self.cat = get_ELG_data_full('ELG_LOPnotqso')#get_ELG_data(specrel)
mask = self.cat['EFFTIME_ELG']>efftime_min
mask &= self.cat['EFFTIME_ELG']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_ELG'])
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][~self.selgz])
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
self.vis_5hist = False
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_G_EC'], self.cat['EFFTIME_ELG'], *params)
return self.cost(q_predict)
def wrapper_hist(self,params):
h_predict = self.failure_rate_eff(self.bc, *params)
diff = self.nzf-h_predict
cost = np.sum((diff/self.nzfe)**2.)
return cost
def failure_rate(self,flux, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c/flux, 0, 1)
def failure_rate_eff(self, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c, 0, 1)
def hist_norm(self,fluxc):
nzfper = []
consl = []
nb = 5
pstep = 100//5
costt = 0
for i in range(0,nb):
sel = self.cat['FIBERFLUX_G_EC'] > np.percentile(self.cat['FIBERFLUX_G_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_G_EC'] < np.percentile(self.cat['FIBERFLUX_G_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_G_EC'][sel])
if self.vis_5hist:
print(mf)
#fper.append(mf)
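# Rescale the fiducial redshift-failure weights linearly in fiber flux about the sample
# median self.mft; fluxc is the coefficient being optimized, and weights below 1 are floored at 1.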
wtf = (fluxc*(self.mft-self.cat['FIBERFLUX_G_EC'])/self.mft+1)*(self.wts_fid-1)+1
selw = wtf < 1
wtf[selw] = 1
ha,_ = np.histogram(self.cat['EFFTIME_ELG'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],weights=wtf[sel&self.selgz],bins=self.bine)
#if self.vis_5hist:
# print(mf)
# print(np.sum(ha))
# print(np.sum(hf))
dl = hf/ha
nzfper.append(dl)
def ccost(c):
return np.sum((dl-c)**2./self.nzfpere[i]**2.)
resc = minimize(ccost, np.ones(1))
bc = resc.x
cost = ccost(bc)
consl.append(bc)
costt += cost
if self.vis_5hist:
for i in range(0,nb):
plt.errorbar(self.bc,nzfper[i],self.nzfpere[i])
plt.plot(self.bc,np.ones(len(self.bc))*consl[i],'k:')
plt.show()
return costt
def add_modpre(self,data):
res = minimize(self.wrapper_hist, [-200, 10., 0.01], bounds=((-10000, 0), (0, 10000), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper_hist(pars))
gextc = 3.214
dflux = data['FIBERFLUX_G']*10**(0.4*gextc*data['EBV']) #data['FIBERFLUX_G_EC']
deff = 8.60 * data['TSNR2_ELG']#data['EFFTIME_ELG']
#data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
data['mod_success_rate'] = 1. -self.failure_rate_eff(deff,*pars)
assr = 1. -self.failure_rate_eff(self.cat['EFFTIME_ELG'],*pars)
relssr = assr/np.max(assr)
drelssr = data['mod_success_rate']/np.max(assr)#np.max(data['mod_success_rate'])
seld = deff > 450
seld &= deff < 1500
print(len(relssr),len(drelssr[seld]),np.max(assr),np.max(data[seld]['mod_success_rate']))
self.wts_fid = 1/relssr
nzfper = []
nzfpere = []
fper = []
self.mft = np.median(self.cat['FIBERFLUX_G_EC'])
nb = 5
pstep = 100//5
for i in range(0,nb):
sel = self.cat['FIBERFLUX_G_EC'] > np.percentile(self.cat['FIBERFLUX_G_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_G_EC'] < np.percentile(self.cat['FIBERFLUX_G_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_G_EC'][sel])
fper.append(mf)
ha,_ = np.histogram(self.cat['EFFTIME_ELG'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],bins=self.bine)
hfw,_ = np.histogram(self.cat['EFFTIME_ELG'][sel&self.selgz],weights=self.wts_fid[sel&self.selgz],bins=self.bine)
nzfper.append(hf/ha)
nzfpere.append(np.sqrt(ha-hf)/ha)
#plt.plot(self.bc,hfw/ha)
#plt.title('inputs')
#plt.show()
self.nzfpere = nzfpere
rest = minimize(self.hist_norm, np.ones(1))#, bounds=((-10, 10)),
#method='Powell', tol=1e-6)
fcoeff = rest.x
self.vis_5hist = True
print(fcoeff,self.hist_norm(fcoeff))#,self.hist_norm(0.),self.hist_norm(1.))
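# Final per-target ZFAIL weight: inverse of the flux-modulated relative success rate,
# floored at 1 just below so that no target is down-weighted.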
wtf = (fcoeff*(self.mft-dflux)/self.mft+1)*(1/drelssr-1)+1
sel = wtf < 1
wtf[sel] = 1
data['WEIGHT_ZFAIL'] = wtf
return data
# nb = 5
# pstep = 100//5
# costt = 0
#
# seld = np.ones(len(dflux),dtype='bool')
# dflux = dflux[seld]
# deff =deff[seld]
# dselgz = data[seld]['o2c'] > 0.9
# wtf = (1/drelssr[seld]-1)+1
#print('are weight arrays equal?',np.array_equal(self.wts_fid,wtf))
# for i in range(0,nb):
# sel = dflux > np.percentile(dflux,i*pstep)
# sel &= dflux < np.percentile(dflux,(i+1)*pstep)
# mf = np.median(dflux[sel])
#
#
#
# ha,_ = np.histogram(deff[sel],bins=self.bine)
# hf,_ = np.histogram(deff[sel&dselgz],weights=wtf[sel&dselgz],bins=self.bine)
class QSO_ssr:
def __init__(self,specrel='fuji',efftime_min=450,efftime_max=1500):
self.cat = get_QSO_data_full('QSO')#get_ELG_data(specrel)
mask = self.cat['EFFTIME_QSO']>efftime_min
mask &= self.cat['EFFTIME_QSO']<efftime_max
self.cat = self.cat[mask]
self.selgz = self.cat['q'] == 1
ha,bine = np.histogram(self.cat['EFFTIME_QSO'])
hf,_ = np.histogram(self.cat['EFFTIME_QSO'][~self.selgz])
self.nzf = hf/ha
print(self.nzf)
self.nzfe = np.sqrt(hf)/ha
bc = []
bs = bine[1]-bine[0]
for i in range(0,len(bine)-1):
bc.append(bine[i]+bs/2.)
self.bc = np.array(bc)
self.bine = bine
self.vis_5hist = False
def cost(self,q_predict):
return np.sum((self.cat['qf']-q_predict)**2)
def wrapper(self,params):
q_predict = 1-self.failure_rate(self.cat['FIBERFLUX_G_EC'], self.cat['EFFTIME_QSO'], *params)
return self.cost(q_predict)
def wrapper_hist(self,params):
h_predict = self.failure_rate_eff(self.bc, *params)
diff = self.nzf-h_predict
cost = np.sum((diff/self.nzfe)**2.)
return cost
def failure_rate(self,flux, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c/flux, 0, 1)
def failure_rate_eff(self, efftime, a, b, c):
#sn = flux * np.sqrt(efftime)
#return np.clip(np.exp(-(sn+a)/b)+c/flux, 0, 1)
return np.clip(np.exp(-(efftime+a)/b)+c, 0, 1)
def hist_norm(self,fluxc):
nzfper = []
consl = []
nb = 5
pstep = 100//5
costt = 0
for i in range(0,nb):
sel = self.cat['FIBERFLUX_G_EC'] > np.percentile(self.cat['FIBERFLUX_G_EC'],i*pstep)
sel &= self.cat['FIBERFLUX_G_EC'] < np.percentile(self.cat['FIBERFLUX_G_EC'],(i+1)*pstep)
mf = np.median(self.cat['FIBERFLUX_G_EC'][sel])
if self.vis_5hist:
print(mf)
#fper.append(mf)
wtf = (fluxc*(self.mft-self.cat['FIBERFLUX_G_EC'])/self.mft+1)*(self.wts_fid-1)+1
selw = wtf < 1
wtf[selw] = 1
ha,_ = np.histogram(self.cat['EFFTIME_QSO'][sel],bins=self.bine)
hf,_ = np.histogram(self.cat['EFFTIME_QSO'][sel&self.selgz],weights=wtf[sel&self.selgz],bins=self.bine)
#if self.vis_5hist:
# print(mf)
# print(np.sum(ha))
# print(np.sum(hf))
dl = hf/ha
nzfper.append(dl)
def ccost(c):
return np.sum((dl-c)**2./self.nzfpere[i]**2.)
resc = minimize(ccost, np.ones(1))
bc = resc.x
cost = ccost(bc)
consl.append(bc)
costt += cost
if self.vis_5hist:
for i in range(0,nb):
plt.errorbar(self.bc,nzfper[i],self.nzfpere[i])
plt.plot(self.bc,np.ones(len(self.bc))*consl[i],'k:')
plt.show()
return costt
def add_modpre(self,data):
res = minimize(self.wrapper_hist, [-0.001, 1, 0.4], bounds=((-1000, 0), (0, 1000), (0., 1)),
method='Powell', tol=1e-6)
pars = res.x
print(pars,self.wrapper_hist(pars))
plt.errorbar(self.bc,self.nzf,self.nzfe,fmt='ko')
mod = self.failure_rate_eff(self.bc, *pars)
plt.plot(self.bc,mod,'k--')
plt.show()
gextc = 3.214
rextc = 2.165
dflux = data['FIBERFLUX_R']*10**(0.4*rextc*data['EBV']) #data['FIBERFLUX_G_EC']
deff = 8.60/0.255 * data['TSNR2_QSO']#data['EFFTIME_ELG']
#data['mod_success_rate'] = 1. -self.failure_rate(dflux,deff,*pars)
data['mod_success_rate'] = 1. -self.failure_rate_eff(deff,*pars)
assr = 1. -self.failure_rate_eff(self.cat['EFFTIME_QSO'],*pars)
relssr = assr/np.max(assr)
drelssr = data['mod_success_rate']/np.max(assr)
# coding: utf8
########################################################################
# #
# Control law : tau = P(q*-q^) + D(v*-v^) + tau_ff #
# #
########################################################################
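# In code terms, the control law in the banner above reads (illustrative sketch only):
#   tau = P * (q_des - q_meas) + D * (v_des - v_meas) + tau_ff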
from matplotlib import pyplot as plt
import pinocchio as pin
import numpy as np
import numpy.matlib as matlib
import tsid
import FootTrajectoryGenerator as ftg
import FootstepPlanner
import pybullet as pyb
import utils
import time
pin.switchToNumpyMatrix()
########################################################################
# Class for a PD with feed-forward Controller #
########################################################################
class controller:
""" Inverse Dynamics controller that take into account the dynamics of the quadruped to generate
actuator torques to apply on the ground the contact forces computed by the MPC (for feet in stance
phase) and to perform the desired footsteps (for feet in swing phase)
Args:
N_simulation (int): maximum number of Inverse Dynamics iterations for the simulation
"""
def __init__(self, N_simulation, k_mpc, n_periods):
self.q_ref = np.array([[0.0, 0.0, 0.2027682, 0.0, 0.0, 0.0, 1.0,
0.0, 0.8, -1.6, 0, 0.8, -1.6,
0, -0.8, 1.6, 0, -0.8, 1.6]]).transpose()
self.qtsid = self.q_ref.copy()
self.vtsid = np.zeros((18, 1))
self.ades = np.zeros((18, 1))
self.error = False
self.verbose = True
# List with the names of all feet frames
self.foot_frames = ['FL_FOOT', 'FR_FOOT', 'HL_FOOT', 'HR_FOOT']
# Constraining the contacts
mu = 0.9 # friction coefficient
fMin = 1.0 # minimum normal force
fMax = 25.0 # maximum normal force
contactNormal = np.matrix([0., 0., 1.]).T # direction of the normal to the contact surface
# Coefficients of the posture task
kp_posture = 10.0 # proportional gain of the posture task
w_posture = 1.0 # weight of the posture task
# Coefficients of the contact tasks
kp_contact = 100.0 # proportional gain for the contacts
self.w_forceRef = 50.0 # weight of the forces regularization
self.w_reg_f = 50.0
# Coefficients of the foot tracking task
kp_foot = 100.0 # proportional gain for the tracking task
self.w_foot = 500.0 # weight of the tracking task
# Arrays to store logs
k_max_loop = N_simulation
self.f_pos = np.zeros((4, k_max_loop, 3))
self.f_vel = np.zeros((4, k_max_loop, 3))
self.f_acc = np.zeros((4, k_max_loop, 3))
self.f_pos_ref = np.zeros((4, k_max_loop, 3))
self.f_vel_ref = np.zeros((4, k_max_loop, 3))
self.f_acc_ref = np.zeros((4, k_max_loop, 3))
self.b_pos = np.zeros((k_max_loop, 6))
self.b_vel = np.zeros((k_max_loop, 6))
self.com_pos = np.zeros((k_max_loop, 3))
self.com_pos_ref = np.zeros((k_max_loop, 3))
self.c_forces = np.zeros((4, k_max_loop, 3))
self.h_ref_feet = np.zeros((k_max_loop, ))
self.goals = np.zeros((3, 4))
self.vgoals = np.zeros((3, 4))
self.agoals = np.zeros((3, 4))
self.mgoals = np.zeros((6, 4))
# Position of the shoulders in local frame
self.shoulders = np.array([[0.19, 0.19, -0.19, -0.19], [0.15005, -0.15005, 0.15005, -0.15005]])
self.footsteps = self.shoulders.copy()
self.memory_contacts = self.shoulders.copy()
# Foot trajectory generator
max_height_feet = 0.05
t_lock_before_touchdown = 0.05
self.ftgs = [ftg.Foot_trajectory_generator(max_height_feet, t_lock_before_touchdown) for i in range(4)]
# Which pair of feet is active (0 for [1, 2] and 1 for [0, 3])
self.pair = -1
# Number of TSID steps for 1 step of the MPC
self.k_mpc = k_mpc
# For update_feet_tasks function
self.dt = 0.001 # [s], time step
self.t1 = 0.14 # [s], duration of swing phase
# Rotation matrix
self.R = np.eye(3)
# Feedforward torques
self.tau_ff = np.zeros((12, 1))
# Torques sent to the robot
self.torques12 = np.zeros((12, 1))
self.tau = np.zeros((12, ))
self.ID_base = None # ID of base link
self.ID_feet = [None] * 4 # ID of feet links
# Footstep planner object
# self.fstep_planner = FootstepPlanner.FootstepPlanner(0.001, 32)
self.vu_m = np.zeros((6, 1))
self.t_stance = 0.16
self.T_gait = 0.32
self.n_periods = n_periods
self.h_ref = 0.235 - 0.01205385
self.t_swing = np.zeros((4, )) # Total duration of current swing phase for each foot
self.contacts_order = [0, 1, 2, 3]
# Parameter to enable/disable hybrid control
self.enable_hybrid_control = False
# Time since the start of the simulation
self.t = 0.0
########################################################################
# Definition of the Model and TSID problem #
########################################################################
# Set the paths where the urdf and srdf file of the robot are registered
modelPath = "/opt/openrobots/share/example-robot-data/robots"
urdf = modelPath + "/solo_description/robots/solo12.urdf"
srdf = modelPath + "/solo_description/srdf/solo.srdf"
vector = pin.StdVec_StdString()
vector.extend(item for item in modelPath)
# Create the robot wrapper from the urdf model (which has no free flyer) and add a free flyer
self.robot = tsid.RobotWrapper(urdf, vector, pin.JointModelFreeFlyer(), False)
self.model = self.robot.model()
# Creation of the Inverse Dynamics HQP problem using the robot
# accelerations (base + joints) and the contact forces
self.invdyn = tsid.InverseDynamicsFormulationAccForce("tsid", self.robot, False)
# Compute the problem data with a solver based on EiQuadProg
t = 0.0
self.invdyn.computeProblemData(t, self.qtsid, self.vtsid)
# Saving IDs for later
self.ID_base = self.model.getFrameId("base_link")
for i, name in enumerate(self.foot_frames):
self.ID_feet[i] = self.model.getFrameId(name)
# Store a frame object to avoid creating one each time
self.pos_foot = self.robot.framePosition(self.invdyn.data(), self.ID_feet[0])
#####################
# LEGS POSTURE TASK #
#####################
# Task definition (creating the task object)
self.postureTask = tsid.TaskJointPosture("task-posture", self.robot)
self.postureTask.setKp(kp_posture * matlib.ones(self.robot.nv-6).T) # Proportional gain
self.postureTask.setKd(2.0 * np.sqrt(kp_posture) * matlib.ones(self.robot.nv-6).T) # Derivative gain
# Add the task to the HQP with weight = w_posture, priority level = 1 (not real constraint)
# and a transition duration = 0.0
self.invdyn.addMotionTask(self.postureTask, w_posture, 1, 0.0)
# TSID Trajectory (creating the trajectory object and linking it to the task)
pin.loadReferenceConfigurations(self.model, srdf, False)
self.trajPosture = tsid.TrajectoryEuclidianConstant("traj_joint", self.q_ref[7:])
self.samplePosture = self.trajPosture.computeNext()
self.postureTask.setReference(self.samplePosture)
############
# CONTACTS #
############
self.contacts = 4*[None] # List to store the rigid contact tasks
for i, name in enumerate(self.foot_frames):
# Contact definition (creating the contact object)
self.contacts[i] = tsid.ContactPoint(name, self.robot, name, contactNormal, mu, fMin, fMax)
self.contacts[i].setKp((kp_contact * matlib.ones(3).T))
self.contacts[i].setKd((2.0 * np.sqrt(kp_contact) * matlib.ones(3).T))
self.contacts[i].useLocalFrame(False)
# Set the contact reference position
H_ref = self.robot.framePosition(self.invdyn.data(), self.ID_feet[i])
H_ref.translation = np.matrix(
[H_ref.translation[0, 0],
H_ref.translation[1, 0],
0.0]).T
self.contacts[i].setReference(H_ref)
# Regularization weight for the force tracking subtask
self.contacts[i].setRegularizationTaskWeightVector(
np.matrix([self.w_reg_f, self.w_reg_f, self.w_reg_f]).T)
# Adding the rigid contact after the reference contact force has been set
self.invdyn.addRigidContact(self.contacts[i], self.w_forceRef)
#######################
# FOOT TRACKING TASKS #
#######################
self.feetTask = 4*[None] # List to store the foot tracking tasks
mask = np.matrix([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]).T
# Task definition (creating the task object)
for i_foot in range(4):
self.feetTask[i_foot] = tsid.TaskSE3Equality(
"foot_track_" + str(i_foot), self.robot, self.foot_frames[i_foot])
self.feetTask[i_foot].setKp(kp_foot * mask)
self.feetTask[i_foot].setKd(2.0 * np.sqrt(kp_foot) * mask)
self.feetTask[i_foot].setMask(mask)
self.feetTask[i_foot].useLocalFrame(False)
# The reference will be set later when the task is enabled
##########
# SOLVER #
##########
# Use EiquadprogFast solver
self.solver = tsid.SolverHQuadProgFast("qp solver")
# Resize the solver to fit the number of variables, equality and inequality constraints
self.solver.resize(self.invdyn.nVar, self.invdyn.nEq, self.invdyn.nIn)
def update_feet_tasks(self, k_loop, gait, looping, interface, ftps_Ids_deb):
"""Update the 3D desired position for feet in swing phase by using a 5-th order polynomial that lead them
to the desired position on the ground (computed by the footstep planner)
Args:
k_loop (int): number of time steps since the start of the current gait cycle
pair (int): the current pair of feet in swing phase, for a walking trot gait
looping (int): total number of time steps in one gait cycle
interface (Interface object): interface between the simulator and the MPC/InvDyn
ftps_Ids_deb (list): IDs of debug spheres in PyBullet
"""
# Indexes of feet in swing phase
feet = np.where(gait[0, 1:] == 0)[0]
if len(feet) == 0: # If no foot in swing phase
return 0
t0s = []
for i in feet: # For each foot in swing phase get remaining duration of the swing phase
# Index of the line containing the next stance phase
index = next((idx for idx, val in np.ndenumerate(gait[:, 1+i]) if val == 1), [-1])[0]
remaining_iterations = np.cumsum(gait[:index, 0])[-1] * self.k_mpc - (k_loop % self.k_mpc)
# Compute total duration of current swing phase
i_iter = 1
self.t_swing[i] = gait[0, 0]
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter += 1
i_iter = -1
while gait[i_iter, 1+i] == 0:
self.t_swing[i] += gait[i_iter, 0]
i_iter -= 1
self.t_swing[i] *= self.dt * self.k_mpc
t0s.append(np.round(self.t_swing[i] - remaining_iterations * self.dt, decimals=3))
# self.footsteps contains the target (x, y) positions for both feet in swing phase
for i in range(len(feet)):
i_foot = feet[i]
# Get desired 3D position, velocity and acceleration
if t0s[i] == 0.000:
[x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i_foot]).get_next_foot(
interface.o_feet[0, i_foot], interface.ov_feet[0, i_foot], interface.oa_feet[0, i_foot],
interface.o_feet[1, i_foot], interface.ov_feet[1, i_foot], interface.oa_feet[1, i_foot],
self.footsteps[0, i_foot], self.footsteps[1, i_foot], t0s[i], self.t_swing[i_foot], self.dt)
self.mgoals[:, i_foot] = np.array([x0, dx0, ddx0, y0, dy0, ddy0])
else:
[x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i_foot]).get_next_foot(
self.mgoals[0, i_foot], self.mgoals[1, i_foot], self.mgoals[2, i_foot],
self.mgoals[3, i_foot], self.mgoals[4, i_foot], self.mgoals[5, i_foot],
self.footsteps[0, i_foot], self.footsteps[1, i_foot], t0s[i], self.t_swing[i_foot], self.dt)
self.mgoals[:, i_foot] = np.array([x0, dx0, ddx0, y0, dy0, ddy0])
# Take into account vertical offset of Pybullet
z0 += interface.mean_feet_z
# Store desired position, velocity and acceleration for later call to this function
self.goals[:, i_foot] = np.array([x0, y0, z0])
self.vgoals[:, i_foot] = np.array([dx0, dy0, dz0])
self.agoals[:, i_foot] = np.array([ddx0, ddy0, ddz0])
# Update desired pos, vel, acc
self.sampleFeet[i_foot].pos(np.matrix([x0, y0, z0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]).T)
import numpy
import scipy.linalg
from . import linsys, utils
__all__ = [
"DeflatedCg",
"DeflatedMinres",
"DeflatedGmres",
"_DeflationMixin",
"ObliqueProjection",
"_Projection",
"Ritz",
"Arnoldifyer",
"bound_pseudo",
]
class _Projection(utils.Projection):
def __init__(self, linear_system, U, **kwargs):
"""Abstract base class of a projection for deflation.
:param A: the :py:class:`~krypy.linsys.LinearSystem`.
:param U: basis of the deflation space with ``U.shape == (N, d)``.
All parameters of :py:class:`~krypy.utils.Projection` are valid except
``X`` and ``Y``.
"""
raise NotImplementedError("abstract base class cannot be instantiated")
class ObliqueProjection(_Projection):
def __init__(self, linear_system, U, qr_reorthos=0, **kwargs):
"""Oblique projection for left deflation."""
# preprocess and store input
self.linear_system = linear_system
(N, d) = U.shape
# orthogonalize U in the Minv-inner-product
U, _ = utils.qr(U, ip_B=linear_system.get_ip_Minv_B(), reorthos=qr_reorthos)
self.U = U
"""An orthonormalized basis of the deflation space ``U`` with respect
to provided inner product."""
# apply operator to U
self.AU = linear_system.MlAMr * U
"""Result of application of operator to deflation space, i.e.,
:math:`M_lAM_rU`."""
self._MAU = None
# call Projection constructor
super(_Projection, self).__init__(
self.AU, self.U, ip_B=linear_system.ip_B, **kwargs
)
def correct(self, z):
"""Correct the given approximate solution ``z`` with respect to the
linear system ``linear_system`` and the deflation space defined by
``U``."""
c = self.linear_system.Ml * (self.linear_system.b - self.linear_system.A * z)
c = utils.inner(self.W, c, ip_B=self.ip_B)
if self.Q is not None and self.R is not None:
c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c))
if self.WR is not self.VR:
c = self.WR.dot(scipy.linalg.solve_triangular(self.VR, c))
return z + self.W.dot(c)
@property
def MAU(self):
"""Result of preconditioned operator to deflation space, i.e.,
:math:`MM_lAM_rU`."""
if self._MAU is None:
self._MAU = self.linear_system.M * self.AU
return self._MAU
class _DeflationMixin(object):
"""Mixin class for deflation in Krylov subspace methods.
Can be used to add deflation functionality to any solver from
:py:mod:`~krypy.linsys`.
:param linear_system: the :py:class:`~krypy.linsys.LinearSystem` that
should be solved.
:param U: a basis of the deflation space with ``U.shape == (N, k)``.
All other parameters are passed through to the underlying solver from
:py:mod:`~krypy.linsys`.
"""
def __init__(self, linear_system, U=None, projection_kwargs=None, *args, **kwargs):
if U is None:
U = numpy.zeros((linear_system.N, 0))
if projection_kwargs is None:
projection_kwargs = {}
# construct projection
projection = ObliqueProjection(linear_system, U, **projection_kwargs)
self.projection = projection
"""Projection that is used for deflation."""
# retrieve E=ip_B(U,AU) from projection
if projection.Q is None and projection.R is None:
E = numpy.eye(U.shape[1])
else:
E = projection.Q.dot(projection.R)
if projection.VR is not None and projection.WR is not None:
E = projection.WR.T.conj().dot(E.dot(projection.VR))
self.E = E
r""":math:`E=\langle U,M_lAM_rU\rangle`."""
self.C = numpy.zeros((U.shape[1], 0))
r""":math:`C=\langle U,M_lAM_rV_n\rangle`.
This attribute is updated while the Arnoldi/Lanczos method proceeds.
See also :py:meth:`_apply_projection`.
"""
self._B_ = None
super(_DeflationMixin, self).__init__(
linear_system, dtype=U.dtype, *args, **kwargs
)
def _solve(self):
N = self.linear_system.N
P = utils.LinearOperator(
(N, N), self.projection.AU.dtype, self._apply_projection
)
self.MlAMr = P * self.linear_system.MlAMr
super(_DeflationMixin, self)._solve()
def _apply_projection(self, Av):
"""Apply the projection and store inner product.
:param v: the vector resulting from an application of :math:`M_lAM_r`
to the current Arnoldi vector. (CG needs special treatment, here).
"""
PAv, UAv = self.projection.apply_complement(Av, return_Ya=True)
self.C = numpy.column_stack([self.C, UAv])
return PAv
def _get_initial_residual(self, x0):
"""Return the projected initial residual.
Returns :math:`MPM_l(b-Ax_0)`.
"""
if x0 is None:
Mlr = self.linear_system.Mlb
else:
r = self.linear_system.b - self.linear_system.A * x0
Mlr = self.linear_system.Ml * r
PMlr, self.UMlr = self.projection.apply_complement(Mlr, return_Ya=True)
MPMlr = self.linear_system.M * PMlr
MPMlr_norm = utils.norm(PMlr, MPMlr, ip_B=self.linear_system.ip_B)
return MPMlr, PMlr, MPMlr_norm
def _get_xk(self, yk):
xk = super(_DeflationMixin, self)._get_xk(yk)
return self.projection.correct(xk)
@property
def B_(self):
r""":math:`\underline{B}=\langle V_{n+1},M_lAM_rU\rangle`.
This property is obtained from :math:`C` if the operator is
self-adjoint. Otherwise, the inner products have to be formed
explicitly."""
(n_, n) = self.H.shape
ls = self.linear_system
if self._B_ is None or self._B_.shape[1] < n_:
# compute B_
if ls.self_adjoint:
self._B_ = self.C.T.conj()
if n_ > n:
self._B_ = numpy.vstack(
[
self._B_,
utils.inner(
self.V[:, [-1]], self.projection.AU, ip_B=ls.ip_B
),
]
)
else:
self._B_ = utils.inner(self.V, self.projection.AU, ip_B=ls.ip_B)
return self._B_
def estimate_time(self, nsteps, ndefl, deflweight=1.0):
"""Estimate time needed to run nsteps iterations with deflation
Uses timings from :py:attr:`linear_system` if it is an instance of
:py:class:`~krypy.linsys.TimedLinearSystem`. Otherwise, an
:py:class:`~krypy.utils.OtherError`
is raised.
:param nsteps: number of iterations.
:param ndefl: number of deflation vectors.
:param deflweight: (optional) the time for the setup and application of
the projection for deflation is multiplied by this factor. This can
be used as a counter measure for the evaluation of Ritz vectors.
Defaults to 1.
"""
# get ops for nsteps of this solver
solver_ops = self.operations(nsteps)
# define ops for deflation setup + application with ndefl deflation
# vectors
proj_ops = {
"A": ndefl,
"M": ndefl,
"Ml": ndefl,
"Mr": ndefl,
"ip_B": (
ndefl * (ndefl + 1) / 2 + ndefl ** 2 + 2 * ndefl * solver_ops["Ml"]
),
"axpy": (
ndefl * (ndefl + 1) / 2
+ ndefl * ndefl
+ (2 * ndefl + 2) * solver_ops["Ml"]
),
}
# get timings from linear_system
if not isinstance(self.linear_system, linsys.TimedLinearSystem):
raise utils.RuntimeError(
"A `TimedLinearSystem` has to be used in order to obtain " "timings."
)
timings = self.linear_system.timings
return timings.get_ops(solver_ops) + deflweight * timings.get_ops(proj_ops)
class DeflatedCg(_DeflationMixin, linsys.Cg):
"""Deflated preconditioned CG method.
See :py:class:`_DeflationMixin` and :py:class:`~krypy.linsys.Cg` for
the documentation of the available parameters.
"""
def __init__(self, *args, **kwargs):
self._UAps = []
super(DeflatedCg, self).__init__(*args, **kwargs)
def _apply_projection(self, Av):
r"""Computes :math:`\langle C,M_lAM_rV_n\rangle` efficiently with a
three-term recurrence."""
PAv, UAp = self.projection.apply_complement(Av, return_Ya=True)
self._UAps.append(UAp)
c = UAp.copy()
rhos = self.rhos
if self.iter > 0:
c -= (1 + rhos[-1] / rhos[-2]) * self._UAps[-2]
if self.iter > 1:
c += rhos[-2] / rhos[-3] * self._UAps[-3]
c *= ((-1) ** self.iter) / numpy.sqrt(rhos[-1])
if self.iter > 0:
c -= numpy.sqrt(rhos[-2] / rhos[-1]) * self.C[:, [-1]]
self.C = numpy.column_stack([self.C, c])
return PAv
class DeflatedMinres(_DeflationMixin, linsys.Minres):
"""Deflated preconditioned MINRES method.
See :py:class:`_DeflationMixin` and :py:class:`~krypy.linsys.Minres` for
the documentation of the available parameters.
"""
pass
class DeflatedGmres(_DeflationMixin, linsys.Gmres):
"""Deflated preconditioned GMRES method.
See :py:class:`_DeflationMixin` and :py:class:`~krypy.linsys.Gmres` for
the documentation of the available parameters.
"""
pass
class Arnoldifyer(object):
def __init__(self, deflated_solver):
r"""Obtain Arnoldi relations for approximate deflated Krylov subspaces.
:param deflated_solver: an instance of a deflated solver.
"""
self._deflated_solver = deflated_solver
H = deflated_solver.H
B_ = deflated_solver.B_
C = deflated_solver.C
E = deflated_solver.E
V = deflated_solver.V
U = deflated_solver.projection.U
ls = deflated_solver.linear_system
MAU = deflated_solver.projection.MAU
# get dimensions
n_, n = self.n_, self.n = H.shape
d = self.d = deflated_solver.projection.U.shape[1]
# store a few matrices for later use
EinvC = numpy.linalg.solve(E, C) if d > 0 else numpy.zeros((0, n))
self.L = numpy.block([[H, numpy.zeros((n_, d))], [EinvC, numpy.eye(d)]])
self.J = numpy.block([[numpy.eye(n, n_), B_[:n, :]], [numpy.zeros((d, n_)), E]])
self.M = numpy.block([[H[:n, :n] + B_[:n, :].dot(EinvC), B_[:n, :]], [C, E]])
self.A_norm = numpy.linalg.norm(self.M, 2)
if d > 0:
# rank-revealing QR decomp of projected MAU
Q, R, P = scipy.linalg.qr(
MAU - U.dot(E) - V.dot(B_), mode="economic", pivoting=True
)
# inverse permutation
P_inv = numpy.argsort(P)
# rank of R
l = (numpy.abs(numpy.diag(R)) > 1e-14 * self.A_norm).sum()
Q1 = Q[:, :l]
self.R12 = R[:l, P_inv]
# reorthonormalize in correct inner product.
# the above can be removed if the rank-revealing factorization
# is carried out directly in the appropriate inner product
Q1, Rt = utils.qr(Q1, ip_B=ls.get_ip_Minv_B())
self.R12 = Rt.dot(self.R12)
# residual helper matrix
self.N = numpy.column_stack(
[numpy.eye(l + n_ - n, n_ - n), numpy.vstack([B_[n:, :], self.R12])]
).dot(numpy.block([[numpy.zeros((d + n_ - n, n)), numpy.eye(d + n_ - n)]]))
else:
Q1 = numpy.zeros((U.shape[0], 0))
self.R12 = numpy.zeros((0, 0))
self.N = numpy.block(
[[numpy.zeros((n_ - n, n)), numpy.eye(n_ - n, n_ - n)]]
)
# residual basis
self.Z = numpy.column_stack([V[:, n:], Q1])
def get(self, Wt, full=False):
r"""Get Arnoldi relation for a deflation subspace choice.
:param Wt: the coefficients :math:`\tilde{W}` of the deflation vectors
in the basis :math:`[V_n,U]` with ``Wt.shape == (n+d, k)``, i.e., the
deflation vectors are :math:`W=[V_n,U]\tilde{W}`. Must fulfill
:math:`\tilde{W}^*\tilde{W}=I_k`.
:param full: (optional) should the full Arnoldi
basis and the full perturbation be returned? Defaults to ``False``.
:return:
* ``Hh``: the Hessenberg matrix with ``Hh.shape == (n+d-k, n+d-k)``.
* ``Rh``: the perturbation core matrix with
``Rh.shape == (l, n+d-k)``.
* ``q_norm``: norm :math:`\|q\|_2`.
* ``vdiff_norm``: the norm of the difference of the initial vectors
:math:`\tilde{v}-\hat{v}`.
* ``PWAW_norm``: norm of the projection
:math:`P_{\mathcal{W}^\perp,A\mathcal{W}}`.
* ``Vh``: (if ``full == True``) the Arnoldi basis with ``Vh.shape ==
(N, n+d-k)``.
* ``F``: (if ``full == True``) the perturbation matrix
:math:`F=-Z\hat{R}\hat{V}_n^* - \hat{V}_n\hat{R}^*Z^*`.
"""
n = self.n
n_ = self.n_
d = self.d
k = Wt.shape[1]
# get orthonormal basis of Wt and Wt^\perp
if k > 0:
Wto, _ = scipy.linalg.qr(Wt)
Wt = Wto[:, :k]
Wto = Wto[:, k:]
else:
Wto = numpy.eye(Wt.shape[0])
deflated_solver = self._deflated_solver
Pt = utils.Projection(
self.L.dot(Wt), self.J.T.conj().dot(Wt)
).operator_complement()
if d > 0:
qt = Pt * (
numpy.vstack(
[
[[deflated_solver.MMlr0_norm]],
numpy.zeros((self.n_ - 1, 1)),
numpy.linalg.solve(deflated_solver.E, deflated_solver.UMlr),
]
)
)
else:
tmp = numpy.zeros((self.n_, 1))
tmp[0] = deflated_solver.MMlr0_norm
qt = Pt * tmp
q = Wto.T.conj().dot(self.J.dot(qt))
# TODO: q seems to suffer from round-off errors and thus the first
# vector in the computed basis may differ from the exact
# projected one more than on the level of unit round-off.
# rotate closest vector in [V_n,U] to first column
Q = utils.House(q)
q_norm = Q.xnorm
# Arnoldify
WtoQ = Q.apply(Wto.T.conj()).T.conj()
from scipy.linalg import hessenberg
Hh, T = hessenberg(
Q.apply(Wto.T.conj().dot(self.J).dot(Pt * (self.L.dot(WtoQ)))), calc_q=True
)
QT = Q.apply(T)
# construct residual
Rh = self.N.dot(Pt * self.L.dot(Wto.dot(QT)))
# norm of difference between initial vectors
vdiff = self.N.dot(qt)
vdiff_norm = 0 if vdiff.size == 0 else numpy.linalg.norm(vdiff, 2)
# compute norm of projection P_{W^\perp,AW}
if k > 0:
# compute coefficients of orthonormalized AW in the basis [V,Z]
Y = numpy.block(
[
[numpy.eye(n_), deflated_solver.B_],
[numpy.zeros((d, n_)), deflated_solver.E],
[numpy.zeros((self.R12.shape[0], n_)), self.R12],
]
)
YL_Q, _ = scipy.linalg.qr(Y.dot(self.L.dot(Wt)), mode="economic")
# compute <W,X> where X is an orthonormal basis of AW
WX = Wt.T.conj().dot(numpy.vstack([YL_Q[:n, :], YL_Q[n_ : n_ + d, :]]))
PWAW_norm = 1.0 / numpy.min(scipy.linalg.svdvals(WX))
else:
PWAW_norm = 1.0
if full:
Vh = numpy.column_stack(
[deflated_solver.V[:, :n], deflated_solver.projection.U]
).dot(Wto.dot(QT))
ip_Minv_B = deflated_solver.linear_system.get_ip_Minv_B()
def _apply_F(x):
"""Application of the perturbation."""
return -(
self.Z.dot(Rh.dot(utils.inner(Vh, x, ip_B=ip_Minv_B)))
+ Vh.dot(Rh.T.conj().dot(utils.inner(self.Z, x, ip_B=ip_Minv_B)))
)
F = utils.LinearOperator(
(Vh.shape[0], Vh.shape[0]), dtype=deflated_solver.dtype, dot=_apply_F
)
return Hh, Rh, q_norm, vdiff_norm, PWAW_norm, Vh, F
return Hh, Rh, q_norm, vdiff_norm, PWAW_norm
def bound_pseudo(
arnoldifyer,
Wt,
g_norm=0.0,
G_norm=0.0,
GW_norm=0.0,
WGW_norm=0.0,
tol=1e-6,
pseudo_type="auto",
pseudo_kwargs=None,
delta_n=20,
terminate_factor=1.0,
):
r"""Bound residual norms of next deflated system.
:param arnoldifyer: an instance of
:py:class:`~krypy.deflation.Arnoldifyer`.
:param Wt: coefficients :math:`\tilde{W}\in\mathbb{C}^{n+d,k}` of the
considered deflation vectors :math:`W` for the basis :math:`[V,U]`
where ``V=last_solver.V`` and ``U=last_P.U``, i.e.,
:math:`W=[V,U]\tilde{W}` and
:math:`\mathcal{W}=\operatorname{colspan}(W)`. Must fulfill
:math:`\tilde{W}^*\tilde{W}=I_k`.
:param g_norm: norm :math:`\|g\|` of difference :math:`g=c-b` of
right hand sides. Has to fulfill :math:`\|g\|<\|b\|`.
:param G_norm: norm :math:`\|G\|` of difference
:math:`G=B-A` of operators.
:param GW_norm: Norm :math:`\|G|_{\mathcal{W}}\|` of difference
:math:`G=B-A` of operators restricted to :math:`\mathcal{W}`.
:param WGW_norm: Norm :math:`\|\langle W,GW\rangle\|_2`.
:param pseudo_type: One of
* ``'auto'``: determines if :math:`\hat{H}` is non-normal, normal or
Hermitian and uses the corresponding mode (see other options below).
* ``'nonnormal'``: the pseudospectrum of the Hessenberg matrix
:math:`\hat{H}` is used (involves one computation of a pseudospectrum)
* ``'normal'``: the pseudospectrum of :math:`\hat{H}` is computed
efficiently by the union of circles around the eigenvalues.
* ``'hermitian'``: the pseudospectrum of :math:`\hat{H}` is computed
efficiently by the union of intervals around the eigenvalues.
* ``'contain'``: the pseudospectrum of the extended Hessenberg matrix
:math:`\begin{bmatrix}\hat{H}\\S_i\end{bmatrix}` is used
(pseudospectrum has to be recomputed for each iteration).
* ``'omit'``: do not compute the pseudospectrum at all and just use the
residual bounds from the approximate Krylov subspace.
:param pseudo_kwargs: (optional) arguments that are passed to the method
that computes the pseudospectrum.
:param terminate_factor: (optional) terminate the computation if the ratio
of two subsequent residual norms is larger than the provided factor.
Defaults to 1.
"""
if pseudo_kwargs is None:
pseudo_kwargs = {}
# Arnoldify!
Hh, Rh, q_norm, vdiff_norm, PWAW_norm = arnoldifyer.get(Wt)
# get original linear system
ls_orig = arnoldifyer._deflated_solver.linear_system
k = Wt.shape[1]
if k > 0:
# smallest singular value of W^*AW
WAW = Wt.T.conj().dot(arnoldifyer.J.dot(arnoldifyer.L.dot(Wt)))
sigma_min = numpy.min(scipy.linalg.svdvals(WAW))
if sigma_min <= WGW_norm:
raise utils.AssumptionError("sigma_min(W^*AW) > ||W^*GW|| not satisfied.")
eta = GW_norm / (sigma_min - WGW_norm)
else:
eta = 0.0
b_norm = ls_orig.MMlb_norm
beta = PWAW_norm * (eta * (b_norm + g_norm) + g_norm) + vdiff_norm
# check assumption on g_norm and b_norm
if g_norm >= b_norm:
raise utils.AssumptionError("||g_norm|| < ||b_norm|| not satisfied")
# compute residual norms of Hh*z=e_1*b_norm
ls_small = linsys.LinearSystem(
Hh,
numpy.eye(Hh.shape[0], 1) * q_norm,
normal=ls_orig.normal,
self_adjoint=ls_orig.self_adjoint,
positive_definite=ls_orig.positive_definite,
)
Solver = type(arnoldifyer._deflated_solver)
if issubclass(Solver, linsys.Minres) or issubclass(Solver, linsys.Gmres):
aresnorms = utils.get_residual_norms(Hh, self_adjoint=ls_orig.self_adjoint)
else:
# TODO: compute residuals more efficiently for CG
try:
solver = Solver(ls_small, tol=tol, maxiter=Hh.shape[0])
except utils.ConvergenceError as e:
# use all residuals that have been computed
# (useful for short recurrences)
solver = e.solver
aresnorms = numpy.array(solver.resnorms)
# absolute residual norm
aresnorms = aresnorms * q_norm
if pseudo_type == "omit":
return aresnorms / (b_norm - g_norm)
# spectrum of Hh
evals, evecs = scipy.linalg.eig(Hh)
if ls_small.self_adjoint:
evals = numpy.real(evals)
# norm of Hh
Hh_norm = numpy.linalg.norm(Hh, 2)
def _auto():
"""determine pseudo automatically"""
# is Hh Hermitian?
if numpy.linalg.norm(Hh - Hh.T.conj(), 2) < 1e-14 * Hh_norm:
return "hermitian"
# is Hh normal?
if numpy.linalg.cond(evecs, 2) < 1 + 1e-14:
return "normal"
return "nonnormal"
if pseudo_type == "auto":
pseudo_type = _auto()
# for delta >= min(|\lambda|), the pseudospectrum will contain zero and
# thus polymax > 1. Nevertheless, the bound may provide useful
# information in early iterations with large values of delta.
# Therefore, the maximal perturbation is chosen as the maximal
# eigenvalue of Hh
delta_max = 1e2 * numpy.max(numpy.abs(evals))
# minimal delta is defined via Rh
# HACK until numpy.linalg.svd (and thus numpy.linalg.norm) is fixed
from scipy.linalg import svd
_, Rhsvd, _ = svd(Rh[:, :1])
delta_min = PWAW_norm * (eta * (Hh_norm + G_norm) + G_norm) + numpy.max(Rhsvd)
if delta_min == 0:
delta_min = 1e-16
import pseudopy
if not ls_small.normal:
# construct pseudospectrum for the expected range
pseudo = pseudopy.NonnormalAuto(
Hh, delta_min * 0.99, delta_max * 1.01, **pseudo_kwargs
)
elif not ls_small.self_adjoint:
pseudo = pseudopy.NormalEvals(evals)
else:
pseudo = None
bounds = [aresnorms[0]]
for i in range(1, len(aresnorms)):
# compute roots of polynomial
if issubclass(Solver, linsys.Cg):
roots = scipy.linalg.eigvalsh(Hh[:i, :i])
else:
# TODO: more stable way of computing the roots of the MINRES
# poly with exploitation of symmetry?
HhQ, HhR = scipy.linalg.qr(Hh[: i + 1, :i], mode="economic")
roots_inv = scipy.linalg.eigvals(HhQ[:i, :].T.conj(), HhR)
roots = 1.0 / roots_inv[numpy.abs(roots_inv) > 1e-14]
if ls_small.self_adjoint:
roots = numpy.real(roots)
# compute polynomial
p = utils.NormalizedRootsPolynomial(roots)
if ls_small.self_adjoint:
p_minmax_candidates = p.minmax_candidates()
# absolute residual
aresnorm = aresnorms[i]
# perturbation
# HACK until numpy.linalg.svd (and thus numpy.linalg.norm) is fixed
_, Rhsvd, _ = svd(Rh[:, :i])
Rhnrm = numpy.max(Rhsvd)
epsilon = PWAW_norm * (eta * (Hh_norm + G_norm) + G_norm) + Rhnrm
# + numpy.linalg.norm(Rh[:, :i], 2)
if epsilon == 0:
epsilon = 1e-16
if pseudo_type == "contain":
raise NotImplementedError("contain not yet implemented")
# exit if epsilon >= delta_max
if epsilon >= delta_max:
break
delta_log_range = numpy.linspace(
numpy.log10(1.01 * epsilon), numpy.log10(delta_max), delta_n + 2
)[0:-1]
def compute_pseudo(delta_log):
delta = 10 ** delta_log
if ls_small.self_adjoint:
# pseudospectrum are intervals
pseudo_intervals = utils.Intervals(
[utils.Interval(ev - delta, ev + delta) for ev in evals]
)
# add roots of first derivative of p
candidates = []
for candidate in p_minmax_candidates:
if pseudo_intervals.contains(candidate):
candidates.append(candidate)
all_candidates = numpy.hstack(
[pseudo_intervals.get_endpoints(), | numpy.array(candidates) | numpy.array |
import numpy as np
from numpy import ma
from scipy import sparse as sp
# we don’t need this in requirements.txt, as it’s only needed for testing
from pytest import mark
from scanpy.data_structs.ann_data import AnnData, BoundStructArray, SMP_INDEX
def test_creation():
AnnData( | np.array([[1, 2], [3, 4]]) | numpy.array |
"""General functions for mathematical and numerical operations.
Functions
---------
- spline - Create a general spline interpolation function.
- cumtrapz_loglog - Perform a cumulative integral in log-log space.
- extend - Extend the given array by extrapolation.
- sampleInverse - Find x-sampling to evenly divide a function in y-space.
- smooth - Use convolution to smooth the given array.
- _trapezium_loglog -
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# import scipy as sp
import scipy.stats # noqa
import warnings
from . import math_core, interpolate # , statistic
__all__ = [
'cumtrapz_loglog', 'even_selection', 'extend', 'monotonic_smooth', 'rk4_step',
'sample_inverse', 'smooth_convolve', 'spline',
# DEPRECATED
'smooth', '_smooth'
]
def cumtrapz_loglog(yy, xx, bounds=None, axis=-1, dlogx=None, lntol=1e-2):
"""Calculate integral, given `y = dA/dx` or `y = dA/dlogx` w/ trapezoid rule in log-log space.
We are calculating the integral `A` given sets of values for `y` and `x`.
To associate `yy` with `dA/dx` then `dlogx = None` [default], otherwise,
to associate `yy` with `dA/dlogx` then `dlogx = True` for natural-logarithm, or `dlogx = b`
for a logarithm of base `b`.
For each interval (x[i+1], x[i]), calculate the integral assuming that y is of the form,
`y = a * x^gamma`
Notes
-----
- When bounds are given that are not identical to input `xx` values, then interpolation must
be performed. This can be done on the resulting cumsum'd values, or on the input integrand
values. The cumsum values are *not necessarily a power-law* (for negative indices), and thus
the interpolation is better performed on the input `yy` values.
"""
yy = np.asarray(yy)
xx = np.asarray(xx)
if bounds is not None:
if len(bounds) != 2 or np.any(~math_core.within(bounds, xx)) or (bounds[0] > bounds[1]):
err = "Invalid `bounds` = '{}', xx extrema = '{}'!".format(
bounds, math_core.minmax(xx))
raise ValueError(err)
if axis != -1 or np.ndim(yy) > 1:
newy = interpolate.interp_func(xx, yy, xlog=True, ylog=True)(bounds)
else:
newy = interpolate.interp(bounds, xx, yy, xlog=True, ylog=True, valid=False)
# newy = interpolate.interp(bounds, xx, yy, xlog=True, ylog=True, valid=False)
ii = np.searchsorted(xx, bounds)
xx = np.insert(xx, ii, bounds, axis=axis)
yy = np.insert(yy, ii, newy, axis=axis)
ii = np.array([ii[0], ii[1]+1])
assert np.alltrue(xx[ii] == bounds), "FAILED!"
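# Mask exact zeros in the integrand so the log-space power-law fit below stays well defined.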
yy = np.ma.masked_values(yy, value=0.0, atol=0.0)
# if np.ndim(yy) > 1 and np.ndim(xx) == 1:
if np.ndim(yy) != np.ndim(xx):
if np.ndim(yy) < np.ndim(xx):
raise ValueError("BAD SHAPES")
cut = [slice(None)] + [np.newaxis for ii in range(np.ndim(yy)-1)]
xx = xx[tuple(cut)]
log_base = np.e
if dlogx is not None:
# If `dlogx` is True, then we're using log-base-e (i.e. natural-log)
# Otherwise, set the log-base to the given value
if dlogx is not True:
log_base = dlogx
# Numerically calculate the local power-law index
delta_logx = np.diff( | np.log(xx) | numpy.log |
import os
from collections.abc import Iterable
from functools import partial
from math import ceil
from operator import getitem
from threading import Lock
from typing import Optional, Union
import numpy as np
import pandas as pd
import dask.array as da
from dask.base import tokenize
from dask.blockwise import BlockwiseDepDict, blockwise
from dask.dataframe.core import (
DataFrame,
Index,
Series,
_concat,
_emulate,
apply_and_enforce,
has_parallel_type,
new_dd_object,
)
from dask.dataframe.io.utils import DataFrameIOFunction
from dask.dataframe.shuffle import set_partition
from dask.dataframe.utils import (
check_meta,
insert_meta_param_description,
is_series_like,
make_meta,
)
from dask.delayed import delayed
from dask.highlevelgraph import HighLevelGraph
from dask.layers import DataFrameIOLayer
from dask.utils import M, _deprecated, funcname, is_arraylike
lock = Lock()
def _meta_from_array(x, columns=None, index=None, meta=None):
"""Create empty DataFrame or Series which has correct dtype"""
if x.ndim > 2:
raise ValueError(
"from_array does not input more than 2D array, got"
" array with shape %r" % (x.shape,)
)
if index is not None:
if not isinstance(index, Index):
raise ValueError("'index' must be an instance of dask.dataframe.Index")
index = index._meta
if meta is None:
meta = pd.DataFrame()
if getattr(x.dtype, "names", None) is not None:
# record array has named columns
if columns is None:
columns = list(x.dtype.names)
elif np.isscalar(columns):
raise ValueError("For a struct dtype, columns must be a list.")
elif not all(i in x.dtype.names for i in columns):
extra = sorted(set(columns).difference(x.dtype.names))
raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}")
fields = x.dtype.fields
dtypes = [fields[n][0] if n in fields else "f8" for n in columns]
elif x.ndim == 1:
if np.isscalar(columns) or columns is None:
return meta._constructor_sliced(
[], name=columns, dtype=x.dtype, index=index
)
elif len(columns) == 1:
return meta._constructor(
np.array([], dtype=x.dtype), columns=columns, index=index
)
raise ValueError(
"For a 1d array, columns must be a scalar or single element list"
)
else:
if np.isnan(x.shape[1]):
raise ValueError("Shape along axis 1 must be known")
if columns is None:
columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
elif len(columns) != x.shape[1]:
raise ValueError(
"Number of column names must match width of the array. "
f"Got {len(columns)} names for {x.shape[1]} columns"
)
dtypes = [x.dtype] * len(columns)
data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
return meta._constructor(data, columns=columns, index=index)
def from_array(x, chunksize=50000, columns=None, meta=None):
"""Read any sliceable array into a Dask Dataframe
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have 2 dimensions:
x.ndim == 2
or have a record dtype:
x.dtype == [('name', 'O'), ('balance', 'i8')]
Parameters
----------
x : array_like
chunksize : int, optional
The number of rows per partition to use.
columns : list or string, optional
list of column names if DataFrame, single string if Series
meta : object, optional
An optional `meta` parameter can be passed for dask
to specify the concrete dataframe type to use for partitions of
the Dask dataframe. By default, pandas DataFrame is used.
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series
"""
if isinstance(x, da.Array):
return from_dask_array(x, columns=columns, meta=meta)
meta = _meta_from_array(x, columns, meta=meta)
divisions = tuple(range(0, len(x), chunksize))
divisions = divisions + (len(x) - 1,)
token = tokenize(x, chunksize, columns)
name = "from_array-" + token
dsk = {}
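# Build one graph task per chunk: each task slices x and wraps the slice in the matching pandas container.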
for i in range(0, int(ceil(len(x) / chunksize))):
data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
if is_series_like(meta):
dsk[name, i] = (type(meta), data, None, meta.dtype, meta.name)
else:
dsk[name, i] = (type(meta), data, None, meta.columns)
return new_dd_object(dsk, name, meta, divisions)
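# Illustrative example of from_array (hypothetical shapes): a NumPy array of shape
# (1_000_000, 3) with chunksize=50_000 and columns=["a", "b", "c"] yields a
# 20-partition DataFrame.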
def from_pandas(
data: Union[pd.DataFrame, pd.Series],
npartitions: Optional[int] = None,
chunksize: Optional[int] = None,
sort: bool = True,
name: Optional[str] = None,
) -> DataFrame:
"""
Construct a Dask DataFrame from a Pandas DataFrame
This splits an in-memory Pandas dataframe into several parts and constructs
a dask.dataframe from those parts on which Dask.dataframe can operate in
parallel. By default, the input dataframe will be sorted by the index to
produce cleanly-divided partitions (with known divisions). To preserve the
input ordering, make sure the input index is monotonically-increasing. The
``sort=False`` option will also avoid reordering, but will not result in
known divisions.
Note that, despite parallelism, Dask.dataframe may not always be faster
than Pandas. We recommend that you stay with Pandas for as long as
possible before switching to Dask.dataframe.
Parameters
----------
data : pandas.DataFrame or pandas.Series
The DataFrame/Series with which to construct a Dask DataFrame/Series
npartitions : int, optional
The number of partitions of the index to create. Note that depending on
the size and index of the dataframe, the output may have fewer
partitions than requested.
chunksize : int, optional
The number of rows per index partition to use.
sort: bool
Sort the input by index first to obtain cleanly divided partitions
(with known divisions). If False, the input will not be sorted, and
all divisions will be set to None. Default is True.
name: string, optional
An optional keyname for the dataframe. Defaults to hashing the input
Returns
-------
dask.DataFrame or dask.Series
A dask DataFrame/Series partitioned along the index
Examples
--------
>>> from dask.dataframe import from_pandas
>>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
... index=pd.date_range(start='20100101', periods=6))
>>> ddf = from_pandas(df, npartitions=3)
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
>>> ddf = from_pandas(df.a, npartitions=3) # Works with Series too!
>>> ddf.divisions # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2010-01-01 00:00:00', freq='D'),
Timestamp('2010-01-03 00:00:00', freq='D'),
Timestamp('2010-01-05 00:00:00', freq='D'),
Timestamp('2010-01-06 00:00:00', freq='D'))
Raises
------
TypeError
If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
passed in.
See Also
--------
from_array : Construct a dask.DataFrame from an array that has record dtype
read_csv : Construct a dask.DataFrame from a CSV file
"""
if isinstance(getattr(data, "index", None), pd.MultiIndex):
raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
if not has_parallel_type(data):
raise TypeError("Input must be a pandas DataFrame or Series.")
if (npartitions is None) == (none_chunksize := (chunksize is None)):
raise ValueError("Exactly one of npartitions and chunksize must be specified.")
nrows = len(data)
if none_chunksize:
if not isinstance(npartitions, int):
raise TypeError(
"Please provide npartitions as an int, or possibly as None if you specify chunksize."
)
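# Derive the chunksize that yields (at most) the requested number of partitions.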
chunksize = int(ceil(nrows / npartitions))
elif not isinstance(chunksize, int):
raise TypeError(
"Please provide chunksize as an int, or possibly as None if you specify npartitions."
)
name = name or ("from_pandas-" + tokenize(data, chunksize))
if not nrows:
return new_dd_object({(name, 0): data}, name, data, [None, None])
if data.index.isna().any() and not data.index.is_numeric():
raise NotImplementedError(
"Index in passed data is non-numeric and contains nulls, which Dask does not entirely support.\n"
"Consider passing `data.loc[~data.isna()]` instead."
)
if sort:
if not data.index.is_monotonic_increasing:
data = data.sort_index(ascending=True)
divisions, locations = sorted_division_locations(
data.index, chunksize=chunksize
)
else:
locations = list(range(0, nrows, chunksize)) + [len(data)]
divisions = [None] * len(locations)
dsk = {
(name, i): data.iloc[start:stop]
for i, (start, stop) in enumerate(zip(locations[:-1], locations[1:]))
}
return new_dd_object(dsk, name, data, divisions)
@_deprecated(after_version="2022.02.1")
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs):
"""Read BColz CTable into a Dask Dataframe
BColz is a fast on-disk compressed column store with careful attention
given to compression. https://bcolz.readthedocs.io/en/latest/
Parameters
----------
x : bcolz.ctable
chunksize : int, optional
The size(rows) of blocks to pull out from ctable.
categorize : bool, defaults to True
Automatically categorize all string dtypes
index : string, optional
Column to make the index
lock: bool or Lock
Lock to use when reading or False for no lock (not-thread-safe)
See Also
--------
from_array: more generic function not optimized for bcolz
"""
if lock is True:
lock = Lock()
import bcolz
import dask.array as da
if isinstance(x, str):
x = bcolz.ctable(rootdir=x)
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (
np.issubdtype(x.dtype[name], np.string_)
or np.issubdtype(x.dtype[name], np.unicode_)
or | np.issubdtype(x.dtype[name], np.object_) | numpy.issubdtype |
# -*- coding: utf-8; -*-
#
# sparsegrad - automatic calculation of sparse gradient
# Copyright (C) 2016-2018 <NAME> (<EMAIL>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module contains implementation details of sparse matrix operations
"""
from packaging.version import Version
import numpy as np
from sparsegrad import impl
__all__ = [
'sdcsr',
'sparsity_csr',
'sample_csr_rows',
'csr_matrix',
'csc_matrix']
scipy_sparse = impl.scipy.sparse
index_dtype = scipy_sparse.csr_matrix((0, 0)).indptr.dtype
def sample_csr_rows(csr, rows):
"return (indptr,ix) such that csr[rows]=csr_matrix((csr.data[ix],csr.indices[ix],indptr))"
start = np.take(csr.indptr, rows)
count = np.take(csr.indptr, rows + 1) - start
indptr = np.empty(len(rows) + 1, dtype=csr.indptr.dtype)
indptr[0] = 0
np.cumsum(count, out=indptr[1:])
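# For each selected row, enumerate the flat positions of its stored entries in csr.data / csr.indices.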
ix = np.repeat(start - indptr[:-1], count) + np.arange(indptr[-1])
return indptr, ix
class csr_matrix_nochecking(scipy_sparse.csr_matrix):
"""
Subclass of scipy.sparse.csr_matrix which does not perform matrix format checking.
When possible, it avoids going through original csr_matrix constructor which is very slow.
"""
def __init__(self, *args, **kwargs):
if not args and not kwargs:
scipy_sparse.spmatrix.__init__(self)
elif len(args) == 1 and 'shape' in kwargs and not kwargs.get('copy', False):
scipy_sparse.spmatrix.__init__(self)
data, indices, indptr = args[0]
self.data = np.asarray(data, dtype=kwargs.get('dtype', data.dtype))
self.indices = np.asarray(indices, dtype=index_dtype)
self.indptr = np.asarray(indptr, dtype=index_dtype)
self._shape = kwargs['shape']
else:
super(csr_matrix_nochecking, self).__init__(*args, **kwargs)
@classmethod
def fromarrays(cls, data, indices, indptr, shape):
"Optimized matrix constructor from individual CSR arrays, returns csr_matrix((data,indices,indptr),shape=shape)"
self = cls()
self.data = data
self.indices = | np.asarray(indices, dtype=index_dtype) | numpy.asarray |
from __future__ import print_function
import os
import cv2
import h5py
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_type", type=str, help="extra/train/val/test")
args = parser.parse_args()
#----------------------------Arguments---------------------------------------
dataset_type = args.dataset_type # Change to train/test
dataset_dir = './dataset'
curated_dataset = os.path.join(dataset_dir, dataset_type + '_cropped')
curated_textfile = os.path.join(dataset_dir, dataset_type + '.txt')
ground_attn_dir = os.path.join(dataset_dir, dataset_type + '_attn_grnd')
file_path = './dataset/%s/' % (dataset_type)
img_size = (64, 64) # (width, height)
max_steps = 6
ground_attention_size = (14, 14) # (width, height)
if os.path.exists(ground_attn_dir) == False:
os.mkdir(ground_attn_dir)
#---------------------------- Functions ----------------------------------------
def load_file(curated_textfile):
all_data = []
with open(curated_textfile, 'r') as f:
frames = f.readlines()
for frame in frames:
frame = frame.split(', ')
iterm_data = []
# Remove all non-interger characters
for i in frame:
i = i.replace("[", "")
i = i.replace("[[", "")
i = i.replace("]", "")
i = i.replace("]]", "")
i = i.replace("'", "")
iterm_data.append(int(i))
final_data = []
count = 0
for u in range(max_steps):
each_data = []
for k in range(6):
if k == 0:
each_data.append(str(iterm_data[count]) + '.png')
else:
each_data.append(iterm_data[count])
count += 1
final_data.append(each_data)
all_data.append(final_data)
return all_data
def samples_total(samples):
"""
Computes the total digits in a sample
"""
total_samples= 0
for k in range(max_steps):
sample_label = samples[k][1]
if int(sample_label) != 0:
total_samples += 1
return total_samples
def gaussian2d(sup, scales):
"""
Creates a 2D Gaussian based on the size and scale.
"""
var = scales * scales
shape = (sup[0], sup[1])
n,m = [(i-1)/2 for i in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
g = (1/np.sqrt(2*np.pi*var))*np.exp( -(x*x + y*y) / (2*var))
return g
def softmax(x):
"""
Compute softmax values for each sets of scores in x.
"""
e_x = np.exp(x - | np.max(x) | numpy.max |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "<NAME>, <NAME>, and <NAME>"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import shutil
import time
import tempfile
import hashlib
import logging
from rdkit import Chem
from rdkit.Chem import AllChem
import deepchem.utils.rdkit_util as rdkit_util
import numpy as np
from copy import deepcopy
from collections import deque
from functools import partial
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.save import log
"""
TODO(LESWING) add sanitization with rdkit upgrade to 2017.*
"""
def get_ligand_filetype(ligand_filename):
"""Returns the filetype of ligand."""
if ".mol2" in ligand_filename:
return ".mol2"
elif ".sdf" in ligand_filename:
return "sdf"
elif ".pdbqt" in ligand_filename:
return ".pdbqt"
elif ".pdb" in ligand_filename:
return ".pdb"
else:
raise ValueError("Unrecognized_filename")
def load_molecule(molecule_file, add_hydrogens=True, calc_charges=False):
return rdkit_util.load_molecule(molecule_file, add_hydrogens, calc_charges)
def merge_two_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
def compute_centroid(coordinates):
"""Compute compute the x,y,z centroid of provided coordinates
coordinates: np.ndarray
Shape (N, 3), where N is number atoms.
"""
centroid = np.mean(coordinates, axis=0)
return (centroid)
def generate_random__unit_vector():
"""generate a random unit vector on the 3-sphere
citation:
http://mathworld.wolfram.com/SpherePointPicking.html
a. Choose random theta in [0, 2*pi]
b. Choose random z in [-1, 1]
c. Compute output: (x,y,z) = (sqrt(1-z^2)*cos(theta), sqrt(1-z^2)*sin(theta),z)
d. output u
"""
theta = np.random.uniform(low=0.0, high=2 * np.pi)
z = np.random.uniform(low=-1.0, high=1.0)
u = np.array(
[np.sqrt(1 - z**2) * np.cos(theta), np.sqrt(1 - z**2) * np.sin(theta), z])
return (u)
def generate_random_rotation_matrix():
"""
1. Generate a random unit vector, i.e., randomly sampled from the unit
3-sphere
a. see function _generate_random__unit_vector() for details
2. Generate a second random unit vector thru the algorithm in (1), output v
a. If absolute value of u \dot v > 0.99, repeat.
(This is important for numerical stability. Intuition: we want them to
be as linearly independent as possible or else the orthogonalized
version of v will be much shorter in magnitude compared to u. I assume
in Stack they took this from Gram-Schmidt orthogonalization?)
b. v" = v - (u \dot v)*u, i.e. subtract out the component of v that's in
u's direction
c. normalize v" (this isn"t in Stack but I assume it must be done)
3. find w = u \cross v"
4. u, v", and w will form the columns of a rotation matrix, R. The
intuition is that u, v" and w are, respectively, what the standard basis
vectors e1, e2, and e3 will be mapped to under the transformation.
"""
u = generate_random__unit_vector()
v = generate_random__unit_vector()
while np.abs(np.dot(u, v)) >= 0.99:
v = generate_random__unit_vector()
vp = v - (np.dot(u, v) * u)
vp /= np.linalg.norm(vp)
w = np.cross(u, vp)
R = np.column_stack((u, vp, w))
return (R)
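# Illustrative sanity check (not part of the original code):
#   R = generate_random_rotation_matrix()
#   assert np.allclose(R.T.dot(R), np.eye(3))   # columns are orthonormal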
def rotate_molecules(mol_coordinates_list):
"""Rotates provided molecular coordinates.
Pseudocode:
1. Generate random rotation matrix. This matrix applies a random
transformation to any 3-vector such that, were the random transformation
repeatedly applied, it would randomly sample along the surface of a sphere
with radius equal to the norm of the given 3-vector cf.
_generate_random_rotation_matrix() for details
2. Apply R to all atomic coordinates.
3. Return rotated molecule
"""
R = generate_random_rotation_matrix()
rotated_coordinates_list = []
for mol_coordinates in mol_coordinates_list:
coordinates = deepcopy(mol_coordinates)
rotated_coordinates = np.transpose(np.dot(R, np.transpose(coordinates)))
rotated_coordinates_list.append(rotated_coordinates)
return (rotated_coordinates_list)
def compute_pairwise_distances(protein_xyz, ligand_xyz):
"""
Takes an input m x 3 and n x 3 np arrays of 3d coords of protein and ligand,
respectively, and outputs an m x n np array of pairwise distances in Angstroms
between protein and ligand atoms. Entry (i,j) is the distance between the i-th protein
atom and the j-th ligand atom.
"""
pairwise_distances = np.zeros(
(np.shape(protein_xyz)[0], np.shape(ligand_xyz)[0]))
for j in range(0, np.shape(ligand_xyz)[0]):
differences = protein_xyz - ligand_xyz[j, :]
squared_differences = np.square(differences)
pairwise_distances[:, j] = np.sqrt(np.sum(squared_differences, 1))
return (pairwise_distances)
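# Note: an equivalent vectorized form of the loop above is
#   np.sqrt(np.sum((protein_xyz[:, np.newaxis, :] - ligand_xyz[np.newaxis, :, :])**2, axis=-1))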
"""following two functions adapted from:
http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
"""
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(vector_i, vector_j):
"""Returns the angle in radians between vectors "vector_i" and "vector_j"::
>>> print("%0.06f" % angle_between((1, 0, 0), (0, 1, 0)))
1.570796
>>> print("%0.06f" % angle_between((1, 0, 0), (1, 0, 0)))
0.000000
>>> print("%0.06f" % angle_between((1, 0, 0), (-1, 0, 0)))
3.141593
"""
vector_i_u = unit_vector(vector_i)
vector_j_u = unit_vector(vector_j)
angle = np.arccos(np.dot(vector_i_u, vector_j_u))
if np.isnan(angle):
if (vector_i_u == vector_j_u).all():
return 0.0
else:
return np.pi
return angle
def hash_sybyl(sybyl, sybyl_types):
return (sybyl_types.index(sybyl))
def hash_ecfp(ecfp, power):
"""
Returns an int of size 2^power representing that
ECFP fragment. Input must be a string.
"""
md5 = hashlib.md5()
md5.update(ecfp.encode("utf-8"))
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
def hash_ecfp_pair(ecfp_pair, power):
"""
Returns an int of size 2^power representing that
ECFP pair. Input must be a tuple of strings.
"""
ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
md5 = hashlib.md5()
md5.update(ecfp.encode("utf-8"))
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
def compute_all_ecfp(mol, indices=None, degree=2):
"""
For each atom:
Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to an int.
Return a dictionary mapping atom index to hashed SMILES.
"""
ecfp_dict = {}
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
def compute_ecfp_features(mol, ecfp_degree, ecfp_power):
"""Computes ECFP features for provided openbabel molecule.
Parameters:
-----------
system_ob: openbabel molecules
Molecule to featurize.
ecfp_degree: int
ECFP radius
ecfp_power: int
Number of bits to store ECFP features (2^ecfp_power will be length of
ECFP array)
Returns:
--------
ecfp_array: np.ndarray
Returns an array of size 2^ecfp_power where array at index i has a 1 if
that ECFP fragment is found in the molecule and array at index j has a 0
if ECFP fragment not in molecule.
"""
bv = AllChem.GetMorganFingerprintAsBitVect(mol, ecfp_degree, nBits=2**ecfp_power)
return [int(bv.GetBit(x)) for x in range(bv.GetNumBits())]
def featurize_binding_pocket_ecfp(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=4.5,
ecfp_degree=2):
"""Computes ECFP dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: PDB object (TODO(rbharath): Correct?)
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: PDB object (TODO(rbharath): Correct?)
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration.
"""
features_dict = {}
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
return (protein_ecfp_dict, ligand_ecfp_dict)
def compute_all_sybyl(mol, indices=None):
"""Computes Sybyl atom types for atoms in molecule."""
raise ValueError("Not Yet Implemented")
def featurize_binding_pocket_sybyl(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=7.0):
"""Computes Sybyl dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: Rdkit Molecule
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: Rdkit Molecule
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration.
"""
features_dict = {}
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_sybyl_dict = compute_all_sybyl(protein, indices=protein_atoms)
ligand_sybyl_dict = compute_all_sybyl(ligand)
return (protein_sybyl_dict, ligand_sybyl_dict)
def compute_splif_features_in_range(protein,
ligand,
pairwise_distances,
contact_bin,
ecfp_degree=2):
"""Computes SPLIF features for protein atoms close to ligand atoms.
Find all protein atoms that are > contact_bin[0] and < contact_bin[1] away
from ligand atoms. Then, finds the ECFP fingerprints for the contacting
atoms. Returns a dictionary mapping (protein_index_i, ligand_index_j) -->
(protein_ecfp_i, ligand_ecfp_j)
"""
contacts = np.nonzero((pairwise_distances > contact_bin[0]) & (
pairwise_distances < contact_bin[1]))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
contacts = zip(contacts[0], contacts[1])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
splif_dict = {
contact: (protein_ecfp_dict[contact[0]], ligand_ecfp_dict[contact[1]])
for contact in contacts
}
return (splif_dict)
def featurize_splif(protein_xyz, protein, ligand_xyz, ligand, contact_bins,
pairwise_distances, ecfp_degree):
"""Computes SPLIF featurization of protein-ligand binding pocket.
For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.) compute a
dictionary mapping (protein_index_i, ligand_index_j) tuples -->
(protein_ecfp_i, ligand_ecfp_j) tuples. return a list of such splif
dictionaries.
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
splif_dicts = []
for i, contact_bin in enumerate(contact_bins):
splif_dicts.append(
compute_splif_features_in_range(protein, ligand, pairwise_distances,
contact_bin, ecfp_degree))
return (splif_dicts)
def compute_ring_center(mol, ring):
ring_xyz = np.zeros((len(ring._path), 3))
for i, atom_idx in enumerate(ring._path):
atom = mol.GetAtom(int(atom_idx))
ring_xyz[i, :] = [atom.x(), atom.y(), atom.z()]
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def compute_ring_normal(mol, ring):
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring._path):
if i == 3: break
atom = mol.GetAtom(int(atom_idx))
points[i, :] = [atom.x(), atom.y(), atom.z()]
v1 = points[1, :] - points[0, :]
v2 = points[2, :] - points[0, :]
normal = np.cross(v1, v2)
return normal
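# Note: the returned normal is not unit length; angle_between() normalizes its arguments.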
def is_pi_parallel(protein_ring_center, protein_ring_normal, ligand_ring_center,
ligand_ring_normal):
dist = np.linalg.norm(protein_ring_center - ligand_ring_center)
angle = angle_between(protein_ring_normal, ligand_ring_normal) * 180 / np.pi
if ((np.abs(angle) < 30.0) or
(np.abs(angle) > 150.0 and np.abs(angle) < 210.0) or
(np.abs(angle) > 330.0 and np.abs(angle) < 360.0)):
if dist < 8.0:
return True
return False
def is_pi_t(protein_ring_center, protein_ring_normal, ligand_ring_center,
ligand_ring_normal):
dist = np.linalg.norm(protein_ring_center - ligand_ring_center)
angle = angle_between(protein_ring_normal, ligand_ring_normal) * 180 / np.pi
if ((np.abs(angle) > 60.0 and np.abs(angle) < 120.0) or
(np.abs(angle) > 240.0 and np.abs(angle) < 300.0)):
if dist < 5.5:
return True
return False
def update_feature_dict(feature_dict, idxs=None, indices=None):
if idxs is not None:
indices = []
for idx in idxs:
indices.append(idx - 1)
for index in indices:
if index not in feature_dict.keys():
feature_dict[index] = 1
else:
feature_dict[index] += 1
return feature_dict
def compute_pi_stack(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
dist_cutoff=4.4,
angle_cutoff=30.):
"""
Pseudocode:
for each ring in ligand:
if it is aromatic:
for each ring in protein:
if it is aromatic:
compute distance between centers
compute angle.
if it counts as parallel pi-pi:
for each atom in ligand and in protein,
add to list of atom indices
if it counts as pi-T:
for each atom in ligand and in protein:
add to list of atom indices
"""
raise ValueError("Not Yet Implemented")
def is_cation_pi(cation_position, ring_center, ring_normal):
cation_to_ring_vec = cation_position - ring_center
dist = np.linalg.norm(cation_to_ring_vec)
angle = angle_between(cation_to_ring_vec, ring_normal) * 180. / np.pi
if dist < 6.5:
if ((np.abs(angle) < 30.0) or
(np.abs(angle) > 150.0 and np.abs(angle) < 210.0) or
( | np.abs(angle) | numpy.abs |
"""
Implement a Vanilla NBP and NRW model for MC simulation of the NTE.
Code for the article "Monte Carlo Methods for the Neutron Transport Equation.
By <NAME>, <NAME>, <NAME>, <NAME>.
This file contains the code to produce the plots in the case of the 2D version
of the NTE.
MIT License
Copyright (c) <NAME>, 2020.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.filters import gaussian_filter
class circle:
"""Shape to determine area of scatter/branching."""
def __init__(self, centre, radius):
self.centre = centre
self.radius = radius
def time_in_circle(self, pos, theta, v):
"""Compute entry, exit times in circle for given trajectory."""
a = v**2
b = 2*v*((pos[0] - self.centre[0])* | np.cos(theta) | numpy.cos |
'''
Testing of the zoo
'''
import pytest
import numpy as np
np.random.seed(100)
from freelunch.zoo import animal, particle, krill
from freelunch.util import BadObjectiveFunctionScores, InvalidSolutionUpdate
animals = [particle, krill]
def test_animal():
location_1 = np.array([1,1,1])
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = animal(dna=location_1, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, fitness_3)
assert(np.all(friend.dna == location_3))
assert(friend.fitness == 10)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
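# Moving to a worse position updates dna/fitness but keeps the personal best unchanged.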
with pytest.raises(ValueError):
friend.move(location_3, np.inf)
with pytest.raises(ValueError):
friend.move(location_3, np.nan)
with pytest.raises(ValueError):
friend.move(location_3, [])
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.inf,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.nan,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([1+2j,1,1]), 1)
friend = animal(dna=location_1, fitness=fitness_1)
friend2 = animal(dna=location_2, fitness=fitness_2)
assert(friend2 < friend)
assert(friend > friend2)
friend2._fitness = None # Or will throw error
assert(friend < friend2)
assert(not (friend2 < friend))
assert(friend2 > friend)
assert(not (friend > friend2))
friend._fitness = None # Or will throw error
with pytest.raises(BadObjectiveFunctionScores):
friend < friend2
with pytest.raises(BadObjectiveFunctionScores):
friend > friend2
@pytest.mark.parametrize('creature', animals)
def test_particle(creature):
location_1 = np.array([1,1,1])
vel = np.random.randn(1,3)
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = creature(pos=location_1, vel=vel, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, vel, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, vel, fitness_3)
assert( | np.all(friend.dna == location_3) | numpy.all |
"""
convolution network
"""
import numpy as np
def valid(width, kernel, pad, slide):
res = (width - kernel + 2 * pad)/slide + 1
return res == int(res)
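# e.g. valid(28, 3, 1, 1) -> True, since (28 - 3 + 2*1)/1 + 1 = 28 is an integer.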
def bn(x, e=1e-7):
"""before we pass x to next conv, we apply
batch normalization"""
mu = np.mean(x)
sigma2 = | np.var(x) | numpy.var |
#
# Created by <NAME> (<EMAIL>) 04/2019
# Last updated: 04/16/20
#
# Contains code to calculate distance matrices for spherical,
# ring like, and cylindrical geometries.
# Also include code to calculate the EMD between two particle
# physics collider events.
#
# Dependencies: POT library
#
##########################################
import sys
import time
import warnings
import numpy as np
from numpy import linalg as LA
import random
import ot
from ot.lp import emd2
#########################################
# PROCESSING FUNCTIONS
# Calculate phi from 3 momentum
def phi(vec): # Should only return values between 0 and 2 pi
#
if len(vec) !=3:
raise Exception('emdVar Error: invalid phi argument')
px, py = vec[0], vec[1]
if px==0:
if py>0:
return np.pi/2.
else:
return 3*np.pi/2.
phiV = np.arctan(py/px)
if px<0:
phiV=np.pi+phiV
else:
if py<0:
phiV=2*np.pi+phiV
return phiV
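# e.g. phi([0.0, 1.0, 0.0]) returns pi/2 and phi([-1.0, 0.0, 0.0]) returns pi.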
# Calculate eta from 3 momentum
def eta(vec):
if len(vec) !=3:
raise Exception('emdVar Error: invalid eta argument')
px, py, pz = vec[0], vec[1], vec[2]
etaV = np.arctanh(pz/np.sqrt(px**2+py**2+pz**2))
return etaV
# Define to handle periodicity of phi
# Processes array of phi values
def preproc(X):
return np.array([wrapCheck(x) for x in X])
# Ensures that all phi values are between 0 and 2pi
def wrapCheck(x):
if x<0:
return x+2*np.pi
if x>2*np.pi:
return wrapCheck(x-2*np.pi)
return x
##########################################################################
# Define distance metrics.
#######################################
## CYLINDRICAL GEOMETRY
#######################################
# Calculates euclidean distance where the first column is eta, the second is phi, eg elements in both X and Y are (y,phi)
# This is the distance on the cylinder, beta = 2 measure
# ym is max rapidity, needed for correct normalization
def _cdist_phi_y(X,Y, ym):
# define ym as the maximum rapidity cut on the quasi-isotropic event
# Make sure the phi values are in range
phi1 = preproc(X[:,1])
phi2 = preproc(Y[:,1])
# Trick to account for phi distance periodicity
phi_d =np.pi -np.abs(np.pi-np.abs(phi1[:,np.newaxis] - phi2[:]))
y_d = X[:,0,np.newaxis] - Y[:,0]
norm = 12.0/(np.pi*np.pi+16*ym*ym)
dist = norm*(phi_d**2 + y_d**2)
return dist
# Distance on cylinder, beta = 1 metric
# first column is eta, the second is phi, eg elements in both X and Y are (y,phi)
def _cdist_phi_y_sqrt(X,Y):
# NOTE: THIS IS NOT NORMALIZED!! DOES NOT RUN FROM 0 TO 1
# Make sure the phi values are in range
phi1 = preproc(X[:,1])
phi2 = preproc(Y[:,1])
# Trick to account for phi distance 'wrap around'
phi_d =np.pi -np.abs(np.pi-np.abs(phi1[:,np.newaxis] - phi2[:]))
y_d = X[:,0,np.newaxis] - Y[:,0]
dist = phi_d**2 + y_d**2
return np.sqrt(dist)
#######################################
## RING LIKE GEOMETRY
#######################################
# Calculates distance on ring, phi metric
# X, Y are arrays of phi
def _cdist_phi(X,Y):
phi1 = preproc(X)
phi2 = preproc(Y)
phi_d =np.pi -np.abs(np.pi-np.abs(phi1[:,np.newaxis] - phi2[:])) # GIVES MATRIX OF DIFFERENCE OF PHI VALUES
return (4/np.pi)*phi_d
# Calculates distance on ring, cos phi measure
# X, Y are arrays of phi
def _cdist_phicos(X,Y):
phi1 = preproc(X)
phi2 = preproc(Y)
phi_d =np.pi -np.abs(np.pi-np.abs(phi1[:,np.newaxis] - phi2[:])) # GIVES MATRIX OF DIFFERENCE OF PHI VALUES
return (np.pi/(np.pi-2))*(1-np.cos(phi_d))
#######################################
## SPHERICAL GEOMETRY
#######################################
# Calculates the distance on the sphere, angluar distance
# X, Y are arrays of 3 momenta of the particles in the event
def _cdist_sphere(X,Y):
theta_d=np.array([[np.arccos(np.around(np.dot(X[i],Y[j]), decimals=5)/np.around(LA.norm(Y[j])*LA.norm(X[i]),decimals=5)) for j in range(len(Y))] for i in range(len(X))])
return theta_d
# Calculates distance on sphere, cos distance
# X, Y are arrays of 3 momenta of the particles in the event
def _cdist_cos(X,Y):
cos_d=np.array([[2*(1-np.around(np.dot(X[i],Y[j]), decimals=5)/np.around(LA.norm(Y[j])*LA.norm(X[i]),decimals=5)) for j in range(len(Y))] for i in range(len(X))])
return cos_d
# Distance on sphere, sqrt cos distance
# X, Y are arrays of 3 momenta of the particles in the event
def _cdist_sqrt_cos(X,Y):
cos_d=np.array([[(3./2.)*np.sqrt(1-np.around(np.dot(X[i],Y[j]), decimals=5)/np.around(LA.norm(Y[j])* | LA.norm(X[i]) | numpy.linalg.norm |
from unittest import TestCase
from esbo_etc.classes.SpectralQty import SpectralQty
import astropy.units as u
import numpy as np
class TestSpectralQty(TestCase):
qty = np.arange(1.1, 1.5, 0.1) << u.W / (u.m ** 2 * u.nm)
wl = np.arange(200, 204, 1) << u.nm
def setUp(self):
self.sqty = SpectralQty(self.wl, self.qty)
def test___eq__(self):
sqty_2 = SpectralQty(self.wl, self.qty)
self.assertEqual(self.sqty, sqty_2)
def test___mul__(self):
# Integer
self.assertEqual(self.sqty * 2, SpectralQty(np.arange(200, 204, 1) << u.nm,
np.arange(2.2, 3.0, 2e-1) << u.W / (u.m ** 2 * u.nm)))
self.assertEqual(2 * self.sqty, SpectralQty(np.arange(200, 204, 1) << u.nm,
np.arange(2.2, 3.0, 2e-1) << u.W / (u.m ** 2 * u.nm)))
# Float
self.assertEqual(self.sqty * 2., SpectralQty(np.arange(200, 204, 1) << u.nm,
np.arange(2.2, 3.0, 2e-1) << u.W / (u.m ** 2 * u.nm)))
self.assertEqual(2. * self.sqty, SpectralQty(np.arange(200, 204, 1) << u.nm,
np.arange(2.2, 3.0, 2e-1) << u.W / (u.m ** 2 * u.nm)))
# SpectralQty
self.assertEqual(self.sqty * SpectralQty(self.wl, np.arange(1, 5, 1) << u.m),
SpectralQty(self.wl, np.array([1.1, 2.4, 3.9, 5.6]) << u.W / (u.m * u.nm)))
self.assertEqual(SpectralQty(self.wl, np.arange(1, 5, 1) << u.m) * self.sqty,
SpectralQty(self.wl, np.array([1.1, 2.4, 3.9, 5.6]) << u.W / (u.m * u.nm)))
# rebin without extrapolation and without reduction
self.assertEqual(
self.sqty * SpectralQty(np.arange(199.5, 204.5, 1) << u.nm, np.arange(1, 6, 1) << u.m),
SpectralQty(self.wl, [1.65, 3.0, 4.55, 6.3] * u.W / (u.m * u.nm)))
# rebin without extrapolation and with reduction
self.assertEqual(
self.sqty * SpectralQty(np.arange(200.5, 204.5, 1) << u.nm, np.arange(1, 5, 1) << u.m, fill_value=False),
SpectralQty(np.arange(201, 204) << u.nm, | np.array([1.8, 3.25, 4.9]) | numpy.array |
import numpy as np
import math as m
from ._free_utils import get_mu_and_ci
import random
class PyEVI_FSUCRLv1(object):
def __init__(self,
nb_states,
nb_options,
macro_actions_per_state,
mdp_actions_per_state,
threshold,
option_policies,
reachable_states_per_option,
options_terminating_conditions,
bound_type="chernoff",
random_state = None):
self.nb_states = nb_states
self.nb_options = nb_options
self.threshold = threshold
self.actions_per_state = macro_actions_per_state
self.mdp_actions_per_state = mdp_actions_per_state
self.u1 = np.zeros(nb_states)
self.u2 = np.zeros(nb_states)
self.option_policies = option_policies
self.reachable_states_per_option = reachable_states_per_option
self.options_terminating_conditions = options_terminating_conditions
if bound_type == "chernoff":
self.bound_type = 0
elif bound_type == "chernoff_statedim":
self.bound_type = 1
elif bound_type == "bernstein":
self.bound_type = 2
else:
raise ValueError("Unknown bound type")
def compute_mu_info(self,
estimated_probabilities_mdp,
estimated_rewards_mdp,
beta_r,
nb_observations_mdp,
alpha_mu,
total_time,
delta,
max_nb_actions,
r_max):
nb_states = self.nb_states
nb_options = self.nb_options
r_tilde_opt = [None] * nb_options
mu_opt = [None] * nb_options
condition_numbers_opt = np.empty((nb_options,))
beta_mu_p = np.zeros((nb_options,))
for o in range(nb_options):
option_policy = self.option_policies[o]
option_reach_states = self.reachable_states_per_option[o]
term_cond = self.options_terminating_conditions[o]
opt_nb_states = len(option_reach_states)
Q_o = np.zeros((opt_nb_states, opt_nb_states))
# compute the reward and the mu
r_o = [0] * len(option_reach_states)
visits = np.inf
bernstein_log = m.log(6* max_nb_actions / delta)
for i, s in enumerate(option_reach_states):
option_action = option_policy[s]
option_action_index = self.mdp_actions_per_state[s].index(option_action)
r_o[i] = min(r_max, estimated_rewards_mdp[s, option_action_index] + beta_r[s,option_action_index])
if visits > nb_observations_mdp[s, option_action_index]:
visits = nb_observations_mdp[s, option_action_index]
bernstein_bound = 0.
nb_o = max(1, nb_observations_mdp[s, option_action_index])
for j, sprime in enumerate(option_reach_states):
prob = estimated_probabilities_mdp[s][option_action_index][sprime]
#q_o[i,0] += term_cond[sprime] * prob
Q_o[i,j] = (1. - term_cond[sprime]) * prob
bernstein_bound += np.sqrt(bernstein_log * 2 * prob * (1 - prob) / nb_o) + bernstein_log * 7 / (3 * nb_o)
if beta_mu_p[o] < bernstein_bound:
beta_mu_p[o] = bernstein_bound
e_m = np.ones((opt_nb_states,1))
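# q_o[i] = 1 - sum_j Q_o[i, j]: the probability that the option terminates after one step from state i.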
q_o = e_m - np.dot(Q_o, e_m)
r_tilde_opt[o] = r_o
if self.bound_type == 0:
beta_mu_p[o] = alpha_mu * np.sqrt(14 * opt_nb_states * m.log(2 * max_nb_actions
* (total_time + 1) / delta) / max(1, visits))
elif self.bound_type == 1:
beta_mu_p[o] = alpha_mu * np.sqrt(14 * nb_states * m.log(2 * max_nb_actions
* (total_time + 1) / delta) / max(1, visits))
Pprime_o = np.concatenate((q_o, Q_o[:, 1:]), axis=1)
if not np.allclose(np.sum(Pprime_o, axis=1), np.ones(opt_nb_states)):
print("{}\n{}".format(Pprime_o,Q_o))
Pap = (Pprime_o + np.eye(opt_nb_states)) / 2.
D, U = np.linalg.eig(
np.transpose(Pap)) # eigen decomposition of transpose of P
sorted_indices = np.argsort(np.real(D))
mu = np.transpose(np.real(U))[sorted_indices[-1]]
mu /= np.sum(mu) # stationary distribution
mu_opt[o] = mu
assert len(mu_opt[o]) == len(r_tilde_opt[o])
P_star = np.repeat(np.array(mu, ndmin=2), opt_nb_states,
axis=0) # limiting matrix
# Compute deviation matrix
I = np.eye(opt_nb_states) # identity matrix
Z = | np.linalg.inv(I - Pprime_o + P_star) | numpy.linalg.inv |
# @version: 1.0 date: 05/06/2015 by <NAME>
# @author: <EMAIL>, <EMAIL>, <EMAIL>
# @copyright: EPFL-IC-LCAV 2015
from __future__ import division
import numpy as np
import scipy.linalg as la
from .parameters import constants
from . import utilities as u
from .soundsource import build_rir_matrix
from . import windows
from . import stft
#=========================================================================
# Free (non-class-member) functions related to beamformer design
#=========================================================================
def H(A, **kwargs):
'''Returns the conjugate (Hermitian) transpose of a matrix.'''
return np.transpose(A, **kwargs).conj()
def sumcols(A):
'''
Sums the columns of a matrix (np.array).
The output is a 2D np.array
of dimensions M x 1.
'''
return np.sum(A, axis=1, keepdims=1)
def mdot(*args):
'''Left-to-right associative matrix multiplication of multiple 2D ndarrays.'''
ret = args[0]
for a in args[1:]:
ret = np.dot(ret, a)
return ret
def distance(x, y):
'''
Computes the distance matrix E.
E[i,j] = sqrt(sum((x[:,i]-y[:,j])**2)).
x and y are DxN ndarray containing N D-dimensional vectors.
'''
# Assume x, y are arrays, *not* matrices
x = np.array(x)
y = np.array(y)
# return np.sqrt((x[0,:,np.newaxis]-y[0,:])**2 + (x[1,:,np.newaxis]-y[1,:])**2)
return np.sqrt(np.sum((x[:, :, np.newaxis] - y[:, np.newaxis, :])**2, axis=0))
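# e.g. distance([[0.], [0.], [0.]], [[3.], [4.], [0.]]) -> array([[5.]])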
def unit_vec2D(phi):
return np.array([[np.cos(phi), np.sin(phi)]]).T
def linear_2D_array(center, M, phi, d):
'''
Creates an array of uniformly spaced linear points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M)
The array of points
'''
u = unit_vec2D(phi)
return np.array(center)[:, np.newaxis] + d * \
(np.arange(M)[np.newaxis, :] - (M - 1.) / 2.) * u
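# e.g. linear_2D_array([0, 0], 3, 0, 0.5) places points at x = -0.5, 0.0, 0.5 on the x-axis.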
def circular_2D_array(center, M, phi0, radius):
'''
Creates an array of uniformly spaced circular points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi0: float
The counterclockwise rotation of the first element in the array (from the x-axis)
radius: float
The radius of the array
Returns
-------
ndarray (2, M)
The array of points
'''
phi = np.arange(M) * 2. * np.pi / M
return np.array(center)[:, np.newaxis] + radius * \
np.vstack((np.cos(phi + phi0), np.sin(phi + phi0)))
def poisson_2D_array(center, M, d):
'''
Create array of 2D positions drawn from Poisson process.
Parameters
----------
center: array_like
The center of the array
M: int
The number of points in the first dimension
M: int
The number of points in the second dimension
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M * N)
The array of points
'''
from numpy.random import standard_exponential, randint
R = d*standard_exponential((2, M))*(2*randint(0, 2, (2, M)) - 1)
R = R.cumsum(axis=1)
R -= R.mean(axis=1)[:, np.newaxis]
R += np.array([center]).T
return R
def square_2D_array(center, M, N, phi, d):
'''
Creates an array of uniformly spaced grid points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points in the first dimension
N: int
The number of points in the second dimension
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M * N)
The array of points
'''
c = linear_2D_array(center, M, phi+np.pi/2., d)
R = np.zeros((2, M*N))
for i in np.arange(M):
R[:, i*N:(i+1)*N] = linear_2D_array(c[:, i], N, phi, d)
return R
def spiral_2D_array(center, M, radius=1., divi=3, angle=None):
'''
Generate an array of points placed on a spiral
Parameters
----------
center: array_like
location of the center of the array
M: int
number of microphones
radius: float
microphones are contained within a cirle of this radius (default 1)
divi: int
number of rotations of the spiral (default 3)
angle: float
the angle offset of the spiral (default random)
Returns
-------
ndarray (2, M)
The array of points
'''
num_seg = int(np.ceil(M / divi))
pos_array_norm = np.linspace(0, radius, num=M, endpoint=False)
pos_array_angle = np.reshape(np.tile(np.pi * 2 * np.arange(divi) / divi, num_seg),
(divi, -1), order='F') + \
np.linspace(0, 2 * np.pi / divi,
num=num_seg, endpoint=False)[np.newaxis, :]
pos_array_angle = np.insert(pos_array_angle.flatten('F')[:M - 1], 0, 0)
if angle is None:
pos_array_angle += np.random.rand() * np.pi / divi
else:
pos_array_angle += angle
pos_mic_x = pos_array_norm * np.cos(pos_array_angle)
pos_mic_y = pos_array_norm * np.sin(pos_array_angle)
return np.array([pos_mic_x, pos_mic_y])
def fir_approximation_ls(weights, T, n1, n2):
freqs_plus = np.array(list(weights.keys()))[:, np.newaxis]
freqs = np.vstack([freqs_plus,
-freqs_plus])
omega = 2 * np.pi * freqs
omega_discrete = omega * T
n = np.arange(n1, n2)
# Create the DTFT transform matrix corresponding to a discrete set of
# frequencies and the FIR filter indices
F = np.exp(-1j * omega_discrete * n)
w_plus = np.array(list(weights.values()))[:, :, 0]
w = np.vstack([w_plus,
w_plus.conj()])
return np.linalg.pinv(F).dot(w)
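# Least-squares fit: the pseudo-inverse solves F g ~= w for the FIR filter taps g.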
#=========================================================================
# Classes (microphone array and beamformer related)
#=========================================================================
class MicrophoneArray(object):
'''Microphone array class.'''
def __init__(self, R, fs):
R = np.array(R)
self.dim = R.shape[0] # are we in 2D or in 3D
self.M = R.shape[1] # number of microphones
self.R = R # array geometry
self.fs = fs # sampling frequency of microphones
self.signals = None
self.center = np.mean(R, axis=1, keepdims=True)
def record(self, signals, fs):
'''
This simulates the recording of the signals by the microphones.
In particular, if the microphones and the room simulation
do not use the same sampling frequency, down/up-sampling
is done here.
Parameters
----------
signals:
An ndarray with as many lines as there are microphones.
fs:
the sampling frequency of the signals.
'''
if signals.shape[0] != self.M:
raise NameError('The signals array should have as many lines as there are microphones.')
if signals.ndim != 2:
raise NameError('The signals should be a 2D array.')
if fs != self.fs:
try:
import samplerate
fs_ratio = self.fs / float(fs)
newL = int(fs_ratio * signals.shape[1]) - 1
self.signals = np.zeros((self.M, newL))
# samplerate resample function considers columns as channels (hence the transpose)
for m in range(self.M):
self.signals[m] = samplerate.resample(signals[m], fs_ratio, 'sinc_best')
except ImportError:
raise ImportError('The samplerate package must be installed for resampling of the signals.')
else:
self.signals = signals
def to_wav(self, filename, mono=False, norm=False, bitdepth=float):
'''
Save all the signals to wav files.
Parameters
----------
filename: str
the name of the file
mono: bool, optional
if true, records only the center channel floor(M / 2) (default `False`)
norm: bool, optional
if true, normalize the signal to fit in the dynamic range (default `False`)
bitdepth: int, optional
the format of output samples [np.int8/16/32/64 or float (default)]
'''
from scipy.io import wavfile
if mono is True:
signal = self.signals[self.M // 2]
else:
signal = self.signals.T # each column is a channel
float_types = [float, np.float32, np.float64]
if bitdepth in float_types:
bits = None
elif bitdepth is np.int8:
bits = 8
elif bitdepth is np.int16:
bits = 16
elif bitdepth is np.int32:
bits = 32
elif bitdepth is np.int64:
bits = 64
else:
raise NameError('No such type.')
if norm:
from .utilities import normalize
signal = normalize(signal, bits=bits)
signal = np.array(signal, dtype=bitdepth)
wavfile.write(filename, self.fs, signal)
class Beamformer(MicrophoneArray):
'''
At some point, in some nice way, the design methods
should also go here. Probably with generic arguments.
Parameters
----------
R: numpy.ndarray
Mics positions
fs: int
Sampling frequency
N: int, optional
Length of FFT, i.e. number of FD beamforming weights, equally spaced. Defaults to 1024.
Lg: int, optional
Length of time-domain filters. Default to N.
hop: int, optional
Hop length for frequency domain processing. Default to N/2.
zpf: int, optional
Front zero padding length for frequency domain processing. Default is 0.
zpb: int, optional
Zero padding length for frequency domain processing. Default is 0.
'''
def __init__(self, R, fs, N=1024, Lg=None, hop=None, zpf=0, zpb=0):
MicrophoneArray.__init__(self, R, fs)
# only support even length (in freq)
if N % 2 == 1:
N += 1
self.N = int(N) # FFT length
if Lg is None:
self.Lg = N # TD filters length
else:
self.Lg = int(Lg)
# setup lengths for FD processing
self.zpf = int(zpf)
self.zpb = int(zpb)
self.L = self.N - self.zpf - self.zpb
if hop is None:
self.hop = self.L // 2
else:
self.hop = hop
# for now only support equally spaced frequencies
self.frequencies = np.arange(0, self.N // 2+1) / self.N * float(self.fs)
# weights will be computed later, the array is of shape (M, N/2+1)
self.weights = None
# the TD beamforming filters (M, Lg)
self.filters = None
def __add__(self, y):
''' Concatenates two beamformers together.'''
newR = np.concatenate((self.R, y.R), axis=1)
return Beamformer(newR, self.fs, self.N, self.Lg, hop=self.hop, zpf=self.zpf, zpb=self.zpb)
def filters_from_weights(self, non_causal=0.):
'''
Compute time-domain filters from frequency domain weights.
Parameters
----------
non_causal: float, optional
ratio of filter coefficients used for non-causal part
'''
if self.weights is None:
raise NameError('Weights must be defined.')
self.filters = np.zeros((self.M, self.Lg))
if self.N <= self.Lg:
# go back to time domain and shift DC to center
tw = np.fft.irfft(np.conj(self.weights), axis=1, n=self.N)
self.filters[:, :self.N] = np.concatenate((tw[:, -self.N//2:], tw[:, :self.N//2]), axis=1)
elif self.N > self.Lg:
# Least-square projection
for i in np.arange(self.M):
Lgp = | np.floor((1 - non_causal)*self.Lg) | numpy.floor |
#!/usr/bin/env python
"""
Write SNPs to VCF format.
"""
import datetime
import numpy as np
import pandas as pd
from ipcoal.io.transformer import Transformer
from ipcoal.io.genos import Genos
from ipcoal.utils.utils import convert_intarr_to_bytearr
import ipcoal
# TODO add an attribution of the ipcoal version and list sim parameters.
VCFHEADER = """\
##fileformat=VCFv4.2
##fileDate={date}
##source=ipcoal-v.{version}
##reference={reference}
{contig_lines}
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t\
"""
class VCF:
"""
Write SNPs in VCF format. Note: we use the true ancestral sequence to
represent the reference such that matching the reference or not
is the same as ancestral vs. derived. However, we only include sites
that are variable among the samples, i.e., a SNP is not defined if
all samples are derived relative to the reference (ancestral seq).
Parameters
==========
seqs (arr): int array (nloci, nsamples, nsites).
names (str): haploid sample names.
diploid (bool): make diploid genos.
ancestral (arr): the ancestral seq (nloci, nsites)
fill_missing_alleles: write diploids with missing alleles (0|.) as (0|0).
"""
def __init__(self, seqs, names, diploid, ancestral, fill_missing_alleles):
self.names = names
self.seqs = seqs
self.aseqs_ints = ancestral
self.aseqs_bytes = convert_intarr_to_bytearr(self.aseqs_ints)
self.fill_missing_alleles = fill_missing_alleles
# do not combine for ambiguity codes, but get diploid_map and names.
txf = Transformer(self.seqs, self.names, diploid)
self.dindex_map = txf.dindex_map
self.dnames = txf.names
def get_header(self):
"""
Called AFTER the .df vcf is built.
"""
# build the header
contig_lines = []
for loc in range(self.seqs.shape[0]):
arr = self.seqs[loc]
if np.any(arr != arr[0], axis=0).sum():
contig_lines.append(
"##contig=<ID={},length={}>".format(loc, arr.shape[1]))
header = VCFHEADER.format(**{
"date": datetime.datetime.now(),
"version": ipcoal.__version__,
"reference": "true_simulated_ancestral_sequence",
"contig_lines": "\n".join(contig_lines)
})
header = "{}{}\n".format(header, "\t".join(self.dnames))
return header
def vcf_chunk_generator(self):
"""
Yield a DataFrame of genotypes and metadata for each locus.
"""
# iterate over loci building vcf dataframes
for lidx in range(self.seqs.shape[0]):
# get array of sequence data
arr = self.seqs[lidx]
# get indices of variable sites while allowing for missing data
marr = np.ma.array(data=arr, mask=(arr == 9))
common = marr.mean(axis=0).round().astype(int)
varsites = np.where(np.any(marr != common, axis=0).data)[0]
nsites = varsites.size
# vcf dataframe
vdf = pd.DataFrame({
"CHROM": | np.repeat(0, nsites) | numpy.repeat |
from concurrent.futures import ProcessPoolExecutor
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from treeinterpreter import treeinterpreter as ti
def parallel_trees(m, fn, n_jobs=8):
return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
def get_ensamble_preds(model, x):
"""Return separately the predictions done by all the estimators in ensemble.
Parameters
----------
model
x
Returns
-------
predictions : ndarray shaped (estimators, n_samples)
"""
# TODO: figure out how to get the faster version commented below working
# (AttributeError: Can't pickle local object 'get_ensamble_preds.<locals>.get_preds')
# def get_preds(t):
# return t.predict(x)
#
# return np.stack(parallel_trees(model, get_preds))
# WORKAROUND for large datasets:
# run code above manually (eg. in script.notebook), then use
# make_ensemble_preds_with_confidence_table directly instead of get_ensemble_preds_with_confidence
return np.stack([t.predict(x) for t in model.estimators_])
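# --- Illustrative usage sketch (not part of the original module) -------------
# How the stacked per-estimator predictions are typically consumed: the mean
# over axis 0 is the ensemble prediction and the standard deviation is a rough
# per-sample confidence measure. The RandomForestRegressor and synthetic data
# below are assumptions made for the example; any fitted sklearn ensemble that
# exposes `estimators_` works the same way.
def _demo_ensemble_confidence():
    from sklearn.ensemble import RandomForestRegressor
    x_demo = np.random.rand(200, 4)
    y_demo = x_demo.sum(axis=1)
    rf = RandomForestRegressor(n_estimators=20).fit(x_demo, y_demo)
    preds = get_ensamble_preds(rf, x_demo)           # shape (20, 200)
    y_hat, y_std = preds.mean(axis=0), preds.std(axis=0)
    return y_hat, y_std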
def make_ensemble_preds_with_confidence_table(df_raw_val, preds, fld, y_fld):
df = df_raw_val.copy()
df['pred_std'] = np.std(preds, axis=0)
df['pred'] = np.mean(preds, axis=0)
flds = [fld, y_fld, 'pred', 'pred_std']
tbl = df[flds].groupby(flds[0], as_index=False).mean()
return tbl
def get_ensemble_preds_with_confidence(
model, x_val, df_raw_val, fld, y_fld
):
preds = get_ensamble_preds(model, x_val)
return make_ensemble_preds_with_confidence_table(df_raw_val, preds, fld, y_fld)
# TODO: confirm this works
def plot_ensemble_regression_preds_with_confidence(
model, x_val, df_raw_val, fld, y_fld, figsize=None
):
preds = get_ensamble_preds(model, x_val)
tbl = make_ensemble_preds_with_confidence_table(df_raw_val, preds, fld, y_fld)
return tbl.plot(fld, 'pred', 'barh',
xerr='pred_std', alpha=0.6, figsize=figsize)
def rf_feat_importance(model, df):
return pd.DataFrame(
{'cols': df.columns, 'imp': model.feature_importances_}
).sort_values('imp', ascending=False)
def rf_show_plot_fi(model, df, figsize=(12,7), top_n=None):
fi = rf_feat_importance(model, df)
if top_n:
fi = fi[:top_n]
print(fi)
fi.plot('cols', 'imp', 'barh', figsize=figsize, legend=False)
def ti_make_readable_contribs(df, cs):
idxs = np.argsort(cs[:, 0])
sorted_feats = df.columns[idxs]
return [o for o in zip(
sorted_feats,
df.iloc[0][sorted_feats],
cs[idxs],
)]
def ti_make_explained_predictions_data(df, preds, contribs):
xcontribs = [ti_make_readable_contribs(df, c) for c in contribs]
preds_dict = [{
i : p
for i, p in enumerate(pred)
} for pred in preds]
return list(zip(preds_dict, xcontribs))
def rf_predict_with_explanations(model, x):
preds, biases, contribs = ti.predict(model, x)
return ti_make_explained_predictions_data(x, preds, contribs)
def plot_train_vs_test(
model, x, y,
trn_f=0.8, n_runs=5, step=50, start=None,
ylim=None
):
if start is None:
start = step
extra_plot_args = {'ylim': ylim} if ylim else {}
test_size = int(len(x) * (1 - trn_f))
train_sizes = list(range(start, int(x.shape[0] * trn_f), step))
scores_trn = np.zeros((3, len(train_sizes)))
scores_val = np.zeros((3, len(train_sizes)))
s_trn = np.zeros(n_runs)
s_val = np.zeros(n_runs)
for i, train_sz in enumerate(train_sizes):
for j in range(n_runs):
x_trn, x_val, y_trn, y_val = train_test_split(
x, y, test_size=test_size)
x_trn = x_trn[:train_sz]
y_trn = y_trn[:train_sz]
model.fit(x_trn, y_trn)
s_trn[j] = model.score(x_trn, y_trn)
s_val[j] = model.score(x_val, y_val)
scores_trn[0, i] = np.mean(s_trn)
scores_trn[1, i] = np.min(s_trn)
scores_trn[2, i] = np.max(s_trn)
scores_val[0, i] = np.mean(s_val)
scores_val[1, i] = np.min(s_val)
scores_val[2, i] = np.max(s_val)
import random
import numpy as np
def create_reference_solution(chromosome_length):
number_of_ones = int(chromosome_length / 2)
# Build an array with an equal mix of zero and ones
reference = np.zeros(chromosome_length)
reference[0: number_of_ones] = 1
# Shuffle the array to mix the zeros and ones
np.random.shuffle(reference)
return reference
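# --- Illustrative sketch (not part of the original script) -------------------
# What `create_reference_solution` produces: a shuffled 0/1 target with exactly
# half of the genes set to 1 (for even chromosome lengths); the GA population
# is scored against this string.
demo_reference = create_reference_solution(10)
assert demo_reference.shape == (10,) and demo_reference.sum() == 5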
def create_starting_population(individuals, chromosome_length):
# Set up an initial array of all zeros
population = np.zeros((individuals, chromosome_length))
import numpy as np
import skimage.io as io
import cv2
import tensorflow as tf
from pycocotools.coco import COCO
from random import shuffle
from utils import combbe4d, combbe2d, comiou4d, comiou2d
def genx(coco, img_dir, classes, limit, ishape):
'''
Arguments
coco: pycocotools COCO instance holding the annotations
img_dir: directory containing the image files
classes: list of category names to draw images from
limit: (start, stop) slice applied to the shuffled image list
ishape: (height, width, channels) of the zero-padded output
Return
x: float32 image array padded to ishape
img_id: COCO image id of the yielded image
'''
cat_ids = coco.getCatIds(catNms=classes)
img_ids = coco.getImgIds(catIds=cat_ids)
# img_ids = coco.getImgIds();
imgs = coco.loadImgs(img_ids)
shuffle(imgs)
print(cat_ids)
print(len(imgs))
for img in imgs[limit[0]:limit[1]]:
# image data (h, w, channels)
pix = io.imread('{}/{}'.format(img_dir, img['file_name']))
# padding input img
x = np.zeros(ishape, dtype='float32')
if len(pix.shape) == 2:
x[:pix.shape[0], :pix.shape[1], 0] = pix[:ishape[0], :ishape[1]]
x[:pix.shape[0], :pix.shape[1], 1] = pix[:ishape[0], :ishape[1]]
x[:pix.shape[0], :pix.shape[1], 2] = pix[:ishape[0], :ishape[1]]
else:
x[:pix.shape[0], :pix.shape[1], :] = pix[:ishape[0], :ishape[1], :]
yield x, img['id']
def genx_selected(coco, img_dir, img_ids, ishape):
'''
Arguments
coco: pycocotools COCO instance holding the annotations
img_dir: directory containing the image files
img_ids: list of COCO image ids to load
ishape: (height, width, channels) of the zero-padded output
Return
x: float32 image array padded to ishape
img_id: COCO image id of the yielded image
'''
imgs = coco.loadImgs(img_ids)
for img in imgs:
# image data (h, w, channels)
pix = io.imread('{}/{}'.format(img_dir, img['file_name']))
# padding input img
x = np.zeros(ishape, dtype='float32')
if len(pix.shape) == 2:
x[:pix.shape[0], :pix.shape[1], 0] = pix[:ishape[0], :ishape[1]]
x[:pix.shape[0], :pix.shape[1], 1] = pix[:ishape[0], :ishape[1]]
x[:pix.shape[0], :pix.shape[1], 2] = pix[:ishape[0], :ishape[1]]
else:
x[:pix.shape[0], :pix.shape[1], :] = pix[:ishape[0], :ishape[1], :]
yield x, img['id']
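# --- Illustrative sketch (not part of the original module) -------------------
# The padding logic shared by `genx` and `genx_selected` above: the image is
# copied into the top-left corner of a zero canvas of shape `ishape`, and a
# grayscale image is replicated across all three channels. Sizes are toy values
# chosen for the example.
def _demo_pad_into_canvas():
    ishape = (64, 64, 3)
    pix = np.full((50, 40), 7, dtype='uint8')        # grayscale example image
    x = np.zeros(ishape, dtype='float32')
    if len(pix.shape) == 2:
        for c in range(3):
            x[:pix.shape[0], :pix.shape[1], c] = pix[:ishape[0], :ishape[1]]
    else:
        x[:pix.shape[0], :pix.shape[1], :] = pix[:ishape[0], :ishape[1], :]
    assert x[:50, :40, :].min() == 7.0 and x[50:, :, :].max() == 0.0
    return x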
def gety(coco, img_id, classes, frame_mode=False, mapping=None):
'''
Arguments
coco: pycocotools COCO instance holding the annotations
img_id: COCO image id to load annotations for
classes: list of category names to filter annotations by
frame_mode: True returns [y1, x1, y2, x2, cat], False returns [x, y, w, h, cat]
mapping: optional dict remapping COCO category ids to new labels
Return
bbox2d: array of boxes with the category in the last column
masks: instance masks (currently None, see the commented-out line below)
'''
cat_ids = coco.getCatIds(catNms=classes)
ann_ids = coco.getAnnIds(imgIds=img_id, catIds=cat_ids, iscrowd=0)
anns = coco.loadAnns(ids=ann_ids)
masks = None
# masks = np.array([coco.annToMask(ann) for ann in anns])
if mapping is None:
if frame_mode:
return np.array([[ann['bbox'][1], ann['bbox'][0],
ann['bbox'][1]+ann['bbox'][3], ann['bbox'][0]+ann['bbox'][2],
ann['category_id']] for ann in anns]), masks
return np.array([[ann['bbox'][0], ann['bbox'][1],
ann['bbox'][2], ann['bbox'][3],
ann['category_id']] for ann in anns]), masks
else:
if frame_mode:
return np.array([[ann['bbox'][1], ann['bbox'][0],
ann['bbox'][1]+ann['bbox'][3], ann['bbox'][0]+ann['bbox'][2],
mapping[ann['category_id']]] for ann in anns]), masks
import numpy as np
from functools import cached_property
class MarkovChain:
""" Discrete-time Markov chain. Provides methods to draw samples.
The magnitudes of the products of transition rates with frame intervals
are assumed to be <<1. (That is, we assume that the probability of two
state transitions in one frame interval is negligible.)
init
----
TM : 2D ndarray, transition rates. The element TM[i,j] is assumed
to give the transition rate from the i^th to the j^th state
in Hz.
dt : float, frame interval (seconds)
"""
def __init__(self, TM: np.ndarray, dt: float=1.0):
self.dt = dt
self.TM = np.asarray(TM)
assert len(self.TM.shape) == 2
assert TM.shape[0] == TM.shape[1]
self.n_states = self.TM.shape[0]
self.states = np.arange(self.n_states)
# Normalize transition matrix
self.P = self.TM.copy() * dt
self.P[self.states, self.states] = 1.0 - \
(self.P.sum(axis=1) - np.diagonal(self.P))
def is_diag(self, M: np.ndarray) -> bool:
""" Return True if a 2D ndarray is diagonal, 0 otherwise. """
i, j = np.nonzero(M)
return np.all(i==j)
@cached_property
def stat_dist(self) -> np.ndarray:
""" Stationary distribution; 1D ndarray of length (n_states,).
Returns
-------
1D ndarray of shape (n_states,), the stationary
distribution for the Markov chain
"""
if self.is_diag(self.P):
return np.diag(self.TM) / self.TM.sum()
else:
L, V = np.linalg.eig(self.P.T)
v = V[:,np.argmin(np.abs(L-1.0))]
return v / v.sum()
def __call__(self, n: int, initial: int=None) -> np.ndarray:
"""
Simulate a single state history of length *n*.
If no initial state is provided, the initial state is drawn
from the stationary distribution.
Parameters
----------
n : int, number of timepoints to simulate
Returns
-------
1D ndarray of shape (n,), the state indices at each
timepoint
"""
if self.is_diag(self.TM):
return np.ones(n, dtype=np.int64) * (np.random.choice(self.states, p=self.stat_dist) if initial is None else initial)
else:
s = np.empty(n, dtype=np.int64)
if initial is None:
s[0] = np.random.choice(self.states, p=self.stat_dist)
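# --- Illustrative sketch (not part of the original module) -------------------
# A two-state example of the chain defined above: with rates TM = [[0, 2],
# [1, 0]] Hz and dt = 0.01 s, the per-frame matrix P becomes
# [[0.98, 0.02], [0.01, 0.99]], whose stationary distribution is [1/3, 2/3]
# (detailed balance: pi_0 * 0.02 = pi_1 * 0.01). Only `stat_dist` is exercised
# here because the sampling loop of __call__ is cut off above.
def _demo_two_state_chain():
    mc = MarkovChain(np.array([[0.0, 2.0], [1.0, 0.0]]), dt=0.01)
    assert np.allclose(mc.stat_dist, [1.0 / 3.0, 2.0 / 3.0])
    return mc.stat_dist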
import numpy as np
from PIL import Image
def random_crop(im,crop_shape,seed=None):
assert crop_shape[0] < im.width and crop_shape[1] < im.height, 'wrong crop shape'
np.random.seed(seed)
a = np.random.randint(im.width - crop_shape[0] + 1)
b = np.random.randint(im.height - crop_shape[1] + 1)
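# --- Illustrative sketch (not part of the original module) -------------------
# How the sampled corner is typically used: (a, b) is the top-left corner of a
# valid crop window, so a PIL crop box of (a, b, a + w, b + h) stays inside the
# image. The im.crop(...) call below is an assumption for illustration; the
# actual cropping step of `random_crop` is cut off above.
def _demo_random_crop_corner():
    im = Image.fromarray(np.zeros((100, 120, 3), dtype=np.uint8))   # 120x100 image
    w, h = 32, 24
    a = np.random.randint(im.width - w + 1)
    b = np.random.randint(im.height - h + 1)
    patch = im.crop((a, b, a + w, b + h))
    assert patch.size == (w, h)
    return patch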
"""
rnn_gru_cell_test_cy.py
Test the correctness of the GRU-cell implementation.
"""
import os
import unittest
from random import random
from numpy import asarray, ones, zeros
from torch import float64, FloatTensor, tensor
from torch.nn import GRUCell as PytorchGRU
from population.utils.rnn_cell_util.cy.berkeley_gru_cy import GRUCellCy as GRUBerkeley
from population.utils.rnn_cell_util.pytorch_gru import GRUCell as GRUPyTorch
EPSILON = 1e-5
def get_gru_berkeley(input_size):
"""Get a GRU-cell of the requested input-size, completely initialized with zeros."""
bias_h = zeros((3,))
weight_hh = zeros((3, 1))
weight_xh = zeros((3, input_size))
return GRUBerkeley(
input_size=input_size,
bias=bias_h,
weight_hh=weight_hh,
weight_xh=weight_xh,
)
def get_pytorch_gru(input_size, used_gru):
"""Load in a PyTorch GRU that is a copy of the currently used GRU."""
gru = PytorchGRU(input_size, 1)
if type(used_gru) == GRUBerkeley:
gru.bias_hh[:] = tensor(zeros((3,)), dtype=float64)[:]
gru.bias_ih[:] = tensor(used_gru.bias, dtype=float64)[:]
gru.weight_hh[:] = tensor(used_gru.weight_hh, dtype=float64)[:]
gru.weight_ih[:] = tensor(used_gru.weight_xh, dtype=float64)[:]
elif type(used_gru) == GRUPyTorch:
gru.bias_hh[:] = tensor(used_gru.bias_hh, dtype=float64)[:]
gru.bias_ih[:] = tensor(used_gru.bias_ih, dtype=float64)[:]
gru.weight_hh[:] = tensor(used_gru.weight_hh, dtype=float64)[:]
gru.weight_ih[:] = tensor(used_gru.weight_ih, dtype=float64)[:]
else:
raise Exception(f"Invalid input for used_gru: {used_gru}")
return gru
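# --- Illustrative note (not part of the original test module) ----------------
# Why the zero-initialized GRU ignores its input: with all weights and biases
# at zero, the gates are sigmoid(0) = 0.5 and the candidate state is
# tanh(0) = 0, so the update h' = (1 - z) * n + z * h = 0.5 * h, which stays at
# 0 when the hidden state starts at 0, regardless of the input value.
def _demo_zero_gru_is_silent():
    gru = get_gru_berkeley(1)
    out = gru(asarray([[123.456]]))
    assert abs(float(out)) < 1e-12
    return out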
# noinspection PyArgumentList
class Berkeley(unittest.TestCase):
"""Test the Berkeley implementation of the GRU-cell."""
def test_single_input_single_batch(self):
"""> Test when only one input given and batch-size is only one."""
# Folder must be root to load in make_net properly
if os.getcwd().split('\\')[-1] == 'tests': os.chdir('..')
# Get 'empty' GRU
gru = get_gru_berkeley(1)
# Completely zero GRU, all inputs get ignored
self.assertEqual(gru(asarray([[0]])), 0)
gru.hx = asarray([]) # GRU keeps own state, reset it
self.assertEqual(gru(asarray([[1]])), 0)
gru.hx = asarray([]) # GRU keeps own state, reset it
# Modify the GRU to have weight-arrays of one
gru.weight_hh = asarray(ones((3, 1)))
gru.weight_xh = asarray(ones((3, 1)))
# Load in PyTorch native GRU to compare with
pytorch_gru = get_pytorch_gru(1, gru)
# Test if they continue to obtain the same results
for _ in range(100):
i = random()
a = gru(asarray([[i]]))
gru.hx = asarray([]) # GRU keeps own state, reset it
b = pytorch_gru(FloatTensor([[i]]))
self.assertEqual(a.shape, b.shape)
self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)
# Set bias to minus ones
gru.bias = ones((3,)) * -1
# Load in PyTorch native GRU to compare with
pytorch_gru = get_pytorch_gru(1, gru)
# Test if they continue to obtain the same results
for _ in range(100):
i = random()
a = gru(asarray([[i]]))
gru.hx = asarray([]) # GRU keeps own state, reset it
b = pytorch_gru(FloatTensor([[i]]))
self.assertEqual(a.shape, b.shape)
self.assertTrue(float(a) - EPSILON <= float(b) <= float(a) + EPSILON)
def test_single_input_multi_batch(self):
"""> Test when only one input given and batch-size is more than one."""
# Folder must be root to load in make_net properly
if os.getcwd().split('\\')[-1] == 'tests': os.chdir('..')
# Get 'empty' GRU
gru = get_gru_berkeley(1)
# Completely zero GRU, all inputs get ignored
result = gru(asarray([[0], [0]]))
for aa, bb in zip(result, asarray([[0], [0]])):
self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)
gru.hx = asarray([]) # GRU keeps own state, reset it
result = gru(asarray([[1], [1]]))
for aa, bb in zip(result, asarray([[0], [0]])):
self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)
gru.hx = asarray([]) # GRU keeps own state, reset it
# Modify the GRU to have weight-arrays of one
gru.weight_hh = ones((3, 1))
gru.weight_xh = ones((3, 1))
# Load in PyTorch native GRU to compare with
pytorch_gru = get_pytorch_gru(1, gru)
# Test if they continue to obtain the same results
for _ in range(100):
i = random()
a = gru(asarray([[i], [i]]))
gru.hx = asarray([]) # GRU keeps own state, reset it
b = pytorch_gru(FloatTensor([[i], [i]]))
self.assertEqual(a.shape, b.shape)
for aa, bb in zip(a, b):
self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)
# Set bias to minus ones
gru.bias = ones((3,)) * -1
# Load in PyTorch native GRU to compare with
pytorch_gru = get_pytorch_gru(1, gru)
# Test if they continue to obtain the same results
for _ in range(100):
i = random()
a = gru(asarray([[i], [i]]))
gru.hx = asarray([]) # GRU keeps own state, reset it
b = pytorch_gru(FloatTensor([[i], [i]]))
self.assertEqual(a.shape, b.shape)
for aa, bb in zip(a, b):
self.assertTrue(float(aa) - EPSILON <= float(bb) <= float(aa) + EPSILON)
def test_multi_input_single_batch(self):
"""> Test when only one input given and batch-size is more than one."""
# Folder must be root to load in make_net properly
if os.getcwd().split('\\')[-1] == 'tests': os.chdir('..')
# Get 'empty' GRU
gru = get_gru_berkeley(2)
# Completely zero GRU, all inputs get ignored
self.assertEqual(gru(asarray([[0, 0]])), 0)
gru.hx = asarray([]) # GRU keeps own state, reset it
self.assertEqual(gru(asarray([[1, 1]])), 0)
gru.hx = asarray([]) # GRU keeps own state, reset it
# Modify the GRU to have weight-arrays of one
gru.weight_hh = ones((3, 1))
gru.weight_xh = ones((3, 2))
# Load in PyTorch native GRU to compare with
pytorch_gru = get_pytorch_gru(2, gru)
# Test if they continue to obtain the same results
for _ in range(100):
i = random()
a = gru(asarray([[i, i]]))
from orbit.models.ktrlite import KTRLite
import pandas as pd
import numpy as np
import math
from scipy.stats import nct
from enum import Enum
import torch
import matplotlib.pyplot as plt
from copy import deepcopy
from ..constants.constants import (
KTRTimePointPriorKeys,
PredictMethod,
TrainingMetaKeys,
PredictionMetaKeys
)
from ..exceptions import IllegalArgument, ModelException, PredictionException
from ..utils.general import is_ordered_datetime
from ..utils.kernels import gauss_kernel, sandwich_kernel
from ..utils.features import make_seasonal_regressors
from .model_template import ModelTemplate
from ..estimators.pyro_estimator import PyroEstimatorSVI
from ..models import KTRLite
from orbit.constants.palette import OrbitPalette
from ..utils.knots import get_knot_idx, get_knot_dates
from ..utils.plot import orbit_style_decorator
class DataInputMapper(Enum):
"""
mapping from object input to pyro input
"""
# All of the following have default defined in DEFAULT_SLGT_FIT_ATTRIBUTES
# ---------- Data Input ---------- #
# observation related
NUM_OF_VALID_RESPONSE = 'N_VALID_RES'
WHICH_VALID_RESPONSE = 'WHICH_VALID_RES'
RESPONSE_OFFSET = 'MEAN_Y'
DEGREE_OF_FREEDOM = 'DOF'
_RESIDUALS_SCALE_UPPER = 'RESID_SCALE_UB'
# ---------- Level ---------- #
_NUM_KNOTS_LEVEL = 'N_KNOTS_LEV'
LEVEL_KNOT_SCALE = 'LEV_KNOT_SCALE'
_KERNEL_LEVEL = 'K_LEV'
# ---------- Regression ---------- #
_NUM_KNOTS_COEFFICIENTS = 'N_KNOTS_COEF'
_KERNEL_COEFFICIENTS = 'K_COEF'
_NUM_OF_REGULAR_REGRESSORS = 'N_RR'
_NUM_OF_POSITIVE_REGRESSORS = 'N_PR'
_NUM_OF_NEGATIVE_REGRESSORS = 'N_NR'
_REGULAR_REGRESSOR_MATRIX = 'RR'
_POSITIVE_REGRESSOR_MATRIX = 'PR'
_NEGATIVE_REGRESSOR_MATRIX = 'NR'
_REGULAR_REGRESSOR_INIT_KNOT_LOC = 'RR_INIT_KNOT_LOC'
_REGULAR_REGRESSOR_INIT_KNOT_SCALE = 'RR_INIT_KNOT_SCALE'
_REGULAR_REGRESSOR_KNOT_SCALE = 'RR_KNOT_SCALE'
_POSITIVE_REGRESSOR_INIT_KNOT_LOC = 'PR_INIT_KNOT_LOC'
_POSITIVE_REGRESSOR_INIT_KNOT_SCALE = 'PR_INIT_KNOT_SCALE'
_POSITIVE_REGRESSOR_KNOT_SCALE = 'PR_KNOT_SCALE'
_NEGATIVE_REGRESSOR_INIT_KNOT_LOC = 'NR_INIT_KNOT_LOC'
_NEGATIVE_REGRESSOR_INIT_KNOT_SCALE = 'NR_INIT_KNOT_SCALE'
_NEGATIVE_REGRESSOR_KNOT_SCALE = 'NR_KNOT_SCALE'
# ---------- Prior Specification ---------- #
_COEF_PRIOR_LIST = 'COEF_PRIOR_LIST'
_LEVEL_KNOTS = 'LEV_KNOT_LOC'
_SEAS_TERM = 'SEAS_TERM'
class BaseSamplingParameters(Enum):
"""
The output sampling parameters related with the base model
"""
LEVEL_KNOT = 'lev_knot'
LEVEL = 'lev'
YHAT = 'yhat'
OBS_SCALE = 'obs_scale'
class RegressionSamplingParameters(Enum):
"""
The output sampling parameters related with regression component.
"""
COEFFICIENTS_KNOT = 'coef_knot'
COEFFICIENTS_INIT_KNOT = 'coef_init_knot'
COEFFICIENTS = 'coef'
# Defaults Values
DEFAULT_REGRESSOR_SIGN = '='
DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE = 1.0
DEFAULT_COEFFICIENTS_INIT_KNOT_LOC = 0
DEFAULT_COEFFICIENTS_KNOT_SCALE = 0.1
DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER = 0.01
DEFAULT_UPPER_BOUND_SCALE_MULTIPLIER = 1.0
class KTRModel(ModelTemplate):
"""Base KTR model object with shared functionality for PyroVI method
Parameters
----------
level_knot_scale : float
sigma for level; default to be .1
level_segments : int
the number of segments partitioned by the knots of level (trend)
level_knot_distance : int
the distance between every two knots of level (trend)
level_knot_dates : array like
list of pre-specified dates for the level knots
seasonality : int, or list of int
multiple seasonality
seasonality_fs_order : int, or list of int
fourier series order for seasonality
seasonality_segments : int
the number of segments partitioned by the knots of seasonality
seasonal_initial_knot_scale : float
scale parameter for seasonal regressors initial coefficient knots; default to be 1
seasonal_knot_scale : float
scale parameter for seasonal regressors drift of coefficient knots; default to be 0.1.
regressor_col : array-like strings
regressor columns
regressor_sign : list
list of signs with '=' for regular regressor, '+' for positive regressor, and '-' for negative regressor.
regressor_init_knot_loc : list
list of regressor knot pooling mean priors, default to be 0's
regressor_init_knot_scale : list
list of regressor knot pooling sigma's to control the pooling strength towards the grand mean of regressors;
default to be 1.
regressor_knot_scale : list
list of regressor knot sigma priors; default to be 0.1.
regression_segments : int
the number of segments partitioned by the knots of regression
regression_knot_distance : int
the distance between every two knots of regression
regression_knot_dates : array-like
list of pre-specified dates for regression knots
regression_rho : float
sigma in the Gaussian kernel for the regression term
degree_of_freedom : int
degree of freedom for error t-distribution
date_freq : str
date frequency; if not supplied, the minimum timestamp difference in the date would be used.
coef_prior_list : list of dicts
each dict in the list should have the keys given by KTRTimePointPriorKeys:
NAME, PRIOR_START_TP_IDX (inclusive), PRIOR_END_TP_IDX (not inclusive),
PRIOR_MEAN, PRIOR_SD, and PRIOR_REGRESSOR_COL
residuals_scale_upper : float
flat_multiplier : bool
Default set as True. If False, we will adjust knot scale with a multiplier based on regressor volume
around each knot; When True, set all multiplier as 1
ktrlite_optim_args : dict
the optimizing config for the ktrlite model (to fit level/seasonality). Default to be dict().
"""
_data_input_mapper = DataInputMapper
# stan or pyro model name (e.g. name of `*.stan` file in package)
_model_name = 'ktr'
_supported_estimator_types = [PyroEstimatorSVI]
def __init__(self,
# level
level_knot_scale=0.1,
level_segments=10,
level_knot_distance=None,
level_knot_dates=None,
# seasonality
seasonality=None,
seasonality_fs_order=None,
seasonality_segments=2,
seasonal_initial_knot_scale=1.0,
seasonal_knot_scale=0.1,
# regression
regressor_col=None,
regressor_sign=None,
regressor_init_knot_loc=None,
regressor_init_knot_scale=None,
regressor_knot_scale=None,
regression_segments=5,
regression_knot_distance=None,
regression_knot_dates=None,
regression_rho=0.15,
# shared
degree_of_freedom=30,
date_freq=None,
# time-based coefficient priors
coef_prior_list=None,
flat_multiplier=True,
residuals_scale_upper=None,
ktrlite_optim_args=dict(),
**kwargs):
super().__init__(**kwargs) # create estimator in base class
# level configurations
self.level_knot_scale = level_knot_scale
self.level_segments = level_segments
self.level_knot_distance = level_knot_distance
self.level_knot_dates = level_knot_dates
self._level_knot_dates = self.level_knot_dates
self.level_knots = None
self._level_knots = None
self._kernel_level = None
self._num_knots_level = None
self.knots_tp_level = None
# seasonality configurations
self.seasonality = seasonality
self.seasonality_fs_order = seasonality_fs_order
self._seasonality = self.seasonality
# used to name different seasonal components in prediction
self._seasonality_labels = list()
self._seasonality_fs_order = self.seasonality_fs_order
self.seasonal_initial_knot_scale = seasonal_initial_knot_scale
self.seasonal_knot_scale = seasonal_knot_scale
self.seasonality_segments = seasonality_segments
self._seas_term = 0
self._seasonality_coef_knot_dates = None
self._seasonality_coef_knots = None
# regression configurations
self.regressor_col = regressor_col
self.regressor_sign = regressor_sign
self.regressor_init_knot_loc = regressor_init_knot_loc
self.regressor_init_knot_scale = regressor_init_knot_scale
self.regressor_knot_scale = regressor_knot_scale
self.regression_knot_distance = regression_knot_distance
self.regression_segments = regression_segments
self._regression_knot_dates = regression_knot_dates
self.regression_rho = regression_rho
self.flat_multiplier = flat_multiplier
# set private var to arg value
# if None set default in _set_default_args()
self._regressor_sign = self.regressor_sign
self._regressor_init_knot_loc = self.regressor_init_knot_loc
self._regressor_init_knot_scale = self.regressor_init_knot_scale
self._regressor_knot_scale = self.regressor_knot_scale
self.coef_prior_list = coef_prior_list
self._coef_prior_list = []
self._regression_knots_idx = None
self._num_of_regressors = 0
# positive regressors
self._num_of_positive_regressors = 0
self._positive_regressor_col = list()
self._positive_regressor_init_knot_loc = list()
self._positive_regressor_init_knot_scale = list()
self._positive_regressor_knot_scale_1d = list()
self._positive_regressor_knot_scale = list()
# negative regressors
self._num_of_negative_regressors = 0
self._negative_regressor_col = list()
self._negative_regressor_init_knot_loc = list()
self._negative_regressor_init_knot_scale = list()
self._negative_regressor_knot_scale_1d = list()
self._negative_regressor_knot_scale = list()
# regular regressors
self._num_of_regular_regressors = 0
self._regular_regressor_col = list()
self._regular_regressor_init_knot_loc = list()
self._regular_regressor_init_knot_scale = list()
self._regular_regressor_knot_scale_1d = list()
self._regular_regressor_knot_scale = list()
self._regressor_col = list()
# init dynamic data attributes
# the following are set by `_set_dynamic_attributes()` and generally set during fit()
# from input df
# response data
self._is_valid_response = None
self._which_valid_response = None
self._num_of_valid_response = 0
# regression data
self._knots_tp_coefficients = None
self._positive_regressor_matrix = None
self._negative_regressor_matrix = None
self._regular_regressor_matrix = None
# other configurations
self.date_freq = date_freq
self.degree_of_freedom = degree_of_freedom
self.residuals_scale_upper = residuals_scale_upper
self._residuals_scale_upper = residuals_scale_upper
self.ktrlite_optim_args = ktrlite_optim_args
self._set_static_attributes()
self._set_model_param_names()
def _set_model_param_names(self):
"""Overriding base template functions. Model parameters to extract"""
self._model_param_names += [param.value for param in BaseSamplingParameters]
if self._num_of_regressors > 0:
self._model_param_names += [param.value for param in RegressionSamplingParameters]
def _set_default_args(self):
"""Set default attributes for None"""
# default checks for seasonality and seasonality_fs_order will be conducted
# in ktrlite model and we will extract them from ktrlite model directly later
if self.coef_prior_list is not None:
self._coef_prior_list = deepcopy(self.coef_prior_list)
# if no regressors, end here #
if self.regressor_col is None:
# regardless of what args are set for these, if regressor_col is None
# these should all be empty lists
self._regressor_sign = list()
self._regressor_init_knot_loc = list()
self._regressor_init_knot_scale = list()
self._regressor_knot_scale = list()
return
def _validate_params_len(params, valid_length):
for p in params:
if p is not None and len(p) != valid_length:
raise IllegalArgument('Wrong dimension length in Regression Param Input')
# regressor defaults
num_of_regressors = len(self.regressor_col)
_validate_params_len([
self.regressor_sign, self.regressor_init_knot_loc,
self.regressor_init_knot_scale, self.regressor_knot_scale],
num_of_regressors
)
if self.regressor_sign is None:
self._regressor_sign = [DEFAULT_REGRESSOR_SIGN] * num_of_regressors
if self.regressor_init_knot_loc is None:
self._regressor_init_knot_loc = [DEFAULT_COEFFICIENTS_INIT_KNOT_LOC] * num_of_regressors
if self.regressor_init_knot_scale is None:
self._regressor_init_knot_scale = [DEFAULT_COEFFICIENTS_INIT_KNOT_SCALE] * num_of_regressors
if self.regressor_knot_scale is None:
self._regressor_knot_scale = [DEFAULT_COEFFICIENTS_KNOT_SCALE] * num_of_regressors
self._num_of_regressors = num_of_regressors
def _set_static_regression_attributes(self):
# if no regressors, end here
if self._num_of_regressors == 0:
return
for index, reg_sign in enumerate(self._regressor_sign):
if reg_sign == '+':
self._num_of_positive_regressors += 1
self._positive_regressor_col.append(self.regressor_col[index])
# used for 'pr_knot_loc' sampling in pyro
self._positive_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._positive_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'pr_knot' sampling in pyro
self._positive_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
elif reg_sign == '-':
self._num_of_negative_regressors += 1
self._negative_regressor_col.append(self.regressor_col[index])
# used for 'nr_knot_loc' sampling in pyro
self._negative_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._negative_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'nr_knot' sampling in pyro
self._negative_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
else:
self._num_of_regular_regressors += 1
self._regular_regressor_col.append(self.regressor_col[index])
# used for 'rr_knot_loc' sampling in pyro
self._regular_regressor_init_knot_loc.append(self._regressor_init_knot_loc[index])
self._regular_regressor_init_knot_scale.append(self._regressor_init_knot_scale[index])
# used for 'rr_knot' sampling in pyro
self._regular_regressor_knot_scale_1d.append(self._regressor_knot_scale[index])
# regular first, then positive, then negative
self._regressor_col = self._regular_regressor_col + self._positive_regressor_col + self._negative_regressor_col
# numpy conversion
self._positive_regressor_init_knot_loc = np.array(self._positive_regressor_init_knot_loc)
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_knot_scale_1d = np.array(self._positive_regressor_knot_scale_1d)
self._negative_regressor_init_knot_loc = np.array(self._negative_regressor_init_knot_loc)
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_knot_scale_1d = np.array(self._negative_regressor_knot_scale_1d)
self._regular_regressor_init_knot_loc = np.array(self._regular_regressor_init_knot_loc)
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_knot_scale_1d = np.array(self._regular_regressor_knot_scale_1d)
@staticmethod
def _validate_coef_prior(coef_prior_list):
for test_dict in coef_prior_list:
if set(test_dict.keys()) != set([
KTRTimePointPriorKeys.NAME.value,
KTRTimePointPriorKeys.PRIOR_START_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_END_TP_IDX.value,
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value
]):
raise IllegalArgument('wrong key name in inserted prior dict')
len_insert_prior = list()
for key, val in test_dict.items():
if key in [
KTRTimePointPriorKeys.PRIOR_MEAN.value,
KTRTimePointPriorKeys.PRIOR_SD.value,
KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value,
]:
len_insert_prior.append(len(val))
if not all(len_insert == len_insert_prior[0] for len_insert in len_insert_prior):
raise IllegalArgument('wrong dimension length in inserted prior dict')
# @staticmethod
# def _validate_level_knot_inputs(level_knot_dates, level_knots):
# if len(level_knots) != len(level_knot_dates):
# raise IllegalArgument('level_knots and level_knot_dates should have the same length')
def _set_coef_prior_idx(self):
if self._coef_prior_list and len(self._regressor_col) > 0:
for x in self._coef_prior_list:
prior_regressor_col_idx = [
np.where(np.array(self._regressor_col) == col)[0][0]
for col in x[KTRTimePointPriorKeys.PRIOR_REGRESSOR_COL.value]
]
x.update({'prior_regressor_col_idx': prior_regressor_col_idx})
def _set_static_attributes(self):
"""model data input based on args at instantiation or computed from args at instantiation"""
self._set_default_args()
self._set_static_regression_attributes()
# self._validate_level_knot_inputs(self.level_knot_dates, self.level_knots)
if self._coef_prior_list:
self._validate_coef_prior(self._coef_prior_list)
self._set_coef_prior_idx()
def _set_valid_response_attributes(self, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
response = training_meta[TrainingMetaKeys.RESPONSE.value]
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
if num_of_observations < max_seasonality:
raise ModelException(
"Number of observations {} is less than max seasonality {}".format(
num_of_observations, max_seasonality))
# get some reasonable offset to regularize response to make default priors scale-insensitive
if self._seasonality:
max_seasonality = np.round(np.max(self._seasonality)).astype(int)
self.response_offset = np.nanmean(response[:max_seasonality])
else:
self.response_offset = np.nanmean(response)
self.is_valid_response = ~np.isnan(response)
# [0] to convert tuple back to array
self.which_valid_response = np.where(self.is_valid_response)[0]
self.num_of_valid_response = len(self.which_valid_response)
def _set_regressor_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
# validate regression columns
if self.regressor_col is not None and \
not set(self.regressor_col).issubset(df.columns):
raise ModelException(
"DataFrame does not contain specified regressor column(s)."
)
# init of regression matrix depends on length of response vector
self._positive_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._negative_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
self._regular_regressor_matrix = np.zeros((num_of_observations, 0), dtype=np.double)
# update regression matrices
if self._num_of_positive_regressors > 0:
self._positive_regressor_matrix = df.filter(
items=self._positive_regressor_col, ).values
if self._num_of_negative_regressors > 0:
self._negative_regressor_matrix = df.filter(
items=self._negative_regressor_col, ).values
if self._num_of_regular_regressors > 0:
self._regular_regressor_matrix = df.filter(
items=self._regular_regressor_col, ).values
def _set_coefficients_kernel_matrix(self, df, training_meta):
"""Derive knots position and kernel matrix and other related meta data"""
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
# date_col = training_meta[TrainingMetaKeys.DATE_COL.value]
# placeholder
self._kernel_coefficients = np.zeros((num_of_observations, 0), dtype=np.double)
self._num_knots_coefficients = 0
if self._num_of_regressors > 0:
self._regression_knots_idx = get_knot_idx(
date_array=date_array,
num_of_obs=num_of_observations,
knot_dates=self._regression_knot_dates,
knot_distance=self.regression_knot_distance,
num_of_segments=self.regression_segments,
date_freq=self.date_freq,
)
tp = np.arange(1, num_of_observations + 1) / num_of_observations
self._knots_tp_coefficients = (1 + self._regression_knots_idx) / num_of_observations
self._kernel_coefficients = gauss_kernel(tp, self._knots_tp_coefficients, rho=self.regression_rho)
self._num_knots_coefficients = len(self._knots_tp_coefficients)
if self.date_freq is None:
self.date_freq = date_array.diff().min()
self._regression_knot_dates = get_knot_dates(date_array[0], self._regression_knots_idx, self.date_freq)
def _set_knots_scale_matrix(self, df, training_meta):
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
if self._num_of_positive_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_positive_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._positive_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._positive_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
# adjust knot scale with the multiplier derive by the average value and shift by 0.001 to avoid zeros in
# scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._positive_regressor_knot_scale has shape num_of_pr x num_of_knot
self._positive_regressor_knot_scale = (
multiplier * np.expand_dims(self._positive_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._positive_regressor_knot_scale[self._positive_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._positive_regressor_init_knot_scale = np.array(self._positive_regressor_init_knot_scale)
self._positive_regressor_init_knot_scale[self._positive_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_negative_regressors > 0:
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_negative_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._negative_regressor_matrix[str_idx:end_idx]), axis=0)
global_mean = np.expand_dims(np.mean(np.fabs(self._negative_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
# adjust knot scale with the multiplier derive by the average value and shift by 0.001 to avoid zeros in
# scale parameters
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
self._negative_regressor_knot_scale = (
multiplier * np.expand_dims(self._negative_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._negative_regressor_knot_scale[self._negative_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._negative_regressor_init_knot_scale = np.array(self._negative_regressor_init_knot_scale)
self._negative_regressor_init_knot_scale[self._negative_regressor_init_knot_scale < 1e-4] = 1e-4
if self._num_of_regular_regressors > 0:
# do the same for regular regressor
# calculate average local absolute volume for each segment
local_val = np.ones((self._num_of_regular_regressors, self._num_knots_coefficients))
if self.flat_multiplier:
multiplier = np.ones(local_val.shape)
else:
multiplier = np.ones(local_val.shape)
# store local value for the range on the left side since last knot
for idx in range(len(self._regression_knots_idx)):
if idx < len(self._regression_knots_idx) - 1:
str_idx = self._regression_knots_idx[idx]
end_idx = self._regression_knots_idx[idx + 1]
else:
str_idx = self._regression_knots_idx[idx]
end_idx = num_of_observations
local_val[:, idx] = np.mean(np.fabs(self._regular_regressor_matrix[str_idx:end_idx]), axis=0)
# adjust knot scale with the multiplier derive by the average value and shift by 0.001 to avoid zeros in
# scale parameters
global_mean = np.expand_dims(np.mean(np.fabs(self._regular_regressor_matrix), axis=0), -1)
test_flag = local_val < 0.01 * global_mean
multiplier[test_flag] = DEFAULT_LOWER_BOUND_SCALE_MULTIPLIER
# replace entire row of nan (when 0.1 * global_mean is equal to global_min) with upper bound
multiplier[np.isnan(multiplier).all(axis=-1)] = 1.0
# geometric drift i.e. 0.1 = 10% up-down in 1 s.d. prob.
# self._regular_regressor_knot_scale has shape num_of_pr x num_of_knot
self._regular_regressor_knot_scale = (
multiplier * np.expand_dims(self._regular_regressor_knot_scale_1d, -1)
)
# keep a lower bound of scale parameters
self._regular_regressor_knot_scale[self._regular_regressor_knot_scale < 1e-4] = 1e-4
# TODO: we change the type here, maybe we should change it earlier?
self._regular_regressor_init_knot_scale = np.array(self._regular_regressor_init_knot_scale)
self._regular_regressor_init_knot_scale[self._regular_regressor_init_knot_scale < 1e-4] = 1e-4
def _generate_tp(self, training_meta, prediction_date_array):
"""Used in _generate_seas"""
training_end = training_meta[TrainingMetaKeys.END.value]
num_of_observations = training_meta[TrainingMetaKeys.NUM_OF_OBS.value]
date_array = training_meta[TrainingMetaKeys.DATE_ARRAY.value]
prediction_start = prediction_date_array[0]
output_len = len(prediction_date_array)
if prediction_start > training_end:
start = num_of_observations
else:
start = pd.Index(date_array).get_loc(prediction_start)
new_tp = np.arange(start + 1, start + output_len + 1)
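# --- Illustrative sketch (not part of the original module) -------------------
# The kernel-knot mechanism used in `_set_coefficients_kernel_matrix` above:
# observation time points are scaled to (0, 1], a handful of knot locations are
# chosen on the same scale, and a Gaussian kernel turns the distance to each
# knot into a weight, so coefficients vary smoothly between knots. The kernel
# form below (unnormalized exp(-(t - k)^2 / (2 * rho^2))) is an assumption for
# illustration; orbit's own `gauss_kernel` may normalize or parametrize it
# differently.
demo_num_obs, demo_rho = 100, 0.15
demo_tp = np.arange(1, demo_num_obs + 1) / demo_num_obs          # normalized time points
demo_knots_tp = np.array([0.2, 0.5, 0.8])                        # toy knot locations
demo_kernel = np.exp(-(demo_tp[:, None] - demo_knots_tp[None, :]) ** 2 / (2 * demo_rho ** 2))
assert demo_kernel.shape == (demo_num_obs, len(demo_knots_tp))   # one weight column per knot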
"""
Unit tests for crystal class
"""
__author__ = '<NAME>'
import unittest
import numpy as np
import yaml
import onsager.crystal as crystal
class UnitCellTests(unittest.TestCase):
"""Tests to make sure incell and halfcell work as expected."""
def testincell(self):
"""In cell testing"""
a = np.array([4. / 3., -2. / 3., 19. / 9.])
b = np.array([1. / 3., 1. / 3., 1. / 9.])
self.assertTrue(np.allclose(crystal.incell(a), b))
def testhalfcell(self):
"""Half cell testing"""
a = np.array([4. / 3., -2. / 3., 17. / 9.])
b = np.array([1. / 3., 1. / 3., -1. / 9.])
self.assertTrue(np.allclose(crystal.inhalf(a), b))
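# --- Illustrative sketch (not part of the original test module) --------------
# What the two assertions above check, written directly with numpy: `incell`
# maps fractional coordinates into [0, 1) and `inhalf` maps them into
# [-0.5, 0.5); both are equivalent to subtracting the appropriate floor.
demo_a = np.array([4. / 3., -2. / 3., 19. / 9.])
assert np.allclose(demo_a - np.floor(demo_a), np.array([1. / 3., 1. / 3., 1. / 9.]))
demo_b = np.array([4. / 3., -2. / 3., 17. / 9.])
assert np.allclose(demo_b - np.floor(demo_b + 0.5), np.array([1. / 3., 1. / 3., -1. / 9.]))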
class GroupOperationTests(unittest.TestCase):
"""Tests for our group operations."""
def setUp(self):
self.rot = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
self.trans = np.zeros(3)
self.cartrot = np.array([[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
self.indexmap = ((0,),)
self.mirrorop = crystal.GroupOp(self.rot, self.trans, self.cartrot, self.indexmap)
self.ident = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((0,),))
def testEquality(self):
"""Can we check if two group operations are equal?"""
self.assertNotEqual(self.mirrorop, self.rot)
self.assertEqual(self.mirrorop.incell(), self.mirrorop)
# self.assertEqual(self.mirrorop.__hash__(), (self.mirrorop + np.array([1,0,0])).__hash__())
def testAddition(self):
"""Can we add a vector to our group operation and get a new one?"""
with self.assertRaises(TypeError):
self.mirrorop + 0
v1 = np.array([1, 0, 0])
newop = self.mirrorop + v1
mirroroptrans = crystal.GroupOp(self.rot, self.trans + v1, self.cartrot, self.indexmap)
self.assertEqual(newop, mirroroptrans)
self.assertTrue(np.allclose((self.ident - v1).trans, -v1))
def testMultiplication(self):
"""Does group operation multiplication work correctly?"""
self.assertEqual(self.mirrorop * self.mirrorop, self.ident)
v1 = np.array([1, 0, 0])
trans = self.ident + v1
self.assertEqual(trans * trans, self.ident + 2 * v1)
rot3 = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((1, 2, 0),))
ident3 = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((0, 1, 2),))
self.assertEqual(rot3 * rot3 * rot3, ident3)
def testInversion(self):
"""Is the product with the inverse equal to identity?"""
self.assertEqual(self.ident.inv(), self.ident)
self.assertEqual(self.mirrorop * (self.mirrorop.inv()), self.ident)
v1 = np.array([1, 0, 0])
trans = self.ident + v1
self.assertEqual(trans.inv(), self.ident - v1)
inversion = crystal.GroupOp(-np.eye(3, dtype=int), np.zeros(3), -np.eye(3), ((0,),))
self.assertEqual(inversion.inv(), inversion)
invtrans = inversion + v1
self.assertEqual(invtrans.inv(), invtrans)
def testHash(self):
"""Can we construct a frozenset? --requires __hash__"""
fr = frozenset([self.ident, self.mirrorop])
self.assertEqual(len(fr), 2)
def testGroupAnalysis(self):
"""If we determine the eigenvalues / vectors of a group operation, are they what we expect?"""
# This is entirely dictated by the cartrot part of a GroupOp, so we will only look at that
# identity
# rotation type: 1 = identity; 2..6 : 2- .. 6- fold rotation; negation includes a
# perpendicular mirror
# therefore: a single mirror is -1, and inversion is -2 (since 2-fold rotation + mirror = i)
rot = np.eye(3, dtype=int)
cartrot = np.eye(3)
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, 1) # should be the identity
self.assertTrue(np.allclose(eigenvect, np.eye(3)))
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 3) # should be a sphere
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 6) # should be 6 unique symmetric tensors
for t in tensorbasis:
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# inversion
rot = -np.eye(3, dtype=int)
cartrot = -np.eye(3)
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, -2)  # should be inversion
self.assertTrue(np.allclose(eigenvect, np.eye(3)))
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 0) # should be a point
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 6) # should be 6 unique symmetric tensors
for t in tensorbasis:
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# mirror through the y=x line: (x,y) -> (y,x)
rot = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
cartrot = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, -1)
self.assertTrue(np.isclose(abs(np.dot(eigenvect[0],
np.array([1 / np.sqrt(2), -1 / np.sqrt(2), 0]))), 1))
self.assertTrue(np.allclose(-eigenvect[0], np.dot(rot, eigenvect[0]))) # inverts
self.assertTrue(np.allclose(eigenvect[1], np.dot(rot, eigenvect[1]))) # leaves unchanged
self.assertTrue(np.allclose(eigenvect[2], np.dot(rot, eigenvect[2]))) # leaves unchanged
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 2) # should be a plane
self.assertTrue(np.allclose(basis[1], eigenvect[0]))
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 4) # should be 4 unique symmetric tensors
for t in tensorbasis:
# check symmetry, and remaining unchanged with operations
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
rott = np.dot(rot, np.dot(t, rot.T))
self.assertTrue(np.allclose(t, rott),
msg="\n{}\nis not unchanged with\n{}\n{}".format(t, rot, rott))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# three-fold rotation around the body-center
rot = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
cartrot = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertEqual(rottype, 3)
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertTrue(np.isclose(abs(np.dot(eigenvect[0],
np.array([1 / np.sqrt(3), 1 / np.sqrt(3), 1 / np.sqrt(3)]))), 1))
self.assertTrue(np.allclose(eigenvect[0], np.dot(rot, eigenvect[0]))) # our rotation axis
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 1) # should be a line
self.assertTrue(np.allclose(basis[1], eigenvect[0]))
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 2) # should be 2 unique symmetric tensors
for t in tensorbasis:
# check symmetry, and remaining unchanged with operations
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
rott = np.dot(rot, np.dot(t, rot.T))
self.assertTrue(np.allclose(t, rott),
msg="\n{}\nis not unchanged with\n{}\n{}".format(t, rot, rott))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
def testCombineVectorBasis(self):
"""Test our ability to combine a few vector basis choices"""
# these are all (d, vect) tuples that we work with
sphere = (3, np.zeros(3))
point = (0, np.zeros(3))
plane1 = (2, np.array([0., 0., 1.]))
plane2 = (2, np.array([1., 1., 1.]) / np.sqrt(3))
line1 = (1, np.array([1., 0., 0.]))
line2 = (1, np.array([0., 1., 0.]))
line3 = (1, np.array([1., -1., 0.]) / np.sqrt(2))
for t in [sphere, point, plane1, plane2, line1, line2, line3]:
self.assertEqual(crystal.CombineVectorBasis(t, t)[0], t[0])
res = crystal.CombineVectorBasis(line1, plane1)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line1[1])), 1))
res = crystal.CombineVectorBasis(plane1, plane2)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line3[1])), 1))
res = crystal.CombineVectorBasis(plane1, line1)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line1[1])), 1))
res = crystal.CombineVectorBasis(plane2, line1)
self.assertEqual(res[0], 0) # should be a point
res = crystal.CombineVectorBasis(line1, line2)
self.assertEqual(res[0], 0) # should be a point
def testCombineTensorBasis(self):
"""Test the intersection of tensor bases"""
fullbasis = crystal.SymmTensorBasis(1, np.eye(3)) # full basis (identity)
yzbasis = crystal.SymmTensorBasis(-1, np.eye(3)) # mirror through the x axis
xzbasis = crystal.SymmTensorBasis(-1, [np.array([0., 1., 0.]), np.array([0., 0., 1.]), np.array([1., 0., 0.])])
rotbasis = crystal.SymmTensorBasis(3, np.eye(3)) # 120 deg rot through the x axis
rotbasis2 = crystal.SymmTensorBasis(3, [np.array([0., 0., 1.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])])
for b in [fullbasis, yzbasis, xzbasis, rotbasis, rotbasis2]:
combbasis = crystal.CombineTensorBasis(fullbasis, b)
self.assertEqual(len(b), len(combbasis))
combbasis = crystal.CombineTensorBasis(b, fullbasis)
self.assertEqual(len(b), len(combbasis))
combbasis = crystal.CombineTensorBasis(yzbasis, rotbasis)
self.assertEqual(len(combbasis), len(crystal.CombineTensorBasis(rotbasis, yzbasis)))
self.assertEqual(len(combbasis), len(rotbasis)) # should be two left here
combbasis = crystal.CombineTensorBasis(rotbasis, rotbasis2)
self.assertEqual(len(combbasis), 1) # if there's only one, it has to be 1/sqrt(3).
self.assertAlmostEqual(1, abs(np.dot(combbasis[0].flatten(), np.eye(3).flatten() / np.sqrt(3))))
combbasis = crystal.CombineTensorBasis(yzbasis, xzbasis)
self.assertEqual(len(combbasis), 3)
class CrystalClassTests(unittest.TestCase):
"""Tests for the crystal class and symmetry analysis."""
def setUp(self):
self.a0 = 2.5
self.c_a = np.sqrt(8. / 3.)
self.sclatt = self.a0 * np.eye(3)
self.fcclatt = self.a0 * np.array([[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
self.bcclatt = self.a0 * np.array([[-0.5, 0.5, 0.5],
[0.5, -0.5, 0.5],
[0.5, 0.5, -0.5]])
self.hexlatt = self.a0 * np.array([[0.5, 0.5, 0],
[-np.sqrt(0.75), np.sqrt(0.75), 0],
[0, 0, self.c_a]])
self.basis = [np.array([0., 0., 0.])]
self.squarelatt = self.a0 * np.eye(2) # two-dimensional crystal
self.basis2d = [np.zeros(2)]
def isscMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0)
def isfccMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, 0.25 * a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, 0.5 * a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0.25 * a0 ** 2)
def isbccMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, 0.5 * a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, 0.75 * a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, -0.25 * a0 ** 2)
def ishexMetric(self, crys, a0=0, c_a=0):
if a0 == 0: a0 = self.a0
if c_a == 0: c_a = self.c_a
self.assertAlmostEqual(crys.volume, np.sqrt(0.75) * c_a * a0 ** 3)
self.assertAlmostEqual(crys.metric[0, 0], a0 ** 2)
self.assertAlmostEqual(crys.metric[1, 1], a0 ** 2)
self.assertAlmostEqual(crys.metric[0, 1], -0.5 * a0 ** 2)
self.assertAlmostEqual(crys.metric[2, 2], (c_a * a0) ** 2)
self.assertAlmostEqual(crys.metric[0, 2], 0)
self.assertAlmostEqual(crys.metric[1, 2], 0)
def issquareMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, a0 ** 2)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 3 == 0:
# diagonal element
self.assertAlmostEqual(a2, a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0)
def isspacegroup(self, crys):
"""Check that the space group obeys all group definitions: not fast."""
# 1. Contains the identity: O(group size)
identity = None
dim = crys.dim
for g in crys.G:
if np.all(g.rot == np.eye(dim, dtype=int)):
identity = g
self.assertTrue(np.allclose(g.trans, 0),
msg="Identity has bad translation: {}".format(g.trans))
for atommap in g.indexmap:
for i, j in enumerate(atommap):
self.assertTrue(i == j,
msg="Identity has bad indexmap: {}".format(g.indexmap))
self.assertTrue(identity is not None, msg="Missing identity")
# 2. Check for inverses: O(group size^2)
for g in crys.G:
inverse = g.inv().inhalf()
self.assertIn(inverse, crys.G,
msg="Missing inverse op:\n{}\n{}|{}^-1 =\n{}\n{}|{}".format(
g.rot, g.cartrot, g.trans,
inverse.rot, inverse.cartrot, inverse.trans))
# 3. Closed under multiplication: g.g': O(group size^3)
for g in crys.G:
for gp in crys.G:
product = (g * gp).inhalf()
self.assertIn(product, crys.G,
msg="Missing product op:\n{}\n{}|{} *\n{}\n{}|{} = \n{}\n{}|{}".format(
g.rot, g.cartrot, g.trans,
gp.rot, gp.cartrot, gp.trans,
product.rot, product.cartrot, product.trans))
def testscMetric(self):
"""Does the simple cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.sclatt, self.basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testfccMetric(self):
"""Does the face-centered cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.fcclatt, self.basis)
self.isfccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testbccMetric(self):
"""Does the body-centered cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.bcclatt, self.basis)
self.isbccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testsquareMetric(self):
"""Does the square lattice have the right volume and metric?"""
crys = crystal.Crystal(self.squarelatt, self.basis2d)
self.issquareMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscReduce(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]], dtype=int)
doublebasis = [self.basis[0], np.array([0.5, 0, 0]) + self.basis[0],
np.array([0, 0.5, 0]) + self.basis[0], np.array([0.5, 0.5, 0]) + self.basis[0]]
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), doublebasis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscReduce2(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[5, -3, 0], [1, -1, 3], [-2, 1, 1]], dtype=int)
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), self.basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testbccReduce2(self):
"""If we start with a supercell, does it get reduced back to our start?"""
basis = [[np.array([0., 0., 0.]), np.array([0.5, 0.5, 0.5])]]
crys = crystal.Crystal(self.sclatt, basis)
self.isbccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscShift(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[5, -3, 0], [1, -1, 3], [-2, 1, 1]], dtype=int)
basis = [np.array([0.33, -0.25, 0.45])]
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
self.assertTrue(np.allclose(crys.basis[0][0], np.array([0, 0, 0])))
def testsquareReduce(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[2, 0], [0, 2]], dtype=int)
doublebasis = [self.basis2d[0], np.array([0.5, 0]) + self.basis2d[0],
np.array([0, 0.5]) + self.basis2d[0], np.array([0.5, 0.5]) + self.basis2d[0]]
crys = crystal.Crystal(np.dot(self.squarelatt, nsuper), doublebasis)
self.issquareMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testhcp(self):
"""If we start with a supercell, does it get reduced back to our start?"""
basis = [np.array([0, 0, 0]), np.array([1. / 3., 2. / 3., 1. / 2.])]
crys = crystal.Crystal(self.hexlatt, basis)
self.ishexMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 2) # two atoms in the unit cell
# there needs to be [1/3,2/3,1/4] or [1/3,2/3,3/4], and then the opposite
# it's a little clunky; there's probably a better way to test this:
if np.any([np.allclose(u, np.array([1. / 3., 2. / 3., 0.25]))
for atomlist in crys.basis for u in atomlist]):
self.assertTrue(np.any([np.allclose(u, np.array([2. / 3., 1. / 3., 0.75]))
for atomlist in crys.basis for u in atomlist]))
elif np.any([np.allclose(u, np.array([1. / 3., 2. / 3., 0.75]))
for atomlist in crys.basis for u in atomlist]):
self.assertTrue(np.any([np.allclose(u, np.array([2. / 3., 1. / 3., 0.25]))
for atomlist in crys.basis for u in atomlist]))
else:
self.assertTrue(False, msg="HCP basis not correct")
self.assertEqual(len(crys.G), 24)
# for g in crys.G:
# print g.rot
# print g.cartrot, g.trans, g.indexmap
self.isspacegroup(crys)
self.assertEqual(len(crys.pointG[0][0]), 12)
self.assertEqual(len(crys.pointG[0][1]), 12)
def testLaGaO3(self):
"""Can we properly reduce down an LaGaO3 structure with errors in positions?"""
# this uses "real" DFT relaxation data to test the reduction capabilities
LaGa03latt = [np.array([ 7.88040734e+00, 5.87657472e-05, -1.95441808e-02]),
np.array([ -7.59206882e-05, 7.87786508e+00, 8.28811636e-05]),
np.array([ -1.95315626e-02, -5.74109318e-05, 7.88041614e+00])]
LaGaO3basis = [[np.array([ 2.02290790e-02, 2.32539034e-04, 9.91922251e-01]),
np.array([ 1.26313454e-02, 2.30601523e-04, 4.84327798e-01]),
np.array([ 0.97941805, 0.50023385, 0.01754055]),
np.array([ 0.98701667, 0.50023207, 0.52514002]),
np.array([ 5.12632654e-01, 2.30909936e-04, 9.84337122e-01]),
np.array([ 5.20224990e-01, 2.32577464e-04, 4.91932968e-01]),
np.array([ 0.48701525, 0.50023187, 0.02514135]),
np.array([ 0.47942077, 0.5002339 , 0.51754884])],
[np.array([ 0.24982273, 0.25023308, 0.25473045]),
np.array([ 0.24982282, 0.25023333, 0.75473148]),
np.array([ 0.249823 , 0.75023368, 0.25472946]),
np.array([ 0.24982247, 0.75023396, 0.75473027]),
np.array([ 0.74982257, 0.2502339 , 0.25473326]),
np.array([ 0.74982307, 0.25023197, 0.75473186]),
np.array([ 0.74982204, 0.75023295, 0.25473187]),
np.array([ 0.74982322, 0.75023469, 0.75473098])],
[np.array([ 0.28414742, 0.20916336, 0.00430709]),
np.array([ 0.0002463 , 0.20916015, 0.22041692]),
np.array([ 2.80317156e-01, 2.28151610e-04, 3.00655890e-01]),
np.array([ 0.21550181, 0.29129973, 0.50516544]),
np.array([ 0.99940227, 0.29128777, 0.78906602]),
np.array([ 2.03918412e-01, 2.36510236e-04, 7.24241274e-01]),
np.array([ 0.2841317 , 0.791303 , 0.00431445]),
np.array([ 2.54313708e-04, 7.91306290e-01, 2.20429168e-01]),
np.array([ 0.21933007, 0.50023581, 0.2088184 ]),
np.array([ 0.21551645, 0.70916116, 0.50515561]),
np.array([ 0.99939381, 0.7091728 , 0.78904879]),
np.array([ 0.29572872, 0.50022831, 0.78523308]),
np.array([ 0.71550064, 0.29129386, 0.00516782]),
np.array([ 0.4994013 , 0.29130198, 0.28906235]),
np.array([ 7.03903980e-01, 2.36323588e-04, 2.24257240e-01]),
np.array([ 0.78414767, 0.20916926, 0.50430849]),
np.array([ 0.50024549, 0.20917445, 0.72041481]),
np.array([ 7.80305988e-01, 2.27988377e-04, 8.00654063e-01]),
np.array([ 0.71551543, 0.7091663 , 0.0051578 ]),
np.array([ 0.49939281, 0.70915813, 0.28904503]),
np.array([ 0.79574297, 0.50022792, 0.28522595]),
np.array([ 0.78413198, 0.79129631, 0.50431609]),
np.array([ 0.50025359, 0.79129237, 0.72042732]),
np.array([ 0.71934128, 0.50023592, 0.70882833])]]
LaGaO3strict = crystal.Crystal(LaGa03latt, LaGaO3basis, ['La', 'Ga', 'O'],
threshold=1e-8)
LaGaO3toler = crystal.Crystal(LaGa03latt, LaGaO3basis, ['La', 'Ga', 'O'],
threshold=2e-5)
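        # With the strict threshold the numerical noise in the relaxed positions breaks the
        # symmetry (only the identity survives); the looser threshold also finds a pure
        # translation, which halves the basis and the unit-cell volume.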
self.assertEqual(len(LaGaO3strict.G), 1)
self.assertEqual(len(LaGaO3toler.G), 2)
self.assertEqual([len(ulist) for ulist in LaGaO3strict.basis],
[len(ulist) for ulist in LaGaO3basis])
self.assertEqual([2*len(ulist) for ulist in LaGaO3toler.basis],
[len(ulist) for ulist in LaGaO3basis])
self.assertAlmostEqual(LaGaO3strict.volume, 2*LaGaO3toler.volume)
def testscgroupops(self):
"""Do we have 48 space group operations?"""
crys = crystal.Crystal(self.sclatt, self.basis)
self.assertEqual(len(crys.G), 48)
self.isspacegroup(crys)
# for g in crys.G:
# print g.rot, g.trans, g.indexmap
# print g.cartrot, g.carttrans
def testfccpointgroup(self):
"""Test out that we generate point groups correctly"""
crys = crystal.Crystal(self.fcclatt, self.basis)
for g in crys.G:
self.assertIn(g, crys.pointG[0][0])
def testsquaregroupops(self):
"""Do we have 8 space group operations?"""
crys = crystal.Crystal(self.squarelatt, self.basis2d)
self.assertEqual(len(crys.G), 8)
self.isspacegroup(crys)
# for g in crys.G:
# print g.rot, g.trans, g.indexmap
# print g.cartrot, g.carttrans
def testomegagroupops(self):
"""Build the omega lattice; make sure the space group is correct"""
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
self.assertEqual(crys.N, 3)
self.assertEqual(crys.atomindices, [(0, 0), (0, 1), (0, 2)])
self.assertEqual(len(crys.G), 24)
self.isspacegroup(crys)
def testcartesianposition(self):
"""Do we correctly map out our atom position (lattice vector + indices) in cartesian coord.?"""
crys = crystal.Crystal(self.fcclatt, self.basis)
lattvect = np.array([2, -1, 3])
for ind in crys.atomindices:
b = crys.basis[ind[0]][ind[1]]
pos = crys.lattice[:, 0] * (lattvect[0] + b[0]) + \
crys.lattice[:, 1] * (lattvect[1] + b[1]) + \
crys.lattice[:, 2] * (lattvect[2] + b[2])
self.assertTrue(np.allclose(pos, crys.pos2cart(lattvect, ind)))
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
for ind in crys.atomindices:
b = crys.basis[ind[0]][ind[1]]
pos = crys.lattice[:, 0] * (lattvect[0] + b[0]) + \
crys.lattice[:, 1] * (lattvect[1] + b[1]) + \
crys.lattice[:, 2] * (lattvect[2] + b[2])
self.assertTrue(np.allclose(pos, crys.pos2cart(lattvect, ind)))
def testmaptrans(self):
"""Does our map translation operate correctly?"""
basis = [[np.array([0, 0, 0])]]
trans, indexmap = crystal.maptranslation(basis, basis)
self.assertTrue(np.allclose(trans, np.array([0, 0, 0])))
self.assertEqual(indexmap, ((0,),))
oldbasis = [[np.array([0.2, 0, 0])]]
newbasis = [[np.array([-0.2, 0, 0])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([0.4, 0, 0])))
self.assertEqual(indexmap, ((0,),))
oldbasis = [[np.array([0., 0., 0.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.]), np.array([-1. / 3., -2. / 3., -1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([1. / 3., -1. / 3., -1. / 2.])))
self.assertEqual(indexmap, ((1, 0),))
oldbasis = [[np.array([0., 0., 0.])],
[np.array([1. / 3., 2. / 3., 1. / 2.]), np.array([2. / 3., 1. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.])],
[np.array([2. / 3., 1. / 3., 1. / 2.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([0., 0., 0.])))
self.assertEqual(indexmap, ((0,), (1, 0)))
oldbasis = [[np.array([0., 0., 0.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.]), np.array([-1. / 4., -1. / 2., -1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertEqual(indexmap, None)
def testfccgroupops_directions(self):
"""Test out that we can apply group operations to directions"""
crys = crystal.Crystal(self.fcclatt, self.basis)
# 1. direction
direc = np.array([2., 0., 0.])
direc2 = np.dot(direc, direc)
count = np.zeros(3, dtype=int)
for g in crys.G:
rotdirec = crys.g_direc(g, direc)
self.assertAlmostEqual(np.dot(rotdirec, rotdirec), direc2)
costheta = np.dot(rotdirec, direc) / direc2
self.assertTrue(np.isclose(costheta, 1) or np.isclose(costheta, 0) or np.isclose(costheta, -1))
count[int(round(costheta + 1))] += 1
self.assertEqual(count[0], 8) ## antiparallel
self.assertEqual(count[1], 32) ## perpendicular
self.assertEqual(count[2], 8) ## parallel
def testomegagroupops_positions(self):
"""Test out that we can apply group operations to positions"""
# 2. position = lattice vector + 2-tuple atom-index
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
lattvec = np.array([-2, 3, 1])
for ind in crys.atomindices:
pos = crys.pos2cart(lattvec, ind)
for g in crys.G:
# testing g_pos: (transform an atomic position)
rotpos = crys.g_direc(g, pos)
self.assertTrue(np.allclose(rotpos,
crys.pos2cart(*crys.g_pos(g, lattvec, ind))))
# testing g_vect: (transform a vector position in the crystal)
rotlatt, rotind = crys.g_pos(g, lattvec, ind)
rotlatt2, u = crys.g_vect(g, lattvec, crys.basis[ind[0]][ind[1]])
self.assertTrue(np.allclose(rotpos, crys.unit2cart(rotlatt2, u)))
self.assertTrue(np.all(rotlatt == rotlatt2))
self.assertTrue(np.allclose(u, crys.basis[rotind[0]][rotind[1]]))
# test point group operations:
for g in crys.pointG[ind[0]][ind[1]]:
origin = np.zeros(3, dtype=int)
rotlatt, rotind = crys.g_pos(g, origin, ind)
self.assertTrue(np.all(rotlatt == origin))
self.assertEqual(rotind, ind)
def testinverspos(self):
"""Test the inverses of pos2cart and unit2cart"""
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
lattvec = np.array([-2, 3, 1])
for ind in crys.atomindices:
lattback, uback = crys.cart2unit(crys.pos2cart(lattvec, ind))
self.assertTrue(np.all(lattback == lattvec))
self.assertTrue(np.allclose(uback, crys.basis[ind[0]][ind[1]]))
lattback, indback = crys.cart2pos(crys.pos2cart(lattvec, ind))
self.assertTrue(np.all(lattback == lattvec))
self.assertEqual(indback, ind)
lattback, indback = crys.cart2pos(np.array([0.25 * self.a0, 0.25 * self.a0, 0.]))
self.assertIsNone(indback)
def testWyckoff(self):
"""Test grouping for Wyckoff positions"""
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
# crys.Wyckoff : frozen set of frozen sets of tuples that are all equivalent
Wyckoffind = {frozenset([(0, 0)]),
frozenset([(0, 1), (0, 2)])}
self.assertEqual(crys.Wyckoff, Wyckoffind)
# now ask it to generate the set of all equivalent points
for wyckset in crys.Wyckoff:
for ind in wyckset:
# construct our own Wyckoff set using cart2pos...
wyckset2 = crys.Wyckoffpos(crys.basis[ind[0]][ind[1]])
# test equality:
for i in wyckset:
self.assertTrue(np.any([np.allclose(crys.basis[i[0]][i[1]], u) for u in wyckset2]))
for u in wyckset2:
self.assertTrue(np.any([np.allclose(crys.basis[i[0]][i[1]], u) for i in wyckset]))
def testVectorBasis(self):
"""Test for the generation of a vector (and tensor) basis for sites in a crystal: oct. + tet."""
# start with HCP, then "build out" a lattice that includes interstitial sites
basis = [[np.array([1. / 3., 2. / 3., 0.25]),
np.array([2. / 3., 1. / 3., 0.75])]]
HCPcrys = crystal.Crystal(self.hexlatt, basis)
octset = HCPcrys.Wyckoffpos(np.array([0., 0., 0.5]))
tetset = HCPcrys.Wyckoffpos(np.array([1. / 3., 2. / 3., 0.5]))
self.assertEqual(len(octset), 2)
self.assertEqual(len(tetset), 4)
# now, build up HCP + interstitials (which are of a *different chemistry*)
HCP_intercrys = crystal.Crystal(self.hexlatt, basis + [octset + tetset])
for i in range(2):
vbas = HCP_intercrys.VectorBasis((1, i)) # for our octahedral site
self.assertEqual(vbas[0], 0) # should be a point
tbas = HCP_intercrys.SymmTensorBasis((1, i))
self.assertEqual(len(tbas), 2)
for t in tbas:
for tij in (t[i, j] for i in range(3) for j in range(3) if i != j):
self.assertAlmostEqual(0, tij)
self.assertAlmostEqual(t[0, 0], t[1, 1])
for i in range(2, 6):
vbas = HCP_intercrys.VectorBasis((1, i)) # for our tetrahedal sites
self.assertEqual(vbas[0], 1) # should be a line
self.assertEqual(vbas[1][0], 0) # pointing vertically up
self.assertEqual(vbas[1][1], 0) # pointing vertically up
tbas = HCP_intercrys.SymmTensorBasis((1, i))
self.assertEqual(len(tbas), 2)
for t in tbas:
for tij in (t[i, j] for i in range(3) for j in range(3) if i != j):
self.assertAlmostEqual(0, tij)
self.assertAlmostEqual(t[0, 0], t[1, 1])
def testJumpNetwork(self):
"""Test for the generation of our jump network between octahedral and tetrahedral sites."""
# start with HCP, then "build out" a lattice that includes interstitial sites
basis = [[np.array([1. / 3., 2. / 3., 0.25]),
np.array([2. / 3., 1. / 3., 0.75])]]
HCPcrys = crystal.Crystal(self.hexlatt, basis)
octset = HCPcrys.Wyckoffpos(np.array([0., 0., 0.5]))
tetset = HCPcrys.Wyckoffpos(np.array([1. / 3., 2. / 3., 0.625]))
self.assertEqual(len(octset), 2)
self.assertEqual(len(tetset), 4)
# now, build up HCP + interstitials (which are of a *different chemistry*)
HCP_intercrys = crystal.Crystal(self.hexlatt, basis + [octset + tetset])
jumpnetwork = HCP_intercrys.jumpnetwork(1, self.a0 * 0.8, 0.5 * self.a0) # tuned to avoid t->t in basal plane
self.assertEqual(len(jumpnetwork), 2) # should contain o->t/t->o and t->t networks
self.assertEqual(sorted(len(t) for t in jumpnetwork), [4, 24])
# print yaml.dump(jumpnetwork)
# for i, t in enumerate(jumpnetwork):
# print i, len(t)
# for ij, dx in t:
# print "{} -> {}: {}".format(ij[0], ij[1], dx)
def testNNfcc(self):
"""Test of the nearest neighbor construction"""
crys = crystal.Crystal(self.fcclatt, self.basis)
nnlist = crys.nnlist((0, 0), 0.9 * self.a0)
self.assertEqual(len(nnlist), 12)
for x in nnlist:
self.assertTrue(np.isclose(np.dot(x, x), 0.5 * self.a0 * self.a0))
class CrystalSpinTests(unittest.TestCase):
"""Tests for crystal class when spins are involved"""
longMessage = False
def setUp(self):
self.a0 = 1.0
self.latt = self.a0 * np.eye(3)
# RockSalt:
self.basis = [[np.array([0., 0., 0.]), np.array([0., 0.5, 0.5]),
np.array([0.5, 0., 0.5]), np.array([0.5, 0.5, 0.])],
[np.array([0., 0., 0.5]), np.array([0., 0.5, 0.]),
np.array([0.5, 0., 0.]), np.array([0.5, 0.5, 0.5])]]
self.spins = [[1, -1, -1, 1], [0, 0, 0, 0]]
def testUN(self):
"""Uranium-Nitride structure"""
crys = crystal.Crystal(self.latt, self.basis, ['U', 'N'], self.spins)
# print(crys)
self.assertTrue(crys is not None)
self.assertEqual(len(crys.basis), 2)
self.assertEqual(len(crys.basis[0]), 2)
self.assertEqual(len(crys.basis[1]), 2)
self.assertEqual(len(crys.Wyckoff), 2,
msg='Not matching Wyckoff?\n{}\n{}'.format(crys.Wyckoff, crys))
tetlatt = self.a0 * np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.01]])
tetcrys = crystal.Crystal(tetlatt, self.basis, ['U', 'N'])
# the tetragonal distortion has 1 U and 1 N (when there's no spin), so the group op list
# is twice as big, and includes translations for each
self.assertEqual(len(crys.G), 2 * len(tetcrys.G))
def testmaptrans(self):
"""Does our map translation operate correctly with spins?"""
basis = [[np.array([0, 0, 0])]]
spins = [[1]]
trans, indexmap = crystal.maptranslation(basis, basis, spins, spins)
self.assertTrue(np.allclose(trans, | np.array([0, 0, 0]) | numpy.array |
import random
import numpy as np
from logger import logger
class Model(object):
def __init__(self):
pass
class Kmean(Model):
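    """Simple k-means clustering model: random centroid initialization in [0, 255] followed
    by iterative nearest-centroid assignment."""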
def __init__(self, k):
super().__init__()
self.k = k
def _validate_data(self, X):
pass
def get_random_vector(self, dimension, low=0, high=255):
a = np.empty(dimension)
for i in range(dimension):
a[i] = random.randint(low, high)
return a
def dist(self, x, y):
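        # Euclidean (L2) distance between two vectors.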
return np.sqrt(
np.sum((np.array(x) - np.array(y)) * (np.array(x) - np.array(y)))
)
def cluster_image(self, X, num_iter=100):
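        # k-means over the rows of X: initialize self.k random centroids in [0, 255], then
        # repeat nearest-centroid assignment for num_iter iterations.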
self._validate_data(X)
X = np.array(X)
n, dimension = X.shape
        self.centroids = []
        for i in range(self.k):
            self.centroids.append(self.get_random_vector(dimension, 0, 255))
        self.centroids = np.array(self.centroids)
        for i in range(num_iter):
            dist_matrix = np.sqrt(
                np.sum(np.square(np.expand_dims(X, 1) - self.centroids),
axis=-1))
cluster_ids = | np.argmin(dist_matrix, axis=-1) | numpy.argmin |
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import numpy as np
from inference import FeatureExtractor
WEBFACE_ROOT = 'D:\\dev\\dataset\\CASIA-WebFace'
raw_register_lists = [line.strip() for line in open('data\\webface\\register.txt')]
raw_test_lists = [line.strip() for line in open('data\\webface\\register.txt')]
raw_register_label = [os.path.split(p)[0] for p in raw_register_lists]
raw_test_label = [os.path.split(p)[0] for p in raw_test_lists]
fe = FeatureExtractor()
register_features = [fe.get_feature(os.path.join(WEBFACE_ROOT, p)) for p in raw_register_lists]
register_features = np.vstack(register_features)
test_features = [fe.get_feature(os.path.join(WEBFACE_ROOT, p)) for p in raw_test_lists]
right = 0
error = 0
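# Nearest-neighbour matching: each test feature is given the label of the closest
# registered feature (smallest L2 distance).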
for test_feature, label in zip(test_features, raw_test_label):
index = np.argmin( | np.linalg.norm(test_feature - register_features) | numpy.linalg.norm |
import numpy as np
import unittest
from optpy import optimization
class TestFunctionWithApproxJacobian(unittest.TestCase):
def test_function(self):
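        # FunctionWithApproxJacobian wraps f and exposes a finite-difference Jacobian
        # (step size epsilon) through .jac().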
def f(x):
return np.sum(x**2)
func = optimization.FunctionWithApproxJacobian(f, epsilon=1e-8)
point = np.array([1.0, 2.0, 3.0])
self.assertEqual(func(point), 14.0)
np.testing.assert_allclose(func.jac(point), [[2.0, 4.0, 6.0]])
class TestParameterManager(unittest.TestCase):
def test_build_vector(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 3.0]), x3=4.0)
np.testing.assert_allclose(parameter_manager.build_vector(), [1.0, 2.0, 3.0])
np.testing.assert_allclose(parameter_manager.build_vector(x1=4), [4.0, 2.0, 3.0])
def test_extract_parameters(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 3.0]), x3=4.0)
x = np.array([5.0, 6.0, 7.0])
params = parameter_manager.extract_parameters(x)
np.testing.assert_allclose(params['x1'], 5.0)
np.testing.assert_allclose(params['x2'], [6.0, 7.0])
np.testing.assert_allclose(params['x3'], 4.0)
def test_build_vector_0d_array(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=np.array(1.0), x2=np.array([2.0, 3.0]), x3=4.0)
np.testing.assert_allclose(parameter_manager.build_vector(), [1.0, 2.0, 3.0])
np.testing.assert_allclose(parameter_manager.build_vector(x1=4), [4.0, 2.0, 3.0])
def test_extract_parameters_0d_array(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=np.array(1.0), x2=np.array([2.0, 3.0]), x3=4.0)
x = np.array([5.0, 6.0, 7.0])
params = parameter_manager.extract_parameters(x)
np.testing.assert_allclose(params['x1'], 5.0)
np.testing.assert_allclose(params['x2'], [6.0, 7.0])
np.testing.assert_allclose(params['x3'], 4.0)
class TestKeywordParameterManager(unittest.TestCase):
def test_build_vector(self):
parameter_manager = optimization.KeywordParameterManager({'x1': 1.0,
'x2': np.array([2.0, 3.0]),
'x3': 4.0},
['x1', 'x2'])
np.testing.assert_allclose(parameter_manager.build_vector(), [1.0, 2.0, 3.0])
np.testing.assert_allclose(parameter_manager.build_vector(x1=4), [4.0, 2.0, 3.0])
def test_extract_parameters(self):
parameter_manager = optimization.KeywordParameterManager({'x1': 1.0,
'x2': np.array([2.0, 3.0]),
'x3': 4.0},
['x1', 'x2'])
x = np.array([5.0, 6.0, 7.0])
params = parameter_manager.extract_parameters(x)
np.testing.assert_allclose(params['x1'], 5.0)
np.testing.assert_allclose(params['x2'], [6.0, 7.0])
np.testing.assert_allclose(params['x3'], 4.0)
def test_build_vector_0d_array(self):
parameter_manager = optimization.KeywordParameterManager({'x1': np.array(1.0),
'x2': np.array([2.0, 3.0]),
'x3': 4.0},
['x1', 'x2'])
np.testing.assert_allclose(parameter_manager.build_vector(), [1.0, 2.0, 3.0])
np.testing.assert_allclose(parameter_manager.build_vector(x1=4), [4.0, 2.0, 3.0])
def test_extract_parameters_0d_array(self):
parameter_manager = optimization.KeywordParameterManager({'x1': np.array(1.0),
'x2': np.array([2.0, 3.0]),
'x3': 4.0},
['x1', 'x2'])
x = np.array([5.0, 6.0, 7.0])
params = parameter_manager.extract_parameters(x)
np.testing.assert_allclose(params['x1'], 5.0)
np.testing.assert_allclose(params['x2'], [6.0, 7.0])
np.testing.assert_allclose(params['x3'], 4.0)
class TestWrapParameterManager(unittest.TestCase):
def test_wrap_parameter_manager(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return x1+2*x2.sum()+3*x3
new_f = optimization.wrap_parameter_manager(f, parameter_manager)
self.assertEqual(new_f(np.array([1, 2, 3])), 14)
def test_wrap_keyword_parameter_manager(self):
parameter_manager = optimization.KeywordParameterManager({'x1': 1.0,
'x2': np.array([2.0, 2.0]),
'x3': 1.0}, ['x1', 'x2'])
def f(x1, x2, x3):
return x1+2*x2.sum()+3*x3
new_f = optimization.wrap_parameter_manager(f, parameter_manager)
self.assertEqual(new_f(np.array([1, 2, 3])), 14)
class TestMinimize(unittest.TestCase):
method='SLSQP'
def test_simple(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, parameter_manager, method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_with_equality_constraint(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def constraint(x1, x2, x3):
return x1-1.0
constraints = [{'type': 'eq', 'fun': constraint}]
res = optimization.minimize(f, parameter_manager, method=self.method, constraints = constraints)
np.testing.assert_allclose(res.x1, 1.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_with_inequality_constraint(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 0.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def constraint(x1, x2, x3):
return x2.sum()-1
constraints = [{'type': 'ineq', 'fun': constraint}]
res = optimization.minimize(f, parameter_manager, method=self.method, constraints = constraints)
np.testing.assert_allclose(res.x1, 0.0, atol=1e-8)
np.testing.assert_allclose(res.x2, [0.5, 0.5])
np.testing.assert_allclose(res.x3, 1.0)
def test_bounds(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, parameter_manager, method=self.method,
bounds={'x1': [(0.5, 3.0)], 'x2': [(-2, 4), (-2, 10)]}, tol=1e-11)
np.testing.assert_allclose(res.x1, 0.5)
np.testing.assert_allclose(res.x2, [0.0, 0.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0)
def partial_bounds(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, parameter_manager, method=self.method,
bounds={'x2': [(-2, 4), (1, 10)]}, tol=1e-11)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 1.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0)
def test_args(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3, argument):
self.assertEqual(argument, 'argument')
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, parameter_manager, args= ('argument', ), method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_kwargs(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3, argument):
self.assertEqual(argument, 'argument')
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, parameter_manager,
kwargs={'argument': 'argument'}, method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_separate_jacobian(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def fprime(x1, x2, x3, optimize=None):
ret = []
if 'x1' in optimize:
ret.append(2*x1)
if 'x2' in optimize:
ret.append(2*x2)
if 'x3' in optimize:
ret.append(np.zeros_like(x3))
return ret
res = optimization.minimize(f, parameter_manager, jac = fprime, method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_combined_jacobian(self):
parameter_manager = optimization.ParameterManager(['x1', 'x2', 'x3'], ['x1', 'x2'],
x1=1.0, x2=np.array([2.0, 2.0]), x3=1.0)
def f(x1, x2, x3, optimize=None):
val = np.sum(x1**2)+np.sum(x2**2)
jacs = []
if 'x1' in optimize:
jacs.append(2*x1)
if 'x2' in optimize:
jacs.append(2*x2)
if 'x3' in optimize:
jacs.append(np.zeros_like(x3))
return val, jacs
res = optimization.minimize(f, parameter_manager, jac = True, method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
class TestMinimizeKeywordInterface(unittest.TestCase):
method='SLSQP'
def test_simple(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
method=self.method)
np.testing.assert_allclose(res.x1, 0.0, atol=1e-8)
np.testing.assert_allclose(res.x2, [0.0, 0.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0, atol=1e-8)
def test_with_equality_constraint(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def constraint(x1, x2, x3):
return x1-1.0
constraints = [{'type': 'eq', 'fun': constraint}]
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
method=self.method, constraints = constraints)
np.testing.assert_allclose(res.x1, 1.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0)
def test_with_inequality_constraint(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def constraint(x1, x2, x3):
return x2.sum()-1
constraints = [{'type': 'ineq', 'fun': constraint}]
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
method=self.method, constraints = constraints)
np.testing.assert_allclose(res.x1, 0.0, atol=1e-8)
np.testing.assert_allclose(res.x2, [0.5, 0.5])
np.testing.assert_allclose(res.x3, 1.0)
def test_bounds(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
method=self.method,
bounds={'x1': [(0.5, 3.0)], 'x2': [(-2, 4), (-2, 10)]}, tol=1e-11)
np.testing.assert_allclose(res.x1, 0.5)
np.testing.assert_allclose(res.x2, [0.0, 0.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0)
def partial_bounds(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
method=self.method,
bounds={'x2': [(-2, 4), (1, 10)]}, tol=1e-11)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 1.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0)
def test_args(self):
def f(x1, x2, x3, argument):
self.assertEqual(argument, 'argument')
return np.sum(x1**2)+np.sum(x2**2)
with self.assertRaises(ValueError) as cm:
optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
args= ('argument', ), method=self.method)
self.assertTrue(cm.exception.args[0].startswith('Keyword based'))
def test_kwargs(self):
def f(x1, x2, x3, argument):
self.assertEqual(argument, 'argument')
return np.sum(x1**2)+np.sum(x2**2)
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
kwargs={'argument': 'argument'}, method=self.method)
np.testing.assert_allclose(res.x1, 0.0, atol=1e-8)
np.testing.assert_allclose(res.x2, [0.0, 0.0], atol=1e-8)
np.testing.assert_allclose(res.x3, 1.0, atol=1e-8)
def test_separate_jacobian(self):
def f(x1, x2, x3):
return np.sum(x1**2)+np.sum(x2**2)
def fprime(x1, x2, x3, optimize=None):
ret = []
if 'x1' in optimize:
ret.append(2*x1)
if 'x2' in optimize:
ret.append(2*x2)
if 'x3' in optimize:
ret.append(np.zeros_like(x3))
return ret
res = optimization.minimize(f, {'x1': 1.0, 'x2': np.array([2.0, 2.0]), 'x3': 1.0}, optimize=['x1', 'x2'],
jac = fprime, method=self.method)
np.testing.assert_allclose(res.x1, 0.0)
np.testing.assert_allclose(res.x2, [0.0, 0.0])
np.testing.assert_allclose(res.x3, 1.0)
def test_combined_jacobian(self):
def f(x1, x2, x3, optimize=None):
val = np.sum(x1**2)+np.sum(x2**2)
jacs = []
if 'x1' in optimize:
jacs.append(2*x1)
if 'x2' in optimize:
jacs.append(2*x2)
if 'x3' in optimize:
jacs.append(np.zeros_like(x3))
return val, jacs
res = optimization.minimize(f, {'x1': 1.0, 'x2': | np.array([2.0, 2.0]) | numpy.array |
import numpy as _np
import xarray as _xr
import reprlib
class LLCtransformation:
""" A class containing the transformation of LLC grids"""
__slots__ = (
"ds",
"varlist",
"transformation",
"centered",
"faces",
"drop",
)
def __init__(
self,
ds,
varlist,
transformation,
centered,
faces,
drop,
):
self._ds = ds # xarray.DataSet
self._varlist = varlist # variables names to be transformed
self._transformation = transformation # str - type of transf
self._centered = centered # str - where to be centered
self._faces = faces # faces involved in transformation
self._drop = drop
@classmethod
def arctic_centered(
cls,
ds,
varlist,
centered='Atlantic',
faces='all',
drop=False,
):
""" Transforms the dataset by removing faces as a dimension, into a
new dataset centered at the arctic, while preserving the grid. This is
ideal for data at high latitude (Norther Hemisphere) and is limited to
range of latitudes within faces/tiles [2, 5, 6, 7, 10]
"""
Nx = len(ds['X'])
Ny = len(ds['Y'])
if isinstance(faces, str):
faces = _np.array([2, 5, 6, 7, 10])
if isinstance(faces, list) or isinstance(faces, _np.ndarray):
face = [fac for fac in faces if fac not in [2, 5, 6, 7, 10]]
if len(face) > 0:
print("Range of latitudes is beyond the scope of"
"this rearrangement of faces. Choosing only closest"
"to arctic cap.")
faces = _np.array([2, 5, 6, 7, 10])
if isinstance(varlist, str):
if varlist == 'all':
varlist = ds.data_vars
else:
varlist = [varlist]
tNx = _np.arange(0, 3 * Nx + 1, Nx)
tNy = _np.arange(0, 3 * Ny + 1, Ny)
chunksX, chunksY = make_chunks(tNx, tNy)
# Set ordered position wrt array layout, in accordance to location
# of faces
if centered == 'Atlantic':
ix = [1, 2, 1, 1, 0]
jy = [0, 1, 1, 2, 1]
nrot = _np.array([2])
Arot = _np.array([5, 6, 7])
Brot = _np.array([10])
Crot = _np.array([0])
elif centered == 'Pacific':
ix = [1, 0, 1, 1, 2]
jy = [2, 1, 1, 0, 1]
nrot = _np.array([10])
Arot = _np.array([])
Brot = _np.array([2])
Crot = _np.array([5, 6, 7])
elif centered == 'Arctic':
ix = [0, 1, 1, 2, 1]
jy = [1, 0, 1, 1, 2]
nrot = _np.array([6, 5, 7])
Arot = _np.array([10])
Brot = _np.array([])
Crot = _np.array([2])
else:
raise ValueError("Centering not supported")
psX = []
psY = []
for i in range(len(ix)):
psX.append(chunksX[ix[i]])
psY.append(chunksY[jy[i]])
dsnew = make_array(ds, 3 * Nx, 3 * Ny)
metrics = ['dxC', 'dyC', 'dxG', 'dyG']
dsnew = init_vars(ds, dsnew, varlist)
for varName in varlist:
vName = varName
DIM = [dim for dim in ds[varName].dims if dim != 'face'][::-1]
dims = Dims(DIM)
if len(ds[varName].dims) == 1:
dsnew[varName] = (dims._vars[::-1], ds[varName].data)
dsnew[varName].attrs = ds[varName].attrs
else:
for k in range(len(faces)):
fac = 1
xslice = slice(psX[k][0], psX[k][1])
yslice = slice(psY[k][0], psY[k][1])
arg = {dims.X: xslice, dims.Y: yslice}
data = ds[varName].isel(face=faces[k])
if faces[k] in nrot:
dsnew[varName].isel(**arg)[:] = data.values
else:
dtr = list(dims)[::-1]
dtr[-1], dtr[-2] = dtr[-2], dtr[-1]
if faces[k] in Crot:
sort_arg = {'variables': dims.X,
'ascending': False}
if len(dims.X) + len(dims.Y) == 4:
if 'mates' in list(ds[varName].attrs):
vName = ds[varName].attrs['mates']
data = ds[vName].isel(face=faces[k])
if len(dims.Y) == 3:
if vName not in metrics:
fac = -1
_DIMS = [dim for dim in ds[vName].dims if dim != 'face']
_dims = Dims(_DIMS[::-1])
sort_arg = {'variables': _dims.X,
'ascending': False}
elif faces[k] in Arot:
sort_arg = {'variables': dims.Y,
'ascending': False}
if len(dims.X) + len(dims.Y) == 4:
if 'mates' in list(ds[varName].attrs):
vName = ds[varName].attrs['mates']
data = ds[vName].isel(face=faces[k])
if len(dims.X) == 3:
if vName not in metrics:
fac = -1
_DIMS = [dim for dim in ds[vName].dims if dim != 'face']
_dims = Dims(_DIMS[::-1])
sort_arg = {'variables': _dims.Y,
'ascending': False}
elif faces[k] in Brot:
sort_arg = {'variables': [dims.X, dims.Y],
'ascending': False}
if len(dims.X) + len(dims.Y) == 4:
if vName not in metrics:
fac = -1
data = fac * data.sortby(**sort_arg)
if faces[k] in Brot:
dsnew[varName].isel(**arg)[:] = data.values
else:
dsnew[varName].isel(**arg).transpose(*dtr)[:] = data.values
if drop is True:
dsnew = drop_size(dsnew, 'arctic_centered')
return dsnew
@classmethod
def arctic_crown(
cls,
ds,
varlist,
centered,
faces,
drop=False,
):
""" Transforms the dataset in which faces appears as a dimension into
one with faces, with grids and variables sharing a common grid
orientation.
"""
Nx = len(ds['X'])
Ny = len(ds['Y'])
if centered not in ['Atlantic', 'Pacific']:
raise ValueError("Centering option not recognized. Options are"
"Atlantic or Pacific"
)
if isinstance(faces, str):
faces = _np.arange(13)
nrot_faces, Nx_nrot, Ny_nrot, rot_faces, Nx_rot, Ny_rot = face_connect(ds, faces)
if isinstance(varlist, list):
varName = varlist[0]
elif isinstance(varlist, str):
if varlist == 'all':
varlist = ds.data_vars
varName = 'Depth'
else:
varName = varlist
varlist = [varlist]
elif len(varlist) > 0:
varlist = list(varlist)
varName = 'XG'
elif len(varlist) == 0:
raise ValueError("Empty list of variables")
arc_faces, Nx_ac_nrot, Ny_ac_nrot, Nx_ac_rot, Ny_ac_rot, ARCT = arct_connect(ds, varName, faces)
acnrot_faces = [k for k in arc_faces if k in _np.array([2, 5])]
acrot_faces = [k for k in arc_faces if k in _np.array([7, 10])]
tNy_nrot, tNx_nrot = chunk_sizes(nrot_faces, [Nx], [Ny])
tNy_rot, tNx_rot = chunk_sizes(rot_faces, [Nx], [Ny], rotated=True)
delNX = 0
delNY = 0
if len(ARCT) > 0:
delNX = int(Nx / 2)
delNY = int(Ny / 2)
tNy_nrot = tNy_nrot + delNY
tNy_rot = tNy_rot + delNX
Nx_nrot = _np.arange(0, tNx_nrot + 1, Nx)
Ny_nrot = _np.arange(0, tNy_nrot + 1, Ny)
Ny_rot = _np.arange(0, tNy_rot + 1, Ny)
Nx_rot = _np.arange(0, tNx_rot + 1, Nx)
chunksX_nrot, chunksY_nrot = make_chunks(Nx_nrot, Ny_nrot)
chunksX_rot, chunksY_rot = make_chunks(Nx_rot, Ny_rot)
POSY_nrot, POSX_nrot, POSYarc_nrot, POSXarc_nrot = pos_chunks(nrot_faces, acnrot_faces, chunksY_nrot, chunksX_nrot)
POSY_rot, POSX_rot, POSYa_rot, POSXa_rot = pos_chunks(rot_faces, acrot_faces, chunksY_rot, chunksX_rot)
X0 = 0
Xr0 = 0
if centered == 'Atlantic':
X0 = tNx_rot
elif centered == 'Pacific':
Xr0 = tNx_nrot
NR_dsnew = make_array(ds, tNx_nrot, tNy_nrot, X0)
R_dsnew = make_array(ds, tNx_rot, tNy_rot, Xr0)
metrics = ['dxC', 'dyC', 'dxG', 'dyG']
NR_dsnew = init_vars(ds, NR_dsnew, varlist)
R_dsnew = init_vars(ds, R_dsnew, varlist)
for varName in varlist:
vName = varName
fac = 1
DIM = [dim for dim in ds[varName].dims if dim != 'face'][::-1]
dims = Dims(DIM)
if len(ds[varName].dims) == 1:
R_dsnew[varName] = (dims._vars[::-1], ds[varName].data)
NR_dsnew[varName] = (dims._vars[::-1], ds[varName].data)
NR_dsnew[varName].attrs = ds[varName].attrs
R_dsnew[varName].attrs = ds[varName].attrs
else:
if len(dims.X) + len(dims.Y) == 4: # vector fields
if 'mates' in list(ds[varName].attrs):
vName = ds[varName].attrs['mates']
if len(dims.X) == 1 and varName not in metrics:
fac = -1
arc_faces, Nx_ac_nrot, Ny_ac_nrot, Nx_ac_rot, Ny_ac_rot, ARCT = arct_connect(ds, varName, faces)
for k in range(len(nrot_faces)):
data = ds[varName].isel(face=nrot_faces[k]).values
xslice = slice(POSX_nrot[k][0], POSX_nrot[k][1])
yslice = slice(POSY_nrot[k][0], POSY_nrot[k][1])
arg = {dims.X: xslice, dims.Y: yslice}
NR_dsnew[varName].isel(**arg)[:] = data
for k in range(len(rot_faces)):
kk = len(rot_faces) - (k + 1)
xslice = slice(POSX_rot[k][0], POSX_rot[k][1])
if dims.Y == 'Yp1':
yslice = slice(POSY_rot[kk][0] + 1, POSY_rot[kk][1] + 1)
else:
yslice = slice(POSY_rot[kk][0], POSY_rot[kk][1])
data = fac * ds[vName].isel(face=rot_faces[k])
arg = {dims.Y: yslice, dims.X: xslice}
ndims = Dims(list(data.dims)[::-1])
dtr = list(ndims)[::-1]
dtr[-1], dtr[-2] = dtr[-2], dtr[-1]
sort_arg = {'variables': ndims.X, 'ascending': False}
data = data.sortby(**sort_arg).transpose(*dtr)
R_dsnew[varName].isel(**arg)[:] = data.values
for k in range(len(acnrot_faces)):
data = ARCT[k]
xslice = slice(POSXarc_nrot[k][0], POSXarc_nrot[k][1])
yslice = slice(POSYarc_nrot[k][0], POSYarc_nrot[k][1])
arg = {dims.X: xslice, dims.Y: yslice}
NR_dsnew[varName].isel(**arg)[:] = data.values
for k in range(len(acrot_faces)):
tk = len(acnrot_faces) + k
xslc = slice(POSXa_rot[k][0], POSXa_rot[k][1])
yslc = slice(POSYa_rot[k][0], POSYa_rot[k][1])
arg = {dims.Y: yslc, dims.X: xslc}
data = ARCT[tk]
if acrot_faces[k] == 7:
sort_arg = {'variables': ndims.X, 'ascending': False}
elif acrot_faces[k] == 10:
sort_arg = {'variables': dims.Y, 'ascending': False}
data = data.sortby(**sort_arg)
R_dsnew[varName].isel(**arg)[:] = data.values
if centered == 'Atlantic':
DS = R_dsnew.combine_first(NR_dsnew)
elif centered == 'Pacific':
DS = NR_dsnew.combine_first(R_dsnew)
DS = DS.reset_coords()
if drop is True:
DS = drop_size(DS, 'arctic_crown')
return DS
def make_chunks(Nx, Ny):
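    # Convert cumulative face offsets Nx/Ny into consecutive [start, stop] index pairs,
    # one pair per face chunk along each axis.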
chunksX = []
chunksY = []
for ii in range(len(Nx) - 1):
chunksX.append([Nx[ii], Nx[ii + 1]])
for jj in range(len(Ny) - 1):
chunksY.append([Ny[jj], Ny[jj + 1]])
return chunksX, chunksY
def make_array(ds, tNx, tNy, X0=0):
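    # Build an empty dataset whose X/Xp1 coordinates (offset by X0) and Y/Yp1 coordinates
    # span the combined facet; all other coordinates are copied over from the input dataset.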
crds = {
"X": (("X",), _np.arange(X0, X0 + tNx), {"axis": "X"}),
"Xp1": (("Xp1",), _np.arange(X0, X0 + tNx), {"axis": "X"}),
"Y": (("Y",), _np.arange(tNy), {"axis": "Y"}),
"Yp1": (("Yp1",), _np.arange(tNy), {"axis": "Y"}),
}
old_coords = ["X", "Xp1", "Y", "Yp1", "face"]
coords = [k for k in ds.coords if k not in old_coords]
for crd in coords:
array = ds.coords[crd].values
attrs = ds.coords[crd].attrs
crds = {**crds, **{crd: ((crd,), array, attrs)}}
dsnew = _xr.Dataset(coords=crds)
for dim in dsnew.dims:
dsnew[dim].attrs = ds[dim].attrs
return dsnew
def init_vars(ds, DSNEW, varlist):
""" initializes dataarray within dataset"""
for varName in varlist:
dims = Dims([dim for dim in ds[varName].dims if dim != 'face'][::-1])
if len(dims) == 1:
ncoords = {dims.X: DSNEW.coords[dims.X]}
elif len(dims) == 2:
ncoords = {dims.X: DSNEW.coords[dims.X],
dims.Y: DSNEW.coords[dims.Y]}
elif len(dims) == 3:
ncoords = {dims.X: DSNEW.coords[dims.X],
dims.Y: DSNEW.coords[dims.Y],
dims.Z: DSNEW.coords[dims.Z]}
elif len(dims) == 4:
ncoords = {dims.X: DSNEW.coords[dims.X],
dims.Y: DSNEW.coords[dims.Y],
dims.Z: DSNEW.coords[dims.Z],
dims.T: DSNEW.coords[dims.T]}
ds_new = _xr.DataArray(_np.nan, coords=ncoords, dims=dims._vars[::-1])
DSNEW[varName] = ds_new
DSNEW[varName].attrs = ds[varName].attrs
return DSNEW
def drop_size(ds, transformation='arctic_crown'):
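    # Rebuild the dataset with the last entry of the cell-center coordinates (X, Y) dropped,
    # slicing each variable's center dimensions to match while leaving the corner dimensions
    # (Xp1, Yp1) at full length.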
coords = {}
for crd in ds.coords:
if crd in ['X', 'Y']:
array = ds.coords[crd].values[0:-1]
else:
array = ds.coords[crd].values
attrs = ds.coords[crd].attrs
coords = {**coords, **{crd: ((crd,), array, attrs )}}
DS_final = _xr.Dataset(coords)
for dim in DS_final.dims:
DS_final[dim].attrs = ds[dim].attrs
for varName in ds.data_vars:
dims = Dims([dim for dim in ds[varName].dims][::-1])
if len(dims) == 1:
DS_final[varName] = ds[varName]
else:
if len(dims.X) + len(dims.Y) == 2:
arg = {dims.X: slice(0, -1), dims.Y: slice(0, -1)}
elif len(dims.X) + len(dims.Y) == 6:
arg = {}
elif len(dims.X) + len(dims.Y) == 4:
if len(dims.X) == 1:
arg = {dims.X: slice(0, -1)}
elif len(dims.Y) == 1:
arg = {dims.Y: slice(0, -1)}
DS_final[varName] = ds[varName].isel(**arg)
DS_final[varName].attrs = ds[varName].attrs
return DS_final
def pos_chunks(faces, arc_faces, chunksY, chunksX):
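    # Work out each face's chunk position along X and Y in the assembled mosaic, plus the
    # slice positions used for the Arctic-cap pieces.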
nrotA = [k for k in range(3)]
nrotB = [k for k in range(3, 6)]
nrot = nrotA + nrotB
rotA = [k for k in range(7, 10)]
rotB = [k for k in range(10, 13)]
rot = rotA + rotB
nrot_A = [k for k in faces if k in nrotA]
nrot_B = [k for k in faces if k in nrotB]
rot_A = [k for k in faces if k in rotA]
rot_B = [k for k in faces if k in rotB]
ny_nApos = len(nrot_A)
ny_nBpos = len(nrot_B)
ny_Apos = len(rot_A)
ny_Bpos = len(rot_B)
POSY = []
POSX = []
for k in faces:
if k in nrot:
if k in nrot_A:
xk = 0
yk = 0
if ny_nApos == 1:
yk = 0
elif ny_nApos == 2:
if k == nrot_A[0]:
yk = 0
else:
yk = 1
elif ny_nApos == 3:
if k == nrotA[0]:
yk = 0
elif k == nrotA[1]:
yk = 1
elif k == nrotA[2]:
yk = 2
elif k in nrot_B:
if ny_nApos > 0:
xk = 1
else:
xk = 0
if ny_nBpos == 1:
yk = 0
elif ny_nBpos == 2:
if k == nrot_B[0]:
yk = 0
else:
yk = 1
elif ny_nBpos == 3:
if k == nrotB[0]:
yk = 0
elif k == nrotB[1]:
yk = 1
elif k == nrotB[2]:
yk = 2
elif k in rot:
if k in rot_A:
xk = 0
yk = 0
if ny_Apos == 1:
yk = 0
elif ny_Apos == 2:
if k == rot_A[0]:
yk = 0
else:
yk = 1
elif ny_Apos == 3:
if k == rotA[0]:
yk = 0
elif k == rotA[1]:
yk = 1
elif k == rotA[2]:
yk = 2
elif k in rot_B:
if ny_Apos > 0:
xk = 1
else:
xk = 0
if ny_Bpos == 1:
yk = 0
elif ny_Bpos == 2:
if k == rot_B[0]:
yk = 0
else:
yk = 1
elif ny_Bpos == 3:
if k == rotB[0]:
yk = 0
elif k == rotB[1]:
yk = 1
elif k == rotB[2]:
yk = 2
else:
raise ValueError("face index not in LLC grid")
POSY.append(chunksY[yk])
POSX.append(chunksX[xk])
# This to create a new list with positions for Arctic cap slices
POSY_arc = []
POSX_arc = []
aface_nrot = [k for k in arc_faces if k in nrotA + nrotB]
aface_rot = [k for k in arc_faces if k in rotA + rotB]
if len(aface_rot) == 0:
if len(aface_nrot) > 0:
pos_r = chunksY[-1][-1]
pos_l = chunksY[-1][0]
if len(aface_nrot) == 1:
POSX_arc.append(chunksX[0])
POSY_arc.append([pos_r, int(pos_r + (pos_r - pos_l) / 2)])
elif len(aface_nrot) == 2:
for k in range(len(aface_nrot)):
POSX_arc.append(chunksX[k])
POSY_arc.append([pos_r, int(pos_r + (pos_r - pos_l) / 2)])
else:
pos_r = chunksY[-1][-1]
pos_l = chunksY[-1][0]
if len(aface_rot) == 1:
POSX_arc.append(chunksX[0])
POSY_arc.append([pos_r, int(pos_r + (pos_r - pos_l) / 2)])
else:
for k in range(len(aface_rot)):
POSX_arc.append(chunksX[k])
POSY_arc.append([pos_r, int(pos_r + (pos_r - pos_l) / 2)])
return POSY, POSX, POSY_arc, POSX_arc
def chunk_sizes(faces, Nx, Ny, rotated=False):
'''
Determines the total size of array that will connect all rotated or
non-rotated faces
'''
if rotated is False:
A_ref = _np.array([k for k in range(3)])
B_ref = _np.array([k for k in range(3, 6)])
elif rotated is True:
A_ref = _np.array([k for k in range(7, 10)])
B_ref = _np.array([k for k in range(10, 13)])
A_list = [k for k in faces if k in A_ref]
B_list = [k for k in faces if k in B_ref]
if len(A_list) == 0:
if len(B_list) > 0:
tNx = Nx[0]
if len(B_list) == 1:
tNy = Ny[0]
elif len(B_list) == 2:
if min(B_list) == B_ref[0] and max(B_list) == B_ref[-1]:
raise ValueError("These faces do not connect. Not"
"possible to create a single dataset"
"that minimizes nans.")
else:
tNy = len(B_list) * Ny[0]
else:
tNy = len(B_list) * Ny[0]
else:
tNx = 0
tNy = 0
print("Zero data in a facet survives the cutout.")
else:
if len(B_list) == 0:
tNx = Nx[0]
if len(A_list) == 1:
tNy = Ny[0]
elif len(A_list) == 2:
if min(A_list) == A_ref[0] and max(A_list) == A_ref[-1]:
raise ValueError("These faces do not connect. Not"
"possible to create a single datase"
"that minimizes nans")
tNy = 0
else:
tNy = len(A_list) * Ny[0]
else:
tNy = len(A_list) * Ny[0]
elif len(B_list) > 0:
tNx = 2 * Nx[0]
if len(B_list) == len(A_list):
if len(A_list) == 1:
iA = [_np.where(faces[nk] == A_ref)[0][0] for nk in range(len(faces)) if faces[nk] in A_ref]
iB = [_np.where(faces[nk] == B_ref)[0][0] for nk in range(len(faces)) if faces[nk] in B_ref]
if iA == iB:
tNy = Ny[0]
else:
tNy = 0
raise ValueError("faces do not connect within facet")
elif len(A_list) == 2:
if min(A_list) == A_ref[0] and max(A_list) == A_ref[-1]:
raise ValueError("faces do not connect within facet")
tNy = 0
else:
iA = [ | _np.where(faces[nk] == A_ref) | numpy.where |
import sys
sys.path.append("..")
import numpy as np
from env.grid_world import GridWorld
from algorithms.temporal_difference import qlearning
def test_qlearning():
np.random.seed(1)
# load the test data
vp = np.load("../data/test_data/test_qlearning.npy")
test_q = vp[:,0].reshape(-1,1)
test_pi = vp[:,1].reshape(-1,1)
# specify world parameters
num_rows = 4
num_cols = 4
obstacles = np.array([[1, 1], [2, 1], [1, 2]])
bad_states = | np.array([[3,0]]) | numpy.array |
import numpy as np
import pysim.handlers as handlers
from pysim.objects import Reader, Model, Antenna, Generator, Medium
import pysim.epcstd as std
import pysim.simulator as sim
KMPH_TO_MPS_MUL = 1.0 / 3.6
class Settings:
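    # Central collection of model parameters; the properties below derive RTcal/TRcal from
    # Tari when they are not set explicitly.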
# PIE settings
delim = 12.5e-6 # sec.
tari = 6.25e-6 # sec.
_rtcal = None # if None, rtcal_tari_mul is used
rtcal_tari_mul = 3.0 # used when _rtcal is None, 2.5 <= x <= 3.0
_trcal = None # if None, trcal_rtcal_mul is used
trcal_rtcal_mul = 2.5 # used when _trcal is None, 1.1 <= x <= 3.0
temp = std.TempRange.NOMINAL
def get_rtcal(self, tari):
return tari * self.rtcal_tari_mul
def get_trcal(self, rtcal):
return rtcal * self.trcal_rtcal_mul
@property
def rtcal(self):
return self._rtcal if self._rtcal is not None \
else self.get_rtcal(self.tari)
@rtcal.setter
def rtcal(self, value):
self._rtcal = value
@property
def trcal(self):
return self._trcal if self._trcal is not None \
else self.get_trcal(self.rtcal)
@trcal.setter
def trcal(self, value):
self._trcal = value
# Geometry and speed
speed = 60 * KMPH_TO_MPS_MUL # meters per second
initial_distance_to_reader = 15.0 # meters
travel_distance = 30.0 # meters
lanes_num = 1 # 1 or 2
lane_width = 3.5 # meters
reader_antenna_sides = ['front', 'back']
reader_antenna_height = 5.0 # meters (z-axis)
reader_antenna_offset = 1.0 # meters (x-axis)
tag_antenna_height = 0.5 # meters (z-axis)
tag_orientation = 'front' # 'front' or 'back'
update_interval = 0.01 # sec.
# Energy settings
reader_power = 31.5 # dBm
reader_antenna_gain = 6.0 # dB
reader_cable_loss = -2.0 # dB
tag_antenna_gain = 3.0 # dB
tag_modulation_loss = -12.0 # dB
tag_sensitivity = -18.0 # dBm
reader_noise = -80.0 # dBm
# Medium settings
ber_distribution = 'rayleigh' # 'rayleigh' or 'awgn'
    frequency = 860 * 1e6 # Hz (i.e. 860 MHz)
permittivity = 15.0 # for ground reflection
conductivity = 0.03 # for ground reflection
polarization_loss = -3.0 # dB
ground_reflection_type = 'reflection' # 'reflection' or 'const'
use_doppler = True
# Reader power control and antennas switching
reader_switch_power = True
reader_power_on_duration = 2.0 # sec.
reader_power_off_duration = 0.1 # sec.
reader_antenna_switching_interval = 0.1 # sec.
reader_always_start_with_first_antenna = False
reader_antenna_angle = np.pi / 4.0
# Inventory settings
read_tid_bank = True
read_tid_words_num = 4 # was: 32 (?!)
q = 2
tag_encoding = std.TagEncoding.M4
dr = std.DivideRatio.DR_8
sel = std.SelFlag.ALL
session = std.Session.S0
target = std.InventoryFlag.A
trext = True
target_strategy = "const"
rounds_per_target = 1
# Tag internal settings
epc_bitlen = 96
tid_bitlen = 64
s1_persistence = 2.0 # sec.
s2_persistence = 2.0 # sec.
s3_persistence = 2.0 # sec.
# Generator settings
generation_interval = (lambda: 1.0, )
max_tags_simulated = 5
# Statistics
collect_power_statistics = False
def get_power_control_mode(self, reader_switch_power=None):
x = reader_switch_power if reader_switch_power is not None \
else self.reader_switch_power
return Reader.PowerControlMode.PERIODIC if x else \
Reader.PowerControlMode.ALWAYS_ON
modelParams = Settings()
def simulate_tags(settings=None, speed=None, encoding=None, tari=None,
dr=None, trext=None, q=None, session=None,
target=None, target_strategy=None, rounds_per_target=None,
antenna_switching_interval=None, orientation=None,
reader_antenna_angle=None, sim_time_limit=None,
real_time_limit=None, log_level=sim.Logger.Level.INFO,
generation_const_interval=None, tags_num=None,
use_doppler=None, frequency=None):
settings = settings if settings is not None else modelParams
# 0) Building the model
model = Model()
model.max_tags_num = tags_num if tags_num is not None \
else settings.max_tags_simulated
model.update_interval = settings.update_interval
model.statistics.use_power_statistics = settings.collect_power_statistics
# 1) Building the reader
reader = Reader()
model.reader = reader
reader.tari = tari if tari is not None else settings.tari
reader.rtcal = settings.get_rtcal(reader.tari)
reader.trcal = settings.get_trcal(reader.rtcal)
reader.delim = settings.delim
reader.temp = settings.temp
reader.q = q if q is not None else settings.q
reader.session = session if session is not None else settings.session
reader.target = target if target is not None else settings.target
reader.sel = settings.sel
reader.dr = dr if dr is not None else settings.dr
reader.trext = trext if trext is not None else settings.trext
reader.tag_encoding = encoding if encoding is not None \
else settings.tag_encoding
reader.target_strategy = target_strategy if target_strategy is not None \
else settings.target_strategy
reader.rounds_per_target = \
rounds_per_target if rounds_per_target is not None \
else settings.rounds_per_target
reader.power_control_mode = settings.get_power_control_mode()
reader.max_power = settings.reader_power
reader.power_on_duration = settings.reader_power_on_duration
reader.power_off_duration = settings.reader_power_off_duration
reader.noise = settings.reader_noise
reader.read_tid_bank = settings.read_tid_bank
reader.read_tid_words_num = settings.read_tid_words_num
reader.always_start_with_first_antenna = \
settings.reader_always_start_with_first_antenna
reader.antenna_switch_interval = antenna_switching_interval \
if antenna_switching_interval is not None \
else settings.reader_antenna_switching_interval
# 2) Attaching antennas to reader
lane_centers = []
if settings.lanes_num == 1:
lane_centers.append(0.0)
elif settings.lanes_num == 2:
y = settings.lane_width / 2
lane_centers.append(-y)
lane_centers.append(y)
else:
raise ValueError("support only 1 or 2 lanes")
if reader_antenna_angle is None:
reader_antenna_angle = settings.reader_antenna_angle
for y in lane_centers:
for side in settings.reader_antenna_sides:
ant = Antenna()
if side == 'front':
x = settings.reader_antenna_offset
dx = np.sin(reader_antenna_angle)
elif side == 'back':
x = -1.0 * settings.reader_antenna_offset
dx = - | np.sin(reader_antenna_angle) | numpy.sin |
"""Some special pupropse layers for SSD."""
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
import numpy as np
import tensorflow as tf
class Normalize(Layer):
"""Normalization layer as described in ParseNet paper.
# Arguments
scale: Default feature scale.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
Same as input
# References
http://cs.unc.edu/~wliu/papers/parsenet.pdf
#TODO
Add possibility to have one scale for all features.
"""
def __init__(self, scale, **kwargs):
if K.image_data_format() == 'channels_last':
self.axis = 3
else:
self.axis = 1
self.scale = scale
super(Normalize, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
init_gamma = self.scale * np.ones(shape)
self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
self.trainable_weights = [self.gamma]
def call(self, x, mask=None):
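        # L2-normalize along the channel axis, then rescale by the learned per-channel gamma.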
output = K.l2_normalize(x, self.axis)
output *= self.gamma
return output
class PriorBox(Layer):
"""Generate the prior boxes of designated sizes and aspect ratios.
# Arguments
img_size: Size of the input image as tuple (w, h).
min_size: Minimum box size in pixels.
max_size: Maximum box size in pixels.
aspect_ratios: List of aspect ratios of boxes.
flip: Whether to consider reverse aspect ratios.
variances: List of variances for x, y, w, h.
clip: Whether to clip the prior's coordinates
such that they are within [0, 1].
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
# Output shape
3D tensor with shape:
(samples, num_boxes, 8)
# References
https://arxiv.org/abs/1512.02325
#TODO
Add possibility not to have variances.
Add Theano support
"""
def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,
flip=True, variances=[0.1], clip=True, **kwargs):
if K.image_data_format() == 'channels_last':
self.waxis = 2
self.haxis = 1
else:
self.waxis = 3
self.haxis = 2
self.img_size = img_size
if min_size <= 0:
raise Exception('min_size must be positive.')
self.min_size = min_size
self.max_size = max_size
self.aspect_ratios = [1.0]
if max_size:
if max_size < min_size:
raise Exception('max_size must be greater than min_size.')
self.aspect_ratios.append(1.0)
if aspect_ratios:
for ar in aspect_ratios:
if ar in self.aspect_ratios:
continue
self.aspect_ratios.append(ar)
if flip:
self.aspect_ratios.append(1.0 / ar)
self.variances = np.array(variances)
        self.clip = clip
super(PriorBox, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
num_priors_ = len(self.aspect_ratios)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
num_boxes = num_priors_ * layer_width * layer_height
return (input_shape[0], num_boxes, 8)
def call(self, x, mask=None):
if hasattr(x, '_keras_shape'):
input_shape = x._keras_shape
elif hasattr(K, 'int_shape'):
input_shape = K.int_shape(x)
layer_width = input_shape[self.waxis]
layer_height = input_shape[self.haxis]
img_width = self.img_size[0]
img_height = self.img_size[1]
# define prior boxes shapes
box_widths = []
box_heights = []
for ar in self.aspect_ratios:
if ar == 1 and len(box_widths) == 0:
box_widths.append(self.min_size)
box_heights.append(self.min_size)
elif ar == 1 and len(box_widths) > 0:
box_widths.append(np.sqrt(self.min_size * self.max_size))
box_heights.append(np.sqrt(self.min_size * self.max_size))
elif ar != 1:
box_widths.append(self.min_size * np.sqrt(ar))
box_heights.append(self.min_size / np.sqrt(ar))
box_widths = 0.5 * np.array(box_widths)
box_heights = 0.5 * np.array(box_heights)
# define centers of prior boxes
step_x = img_width / layer_width
step_y = img_height / layer_height
linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,
layer_width)
liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,
layer_height)
centers_x, centers_y = np.meshgrid(linx, liny)
centers_x = centers_x.reshape(-1, 1)
centers_y = centers_y.reshape(-1, 1)
# define xmin, ymin, xmax, ymax of prior boxes
num_priors_ = len(self.aspect_ratios)
prior_boxes = np.concatenate((centers_x, centers_y), axis=1)
prior_boxes = | np.tile(prior_boxes, (1, 2 * num_priors_)) | numpy.tile |
import sys,os
sys.path.append(os.path.dirname(__file__) + os.sep + '../')
import cv2
import numpy as np
import config.config as config
import time
# Camera distortion correction and rotation
import json  # camera calibration parameters are stored as JSON
file_name = '.\\config\\config.txt'
with open(file_name) as file_obj:
    temp_d = json.load(file_obj)  # returns list data; dicts are also supported
mtx = np.array(temp_d['mtx'])
dist = np.array(temp_d['dist'])
# print("读取参数: ", mtx, dist)
def get_camera_img(img):
img = cv2.undistort(img, mtx, dist, None, mtx)
img = np.rot90(img)
return img
# Perspective transform
def get_perspective_transform(img_src, points, is_show=False):
ps_dst = np.float32([[0,0],[0,899],[599, 0],[599,899]])
ps_src = np.float32(points)
mat_pers = cv2.getPerspectiveTransform(ps_src, ps_dst)
img_dst = cv2.warpPerspective(img_src, mat_pers, (600,900))
if is_show:
cv2.imshow('rst_image', img_dst)
return img_dst
# Input: perspective-transformed worksheet image (900, 600, 3)
# Output: image of the n-th equation (50, 150, 3)
def get_equation_n(img, num=0):
pos_x = 150 * (num // 18)
pos_y = 50 * (num % 18)
img_equation = img[pos_y:pos_y+50, pos_x:pos_x+150, :]
return img_equation
# Input: perspective-transformed worksheet image (900, 600, 3)
# Output: iterator over equation images (50, 150, 3)
def get_equation(img, num=0):
while num < 72: #18*4
pos_x = 150 * (num // 18)
pos_y = 50 * (num % 18)
img_equation = img[pos_y:pos_y+50, pos_x:pos_x+150, :]
num = num+1
yield img_equation
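# --- Hedged usage sketch (variable names are illustrative, not from this script):
# img_sheet = get_perspective_transform(frame, corner_points)  # warp to the 900x600 sheet
# for img_eq in get_equation(img_sheet):                       # 72 crops of shape (50, 150, 3)
#     recognize(img_eq)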
SCALSIZE = 1
LARGEST_NUMBER_OF_SYMBOLS = 50
# Read an image and convert it to a binary image; return the original color image and the binary image
def read_img_and_convert_to_binary(img):
    # read the image to be processed
original_img = cv2.resize(img, (600,200))
# print(original_img)
    # shrink the original resolution by a factor of SCALSIZE to reduce computational cost
original_img = cv2.resize(original_img,( | np.int(original_img.shape[1]/SCALSIZE) | numpy.int |
import numpy as np
import scipy.sparse as sp
from pyspark import RDD
from splearn.rdd import ArrayRDD, BlockRDD, DictRDD, SparseRDD, block
from splearn.utils.testing import (SplearnTestCase, assert_almost_equal,
assert_array_almost_equal,
assert_array_equal, assert_equal,
assert_is_instance,
assert_multiple_tuples_equal, assert_raises,
assert_true, assert_tuple_equal)
from splearn.utils.validation import check_rdd_dtype
class TestBlocking(SplearnTestCase):
def test_empty(self):
n_partitions = 3
empty_data = self.sc.parallelize([], n_partitions)
assert_raises(ValueError, block, empty_data)
def test_dtype(self):
n_partitions = 10
n_samples = 100
data = self.sc.parallelize(["lorem" for i in range(n_samples)],
n_partitions)
blocked_data = block(data, dtype=list)
assert_array_equal(["lorem"] * 10, blocked_data.first())
blocks = blocked_data.collect()
assert_equal(len(blocks), n_partitions)
assert_array_equal(["lorem"] * 10, blocks[-1])
assert_equal(sum(len(b) for b in blocks), n_samples)
n_partitions = 17
data = self.sc.parallelize([1 for i in range(n_samples)],
n_partitions)
blocked_data = block(data, dtype=tuple)
assert_array_equal(tuple([1] * (n_samples // n_partitions)),
blocked_data.first())
blocks = blocked_data.collect()
assert_equal(len(blocks), n_partitions)
assert_equal(sum(len(b) for b in blocks), n_samples)
def test_array(self):
n_partitions = 10
n_samples = 100
data = self.sc.parallelize([np.array([1]) for i in range(n_samples)],
n_partitions)
blocked_data = block(data)
assert_array_equal(np.ones((10, 1)), blocked_data.first())
blocks = blocked_data.collect()
assert_equal(len(blocks), n_partitions)
assert_array_equal(np.ones((10, 1)), blocks[-1])
assert_equal(sum(len(b) for b in blocks), n_samples)
n_partitions = 17
data = self.sc.parallelize([np.array([1]) for i in range(n_samples)],
n_partitions)
blocked_data = block(data)
assert_array_equal(np.ones((n_samples // n_partitions, 1)),
blocked_data.first())
blocks = blocked_data.collect()
assert_equal(len(blocks), n_partitions)
assert_equal(sum(len(b) for b in blocks), n_samples)
def test_array_bsize(self):
n_partitions = 10
n_samples = 107
data = self.sc.parallelize([ | np.array([1]) | numpy.array |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import inspect
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS,
FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, IGNORED_FUNCTIONS)
from astropy.utils.compat import (
NUMPY_LT_1_14, NUMPY_LT_1_15, NUMPY_LT_1_16, NUMPY_LT_1_18)
NO_ARRAY_FUNCTION = not ARRAY_FUNCTION_ENABLED
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
all_wrapped_functions = {name: f for name, f in np.__dict__.items()
if callable(f) and hasattr(f, '__wrapped__') and
(NUMPY_LT_1_15 or f is not np.printoptions)}
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
    # alen is deprecated in Numpy 1.18
if NUMPY_LT_1_18:
def test_alen(self):
assert np.alen(self.q) == 3
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="expand_dims used asarray in numpy <1.16")
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
@pytest.mark.xfail(NUMPY_LT_1_15,
reason="flip needs axis argument in numpy <1.15")
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# TODO: should we change the default for subok?
self.check(np.broadcast_to, (3, 3, 3), subok=True)
def test_broadcast_arrays(self):
# TODO: should we change the default for subok?
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
    @pytest.mark.skipif(NUMPY_LT_1_15,
                        reason="take_along_axis added in numpy 1.15")
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
    @pytest.mark.skipif(NUMPY_LT_1_15,
                        reason="put_along_axis added in numpy 1.15")
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
o = func(q_list, *args, **kwargs)
unit = q_list[0].unit
v_list = [q.to_value(unit) for q in q_list]
expected = func(v_list, *args, **kwargs) * unit
assert o.shape == expected.shape
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_stack(self):
self.check(np.stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_column_stack(self):
self.check(np.column_stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_hstack(self):
self.check(np.hstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vstack(self):
self.check(np.vstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dstack(self):
self.check(np.dstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_block(self):
self.check(np.block)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_insert(self):
# Unit of inserted values is ignored.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) * u.m
assert np.all(out == expected)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(NotImplementedError):
np.any(self.q)
def test_all(self):
with pytest.raises(NotImplementedError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(NotImplementedError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(NotImplementedError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="angle used asarray in numpy <1.16")
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tril(self):
self.check(np.tril)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_triu(self):
self.check(np.triu)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
| np.unwrap([1., 2.]*u.m) | numpy.unwrap |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from numpy.testing import assert_array_equal
from math import isclose
import mock
import GPy
import pytest
from emukit.quadrature.interfaces.base_gp import IBaseGaussianProcess
from emukit.quadrature.methods.vanilla_bq import VanillaBayesianQuadrature
from emukit.quadrature.kernels.bounds import BoxBounds
from emukit.quadrature.kernels.quadrature_kernels import QuadratureKernel
from emukit.quadrature.kernels import QuadratureRBFLebesgueMeasure
from emukit.core.continuous_parameter import ContinuousParameter
from emukit.model_wrappers.gpy_quadrature_wrappers import RBFGPy, BaseGaussianProcessGPy
REL_TOL = 1e-5
ABS_TOL = 1e-4
@pytest.fixture
def vanilla_bq():
X = np.array([[-1, 1], [0, 0], [-2, 0.1]])
Y = np.array([[1], [2], [3]])
D = X.shape[1]
integral_bounds = [(-1, 2), (-3, 3)]
gpy_model = GPy.models.GPRegression(X=X, Y=Y, kernel=GPy.kern.RBF(input_dim=D))
qrbf = QuadratureRBFLebesgueMeasure(RBFGPy(gpy_model.kern), integral_bounds=integral_bounds)
model = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
vanilla_bq = VanillaBayesianQuadrature(base_gp=model, X=X, Y=Y)
return vanilla_bq
def test_vanilla_bq_shapes(vanilla_bq):
Y = np.array([[1], [2], [3]])
x = np.array([[-1, 1], [0, 0], [-2, 0.1], [-3, 4]])
# integrate
res = vanilla_bq.integrate()
assert len(res) == 2
assert isinstance(res[0], float)
assert isinstance(res[1], float)
# transformations
assert vanilla_bq.transform(Y).shape == Y.shape
assert vanilla_bq.inverse_transform(Y).shape == Y.shape
# predictions base
res = vanilla_bq.predict_base(x)
assert len(res) == 4
for i in range(4):
assert res[i].shape == (x.shape[0], 1)
# predictions base full covariance
res = vanilla_bq.predict_base_with_full_covariance(x)
assert len(res) == 4
assert res[0].shape == (x.shape[0], 1)
assert res[1].shape == (x.shape[0], x.shape[0])
assert res[2].shape == (x.shape[0], 1)
assert res[3].shape == (x.shape[0], x.shape[0])
# predictions
res = vanilla_bq.predict(x)
assert len(res) == 2
assert res[0].shape == (x.shape[0], 1)
assert res[1].shape == (x.shape[0], 1)
# predictions full covariance
res = vanilla_bq.predict_with_full_covariance(x)
assert len(res) == 2
assert res[0].shape == (x.shape[0], 1)
assert res[1].shape == (x.shape[0], x.shape[0])
# predict gradients
res = vanilla_bq.get_prediction_gradients(x)
assert len(res) == 2
assert res[0].shape == (x.shape[0], x.shape[1])
assert res[1].shape == (x.shape[0], x.shape[1])
def test_vanilla_bq_transformations():
X = np.random.rand(5, 2)
Y = np.random.rand(5, 1)
mock_gp = mock.create_autospec(IBaseGaussianProcess)
method = VanillaBayesianQuadrature(base_gp=mock_gp, X=X, Y=Y)
# we can use equal comparison here because vanilla bq uses the identity transform. For non-trivial transforms
# with numerical errors use a closeness test instead.
assert_array_equal(method.inverse_transform(Y), Y)
assert_array_equal(method.transform(Y), Y)
assert_array_equal(method.inverse_transform(method.transform(Y)), Y)
assert_array_equal(method.transform(method.inverse_transform(Y)), Y)
def test_vanilla_bq_model():
X_train = np.random.rand(5, 2)
Y_train = | np.random.rand(5, 1) | numpy.random.rand |
import torch
import numpy as np
import torch.nn as nn
from transformers import AutoModel
class MultiHeadModelForMultiTasks(nn.Module):
def __init__(self,
base_model: str,
device: torch.device):
super().__init__()
self.base_model = AutoModel.from_pretrained(base_model, output_attentions=True)
self.heads = nn.ModuleDict()
self.criterions, self.metrics, self.masks = {}, {}, {}
self.to(device)
self.device = device
def setup(self, task):
out_dim = task.num_labels
hidden_dim = self.base_model.config.hidden_size
n_layers = self.base_model.config.num_hidden_layers
n_heads = self.base_model.config.num_attention_heads
self.heads[task.id] = nn.Linear(hidden_dim, out_dim).to(self.device)
self.masks[task.id] = torch.ones(n_layers, n_heads).to(self.device)
self.criterions[task.id] = nn.MSELoss() if out_dim==1 else nn.CrossEntropyLoss()
self.metrics[task.id] = task.metric
def forward(self, input_ids, attention_mask, token_type_ids, head_mask, task_id):
outputs = self.base_model(input_ids, attention_mask, token_type_ids,
head_mask=head_mask, return_dict=True)
pooler_output = outputs['pooler_output']
logits = self.heads[task_id](pooler_output)
outputs['task_logits'] = logits
return outputs
def step(self, batch, task_id, head_mask=None):
batch = {k: v.to(self.device) for k, v in batch.items()}
head_mask = self.masks[task_id] if head_mask is None else head_mask
outputs = self.forward(batch['input_ids'],
batch['attention_mask'],
batch['token_type_ids'],
head_mask, task_id)
logits = outputs['task_logits']
labels = batch.pop('labels')
loss = self.criterions[task_id](torch.squeeze(logits), labels)
logits = logits.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
preds = | np.argmax(logits, axis=1) | numpy.argmax |
import cv2 as cv
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Dense, UpSampling2D, Conv2D, Input, BatchNormalization, MaxPooling2D
from tensorflow.keras.optimizers import Adam
import utils
import numpy as np
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras
np.random.seed(123)
inputData = utils.GetInputData((100, 100))
imageSize = (100, 100, 3)
nevi = inputData[inputData['cellTypeId'] == 4]
inputDataWoNevi = inputData.drop(
inputData[inputData['cellTypeId'] == 4].iloc[:len(inputData)].index)
features = nevi.drop(columns=['cellTypeId'], axis=1)
target = nevi['cellTypeId']
xTrainSplit, xTest, yTrainSplit, yTest = train_test_split(
features, target, test_size=0.05, random_state=123)
xTrain, xValidate, yTrain, yValidate = train_test_split(
xTrainSplit, yTrainSplit, test_size=0.35, random_state=123)
xTrain = np.asarray(xTrain['image'].tolist())
xTest = np.asarray(xTest['image'].tolist())
xValidate = np.asarray(xValidate['image'].tolist())
inputDataWoNevi = np.asarray(inputDataWoNevi['image'].tolist())
# NORM
# xTrainMean = np.mean(xTrain)
# xTrainStd = np.std(xTrain)
# xTestMean = np.mean(xTest)
# xTestStd = np.std(xTest)
# xValMean = np.mean(xValidate)
# xValStd = np.std(xValidate)
# xTrain = (xTrain - xTrainMean)/xTrainStd
# xTest = (xTest - xTestMean)/xTestStd
# xValidate = (xValidate - xValMean)/xValStd
# NO NORM
xTrain = xTrain.astype('float32') / 255.
xTest = xTest.astype('float32') / 255.
xValidate = xValidate.astype('float32') / 255.
inputDataWoNevi = inputDataWoNevi.astype('float32') / 255.
newImgSize = (100, 100, 3)
xTrain = xTrain.reshape(xTrain.shape[0], *newImgSize)
xTest = xTest.reshape(xTest.shape[0], *imageSize)
xValidate = xValidate.reshape(xValidate.shape[0], *newImgSize)
inputDataWoNevi = inputDataWoNevi.reshape(
inputDataWoNevi.shape[0], *newImgSize)
epochs = 50
batch_size = 10
# autoencoder = load_model('models/autoEnc/autoencoder.h5', custom_objects={
autoencoder = load_model('models/autoEncWoNorm/autoencoderWoNorm.h5', custom_objects={
'CalculateF1Score': utils.CalculateF1Score})
# Model test predictions
goodImagesPred = autoencoder.predict(xTest)
badImagesPred = autoencoder.predict(inputDataWoNevi)
# reconstruction error
rec1 = | np.sum((goodImagesPred - xTest)**2, axis=(1, 2, 3)) | numpy.sum |
import numpy as np
import gym
from gym import spaces
# from attn_toy.env.fourrooms import Fourrooms as Fourrooms
from attn_toy.env.fourrooms import FourroomsNorender as Fourrooms
class ImageInputWarpper(gym.Wrapper):
def __init__(self, env, max_steps=100):
gym.Wrapper.__init__(self, env)
screen_height = self.env.obs_height
screen_width = self.env.obs_width
self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
# self.num_steps = 0
self.max_steps = max_steps
# self.state_space_capacity = self.env.state_space_capacity
self.mean_obs = None
def step(self, action):
state, reward, done, info = self.env.step(action)
# self.num_steps += 1
if self.num_steps >= self.max_steps:
done = True
obs = self.env.render(state)
# print("step reporting",done)
# if self.mean_obs is None:
# self.mean_obs = np.mean(obs)
# print("what is wrong?",self.mean_obs)
# obs = obs - 0.5871700112336601
# info['ori_obs'] = ori_obs
info['s_tp1'] = state
return obs, reward, done, info
def reset(self, state=-1):
if state < 0:
state = np.random.randint(0, self.state_space_capacity)
self.env.reset(state)
# self.num_steps = self.env.num_steps
obs = self.env.render(state)
# print("reset reporting")
# if self.mean_obs is None:
# self.mean_obs = np.mean(obs)
# print("what is wrong? reset",self.mean_obs)
# obs = obs - 0.5871700112336601
# info['ori_obs'] = ori_obs
return obs.astype(np.uint8)
class FourroomsDynamicNoise(Fourrooms): # noise type = dynamic relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * 3)
self.state_space_capacity = self.observation_space.n
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise, self).render(state)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise, self).step(action)
state += self.num_pos * (self.num_steps % 3)
return state, reward, done, info
def reset(self, state=-1):
obs = super(FourroomsDynamicNoise, self).reset(state % self.num_pos)
self.num_steps = state % 3
return state
class FourroomsDynamicNoise2(Fourrooms): # noise type = state relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise2, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * max_epilen)
self.state_space_capacity = self.num_pos * max_epilen
self.last_action = -1
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise2, self).step(action)
state += self.num_pos * self.num_steps
return state, reward, done, info
def reset(self, state=-1):
self.state = state
obs = super(FourroomsDynamicNoise2, self).reset(state % self.num_pos)
self.num_steps = state // self.num_pos
return state
def render(self, state=-1):
# which_background = self.num_steps % 3
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
obs = np.tile(self.color[self.num_steps + 1][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
# obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise2, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
class FourroomsDynamicNoise3(Fourrooms): # noise type = action relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise3, self).__init__(max_epilen, goal)
self.agent_color = np.tile(np.array([[1, 0, 0]]), (100, 1))
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * self.action_space.n)
self.state_space_capacity = self.observation_space.n
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# print(which_background, self.color[which_background])
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise3, self).render(state)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise3, self).step(action)
state += self.num_pos * action
# print("state in step",state)
return state, reward, done, info
def reset(self, state=-1):
obs = super(FourroomsDynamicNoise3, self).reset(state % self.num_pos)
self.num_steps = state // self.num_pos
return state
class FourroomsRandomNoise(Fourrooms): # noise type = random
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsRandomNoise, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
# self.background = np.random.randint(0, 255, (10, 1, 1, 3))
# self.background[:, :, :, 2] = 0
# self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.random_background = np.random.randint(0, 255, (100, obs_size, obs_size, 3))
# self.random_background[..., 2] = 100
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
# self.color[:, 2] = 100
self.rand_range = 100
self.observation_space = spaces.Discrete(self.num_pos * self.rand_range)
self.state_space_capacity = self.observation_space.n
self.which_background = -1
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
# obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
obs = self.random_background[which_background]
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsRandomNoise, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsRandomNoise, self).step(action)
self.which_background = np.random.randint(0, self.rand_range)
state += self.num_pos * self.which_background
return state, reward, done, info
def reset(self, state=-1):
self.which_background = np.random.randint(0, self.rand_range)
super(FourroomsRandomNoise, self).reset(state % self.num_pos)
return state
class FourroomsRandomNoisePos(FourroomsRandomNoise): # noise type = random
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
super(FourroomsRandomNoisePos, self).__init__(max_epilen, obs_size, seed, goal)
def render(self, state=-1):
obs = np.zeros((self.obs_height, self.obs_width, 3))
pos = state // self.num_pos
obs[pos * 12:pos * 12 + 12, :12] = self.color[pos]
arr = super(FourroomsRandomNoise, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
class FourroomsOptimalNoise(Fourrooms): # noise type = optimal action
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77, optimal_action=None):
np.random.seed(seed)
super(FourroomsOptimalNoise, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
# self.background = np.random.randint(0, 255, (10, 1, 1, 3))
# self.background[:, :, :, 2] = 0
# self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(0, 255, (200, 3))
# self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos)
self.state_space_capacity = self.num_pos
self.last_action = -1
self.optimal_action = optimal_action
def step(self, action):
state, reward, done, info = super(FourroomsOptimalNoise, self).step(action)
# state += self.num_pos * self.num_steps
return state, reward, done, info
def reset(self, state=-1):
# self.num_steps = state // self.num_pos
self.state = state
obs = super(FourroomsOptimalNoise, self).reset(state % self.num_pos)
return state
def render(self, state=-1):
# which_background = self.num_steps % 3
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
obs = np.tile(self.color[self.optimal_action[state]][np.newaxis, np.newaxis, :],
(self.obs_size, self.obs_size, 1))
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
# obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsOptimalNoise, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
class FourroomsOptimalNoisePos(FourroomsOptimalNoise): # noise type = optimal action
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77, optimal_action=None):
super(FourroomsOptimalNoisePos, self).__init__(max_epilen, obs_size, seed, goal, optimal_action)
def render(self, state=-1):
obs = | np.zeros((self.obs_height, self.obs_width, 3)) | numpy.zeros |
# -*- coding: utf-8 -*-
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Solves the power flow using a fast decoupled method.
"""
import warnings
from numpy import array, angle, exp, linalg, conj, r_, Inf, column_stack, real
from scipy.sparse.linalg import splu
from pandapower.pypower.makeSbus import makeSbus
from pandapower.pypower.makeB import makeB
def decoupledpf(Ybus, Sbus, V0, pv, pq, ppci, options):
"""Solves the power flow using a fast decoupled method.
Solves for bus voltages given the full system admittance matrix (for
all buses), the complex bus power injection vector (for all buses),
the initial vector of complex bus voltages, the FDPF matrices B prime
and B double prime, and column vectors with the lists of bus indices
for the swing bus, PV buses, and PQ buses, respectively. The bus voltage
vector contains the set point for generator (including ref bus)
buses, and the reference angle of the swing bus, as well as an initial
guess for remaining magnitudes and angles. C{ppopt} is a PYPOWER options
vector which can be used to set the termination tolerance, maximum
number of iterations, and output options (see L{ppoption} for details).
Uses default options if this parameter is not given. Returns the
final complex voltages, a flag which indicates whether it converged
or not, and the number of iterations performed.
@see: L{runpf}
@author: <NAME> (PSERC Cornell)
Modified to consider voltage_depend_loads
"""
    # map old algorithm option names to the new ones
pp2pypower_algo = {'fdbx': 2, 'fdxb': 3}
# options
tol = options["tolerance_mva"]
max_it = options["max_iteration"]
# No use currently for numba. TODO: Check if can be applied in Bp and Bpp
# numba = options["numba"]
# NOTE: options["algorithm"] is either 'fdbx' or 'fdxb'. Otherwise, error
algorithm = pp2pypower_algo[options["algorithm"]]
voltage_depend_loads = options["voltage_depend_loads"]
v_debug = options["v_debug"]
baseMVA = ppci["baseMVA"]
bus = ppci["bus"]
branch = ppci["branch"]
gen = ppci["gen"]
# initialize
i = 0
V = V0
Va = angle(V)
Vm = abs(V)
dVa, dVm = None, None
if v_debug:
Vm_it = Vm.copy()
Va_it = Va.copy()
else:
Vm_it = None
Va_it = None
# set up indexing for updating V
pvpq = r_[pv, pq]
# evaluate initial mismatch
P, Q = _evaluate_mis(Ybus, V, Sbus, pvpq, pq)
# check tolerance
converged = _check_for_convergence(P, Q, tol)
# create and reduce B matrices
Bp, Bpp = makeB(baseMVA, bus, real(branch), algorithm)
# splu requires a CSC matrix
Bp = Bp[array([pvpq]).T, pvpq].tocsc()
Bpp = Bpp[array([pq]).T, pq].tocsc()
# factor B matrices
Bp_solver = splu(Bp)
Bpp_solver = splu(Bpp)
# do P and Q iterations
while (not converged and i < max_it):
# update iteration counter
i = i + 1
# ----- do P iteration, update Va -----
dVa = -Bp_solver.solve(P)
# update voltage
Va[pvpq] = Va[pvpq] + dVa
V = Vm * | exp(1j * Va) | numpy.exp |
import pyclesperanto_prototype as cle
import numpy as np
def test_absolute():
test = cle.push(np.asarray([
[1, -1],
[1, -1]
]))
test2 = cle.create(test)
cle.absolute(test, test2)
print(test2)
a = cle.pull(test2)
assert (np.min(a) == 1)
assert (np.max(a) == 1)
assert ( | np.mean(a) | numpy.mean |
import unittest
import raocp.core.cones as core_cones
import numpy as np
class TestCones(unittest.TestCase):
__real = core_cones.Real()
__zero = core_cones.Zero()
__nonnegative_orthant = core_cones.NonnegativeOrthant()
__second_order_cone = core_cones.SecondOrderCone()
__cartesian = core_cones.Cartesian([__real, __zero, __nonnegative_orthant, __second_order_cone])
__num_samples = 100
__sample_multiplier = 10
__cone_dimension = 20
__num_test_repeats = 100
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
def test_dimension_check(self):
# cone size equals vector size
_ = core_cones._check_dimension("Real", 5, np.ones(5))
def test_dimension_check_failure(self):
# cone size does not equal vector size
with self.assertRaises(ValueError):
_ = core_cones._check_dimension("Real", 5, np.ones(6))
def test_real_project(self):
# create cone
cone_type = "Real"
real = TestCones.__real
# create point for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
# create points for test
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = np.random.randint(-100, 100, 20) # real samples
# test real cone
self.assertEqual(cone_type, type(real).__name__)
projection = real.project(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,)),
samples[i].reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,))) <= 0)
def test_real_project_dual(self):
# create cone
cone_type = "Real"
real = TestCones.__real
# create point for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension)) \
.reshape((TestCones.__cone_dimension, 1))
# create points for test
dual_samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
dual_samples[i] = np.zeros(TestCones.__cone_dimension) # real dual samples (zero)
# test real cone
self.assertEqual(cone_type, type(real).__name__)
projection_onto_dual = real.project_onto_dual(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,)),
dual_samples[i].reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,))) <= 0)
def test_zero_project(self):
# create cone
cone_type = "Zero"
zero = TestCones.__zero
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = np.zeros(TestCones.__cone_dimension) # zero samples
# test zero
self.assertEqual(cone_type, type(zero).__name__)
projection = zero.project(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,)),
samples[i].reshape((TestCones.__cone_dimension,))
- projection.reshape((TestCones.__cone_dimension,))) <= 0)
def test_zero_project_dual(self):
# create cone
cone_type = "Zero"
zero = TestCones.__zero
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
dual_samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
dual_samples[i] = np.random.randint(-100, 100, TestCones.__cone_dimension) # zero dual samples (real)
# test zero dual
self.assertEqual(cone_type, type(zero).__name__)
projection_onto_dual = zero.project_onto_dual(vector)
for i in range(TestCones.__num_samples):
self.assertTrue(np.inner(vector.reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,)),
dual_samples[i].reshape((TestCones.__cone_dimension,))
- projection_onto_dual.reshape((TestCones.__cone_dimension,))) <= 0)
def test_nonnegative_orthant_project(self):
# create cone
cone_type = "NonnegativeOrthant"
nonnegative_orthant = TestCones.__nonnegative_orthant
# create points for projection
vector = np.array(TestCones.__sample_multiplier * np.random.rand(TestCones.__cone_dimension))\
.reshape((TestCones.__cone_dimension, 1))
samples = [None] * TestCones.__num_samples
for i in range(TestCones.__num_samples):
samples[i] = | np.random.randint(0, 100, TestCones.__cone_dimension) | numpy.random.randint |
#!/usr/bin/python
"""
This module reads and writes the parameters of a Multi Gaussian Expansion model (Monnet et al.
1992, Emsellem et al. 1994). It can read and write MGE input ascii files and
computes a number of basic parameters for the corresponding models.
uptdated regularly and may still contains some obvious bugs. A stable version will
be available hopefully before the end of 2012.
For questions, please contact <NAME> at <EMAIL>
"""
"""
Importing the most important modules
This MGE module requires NUMPY and SCIPY
"""
import os
try:
import numpy as np
except ImportError:
raise Exception("numpy is required for pygme")
try:
from scipy import special
except ImportError:
raise Exception("scipy is required for pygme")
from numpy import asarray
from numpy import cos, sin, copy, sqrt, exp
from .rwcfor import floatMGE
from .mge_miscfunctions import print_msg
__version__ = '1.1.6 (22 Dec 2014)'
## Version 1.1.6 : EE - Fixed found2D
## Version 1.1.5 : EE - Fixed mcut input parameter
## Version 1.1.4 : EE - Fixed a typo on indices
## Version 1.1.3 : EE - Added BetaEps, M/L etc also in the 2D Gauss just in case
## Version 1.1.2 : EE - Changed imin,imax into ilist
## Version 1.1.1 : EE - Removed the condition for comp_Nparticles when reading an mge
## Version 1.1.0 : EE - Some serious cleanup in the naming of the variables
## Version 1.0.2 : EE - few minor changes including adding saveMGE
## Version 1.0.1 : EE - replaces ones to zeros in initialisation of GaussGroupNumber
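# --- Hedged usage sketch (the file name below is an illustrative assumption):
# mge = paramMGE(infilename="mymodel.mge")   # read an ascii MGE model
# print(mge.nGauss, mge.Dist)                # number of Gaussians, distance in Mpc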
############################################################################
# Class to define dynamical MGE parameters useful for calculation purposes #
############################################################################
class dynParamMGE():
"""
Class to add some parameters which are useful for dynamical routines
"""
def __init__(self, MGEmodel):
"""
Initialisation of the additional dynamical parameters
"""
if (MGEmodel._findGauss3D > 0):
self.Sig3Darc2_soft = MGEmodel.Sig3Darc**2 + MGEmodel.Softarc**2 # Sigma softened in arcsec
self.dSig3Darc2_soft = 2. * self.Sig3Darc2_soft
# Deriving some more numbers
self.Bij = np.zeros((MGEmodel.nGauss, MGEmodel.nGauss), floatMGE)
self.Bij_soft = np.zeros((MGEmodel.nGauss, MGEmodel.nGauss), floatMGE)
self.e2q2dSig3Darc2 = np.zeros(MGEmodel.nGauss, floatMGE)
self.e2q2Sig3Darc2 = np.zeros(MGEmodel.nGauss, floatMGE)
self.sqpi2s = sqrt(np.pi / 2.) / MGEmodel.qSig3Darc
self.qq2s2 = 4. * MGEmodel.QxZ2 * MGEmodel.Sig3Darc2
self.q2Sig3Darc2 = MGEmodel.QxZ2 * MGEmodel.Sig3Darc2
for i in range(MGEmodel.nGauss) :
if self.q2Sig3Darc2[i] != 0. :
self.e2q2dSig3Darc2[i] = MGEmodel.e2[i] / (2. * self.q2Sig3Darc2[i])
self.e2q2Sig3Darc2[i] = MGEmodel.e2[i] / self.q2Sig3Darc2[i]
else :
print("WARNING: %d component has q2*Sig2=0" %(i+1))
for j in range(MGEmodel.nGauss) :
self.Bij[i,j] = MGEmodel.e2[j] - self.q2Sig3Darc2[i] / MGEmodel.Sig3Darc2[j]
self.Bij_soft[i,j] = MGEmodel.e2[j] - self.q2Sig3Darc2[i] / self.Sig3Darc2_soft[j]
self.kRZ2 = MGEmodel.kRZ**2
self.mkRZ2q2 = 1. - self.kRZ2 * MGEmodel.QxZ2
self.mkRZ2 = 1. - self.kRZ2
self.Dij = np.zeros((MGEmodel.nGauss,MGEmodel.nGauss), floatMGE)
self.Dij_soft = np.zeros((MGEmodel.nGauss,MGEmodel.nGauss), floatMGE)
for i in range(MGEmodel.nGauss) :
for j in range(MGEmodel.nGauss) :
self.Dij[i,j] = self.mkRZ2[i] * self.Bij[i,j] + MGEmodel.e2[j] * self.kRZ2[i]
self.Dij_soft[i,j] = self.mkRZ2[i] * self.Bij_soft[i,j] + MGEmodel.e2[j] * self.kRZ2[i]
## ===========================================================================================
############################################################################
# Class to define photometric MGE parameters useful for calculation purposes #
############################################################################
class photParamMGE():
"""
Class to add some parameters which are useful for photometric routines
"""
def __init__(self, MGEmodel):
"""
Initialisation of the additional photometric parameters
These are hidden in this class
"""
if (MGEmodel._findGauss3D > 0):
self.dSig3Darc = sqrt(2.) * MGEmodel.Sig3Darc
self.dSig3Darc2 = 2. * MGEmodel.Sig3Darc2
self.qParc = MGEmodel.QxZ * MGEmodel.Parc
self.dqSig3Darc = sqrt(2.) * MGEmodel.qSig3Darc
## ===========================================================================================
class paramMGE(object) :
def __init__(self, infilename=None, saveMGE=None, indir=None, **kwargs) :
"""
Initialisation of the MGE model - reading the input file
infilename : input MGE ascii file defining the MGE model
indir: directory where to find the mge file
saveMGE: directory in which some MGE model will be saved automatically during the
realisation of the Nbody sample
If saveMGE is None (default), it will be defined as ~/MGE
This will be created by default (if not existing)
Additional Input (not required):
nTotalPart: total number of particles
nPartStar : number of Stellar particles
nPartHalo: number of Dark Matter particles
nPartGas : number of Gas particles
FirstRealisedPart : number for the first realised Particle
This is useful if we wish to realise the model in chunks
nMaxPart : Max number of particles to be realised for this run
mcut : cut in pc, Default is 50 000 (50 kpc)
Used for the Ellipsoid truncation
Rcut : cut in pc, Default is 50 000 (50 kpc)
Zcut : cut in pc, Default is 50 000 (50 kpc)
Used for the Cylindre truncation
FacBetaEps : Coefficient for : Beta = Coef * Epsilon
Default is Coef = 0.6
Can also be a vector (one for each Gaussian)
MaxFacBetaEps: maximum value allowed for FacBetaEps. Default is 0.8.
"""
## Now checking if saveMGE has been defined and act accordingly
if saveMGE is None :
## This is the default dir (~/MGE) if none is given
saveMGE = os.path.expanduser("~/MGE")
if not os.path.isdir(saveMGE) :
## Creating the default saveMGE directory
os.system("mkdir ~/MGE")
## Test now if this exists
if not os.path.isdir(saveMGE) :
print("ERROR: directory for Archival does not exist = %s"%(saveMGE))
return
## Finally save the value of saveMGE in the structure
self.saveMGE = saveMGE
## Setting up some fixed variable #####################################
## G is in (km/s)^2 . Msun^-1 . pc
## OLD VALUE WAS: self.Gorig = 0.0043225821
self.Gorig = floatMGE(0.0043225524) # value from <NAME>
self.nPart = int(kwargs.get("nTotalPart", 0))      # TOTAL number of particles
self.nPartStar = int(kwargs.get("nPartStar", 0))   # Number of Stellar particles
self.nPartHalo = int(kwargs.get("nPartHalo", 0))   # Number of Dark Matter particles
self.nPartGas = int(kwargs.get("nPartGas", 0))     # Number of Gas particles
self.Add_BHParticle = True # Add a BH if Mbh > 0 when realising particles
self.FirstRealisedPart = np.int(kwargs.get("FirstRealisedPart", 0)) # First Realised Particle
self.nMaxPart = np.int(kwargs.get("nMaxPart", 0)) # Max number of particles to be realised
self.Euler = np.array([0., 90., 0.]) # Inclination - Default is 90 degrees = edge-on
self.TruncationMethod = "Ellipsoid" # Default method to truncate Gaussians (other = Cylindre)
self.mcut = kwargs.get("Mcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Rcut = kwargs.get("Rcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Zcut = kwargs.get("Zcut", 50000.) # Default truncation in pc - Default is 50kpc
self.Mbh = 0. # Black hole mass
self.axi = 1
self.Nquad = 100 # Number of Points for the Quadrature, default is 100
self._findGauss3D = 0
self._findGauss2D = 0
self.FacBetaEps = kwargs.get("FacBetaEps", 0.6) # Coefficient for the BETAEPS option: Beta = Coef * Epsilon
self.MaxFacBetaEps = kwargs.get("MaxFacBetaEps", 0.8) # Max value the BETAEPS Factor
self.DummyFacBetaEps = 0.6
## Test if infilename is None. If this is the case reset MGE with 0 Gaussians
self.nGauss = self.nGroup = self.nDynComp = 0
self._reset(All=True)
if infilename is not None :
self.read_mge(infilename, indir=indir)
def _reset(self, **kwargs) :
"""
Reset values of the MGE model
Possible options:
nGauss
nGroup
nDynComp
Dist
Softening
infilename
pwd
All : will set all to None, or 0 (and Dist to 10 Mpc)
"""
AllReset = kwargs.get("All", False)
if AllReset :
for key in ["infilename", "pwd"] :
kwargs[key] = ""
for key in ["nGauss", "nGroup", "nDynComp"] :
kwargs[key] = 0
self._reset_Dist()
self._reset_Softening()
kwargs["Dist"] = self.Dist
kwargs["Softening"] = self.Softening
for key in kwargs :
if key == "nGauss" :
nGauss = kwargs.get("nGauss", None)
self._reset_nGauss(nGauss) # Set nGauss
elif key == "nGroup" :
nGroup = kwargs.get("nGroup", None)
self._reset_nGroup(nGroup) # Set nGroup
elif key == "Dist" :
Dist = kwargs.get("Dist", None)
self._reset_Dist(Dist) # Distance in Mpc - Default is 10 Mpc
elif key == "Softening" :
Softening = kwargs.get("Softening", None)
self._reset_Softening(Softening) # Set Softening
elif key == "nDynComp" :
self.nDynComp = kwargs.get("nDynComp", None)
elif key == "infilename" :
self.infilename = kwargs.get("infilename", None)
elif key == "pwd" :
self.pwd = kwargs.get("pwd", None)
def _reset_nGroup(self, nGroup=None) :
## nGroup Reset
if nGroup is not None :
self.nGroup = nGroup # Number of Groups
self.nPartGroup = np.zeros((self.nGroup,), int)  # Number of particles per Group
self.nRealisedPartGroup = np.zeros((self.nGroup,), int)  # Number of REALISED particles per Group
## =============================================================
def _reset_nGauss(self, nGauss=0, verbose=0) :
## nGauss reset
if nGauss is not None :
if np.size(nGauss) == 3 :
self.nStarGauss = int(nGauss[0])
self.nGasGauss = int(nGauss[1])
self.nHaloGauss = int(nGauss[2])
self.nGauss = self.nStarGauss + self.nGasGauss + self.nHaloGauss
elif np.size(nGauss) == 1 :
self.nGauss = nGauss # Number of Gaussians
self.nStarGauss = nGauss
self.nGasGauss = self.nHaloGauss = 0
else :
print_msg("With nGauss which should contain 1 or 3 integers", 2)
return
self._findGauss3D = 0
self._findGauss2D = 0
self.Imax2D = np.zeros((self.nGauss,), floatMGE) # In Lsun pc-2
self.Sig2Darc = np.zeros((self.nGauss,), floatMGE) # in arcsecond
self.Q2D = np.zeros((self.nGauss,), floatMGE)
self.PAp = np.zeros((self.nGauss,), floatMGE)
self.Imax3D = np.zeros((self.nGauss,), floatMGE) # In Lsun pc-2 arcsec-1
self.Sig3Darc = np.zeros((self.nGauss,), floatMGE) # in arcsecond
self.QxZ = np.zeros((self.nGauss,), floatMGE)
self.QyZ = np.zeros((self.nGauss,), floatMGE)
self.ML = np.ones((self.nGauss,), floatMGE)
self.kRTheta = np.ones((self.nGauss,), floatMGE) # sigma_R / sigma_Theta
self.kRZ = | np.ones((self.nGauss,), floatMGE) | numpy.ones |
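# A minimal standalone sketch of the per-Gaussian allocation pattern used in _reset_nGauss
# above: every quantity is a length-nGauss vector, zero-filled when it is read from the MGE
# file later and one-filled when 1 is a sensible default (M/L, anisotropy ratios).
# floatMGE is defined elsewhere in the original module; np.float32 is only an assumption here.
import numpy as np
floatMGE = np.float32
nGauss = 4
QxZ = np.zeros((nGauss,), floatMGE)   # axis ratios, filled when the MGE file is read
ML = np.ones((nGauss,), floatMGE)     # mass-to-light ratio defaults to 1 for each Gaussian
kRZ = np.ones((nGauss,), floatMGE)    # sigma_R / sigma_Z defaults to isotropic (1)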
import numpy as np
from pyqtree import Index
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import io
from fractal_compression import compress
from PIL import Image
import sys
sys.path.append('./')
from utils import load_im
max_depth = 6
bias = 0.0125
a = 1.0 # visual_complexity_weight
b = 0.4 # processing_complexity_left_weight
c = 0.2 # processing_complexity_right_weight
class Region:
def __init__(self, location, size, level):
self.location = location
self.size = size
self.level = level
def split4(im):
top_bottom = np.split(im, 2, axis=0)
top_lr = np.split(top_bottom[0], 2, axis=1)
bottom_lr = np.split(top_bottom[1], 2, axis=1)
return top_lr[0], top_lr[1], bottom_lr[0], bottom_lr[1]
def concat4(tl, tr, bl, br):
top = | np.concatenate((tl, tr), axis=1) | numpy.concatenate |
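# A quick standalone check of the quadtree split above: split4 and (a completed) concat4 are
# exact inverses for images with even height and width, which is what the recursive fractal
# compression relies on. The _concat4 helper below assumes concat4 simply mirrors split4 with
# a second horizontal concatenate and a final vertical one.
import numpy as np
def _concat4(tl, tr, bl, br):
    top = np.concatenate((tl, tr), axis=1)
    bottom = np.concatenate((bl, br), axis=1)
    return np.concatenate((top, bottom), axis=0)
im = np.arange(16, dtype=np.uint8).reshape(4, 4)
tl, tr, bl, br = split4(im)
assert np.array_equal(_concat4(tl, tr, bl, br), im)   # lossless round trip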
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp_ewc import MLPEWC
from mjrl.algos.npg_cg_ewc_mlp import NPGEWC
import numpy as np
import gym
import pickle
import torch
import os
SEED = 50 # initial value, 10 will be added for every iteration
job_name_ewc = 'results/metaworld_ewc_exp'
torch.set_num_threads(5)
# MTL policy
# ==================================
num_tasks = 10
num_seeds = 5
num_cpu = 5
env_dict = {
'reach-v1': 'sawyer_reach_push_pick_place:SawyerReachPushPickPlaceEnv',
'push-v1': 'sawyer_reach_push_pick_place:SawyerReachPushPickPlaceEnv',
'pick-place-v1': 'sawyer_reach_push_pick_place:SawyerReachPushPickPlaceEnv',
'door-v1': 'sawyer_door:SawyerDoorEnv',
'drawer-open-v1': 'sawyer_drawer_open:SawyerDrawerOpenEnv',
'drawer-close-v1': 'sawyer_drawer_close:SawyerDrawerCloseEnv',
'button-press-topdown-v1': 'sawyer_button_press_topdown:SawyerButtonPressTopdownEnv',
'peg-insert-side-v1': 'sawyer_peg_insertion_side:SawyerPegInsertionSideEnv',
'window-open-v1': 'sawyer_window_open:SawyerWindowOpenEnv',
'window-close-v1': 'sawyer_window_close:SawyerWindowCloseEnv',
}
e_unshuffled = {}
for task_id, (env_id, entry_point) in enumerate(env_dict.items()):
kwargs = {'obs_type': 'plain'}
if env_id == 'reach-v1':
kwargs['task_type'] = 'reach'
elif env_id == 'push-v1':
kwargs['task_type'] = 'push'
elif env_id == 'pick-place-v1':
kwargs['task_type'] = 'pick_place'
gym.envs.register(
id=env_id,
entry_point='metaworld.envs.mujoco.sawyer_xyz.' + entry_point,
max_episode_steps=150,
kwargs=kwargs
)
e_unshuffled[task_id] = GymEnv(env_id)
for i in range(num_seeds):
np.random.seed(SEED)
torch.manual_seed(SEED)
job_name_ewc_seed = job_name_ewc + '/seed_{}'.format(i)
e = {}
task_order = np.random.permutation(num_tasks)
for task_id in range(num_tasks):
e[task_id] = e_unshuffled[task_order[task_id]]
baseline_mtl = {}
forward_transfer_results = {}
for task_id in range(num_tasks):
iterdir = job_name_ewc_seed + '/iterations/task_{}/'.format(task_id)
f = open(iterdir + 'policy_199.pickle','rb')
policy_mtl = pickle.load(f)
f.close()
f = open(iterdir + 'baseline_199.pickle', 'rb')
baseline_mtl[task_id] = pickle.load(f)
f.close()
agent_mtl = NPGEWC(e, policy_mtl, baseline_mtl, ewc_lambda=1e-7, scaled_lambda=False, normalized_step_size=0.01, seed=SEED, save_logs=True)
mean_test_perf = agent_mtl.test_tasks(test_rollouts=10,
num_cpu=num_cpu,
task_ids= | np.array([task_id]) | numpy.array |
import time
import numpy as np
import matplotlib.pyplot as plt
from Utils import *
from PerformanceMeasure import *
from multiprocessing import Pool
def run(args):
x_0, r = args
X = []
x = x_0.copy()
X.append(x_0.copy())
# algorithm 1, line 2
for n in range(N):
# optional:
if (n + 1) % show_status_interval == 0 and show_status:
print('r:', r + 1, '\tn:', n + 1, '\tx:', x,
'\t{} % done / estimated time left {}s ...'.format(round(100 * (r * N + n + 1) / (N * R), 2), int(
(time.time() - start) / ((r * N + n + 1) / (N * R)) - time.time() + start)))
# algorithm 2, lines 1-3
if n == 0:
c_1 = {k: 0 for k in range(len(x))}
c_M = {k: 0 for k in range(len(x))}
# algorithm 2, lines 4-3
if (n + 1) % L == 0:
if use_adaption and (not use_burn_in or n < burn_in * N):
# slightly different than in paper ...
if show_adaption_info:
print('frequencies of center proposals:', [c_1[k] / L for k in range(len(c_1))])
print('frequencies of tail proposals:', [c_M[k] / L for k in range(len(c_M))])
P_n = np.maximum(.99 ** n, 1 / np.sqrt(n + 1))
for k in range(len(x)):
if np.random.rand() < P_n:
if c_1[k] > eta_1 * L:
delta[k] *= .5
delta_1[k] *= .5
if c_M[k] > eta_2 * L:
delta[k] *= 2
delta_1[k] *= 2
if show_adaption_info:
print('current delta/delta_1:', delta)
c_1 = {k: 0 for k in range(len(x))}
c_M = {k: 0 for k in range(len(x))}
# algorithm 1, line 3
for k in range(len(x)):
# algorithm 1, line 4
z = [rejection_sampling_trial(x[k], j, M, delta[k], delta_1[k], sigma, sigma_0, sigma_1) for j in range(M)]
# algorithm 1, line 5
w = [trial_weight(z[j], x, k, j, M, target_dist, delta[k], delta_1[k], sigma, sigma_0, sigma_1) for j in
range(M)]
# algorithm 1, line 6
y, index = draw_from_z_proportional_to_w(z, w)
# count for adaption:
if index == 0:
c_1[k] += 1
elif index == M - 1:
c_M[k] += 1
# algorithm 1, line 7
x_star = [rejection_sampling_trial(y, j, M, delta[k], delta_1[k], sigma, sigma_0, sigma_1) for j in
range(M - 1)]
x_star.append(x[k])
# algorithm 1, line 8
y_bold = x.copy()
y_bold[k] = y
num = np.sum(
[trial_weight(z[j], x, k, j, M, target_dist, delta[k], delta_1[k], sigma, sigma_0, sigma_1) for j in
range(M)])
den = np.sum(
[trial_weight(x_star[j], y_bold, k, j, M, target_dist, delta[k], delta_1[k], sigma, sigma_0, sigma_1)
for j in range(M)])
if den != 0:
alpha = np.minimum(1, num / den)
else:
alpha = 1
# algorithm 1, lines 9-13
if | np.random.rand() | numpy.random.rand |
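# The line above begins the usual Metropolis accept/reject step: the proposal y for
# coordinate k is accepted with probability alpha, otherwise x[k] is left unchanged.
# A standalone sketch with dummy values (x, y, k and alpha play the same roles as in the loop):
import numpy as np
x, y, k, alpha = [0.0], 0.3, 0, 0.8
if np.random.rand() < alpha:
    x[k] = y            # accept the multiple-try proposal for coordinate k
# else: reject and keep the current value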
import numpy as np
from numpy import inf
import time
import random
def GS_static(graph, eps, alpha, seed, method ) :
# initial distribution
N = graph.A.shape[0]
y = np.zeros([N,1]); y[seed,0] = 1;
# extract operator and coefficients
if method == 'PageRank':
rho = (1-alpha)
psi = alpha;
OP = graph.P.T
gt = graph.pr.T
deg = np.count_nonzero(OP, axis = 1)
elif method == 'GammaPageRank':
mu = (1-alpha)/alpha
psi = -10/(2*mu + 10)
rho = (2*mu)/(2*mu + 10)
OP = graph.Op_shift
gt = graph.gpr
deg = np.count_nonzero(OP, axis = 1)
# compute initial distribution (assuming p(0) = 0)
p = np.zeros([N,1]);
r = rho*y
it = 0;
flops = 0
#while np.linalg.norm(r, inf) > eps :
while (np.linalg.norm(p - gt, ord=2)/np.linalg.norm(gt,ord=2)) > eps :
it += 1;
ix_u = np.argmax(abs(r))
r_u = float(r[ix_u]);
p[ix_u] += r_u
r[ix_u] = 0;
r = r + np.expand_dims(psi*r_u*OP[:,ix_u],1)
# count the flops
flops_scaling = np.count_nonzero(OP[:,ix_u])
flops = flops + 2*flops_scaling + int(deg[ix_u])
print('---- Gauss Soutwell ----')
print('iter =', it)
print('flops =', flops)
print('err =', np.linalg.norm(p - gt,ord=2)/np.linalg.norm(gt,ord=2) )
return p, r, it, flops
def PI_static(graph, eps, alpha, seed, method) :
# initial distribution
N = graph.A.shape[0]
y = np.zeros([N,1]); y[seed,0] = 1;
# extract operator and coefficients
if method == 'PageRank':
rho = (1-alpha)
psi = alpha;
OP = graph.P.T
gt = graph.pr.T
elif method == 'GammaPageRank':
mu = (1-alpha)/alpha
psi = -10/(2*mu + 10)
rho = (2*mu)/(2*mu + 10)
OP = graph.Op_shift
gt = graph.gpr
# compute initial distribution
p = np.zeros([N,1]);
it = 0;
flops = 0
#for it in range(K):
while (np.linalg.norm(p - gt, 2)/np.linalg.norm(gt,ord=2)) > eps :
it += 1
# count flops
nnz = np.where(p > 0)[0]
flops_dotprod = np.count_nonzero(OP[:,nnz])
flops_scaling = np.count_nonzero(p)
flops_addition = np.count_nonzero(OP.dot(p))
flops = flops + flops_dotprod + flops_scaling + flops_addition
# next iteration in approx
p = rho*y + psi*OP.dot(p)
print('---- Power Iteration ----')
print('iter =', it)
print('flops =', flops)
print('err =', np.linalg.norm(p - gt, 2)/np.linalg.norm(gt,ord=2))
return p, it, flops
def CP_static(graph, eps, alpha, seed, method, mode) :
# initial distribution
N = graph.A.shape[0];
y = np.zeros([N,1]); y[seed,0] = 1
flops = 0
# Coefficient parameters
mu = (1-alpha)/alpha
theta = np.linspace(0,np.pi,50000+1)
step = theta[1] - theta[0]
# extract operator and coefficients
if method == 'PageRank':
Lap = (graph.D - graph.A).dot(graph.Dinv)
OP = Lap - np.eye(N)
gt = graph.pr.T
filt_arg = (np.cos(theta) + 1)
filt = np.divide(mu, mu + filt_arg)
elif method == 'GammaPageRank':
OP = graph.Op_shift
gt = graph.gpr
filt_arg = (10/2)*(np.cos(theta) + 1)
filt = np.divide(mu, mu + filt_arg)
# coefficients
tmp1 = np.multiply(np.cos(0*theta),filt*step); tmp1[0]=tmp1[0]/2; tmp1[-1]=tmp1[-1]/2;
tmp2 = np.multiply(np.cos(1*theta),filt*step); tmp2[0]=tmp2[0]/2; tmp2[-1]=tmp2[-1]/2;
coef1 = (2/np.pi)*np.sum(tmp1)
coef2 = (2/np.pi)*np.sum(tmp2)
# Polynomial elements
polyTerm_2back = np.array(y)
polyTerm_1back = np.array(OP).dot(y)
nnz = np.where(y != 0)[0]
flops = flops + np.count_nonzero(OP[:,nnz])
# Chebyshev approximation
Cheby_approximation_prev = 0.5*coef1*polyTerm_2back + coef2*polyTerm_1back;
Cheby_approximation_curr = np.array(Cheby_approximation_prev)
flops = flops + 2*np.count_nonzero(polyTerm_1back)
#for it in range(2, hops-1):
it = 2;
activeNodes = np.where(graph.clust_memb > 0)[0]
if mode == 'FixedError':
while (np.linalg.norm(Cheby_approximation_curr[activeNodes] - gt[activeNodes], ord=2)/np.linalg.norm(gt[activeNodes],ord=2)) > eps :
# Chebyshev coefficient
tmp = np.array(np.multiply(np.cos(it*theta),filt*step)); tmp[0]=tmp[0]/2; tmp[-1]=tmp[-1]/2;
coef_curr = (2/np.pi)*np.sum(tmp);
# Current polynomial term
polyTerm_curr = 2*(OP).dot(polyTerm_1back) - polyTerm_2back;
nnz = np.where(polyTerm_1back != 0)[0]
flops = flops + np.count_nonzero(OP[:,nnz]) + np.count_nonzero(OP.dot(polyTerm_1back)) + np.count_nonzero(polyTerm_1back) + np.count_nonzero(polyTerm_2back)
# Chebyshev approximation
Cheby_approximation_curr = np.array(Cheby_approximation_prev + coef_curr*polyTerm_curr);
flops = flops + 2*np.count_nonzero(polyTerm_curr)
# Update
polyTerm_2back = np.array(polyTerm_1back);
polyTerm_1back = np.array(polyTerm_curr);
Cheby_approximation_prev = np.array(Cheby_approximation_curr);
it += 1
elif mode == 'FixedFlops':
while flops < eps:
# Chebyshev coefficient
tmp = np.array(np.multiply( | np.cos(it*theta) | numpy.cos |
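# The coefficient lines above are a trapezoid-rule estimate of the Chebyshev coefficients
# c_k = (2/pi) * integral_0^pi cos(k*theta) * h(cos(theta) + 1) d(theta) for the PageRank
# filter h(lam) = mu / (mu + lam). A standalone version of the same quadrature, assuming the
# same 50001-point theta grid:
import numpy as np
mu = 0.5
theta = np.linspace(0, np.pi, 50001)
step = theta[1] - theta[0]
filt = mu / (mu + (np.cos(theta) + 1.0))
coefs = []
for k in range(3):
    tmp = np.cos(k * theta) * filt * step
    tmp[0] /= 2
    tmp[-1] /= 2
    coefs.append((2 / np.pi) * np.sum(tmp))   # c_0, c_1, c_2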
import numpy as np
from keras.utils import Sequence
class PairGenerator(Sequence):
def __init__(self, X, y, batch_size=32, shuffle=True, seed=None, amount_of_pairs=1.0, neg_pair_ratio=1.0):
"""Initialize a generator
Inputs:
X: Training spectra
y: Training labels
batch_size: Number of samples per batch (use smaller batch for limited GPUs)
shuffle: Shuffle the training data after each epoch
seed: Use a known seed for the rng
amount_of_pairs: Amount of pairs used from the maximum (between 0 and 1)
neg_pair_ratio: Negative-to-positive-pair ratio (between 0 and n_classes-1)
"""
# Initialize
self.dim = [batch_size] + [k for k in X.shape[1:]]
self.batch_size = batch_size
self.shuffle = shuffle
self.spectra = X
# Set seed if specified
if seed is not None:
self.rng = np.random.RandomState(seed)
else:
self.rng = np.random.RandomState()
self.get_pair_ids(y, amount_of_pairs=amount_of_pairs,
neg_pair_ratio=neg_pair_ratio)
self.on_epoch_end()
def get_pair_ids(self, y, amount_of_pairs, neg_pair_ratio):
"""Form training sample pair indices for generator usage"""
# Determine how many classes are used for pairs
y = y[:,np.nonzero( | np.sum(y, axis=0) | numpy.sum |
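# The expression above keeps only the label columns that actually occur in y, so that empty
# classes never produce pairs. The same idea, standalone:
import numpy as np
labels = np.array([[1, 0, 0], [0, 0, 1], [1, 0, 0]])   # one-hot labels; class 1 never occurs
used = np.nonzero(np.sum(labels, axis=0))[0]           # -> array([0, 2])
labels_used = labels[:, used]                          # shape (3, 2): the empty class is dropped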
from abc import ABC, abstractmethod
from enum import Enum, auto
import numpy as np
import scipy.stats as sts
from cvxopt import matrix, solvers
from einops import rearrange, repeat
from scipy.interpolate import interp1d
from ..geometry import hs_dst, if_dst, trace_dst
from ..mhmc import MHMC, normalized_update
from ..routines import _left_inv, _mat2vec, _matrix_to_real_tril_vec, _real_tril_vec_to_matrix, _vec2mat
from ..stats import l2_mean, l2_variance
from .polytopes.utils import count_confidence, count_delta
solvers.options["show_progress"] = False
class ConfidenceInterval(ABC):
"""Functor for obtaining confidence intervals."""
EPS = 1e-15
def __init__(self, tmg, **kwargs):
"""
Parameters
----------
tmg : StateTomograph or ProcessTomograph
Object with tomography flat_results
"""
self.tmg = tmg
if hasattr(tmg, "state"):
self.mode = Mode.STATE
elif hasattr(tmg, "channel"):
self.mode = Mode.CHANNEL
else:
raise ValueError("tmg must be a StateTomograph or a ProcessTomograph")
for name, value in kwargs.items():
setattr(self, name, value)
def __call__(self, conf_levels=None):
"""Return confidence interval.
Returns
-------
conf_levels : np.array
List of confidence levels.
"""
if conf_levels is None:
conf_levels = | np.linspace(1e-3, 1 - 1e-3, 1000) | numpy.linspace |
"""
Grid applications
-----------------
Functions to remap data given source and target grids
Some utilities use python tool xESMF.
Author: <NAME> (contributions from <NAME>)
Date: Jan 2019
"""
import numpy as np
import logging
def rotated_grid_transform(lons, lats, pole_lon, pole_lat, rot2reg=True):
# If lon/lat is 1D; create 2D meshgrid
lon, lat = np.meshgrid(lons, lats)\
if lats.ndim == 1 else (lons, lats)
lon = (lon*np.pi)/180 # Convert degrees to radians
lat = (lat*np.pi)/180
theta = 90 - pole_lat # Rotation around y-axis
phi = pole_lon + 180 # Rotation around z-axis
# Convert degrees to radians
theta = (theta*np.pi)/180
phi = (phi*np.pi)/180
# Convert from spherical to cartesian coordinates
x = np.cos(lon)*np.cos(lat)
y = np.sin(lon)*np.cos(lat)
z = np.sin(lat)
if rot2reg: # Rotated -> Regular
phi = -phi
theta = -theta
x_new = np.cos(theta)*np.cos(phi)*x + np.sin(phi)*y +\
np.sin(theta)*np.cos(phi)*z
y_new = -np.cos(theta)*np.sin(phi)*x + np.cos(phi)*y -\
np.sin(theta)*np.sin(phi)*z
z_new = -np.sin(theta)*x + np.cos(theta)*z
else: # Regular -> Rotated
x_new = np.cos(theta)*np.cos(phi)*x + np.cos(theta)*np.sin(phi)*y +\
np.sin(theta)*z
y_new = -np.sin(phi)*x + np.cos(phi)*y
z_new = -np.sin(theta)*np.cos(phi)*x - np.sin(theta)*np.sin(phi)*y +\
np.cos(theta)*z
# Convert cartesian back to spherical coordinates
lon_trans = np.arctan2(y_new, x_new)
lat_trans = np.arcsin(z_new)
# Convert radians back to degrees
lon_trans = (lon_trans*180)/np.pi
lat_trans = (lat_trans*180)/np.pi
return lon_trans, lat_trans
def fnCellCorners(rgrLon, rgrLat):
"""
File name: fnCellCorners
Author: <NAME>
E-mail: <EMAIL>
Date created: 20.03.2015
Date last modified: 20.03.2015
Estimate the cell boundaries from the cell location of regular grids
returns: rgrLonBND & rgrLatBND --> arrays of dimension [nlon,nlat]
containing the cell boundaries of each gridcell in rgrlon and rgrlat
"""
# from ipdb import set_trace as stop
logging.debug('fnCellCorners')
rgLonSize = np.array(rgrLon).shape
rgLatSize = np.array(rgrLat).shape
if len(rgLonSize) == 1:
rgrLat = np.broadcast_to(rgrLat, (rgLonSize[0],
rgLatSize[0])).swapaxes(0, 1)
rgrLon = np.broadcast_to(rgrLon, (rgLatSize[0], rgLonSize[0]))
rgiSize = np.array(rgrLon).shape
rgrLonBND = np.empty((rgiSize[0]+1, rgiSize[1]+1,))
rgrLonBND[:] = np.NAN
rgrLatBND = | np.empty((rgiSize[0]+1, rgiSize[1]+1,)) | numpy.empty |
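# A small round-trip check of rotated_grid_transform above: mapping a rotated-grid point to
# regular lon/lat and back with the same pole recovers the input, since the two branches apply
# a rotation matrix and its transpose. The pole position is an arbitrary example value.
import numpy as np
lon_r, lat_r = rotated_grid_transform(np.array([0.0]), np.array([0.0]),
                                      pole_lon=198.0, pole_lat=39.25, rot2reg=True)
lon_b, lat_b = rotated_grid_transform(lon_r, lat_r,
                                      pole_lon=198.0, pole_lat=39.25, rot2reg=False)
# lon_b, lat_b are (0, 0) again up to floating-point error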
import time
import numpy as np
import pandas as pd
import quadprog
import statsmodels.formula.api as smf
from numpy.linalg import LinAlgError
from scipy.stats import skewnorm
def jackknife_averaging(data, subset):
"""
calulates the averaged coefficients across several linear regression models
according to the Jackknife Model Averaging (<NAME> (2012)).
Parameters
----------
data : pd.DataFrame
first column consists of the dependent variable and the others
of the regressors over which the averaging is supposed to be performed.
subset : np.array
This array contains in each row the index of the column of the x
matrix to indicate which regressors should be added for this model.
Each row, hence, describes one model.
Returns
-------
weights : np.array
the optimal weights to average the coefficients.
averaged_beta : np.array
the averaged coefficients.
expected_test_mse : float
the expected test MSE when applying the averaged coefficients.
"""
# extract data to numpy arrays
y = data.iloc[:, 0].astype(float).to_numpy()
x = data.iloc[:, 1:].astype(float).to_numpy()
num_obs = x.shape[0]
num_regressors = x.shape[1]
num_models = subset.shape[0]
# Initialize empty containers for the results
beta_all = np.zeros((num_regressors, num_models))
transformed_residuals_all = np.zeros((num_obs, num_models))
# get the cross validated mse for each model
for model in range(num_models):
x_model = x[:, subset[model]]
beta_model = np.linalg.inv(x_model.T @ x_model) @ x_model.T @ y
beta_all[subset[model], model] = beta_model
residuals_model = y - x_model @ beta_model
transformer = np.diag(x_model @ np.linalg.inv(x_model.T @ x_model) @ x_model.T)
transformed_residuals_all[:, model] = residuals_model * (1 / (1 - transformer))
# solve the quadratic programming to get the weights of the models
weights = quadprog.solve_qp(
transformed_residuals_all.T @ transformed_residuals_all,
np.zeros(num_models),
np.hstack(
(
np.ones((num_models, 1)),
np.identity(num_models),
-np.identity(num_models),
)
),
np.hstack((np.ones(1), np.zeros(num_models), -np.ones(num_models))),
1,
)[0]
# get the resulting coefficients after applying the weights
averaged_beta = beta_all @ weights
# get the resulting minimized cross validation criterion
expected_test_mse = (
weights.T @ (transformed_residuals_all.T @ transformed_residuals_all) @ weights
) / num_obs
# # Running it by hand
# fitted_values = np.zeros(num_obs)
# for row in range(num_obs):
# x_row = x_model[row]
# x_temp = np.delete(x_model, row, axis=0)
# y_temp = np.delete(y, row, axis=0)
# fitted_values[row] = x_row @ np.linalg.inv(x_temp.T @ x_temp) @ x_temp.T @ y_temp
# residuals = y - fitted_values
return weights, averaged_beta, expected_test_mse
def simulate_data(
num_obs, coefficients, polynomials=1, curvature=(0, 0), error_dist="random_cluster"
):
"""
Simulate data with different polynomials for small firms
without any treatment effect for large firms with a flat dependent variable
around zero.
Parameters
----------
num_obs : int
the total number of firms.
coefficients : dict
dictinairy with keys "untreated" and "treated" both holding a numpy array
of length polynomials. The first float in each numpy array corresponds
to the coeffcient for polynomial zero.
polynomials : int, optional
the amount of polynomials for each untreated and treated firms.
The default is 1.
curvature : tuple
indicates the coefficient and superscript of a curvature regressors.
Default is (0, 0) which means no curvature regressor is added.
error_dist : string
indicates the distribution of the error term. Default is "random_cluster".
Returns
-------
data : pandas DataFrame
holds the simulated independent as well as dependent variables.
"""
# create empty data frame for data
data = pd.DataFrame(
index=pd.Index(np.arange(num_obs), name="firm"),
columns=["large", "score", "scaled_investment"],
)
# draw size of the firm
data["large"] = np.random.binomial(1, 0.5, num_obs)
data["small"] = 1 - data["large"]
value_counts = data["large"].value_counts().to_dict()
num_small = value_counts[0]
num_large = value_counts[1]
# get scores for large firms
loc = 92
scale = 18
score_large = pd.DataFrame(
skewnorm.rvs(-5, loc=loc, scale=scale, size=num_large), columns=["score"]
)
array = score_large.loc[(score_large["score"] <= 90) & (score_large["score"] >= 80)]
# flatten peak for normal distribution
score_large.loc[(score_large["score"] <= 90) & (score_large["score"] >= 80)] = (
np.random.uniform(78, 92, len(array))
).reshape((len(array), 1))
# make sure no value is below zero or above 100
if len(score_large.loc[score_large["score"] < 0]) > 0:
score_large.loc[score_large["score"] < 0] = np.random.choice(
score_large.loc[(score_large["score"] >= 0) & (score_large["score"] <= 100)]
.to_numpy()
.flatten(),
size=len(score_large.loc[score_large["score"] < 0]),
).reshape(len(score_large.loc[score_large["score"] < 0]), 1)
if len(score_large.loc[score_large["score"] > 100]) > 0:
score_large.loc[score_large["score"] > 100] = np.random.choice(
score_large.loc[(score_large["score"] >= 0) & (score_large["score"] <= 100)]
.to_numpy()
.flatten(),
size=len(score_large.loc[score_large["score"] > 100]),
).reshape(len(score_large.loc[score_large["score"] > 100]), 1)
# round the numbers to the next integer
score_large = score_large.round()
data.loc[data["large"] == 1, "score"] = score_large.values
# get scores for small firms
loc = 88
scale = 12
num_normal = int(4 / 5 * num_small)
score_small_1 = pd.DataFrame(
skewnorm.rvs(-2, loc=loc, scale=scale, size=num_normal), columns=["score"]
)
# adjust for uniform like lower tail
score_small_2 = pd.DataFrame(
np.random.uniform(20, 55, num_small - num_normal), columns=["score"]
)
score_small = pd.concat([score_small_1, score_small_2])
if len(score_small.loc[score_small["score"] < 0]) > 0:
score_small.loc[score_small["score"] < 0] = np.random.choice(
score_small.loc[(score_small["score"] >= 0) & (score_small["score"] <= 100)]
.to_numpy()
.flatten(),
size=len(score_small.loc[score_small["score"] < 0]),
).reshape(len(score_small.loc[score_small["score"] < 0]), 1)
if len(score_small.loc[score_small["score"] > 100]) > 0:
score_small.loc[score_small["score"] > 100] = np.random.choice(
score_small.loc[(score_small["score"] >= 0) & (score_small["score"] <= 100)]
.to_numpy()
.flatten(),
size=len(score_small.loc[score_small["score"] > 100]),
).reshape(len(score_small.loc[score_small["score"] > 100]), 1)
score_small = score_small.round()
data.loc[data["large"] == 0, "score"] = score_small.values
# get treatment variable based on score
data.loc[data["score"] >= 75, "treated"] = 1
data.loc[data["score"] < 75, "treated"] = 0
# normalize score
# data = data.astype(int)
data["score"] = data["score"] - 75
# get the error term according to the specified way
if error_dist == "normal":
error = (
0.1 - 0.1 * np.abs(data["score"].astype(float).to_numpy()) / 100
) * | np.random.normal(size=num_obs) | numpy.random.normal |
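# A minimal use of jackknife_averaging above on synthetic data: two nested OLS models (with
# and without x2) are combined with the cross-validated weights. The column layout follows the
# function's convention (first column = dependent variable); quadprog must be installed.
import numpy as np
import pandas as pd
rng = np.random.RandomState(0)
x1, x2 = rng.normal(size=200), rng.normal(size=200)
y = 1.0 + 0.5 * x1 + 0.1 * x2 + rng.normal(scale=0.5, size=200)
data = pd.DataFrame({"y": y, "const": 1.0, "x1": x1, "x2": x2})
subset = np.array([np.array([0, 1]), np.array([0, 1, 2])], dtype=object)  # model 1: const + x1, model 2: all three
weights, averaged_beta, cv_mse = jackknife_averaging(data, subset)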
# Stats
import datetime
from io import BytesIO
from flask import jsonify, Blueprint, current_app, make_response
from sqlalchemy import extract, func
from sqlalchemy.orm import Session
from backend.database.objects import Replay, Game
bp = Blueprint('stats', __name__, url_prefix='/stats')
@bp.route('/ping')
def ping():
return jsonify({'status': 'Pong!'})
@bp.route('/uploads/<time>/<model>')
def upload_stats(time, model):
if time not in ['d', 'h']:
return jsonify([])
session = current_app.config['db']()
if time == 'h':
result = session.query(extract('year', Replay.upload_date).label('y'),
extract('month', Replay.upload_date).label('m'),
extract('day', Replay.upload_date).label('d'),
extract('hour', Replay.upload_date).label('h'), func.count(Replay.upload_date)).filter(
Replay.upload_date > datetime.datetime.utcnow() - datetime.timedelta(hours=24))
else: # day
result = session.query(extract('year', Replay.upload_date).label('y'),
extract('month', Replay.upload_date).label('m'),
extract('day', Replay.upload_date).label('d'),
func.count(Replay.upload_date)).filter(
Replay.upload_date > datetime.datetime.utcnow() - datetime.timedelta(days=30))
if model != '*':
result = result.filter(Replay.model_hash.startswith(model))
result = result.group_by('y').group_by(
'm').group_by('d')
if time == 'h':
result = result.group_by('h')
result = result.all()
if time == 'h':
result = [{
'year': r[0],
'month': r[1],
'day': r[2],
'hour': r[3],
'count': r[4]
} for r in result[::-1]]
result = sorted(result, key=lambda x: x['year'] * 365 + x['month'] * 30 + x['day'] + x['hour'] * (1 / 24.0))
else:
result = [{
'year': r[0],
'month': r[1],
'day': r[2],
'count': r[3]
} for r in result[::-1]]
result = sorted(result, key=lambda x: x['year'] * 365 + x['month'] * 30 + x['day'])
session.close()
return jsonify(result)
@bp.route('/mmrs')
def rl_stats():
import numpy as np
mmrs = get_mmr_array()
data = np.histogram(mmrs, bins=200)
return jsonify({'data': data[0].tolist(), 'bins': data[1].tolist()})
@bp.route('/mmrs/image')
def rl_stats_img():
from matplotlib.figure import Figure
mmrs = get_mmr_array()
fig = Figure()
ax = fig.add_subplot(111)
ax.hist(mmrs, bins=200)
ax.set_title('MMR Histogram')
ax.set_xlabel('MMR')
r = get_mpl_response(fig)
return r
@bp.route('/ranks')
def rl_stats_ranks():
import numpy as np
ranks = get_rank_array()
ranks = ranks[ranks != 0]
# print(mmrs)
data = np.histogram(ranks, bins=np.arange(0, 21))
# print(data)
return jsonify({'data': data[0].tolist(), 'bins': data[1].tolist()})
@bp.route('/ranks/image')
def rl_stats_img_ranks():
import numpy as np
ranks = get_rank_array()
ranks = ranks[ranks != 0]
from matplotlib.figure import Figure
fig = Figure()
ax = fig.add_subplot(111)
ax.hist(ranks, bins=np.arange(0, 21))
ax.set_title('Rank Histogram')
ax.set_xlabel('Rank')
ax.set_xticks(np.arange(0, 21))
r = get_mpl_response(fig)
return r
def get_mpl_response(fig):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
f = BytesIO()
FigureCanvas(fig).print_png(f)
f.seek(0)
r = make_response(f.getvalue())
r.headers['Content-Type'] = 'image/png'
return r
def get_mmr_array():
import numpy as np
session = current_app.config['db']() # type: Session
games = session.query(Game.mmrs).all()
mmrs = | np.array([]) | numpy.array |
"""Test read_csv"""
from io import StringIO
import pathlib
import pytest
import numpy as np
from unyt import unyt_array, unyt_quantity
from unyt.testing import allclose_units
from toolbag import read_csv
from toolbag.labview_utilities import DataContainer
from toolbag import convert_timestamp
data_dir = pathlib.Path("tests/data files")
# pylint: disable=missing-function-docstring
def test_singlerow():
data = read_csv(data_dir.joinpath("single row.csv"))
assert np.all(data == np.arange(10))
def test_singlerow_header():
data = read_csv(data_dir.joinpath("single row header.csv"))
assert np.all(data == np.arange(10))
assert read_csv.header == "My data"
def test_singlerow_header_datalabel():
data = read_csv(data_dir.joinpath("single row header data label.csv"))
assert allclose_units(data.Time, unyt_array(np.arange(10), "s"))
assert data.header == "My data"
def test_singlecolumn():
data = read_csv(data_dir.joinpath("single column.csv"))
assert np.all(data == np.arange(10))
def test_singlecolumn_header_datalabel():
data = read_csv(data_dir.joinpath("single column header data label.csv"))
expected_array = unyt_array(np.arange(10), "dimensionless")
assert allclose_units(data.Time, expected_array)
assert read_csv.header == "My data"
def test_2darray():
data = read_csv(data_dir.joinpath("2d array.csv"))
assert np.all(data[:, 0] == np.arange(10))
assert np.all(data[:, 1] == np.arange(10) ** 2)
def test_column_header_nodatalabels():
data = read_csv(data_dir.joinpath("column header no data labels.csv"))
assert np.all(data[:, 0] == np.arange(10))
assert np.all(data[:, 1] == np.arange(10) ** 2)
assert read_csv.header == "My data"
def test_column_noheader_datalabelsnounits():
data = read_csv(data_dir.joinpath("column no header data labels no units.csv"))
assert isinstance(data, DataContainer)
expected_array = unyt_array(np.arange(10), "dimensionless")
assert allclose_units(data[0], expected_array)
assert allclose_units(data["Time"], expected_array)
assert allclose_units(data["Time - Plot 0"], expected_array)
expected_array = unyt_array(np.arange(10) ** 2, "dimensionless")
assert allclose_units(data[1], expected_array)
assert allclose_units(data["Amplitude"], expected_array)
assert allclose_units(data["Amplitude - Plot 0"], expected_array)
assert data.header == ""
assert data.legends == ["Plot 0", "Plot 0"]
def test_column_noheader_datalabels():
data = read_csv(data_dir.joinpath("column no header data labels.csv"))
expected_array = unyt_array(np.arange(10), "s")
assert allclose_units(data["Time (s) - Plot 0"], expected_array)
expected_array = unyt_array(np.arange(10) ** 3, "V")
assert allclose_units(data["Amplitude (V) - Plot 1"], expected_array)
def test_column_header_datalabels():
data = read_csv(data_dir.joinpath("column header data labels.csv"))
expected_array = unyt_array( | np.arange(10) | numpy.arange |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 21:06:19 2018
This file includes the necessary tools to decorrelate transient, harmonic and noise components separately.
This includes functions to separate audio into transients, harmonic and noise
components and then to process these separately using decorrelators in the decorrelation toolbox.
@author: <NAME>
"""
from __future__ import print_function
import numpy as np
import librosa
import librosa.display
from . import decorr_toolbox as dt
def separate_mono_audio(audio,
fftTrans = 1024,
fftHarm = 2048,
marginTrans = 2.14,
marginHarm = 3.0):
# Separates a mono audio file into transient, harmonic and noise components based on the given settings.
D_stage1 = librosa.stft(audio,n_fft=fftTrans)
D_harmonic1, D_transient = librosa.decompose.hpss(D_stage1,
margin=(1.0, marginTrans))
Transients = librosa.istft(D_transient)
# TODO: simplify using the transient and harmonic component extraction from librosa rather than the hpss, which does both and isn't needed. Find out how to select the fft length.
D_residual1 = D_stage1 - D_transient #Residual 1 is everything except the Transients
Residual1 = librosa.istft(D_residual1)
D_2 = librosa.stft(Residual1,n_fft=fftHarm)
D_harmonic2, D_percussive2 = librosa.decompose.hpss(D_2,
margin=(marginHarm, 1.0))
D_Noise = D_2 - D_harmonic2
Harmonic = librosa.istft(D_harmonic2)
Noise = librosa.istft(D_Noise)
return {'Transients':Transients, 'Harmonic':Harmonic ,'Noise':Noise }
def mono_audio(audio):
AudioOut = np.sum(audio, axis = 1)
return AudioOut
def separate_audio (audio,
fftTrans = 1024,
fftHarm = 2048,
marginTrans = 2.14,
marginHarm = 3.0):
# Separates an audio file into its components.
multiAudio = dt.add_dimension(audio)
numChans = multiAudio.shape[1]
Transients = np.zeros_like(multiAudio)
Harmonic = | np.zeros_like(multiAudio) | numpy.zeros_like |
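# A minimal call of separate_mono_audio above on one second of synthetic noise at 22.05 kHz,
# just to show the call pattern and the returned dict keys. Real use would pass audio loaded
# with librosa.load(...); because of the two STFT round trips the three parts only sum back
# to the input approximately.
import numpy as np
audio = np.random.randn(22050).astype(np.float32)
parts = separate_mono_audio(audio)     # default fftTrans/fftHarm and margins as defined above
transients, harmonic, noise = parts['Transients'], parts['Harmonic'], parts['Noise']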
""" Hyperparameters for MJC peg insertion trajectory optimization. """
from __future__ import division
from datetime import datetime
import os.path
import numpy as np
from gps import __file__ as gps_filepath
from gps.agent.mjc.agent_mjc import AgentMuJoCo
from gps.algorithm.algorithm_traj_opt import AlgorithmTrajOpt
from gps.algorithm.cost.cost_fk import CostFK
from gps.algorithm.cost.cost_action import CostAction
from gps.algorithm.cost.cost_sum import CostSum
from gps.algorithm.dynamics.dynamics_lr_prior import DynamicsLRPrior
from gps.algorithm.dynamics.dynamics_prior_gmm import DynamicsPriorGMM
from gps.algorithm.traj_opt.traj_opt_lqr_python import TrajOptLQRPython
from gps.algorithm.policy.lin_gauss_init import init_lqr
from gps.proto.gps_pb2 import JOINT_ANGLES, JOINT_VELOCITIES, \
END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES, ACTION
from gps.gui.config import generate_experiment_info
SENSOR_DIMS = {
JOINT_ANGLES: 7,
JOINT_VELOCITIES: 7,
END_EFFECTOR_POINTS: 6,
END_EFFECTOR_POINT_VELOCITIES: 6,
ACTION: 7,
}
PR2_GAINS = | np.array([3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098]) | numpy.array |
import ctypes
from collections import defaultdict
from itertools import groupby
import multiprocessing as mp
import time
import os.path
import h5py
import numpy as np
def divmod_splits(indices, splits):
indices = np.asarray(indices)
splits = np.asarray(splits)
split_indices = np.searchsorted(splits, indices, side='right')
elem_indices = indices
elem_indices[split_indices != 0] -= splits[split_indices[split_indices != 0] - 1]
return split_indices, elem_indices
def list_to_slices(data):
slices = []
for key, group in groupby(enumerate(data), lambda i: i[0] - i[1]):
# group = map(itemgetter(1), group)
group = [g[1] for g in group]
slices.append(slice(group[0], group[-1] + 1))
return slices
class HDF5Concat(object):
def __init__(self, datasets):
self.datasets = datasets
self.sizes = list(map(len, self.datasets))
self.cumsizes = np.cumsum(self.sizes)
# print self.cumsizes[-1]
# print self.datasets[0].shape
self.shape = (self.cumsizes[-1], self.datasets[0].shape[1])
self.size = | np.prod(self.shape) | numpy.prod |
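# What the two helpers above do, on a concrete example: three datasets of sizes 3, 4 and 3
# give cumulative split points [3, 7, 10]; divmod_splits maps a global index to (dataset,
# local index), and list_to_slices collapses runs of consecutive indices into slices.
import numpy as np
ds_idx, elem_idx = divmod_splits(np.array([0, 3, 6, 9]), np.array([3, 7, 10]))
# ds_idx   -> array([0, 1, 1, 2])   (which dataset each global index falls in)
# elem_idx -> array([0, 0, 3, 2])   (index within that dataset)
print(list_to_slices([1, 2, 3, 7, 8]))   # -> [slice(1, 4, None), slice(7, 9, None)]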
import os
import os.path
import sys
from sys import platform
sys.path.append(os.path.join(os.getcwd(), "Measures"))
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
import timeit
import random
from .SimpleHashing import SimpleHashing
from collections import defaultdict
import statistics
from collections import Counter
class LSH(SimpleHashing):
def DoHash(self):
self.measure.GeneratesimMatrix()
self.GenerateSimilarityMatrix(self.measure.simMatrix)
self.bit_indexes = np.argpartition(self.cut_values_normal, self.hbits)[:self.hbits]
self.GenerateHashTable()
return -1
def GenerateHashTable(self):
print("Generating LSH hash table: ", " hbits:", str(self.hbits) +'('+ str(2**self.hbits)+')', " k", self.k , " d", self.d , " n=",self.n )
self.hash_values = [self.ComputeHashValue(x) for x in self.X]
self.hashTable = defaultdict(list)
for i in range(self.n):
self.hashTable[self.hash_values[i]].append(i)
def GetNeighborsbyBucket(self, item_id):
return self.hashTable[self.hash_values[item_id]]
def ComputeHashValue(self,x):
val=0
for i in range(self.hbits):
partitions = self.partitions[self.bit_indexes[i]]
val <<=1
if x[self.bit_indexes[i]] in partitions[1]:
val+=1
return val
#def ComputeHashValue(self,x):
# val=0
# for i in range(self.hbits):
# #partitions = self.partitions[self.bit_indexes[i]]
# val <<=1
# #if x[self.bit_indexes[i]] in partitions[1]:
# if x[self.bit_indexes[i]] >= self.D[self.bit_indexes[i]]/2:
# val+=1
# return val
#def TryToMovePoint(self,hashValue_ori, hashValue_cur, index ):
#for i in range(index,self.hbits):
def hammingDistance(self, x, y):
ans = 0
for i in range(31,-1,-1):
b1= x>>i&1
b2 = y>>i&1
ans+= not(b1==b2)
return ans
def CorrectSingletonBucket(self):
list_hash_1 = []
for hashValue,itemList in self.hashTable.items():
if(len(itemList)<=1):
list_hash_1.append((hashValue, itemList))
for iters in list_hash_1:
del self.hashTable[iters[0]]
for iters in list_hash_1:
closest_hash_value = -1
closest_dist=1000000
for hashValue,itemList in self.hashTable.items():
temp = self.hammingDistance(iters[0],hashValue)
if temp < closest_dist:
closest_hash_value = hashValue
closest_dist = temp
for i in iters[1]:
self.hash_values[i] = closest_hash_value
self.hashTable[closest_hash_value].append(i)
print("LSH Merged ",len(list_hash_1),"/", len(self.hashTable) , " buckets!!!" )
def TestHashTable(self):
n = len(self.hashTable.items())
num_0 = 2**self.hbits - len(self.hashTable.items());
num_1 = 0;
len_list=[]
for hashValue,itemList in self.hashTable.items():
if(len(itemList)==1): num_1+=1
len_list.append(len(itemList))
mean=np.mean(len_list)
std_ = | np.std(len_list) | numpy.std |
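# The bit loop in hammingDistance above is equivalent to counting set bits in the XOR of the
# two 32-bit hash values; CorrectSingletonBucket uses it to merge each singleton bucket into
# the bucket whose hash value differs in the fewest bits. Standalone check:
x, y = 0b1011, 0b0010
assert bin(x ^ y).count("1") == 2    # bits 0 and 3 differ, so the Hamming distance is 2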
import pickle
import os
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
# from sklearn.utils.random import sample_without_replacement
# from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
# from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
# from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
# from sklearn.gaussian_process import GaussianProcessClassifier
# from sklearn.svm import LinearSVR
# from sklearn.neural_network import MLPClassifier
# from sklearn.feature_selection import RFECV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import make_scorer
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
from framework.data_portals.data_portal import DataPortal
from pyESN import ESN
all_tickers = pd.read_csv('C:\\Users\\kohle\\Documents\\Machine Learning\\Echo State Networks\\Stock_Data\\list.csv')[
'A'].tolist()
pknum = 0
ticker_range = (pknum * 7000, (pknum + 1) * 7000)
ticker_range = (0, len(all_tickers))
delay_minutes = 0
tasks = ['new', 'continue', 'loop_new'] # choose from: ['new', 'predict_all', 'continue', 'combine', 'loop_new']
# tasks = ['continue']
# tasks = ['combine']
tasks = []
new_env = False # instruction to keep specified model_env (instead of loading old one, when available)
end_int = -1 # condition to set number of iterations to be run
model_env = {
'all_tickers': all_tickers,
'tickers': np.random.choice(all_tickers, 500, replace=False),
'n_res_list': [30, 30, 30, 30, 30, 30, 50, 80],
'sparsity_list': [0.5, 0.75, 0.8, 0.85, 0.9, 0.925, 0.95, 0.96],
'train_len': 4000,
'drop_len': 200,
'n_trees': 200,
'n_comp': 10,
'vol': False,
'individual': False,
# 'model_ui': '0145_SGD',
'model_ui': '0041_SGD',
'folder_path': 'models/SGD_hinge_loss'
}
class LinDetrend(object):
lin_trend = None
def fit(self, X, y, **fit_params):
self.lin_trend = np.polyfit(range(len(X)), X, 1)
return self
def transform(self, X):
return X - np.polyval(self.lin_trend, range(len(X))).reshape((1, len(X))).T
def individual_fit_results(tickers, model, prep, net, pca=None, new_fit=True, drop_len=200, train_len=4000,
test_len=200, vol=True):
"""
model is assumed to generate a 1,0 classification to either buy or sell
"""
gen = portal.iter_get_uids('daily_prices', 'default', tickers)
df = pd.DataFrame() # Dataframe with tickers and performance metrics
df1 = pd.DataFrame() # array of model coefficients
df2 = pd.DataFrame() # array of trading results
df3 = pd.DataFrame() # array of buy & hold results
df4 = pd.DataFrame() # array of predictions from model
i = 0
for data in gen:
print(i)
x_train, x_test = np.zeros((0, sum(model_env['n_res_list']) + 1)), \
np.zeros((0, sum(model_env['n_res_list']) + 1))
y_train, y_test, y_cv, y_tcv = [], [], [], []
w_train, w_test = [], []
log_vol = np.log10(np.array(data['volume'] + 1).reshape((len(data), 1)))
log_prices = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
if len(log_prices) > train_len + test_len:
prep.fit(log_prices[:train_len])
log_prices = prep.transform(log_prices)
if vol:
prep.fit(log_vol[:train_len])
log_vol = prep.transform(log_vol)
else:
log_vol = np.ones((len(data), 1))
states = net.get_states(log_vol, log_prices)
# if pca:
# states = pca.transform(states)
x_train = np.vstack((x_train, states[model_env['drop_len']:train_len]))
y_train += np.sign((np.sign(
log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
0]) + 1) / 2).tolist()
w_train += np.abs(
log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
0]).tolist()
y_cv += (log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[
model_env['drop_len']:train_len,
0]).tolist()
x_test = np.vstack((x_test, states[train_len:-1]))
y_test += np.sign(
(np.sign(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]) + 1) / 2).tolist()
w_test += np.abs(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
y_tcv += (log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
if pca:
states = pca.transform(states)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
if new_fit:
model.fit(x_train, y_train, sample_weight=w_train)
acc = model.score(states[1:], np.sign((np.sign(log_prices[1:, 0] - log_prices[:-1, 0]) + 1) / 2).tolist())
pred = model.predict(states[drop_len:])
hold = np.array(np.log10(data['adjusted_close'])[drop_len:])
trading = np.hstack((hold[0], (hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum())))
all_hold_ret = hold[-1] - hold[0]
all_trade_ret = trading[-1] - trading[0]
all_inc_ret = all_trade_ret / abs(all_hold_ret) - 1
train_hold_ret = hold[train_len - drop_len] - hold[0]
train_trade_ret = trading[train_len - drop_len] - trading[0]
train_inc_ret = train_trade_ret / abs(train_hold_ret) - 1
test_hold_ret = hold[train_len + test_len - drop_len] - hold[train_len - drop_len]
test_trade_ret = trading[train_len + test_len - drop_len] - trading[train_len - drop_len]
test_inc_ret = test_trade_ret - test_hold_ret
if isinstance(df2, pd.DataFrame):
df2 = np.pad(trading[:train_len + test_len],
[0, train_len + test_len - len(trading[:train_len + test_len])])
df3 = np.pad(hold[:train_len + test_len],
[0, train_len + test_len - len(hold[:train_len + test_len])])
# df1 = model._get_coef() #Support Vector Classifier (SVC)
# df1 = model.feature_importances_ #Random Forest (RF)
df1 = model.coef_ # SGDClassifier (SGD)
df4 = np.pad(pred[:train_len + test_len],
[0, train_len + test_len - len(pred[:train_len + test_len])])
df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
train_hold_ret, train_trade_ret, train_inc_ret,
test_hold_ret, test_trade_ret, test_inc_ret]],
columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
'test_inc_ret']))
else:
df2 = np.vstack((df2, np.pad(trading[:train_len + test_len],
[0, train_len + test_len - len(trading[:train_len + test_len])])))
df3 = np.vstack((df3, np.pad(hold[:train_len + test_len],
[0, train_len + test_len - len(hold[:train_len + test_len])])))
df1 = np.vstack((df1, model.coef_))
# df1 = np.vstack((df1, model._get_coef()))
# df1 = np.vstack((df1, model.feature_importances_()))
df4 = np.vstack((df4, np.pad(pred[:train_len + test_len],
[0, train_len + test_len - len(pred[:train_len + test_len])])))
df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
train_hold_ret, train_trade_ret, train_inc_ret,
test_hold_ret, test_trade_ret, test_inc_ret]],
columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
'test_inc_ret']))
i += 1
df.reset_index(drop=True, inplace=True)
return df, df1, df2, df3, df4
def inspect_ticker(ticker, model, prep, net, pca=None, vol=None, drop_len=200):
data = portal.get('daily_prices', 'default', ticker)
pp = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
prep.fit(pp[:model_env['train_len']])
pp = prep.transform(pp)
if vol:
log_vol = np.log10(np.array(data['volume'] + 1).reshape((len(data), 1)))
prep.fit(log_vol[:model_env['train_len']])
log_vol = prep.transform(log_vol)
else:
log_vol = np.ones((len(data), 1))
states = net.get_states(log_vol, pp)
if pca:
states = pca.transform(states)
pred = model.predict(states[drop_len:])
# score = trading_score()
hold = data['adjusted_close'][drop_len:]
trading = hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum()
return hold, trading
def plot_ticker(ticker, model, prep, net, pca=None, vol=False):
hold, trading = inspect_ticker(ticker, model, prep, net, pca=pca, vol=vol)
plt.plot(hold, label=ticker)
plt.plot(trading, label=ticker + '_ESN')
plt.legend()
def generate_plots(tickers, model, prep, net):
for ticker in tickers:
print(ticker)
yield plot_ticker(ticker, model, prep, net)
def trading_score(y, y_pred):
return sum(y * np.sign(y_pred)) / sum(y * np.sign(y))
def combine_pickles(model_uis, path, keys=('out', 'coefs', 'trading', 'hold', 'pred')):
""" Combines dictionaries of arrays (saved as separate pickles) into in a single dictionary of arrays """
data_dict = {}
if isinstance(model_uis, str):
model_uis = [model_uis]
for model_ui in model_uis:
data_dict[model_ui] = dict(zip(keys, [None] * len(keys)))
for frame in keys:
with open(f'{path}/{model_ui}/{model_ui}_{frame}0.pkl', 'rb') as file:
data_dict[model_ui][frame] = pickle.load(file)
for frame in keys:
for i in range(1, pknum + 1):
with open(f'{path}/{model_ui}/{model_ui}_{frame}{i}.pkl', 'rb') as file:
df = pickle.load(file)
if isinstance(df, pd.DataFrame):
data_dict[model_ui][frame] = data_dict[model_ui][frame].append(df)
else:
data_dict[model_ui][frame] = np.vstack((data_dict[model_ui][frame], df))
return data_dict.copy()
def predict_all(model_env, ticker_range, all_tickers, pknum=0, new_env=True):
path = model_env["folder_path"]
with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_def.pkl', 'rb') as file:
model_def = pickle.load(file)
if not new_env:
with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_env.pkl', 'rb') as file:
model_env = pickle.load(file)
out, coefs, trading, hold, pred = pd.DataFrame(), None, None, None, None
for batch in range(ticker_range[0], ticker_range[-1], 25):
df, df1, df2, df3, df4 = individual_fit_results(all_tickers[batch:batch + 25],
model_def['model'], model_def['prep'],
model_def['net'], pca=model_def['pca'],
new_fit=model_env['individual'],
train_len=model_env['train_len'],
vol=model_env['vol'], drop_len=model_env['drop_len'])
out = out.append(df)
with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_out{pknum}.pkl', 'wb+') as file:
pickle.dump(out, file)
if coefs is None:
coefs, trading, hold, pred = df1, df2, df3, df4
else:
trading = np.vstack((trading, df2))
coefs = | np.vstack((coefs, df1)) | numpy.vstack |
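# How trading_score above behaves: it divides the log-return captured by the predicted
# direction by the total absolute movement, so a perfect direction predictor scores 1.0 and a
# coin flip hovers around 0. Only the sign of the prediction matters.
import numpy as np
y = np.array([0.02, -0.01, 0.03])       # realised daily log-returns
y_pred = np.array([1.0, -0.5, -2.0])    # model output; two of three directions are right
score = trading_score(y, y_pred)        # (0.02 + 0.01 - 0.03) / 0.06 = 0.0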
"""Test plots made by pyiem.plot.geoplot"""
import datetime
import tempfile
import os
import copy
import pytest
import matplotlib.colors as mpcolors
import numpy as np
from shapely.geometry import Polygon
# Local
from pyiem import plot
from pyiem.plot import (
MapPlot,
centered_bins,
pretty_bins,
load_bounds,
mask_outside_geom,
)
from pyiem.util import utc
PAIN = 1.1 # how much do we care, sigh.
def test_invalid_file():
"""Test that we don't error out on an invalid filename."""
assert load_bounds("this shall not work") is None
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_nws_sector_twitter_res():
"""Test that Hawaii does not overlap Florida for Twitter Res."""
mp = MapPlot(
twitter=True,
nocaption=True,
sector="nws",
title="Don't hide Flo Rida",
)
mp.draw_cwas()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_nashville():
"""Test that Benton County, TNC005 does not show for OHX."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="OHX",
title="Don't show Benton County TN!",
)
mp.fill_ugcs({"TNC005": 10}, plotmissing=True)
mp.draw_cwas()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_plotmissing():
"""Test that we can plotmissing."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="FSD",
title="Testing plotmissing",
)
mp.fill_climdiv({"IAC001": 10}, plotmissing=False)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_fill_by_str():
"""Test that we can fill by string or dict."""
mp = MapPlot(
nocaption=True,
sector="state",
state="CA",
title="Testing color provision",
)
mp.fill_climdiv({"CAC001": 10}, color="b", plotmissing=False)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_fill_by_dict():
"""Test that we can fill by string or dict."""
mp = MapPlot(
nocaption=True,
sector="state",
state="CA",
title="Testing color provision",
)
mp.fill_climdiv({"CAC001": 10}, color={"CAC001": "r"}, plotmissing=False)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_issue374_pah():
"""Test drawing fire weather zones for Paducah."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="PAH",
title="Paducah Fire Weather Zones including MOZ098 Shannon",
)
mp.fill_ugcs({"MOZ098": 10}, is_firewx=True)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_memphis_cwa():
"""Test that we can draw a map with Memphis CWA.."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="MEG",
title="Memphis including Hardin, TN TNZ092",
)
mp.fill_ugcs({"TNZ092": 10})
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_overlay_roadcond():
"""Test being able to plot Iowa Road Conditions."""
mp = MapPlot(
nocaption=True,
sector="iowa",
title="A long and long title that has no purpose but to test things",
)
mp.overlay_roadcond(utc(2021, 2, 4, 17))
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_overlay_nexrad():
"""Test being able to plot NEXRAD."""
mp = MapPlot(
nocaption=True,
sector="conus",
title="A long and long title that has no purpose but to test things",
)
mp.overlay_nexrad(utc(2021, 2, 9, 17))
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_overlay_nexrad_hawaii():
"""Test that we can plot nexrad over Hawaii."""
mp = MapPlot(
nocaption=True,
sector="state",
state="HI",
title="A long and long title that has no purpose but to test things",
)
caxpos = [0.05, 0.05, 0.35, 0.015]
mp.overlay_nexrad(utc(2021, 2, 9, 17), caxpos=caxpos)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_overlay_nexrad_alaska():
"""Test that we can plot nexrad over Alaska."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="AJK",
title="A long and long title that has no purpose but to test things",
)
mp.overlay_nexrad(utc(2021, 2, 9, 17))
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_overlay_nexrad_puerto_rico():
"""Test that we can plot nexrad over Puerto Rico."""
mp = MapPlot(
nocaption=True,
sector="cwa",
cwa="SJU",
title="A long and long title that has no purpose but to test things",
)
mp.overlay_nexrad(utc(2021, 2, 9, 17))
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_conus_contour():
"""Test that a conus sector plot can generate a contour correctly."""
mp = MapPlot(nocaption=True, sector="conus", twitter=True)
mp.contourf(
np.arange(-120, -47, 3),
np.arange(25, 50),
np.arange(25),
np.arange(25),
clip_on=False,
)
mp.draw_mask(sector="conus")
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_iowa_contour_with_polygon_mask():
"""Test that we can mask with a single Polygon."""
mp = MapPlot(nocaption=True, sector="iowa", twitter=True)
mp.contourf(
np.arange(-120, -47, 3),
np.arange(25, 50),
np.arange(25),
np.arange(25),
clip_on=False,
)
poly = Polygon([(-95, 40), (-95, 45), (-90, 45), (-90, 40)])
mask_outside_geom(mp.ax, poly)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_issue365_cape_cod():
"""Test that we don't mask out Cape Cod."""
mp = MapPlot(nocaption=True, sector="cwa", cwa="BOX")
mp.contourf(
np.arange(-75, -66),
np.arange(36, 45),
np.arange(9),
np.arange(9),
clevlabels=["a", "b", "c", "d", "e", "f", "g", "h", "i"],
clip_on=False,
)
mp.draw_mask(sector="conus")
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_issue217():
"""See what happens with our logo on very scaled figure."""
mp = MapPlot(nocaption=True, figsize=(6.00, 3.35))
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_twitter_resolution():
"""Test that we get good plot domain when we want a twitter resolution."""
mp = MapPlot(sector="conus", nocaption=True, twitter=True)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN + 1) # lots of bars
def test_issue98_labelbar():
"""Sometimes our label bar sucks."""
mp = MapPlot(
title="Proportional Colorbar with some rotation",
sector="iowa",
nocaption=True,
)
cmap = copy.copy(plot.maue())
cmap.set_under("white")
cmap.set_over("black")
clevs = np.arange(0, 1.0, 0.1)
clevs[-1] = 3.987654
norm = mpcolors.BoundaryNorm(clevs, cmap.N)
mp.plot_values(
[-94, -92, -91, -92],
[42, 41, 43, 42.4],
["0.5", "0.25", "1.0", "5.0"],
color=cmap(norm([0.5, 0.25, 1.0, 5.0])),
showmarker=True,
)
mp.draw_colorbar(clevs, cmap, norm, spacing="proportional")
return mp.fig
def test_savefile():
"""Can we properly save a file."""
mp = MapPlot()
tmpfd = tempfile.NamedTemporaryFile(delete=False)
mp.postprocess(filename=tmpfd.name)
assert os.path.isfile(tmpfd.name)
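# These @mpl_image_compare tests are typically run via pytest-mpl, e.g. (illustrative):
#   pytest --mpl-generate-path=baseline test_plot.py   # regenerate baseline images
#   pytest --mpl test_plot.py                          # compare against the baselines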
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_illinois():
"""Produce a plot that doesn't suck"""
mp = MapPlot(sector="state", state="IL", nocaption=True)
mp.draw_cwas()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_issue292_nws_fill_ugcs():
"""Test that fill_ugcs works for nws sector view."""
mp = MapPlot(sector="nws", title="Four Counties", nocaption=True)
data = {"IAC001": 10, "AKC013": 20, "HIC001": 30, "PRC001": 40}
mp.fill_ugcs(data)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_fill_ugcs_color():
"""Provide an explicit color to fill_ugcs"""
mp = MapPlot(
sector="cwa", cwa="DMX", title="Three Counties", nocaption=True
)
data = {"IAC001": 10, "IAC003": 20, "IAC135": 30}
fc = {"IAC001": "#FF0000", "IAC003": "black"}
ec = {}
mp.fill_ugcs(data, fc=fc, ec=ec, nocbar=True)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_dep():
"""Produce a plot with the DEP logo on it."""
mp = MapPlot(sector="state", state="IA", nocaption=True, logo="dep")
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=0.7)
def test_usdm():
"""Can we plot the current USDM"""
mp = MapPlot(sector="conus", nocaption=True)
mp.draw_usdm(valid=datetime.date(2018, 5, 7), hatched=True, filled=False)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_hexbin():
"""See if we can do hexbin OKish"""
mp = MapPlot(
sector="north_america", continentalcolor="white", nocaption=True
)
lons = np.arange(-100, -80, 0.25)
lats = np.arange(40, 50, 0.25)
vals = np.linspace(0, 1, lats.shape[0] * lons.shape[0]).reshape(
[lats.shape[0], lons.shape[0]]
)
lons, lats = np.meshgrid(lons, lats)
mp.hexbin(
lons.flatten(),
lats.flatten(),
vals.flatten(),
np.arange(0, 1, 0.1),
cmap="jet",
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=10) # Lots of one pixel diffs
def test_pcolormesh():
"""See if we can do pcolormesh OKish"""
mp = MapPlot(
sector="custom",
north=43,
east=-80,
west=-96,
south=38,
continentalcolor="white",
nocaption=True,
)
lons = np.arange(-100, -80, 0.25)
lats = np.arange(40, 50, 0.25)
vals = np.linspace(0, 1, lats.shape[0] * lons.shape[0]).reshape(
[lats.shape[0], lons.shape[0]]
)
lons, lats = np.meshgrid(lons, lats)
mp.pcolormesh(lons, lats, vals, np.arange(0, 1, 0.1))
return mp.fig
def test_pretty_bins():
"""Test that we get nice pretty bins!"""
a = pretty_bins(-1, 10)
assert abs(a[-1] - 10.5) < 0.01
def test_centered_bins():
"""See that we can compute some nice centered bins"""
a = centered_bins(10)
assert a[0] == -10
a = centered_bins(55)
assert a[0] == -56
a = centered_bins(99)
assert a[0] == -100
a = centered_bins(99, bins=9)
assert a[0] == -99
a = centered_bins(100, on=100)
assert a[0] == 0
a = centered_bins(0.9)
assert abs(a[-1] - 1.2) < 0.001
a = centered_bins(1.2888)
assert abs(a[-1] - 1.6) < 0.001
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_michigan():
"""See what we do with Michigan"""
mp = MapPlot(sector="state", state="MI", nocaption=True)
mp.contourf(
np.arange(-84, -75),
np.arange(36, 45),
np.arange(9),
np.arange(9),
clevlabels=["a", "b", "c", "d", "e", "f", "g", "h", "i"],
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_drawcities():
"""Draw Cities"""
mp = MapPlot(
title="Fill and Draw Cities",
subtitle="This is my subtitle",
continentalcolor="blue",
sector="iowa",
nocaption=True,
)
mp.drawcities()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_drawrandomtext():
"""See if we can handle the fun that is drawing random text"""
mp = MapPlot(
sector="iowa",
title="Fun Text, here and there",
continentalcolor="white",
debug=True,
nocaption=True,
)
mp.plot_values(
[-94, -92, -91, -92],
[42, 41, 43, 42.4],
["One", "Two\nTwo", "Three\nThree\nThree", "Four\nFour\nFour\nFour"],
showmarker=True,
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_drawiowawfo():
"""Iowa Contour Plot"""
mp = MapPlot(sector="iowawfo", title="Iowa Contour plot", nocaption=True)
mp.contourf(
np.arange(-94, -85),
np.arange(36, 45),
np.arange(9),
np.arange(9),
clevlabels=["a", "b", "c", "d", "e", "f", "g", "h", "i"],
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_fillstates():
"""Can we fill states"""
data = {"AK": 10, "HI": 30, "IA": 40, "NY": 80}
mp = MapPlot(
sector="nws",
title="Fill AK, HI, IA, NY States",
subtitle="test_fillstates",
nocaption=True,
)
mp.fill_states(data, lblformat="%.0f", ilabel=True)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=0.25)
def test_drawcounties():
"""draw counties on the map"""
mp = MapPlot(sector="midwest", title="Counties", nocaption=True)
mp.drawcounties()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=0.25)
def test_drawcounties_cornbelt():
"""draw counties on the map"""
mp = MapPlot(sector="cornbelt", title="Counties", nocaption=True)
mp.drawcounties()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_drawcounties_iailin():
"""draw IA IL IN masked"""
mp = MapPlot(sector="iailin", title="Counties", nocaption=True)
mp.contourf(
np.arange(-94, -85),
np.arange(36, 45),
np.arange(9),
np.arange(9),
clevlabels=["a", "b", "c", "d", "e", "f", "g", "h", "i"],
)
mp.drawcounties()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=1.0)
def test_climdiv():
"""Run tests agains the fill_climdiv"""
mp = MapPlot(sector="conus", title="Climate Divisions", nocaption=True)
data = {"IAC001": 10, "MNC001": 20, "NMC001": 30}
mp.fill_climdiv(data, ilabel=True, lblformat="%.0f")
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_colorbar():
"""Run tests against the colorbar algorithm"""
mp = MapPlot(sector="iowa", nocaption=True)
cmap = copy.copy(plot.maue())
cmap.set_under("white")
clevs = list(range(0, 101, 10))
norm = mpcolors.BoundaryNorm(clevs, cmap.N)
mp.drawcities()
mp.draw_colorbar(clevs, cmap, norm)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_colorbar2():
"""draw a colorbar"""
mp = MapPlot(sector="iowa", nocaption=True)
cmap = plot.maue()
clevs = list(range(0, 101, 10))
clevlabels = [
"One",
"Three",
"Blahh",
"Longest",
"Five",
"Six",
"Ten",
"Fourty",
100000,
"Hi\nHo",
100,
]
norm = mpcolors.BoundaryNorm(clevs, cmap.N)
mp.draw_colorbar(
clevs, cmap, norm, clevlabels=clevlabels, extend="neither"
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_colorbar3():
"""draw another colorbar"""
mp = MapPlot(sector="iowa", nocaption=True)
cmap = copy.copy(plot.maue())
cmap.set_over("black")
clevs = [0, 100, 250, 500, 1000, 2000, 20000]
norm = mpcolors.BoundaryNorm(clevs, cmap.N)
mp.draw_colorbar(
clevs, cmap, norm, title="Erosion $kg/m^2$", spacing="uniform"
)
return mp.fig
# as of writing, python2.7 failure tolerance of 1.45
@pytest.mark.mpl_image_compare(tolerance=1.6)
def test_drawugcs():
"""test drawing of UGCS"""
mp = MapPlot(
sector="conus", title="Counties, 3 filled in Iowa", nocaption=True
)
mp.fill_ugcs({"IAC001": 10, "IAC003": 20, "IAC005": 30})
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=1.0)
def test_drawugcs2():
"""3 filled zones"""
mp = MapPlot(
sector="iowa",
title="Zones, 3 filled in Iowa, label",
subtitle="test_drawugcs2",
nocaption=True,
)
mydict = {"IAZ001": 10, "IAZ003": 20, "IAZ005": 30}
mp.fill_ugcs(mydict, ilabel=True, lblformat="%.0f")
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_lwx_cities():
"""Test that cities plot in a reasonable spot."""
mp = MapPlot(
sector="cwa",
cwa="LWX",
title="DC should be where DC is",
nocaption=True,
)
mp.drawcities()
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_ugcs_lwx():
"""Ensure that we can plot some counties in LWX CWA."""
mp = MapPlot(
sector="cwa",
cwa="LWX",
title="Two Maryland Counties",
subtitle="test_ugcs_lwx",
nocaption=True,
)
labels = {"MDC003": "MDC003", "MDC033": "MDC033"}
mp.fill_ugcs(
{"MDC003": 1, "MDC033": 40},
bins=list(range(0, 101, 10)),
labels=labels,
ilabel=True,
extend="min",
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_ugcs_lwx_zones():
"""Ensure that we can plot some zones in LWX CWA."""
mp = MapPlot(
sector="cwa",
cwa="LWX",
title="Two Maryland zones (MDZ001, MDZ008) xfered to LWX",
subtitle="test_ugcs_lwx",
nocaption=True,
)
labels = {"MDZ001": "MDZ001", "MDZ008": "MDZ008"}
mp.fill_ugcs(
{"MDZ001": 1, "MDZ008": 40},
bins=list(range(0, 101, 10)),
labels=labels,
ilabel=True,
extend="min",
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN + 1) # lots of bars on plot
def test_ugcs_withcustomlabels():
"""Fill ugcs with provided labels."""
mp = MapPlot(
sector="iowa",
title="Zones, 3 filled in Iowa, custom label",
subtitle="test_drawugcs2",
nocaption=True,
)
labels = {"IAZ001": "IAZ001", "IAZ003": "IAZ003"}
bins = list(range(24))
clevlabels = [""] * 24
clevlabels[0] = "mid"
clevlabels[12] = "noon"
mp.fill_ugcs(
{"IAZ001": 1, "IAZ003": 4, "IAZ005": 12},
bins=bins,
labels=labels,
ilabel=True,
clevstride=12,
clevlabels=clevlabels,
lblformat="%.0f",
extend="neither",
)
return mp.fig
@pytest.mark.mpl_image_compare(tolerance=PAIN)
def test_states():
"""Exercise the state plotting routines"""
mp = MapPlot(sector="state", state="CA", nocaption=True)
assert mp.state == "CA"
return mp.fig
# high tolerance due to python2.7 issue I don't wish to deal with now.
@pytest.mark.mpl_image_compare(tolerance=4.0)
def test_cwa_with_custom_masking():
"""Exercise the cwa plotting routines"""
mp = MapPlot(sector="cwa", cwa="DLH", nocaption=True)
mp.contourf(
np.arange(-94, -89),
np.arange(45, 50),
| np.arange(5) | numpy.arange |
"""
Metabolite annotation
"""
import numpy as np
from medipy.io import load
import os
import fonc_util as f
import gen_in
import lawij
import ser_xml
import xml_gen
import locale
def ann(root,wa7da):
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, "C")
tabp,tabc=gen_in.ini1(root)
boc=-1
lolo= load(os.path.join(root,"2rr"))
H=lolo.data
D=H[0,:,:]
H=D
#print corps.interquartile_range(H)
#print np.size(H,0),np.size(H,1)
list_exp=ser_xml.exceptions(root)
raxep=[]
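    # The loop below groups consecutive rows of tabp that belong to the same
    # metabolite: names are assumed to share a prefix before the first '_'
    # (e.g. hypothetical names 'Lactate_1', 'Lactate_2' are grouped under 'Lactate').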
while boc<len(tabp)-1:
boc+=1
a=str(tabp[boc][2])
#print a,boc
newn=''
for j in range(len(a)):
if (a[j]=='_')==1:
break
newn+=a[j]
r1=[]
r1.append((boc))
for jj in range(boc+1,len(tabp)):
nomn=str(tabp[jj][2])
#print nomn[0:j]
try:
if nomn[0:j+1]==newn+'_':
r1.append((jj))
#print 'ok'
else:
break
except:
break
boc=boc+len(r1)-1
#print tabp[r1[0]:r1[len(r1)-1]+1]
#raw_input()
#print len(r1)
nt=tabp[r1[0]:r1[len(r1)-1]+1]
#print nt
#print 'Start'
test=[]
testc3=[]
con=0
ampref=[]
amp=[]
newrt=[]
#print newn
ham=0
ed5il=0
for jj in range(len(nt)):
#print newn
#print jj
#print nt[jj][0],nt[jj][1]
#print nt[jj][0],nt[jj][1]
r,indi=lawij.alig(nt[jj][0],nt[jj][1],H,nt[jj][3],wa7da)
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1],nt[jj][7]
if nt[jj][7]=='n':
ed5il=1
#raw_input()
if r[0]==100 and nt[jj][7]=='y':
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1],nt[jj][7]
ham=1
break
if indi==0 :
con=con+1
if np.abs(r[0])==4 or np.abs(r[1])==6:
testc3.append((1))
else:
testc3.append((0))
test.append((0))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
#print congl
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#print ch
#plt.show()
#print nt[jj][3]
ampref.append(float(nt[jj][3])*1.)
amp.append( H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
#print str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])])
newrt.append((nr[0],nr[1],nt[jj][2],str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]),float(nt[jj][3]),chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
#print newrt
else:
if r[0]<100 :
if np.abs(r[0])==3 or np.abs(r[1])==5:
testc3.append((1))
else:
testc3.append((0))
con=con+1
test.append((1))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
ampref.append(float(nt[jj][3])*1.)
amp.append(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
newrt.append((nr[0],nr[1],nt[jj][2],H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.,float(nt[jj][3]),chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
else:
test.append((2))
testc3.append((2))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[nt[jj][0]-7:(nt[jj][0])+8,(nt[jj][1])-7:(nt[jj][1])+8]*1.
ampref.append(float(nt[jj][3])*1.)
amp.append(0)
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#raw_input()
newrt.append((0,0,0,0,0,0,0,0,0,0,0,0,0))
#raw_input()
#print newn
#print ampref
#print amp,newn,testc3
#print nt
#print newrt
#raw_input()
#amptest=np.nonzero(amp>0)
        o=np.nonzero(np.array(testc3)==0)
vamp=np.array(amp)
ivamref=vamp[o]
o=np.nonzero(ivamref>210000)
#print newn,'test'
#print 'ham',ham
#if (float(len(o[0]))*1.000000001)/float(len(ivamref)*1.00000001)>0.4 or f.exp_hand(list_exp,newn)==1 or ham==0:
if ham==0:
#print 'd5aal'
if len(nt)==con:
if len(nt)==1:
raxep.append(newrt[0])
#print 'accepted'
else:
artestc3=np.array(testc3)
olc3=np.nonzero(artestc3==1)
if np.size(olc3)>0:
if f.exp_ync(amp,ampref,testc3)==1:
#print 'ouffffff'
artest=np.array(test)
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
if np.size(ol)>0:
#print 'accepted with some conditions'
#print f.exp_ync(amp,ampref,testc3)
#if f.exp_ync(amp,ampref,testc3)==0:
#print 'llllllaaaaaaaaa'
if f.exp_yn(amp,ampref,test)==1:
for no in range(len(newrt)):
raxep.append(newrt[no])
elif f.exp_hand(list_exp,newn)==1:
artest=np.array(test)
rnt=np.array(newrt)
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
vo=rnt[o]
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
else:
#print f.exp_ync(amp,ampref,testc3)
for no in range(len(nt)):
raxep.append(newrt[no])
#print 'accepted'
else:
#print 'ouuuuut'
#print f.exp_hand(list_exp,newn)
if f.exp_hand(list_exp,newn)==1 :
artest=np.array(test)
artestc3=np.array(testc3)
rnt=np.array(newrt)
#print nt
#raw_input()
ol=np.nonzero(artest==1)
condlist=[artest==0,artestc3==0]
g=condlist[0]*condlist[1]
#print g,artest,artestc3
o=np.nonzero(artest==0)
#print '%%%hhh%%%%%%'
#print rnt
#print test
vo=rnt[g]
#print vo,'%%%hhh%%%%%%'
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
else:
#print f.exp_ync(amp,ampref,testc3)
for no in range(len(nt)):
raxep.append(newrt[no])
#print 'accepted'
else:
if ham==0:
if f.exp_hand(list_exp,newn)==1 or ed5il==1:
artest=np.array(test)
#print newrt
rnt=xml_gen.cell2tab8(newrt)
#print nt
#raw_input()
ol=np.nonzero(artest==1)
o=np.nonzero(artest==0)
#print '%%%hhh%%%%%%'
rnt=np.array(rnt)
#print test
vo=rnt[o]
#print vo
#print len(vo)
#raw_input()
for no in range(len(vo)):
#print '%%%%%%%%%'
zi=lawij.mod(vo[no])
#print zi
#print artest
#raw_input()
raxep.append((zi))
#print 'may be...'
#else:
#print 'refused without discussion'
#print test
#print 'DONE'
#print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
#print ' '
#raw_input()
locale.setlocale(locale.LC_ALL, old_locale)
return raxep,tabc
#for lll in range(len(raxep)):
#print raxep[lll]
def rec(root):
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, "C")
#root='/home/miv/belghith/Bureau/KAROM/Akram/nmr/RP/5/pdata/1'
tabp,tabc=gen_in.ini2(root)
boc=-1
lolo= load(os.path.join(root,"2rr"))
H=lolo.data
D=H[0,:,:]
H=D
#print corps.interquartile_range(H)
#print np.size(H,0),np.size(H,1)
list_exp=ser_xml.exceptions(root)
raxep=[]
while boc<len(tabp)-1:
boc+=1
a=str(tabp[boc][2])
#print a,boc
newn=''
for j in range(len(a)):
if (a[j]=='_')==1:
break
newn+=a[j]
r1=[]
r1.append((boc))
for jj in range(boc+1,len(tabp)):
nomn=str(tabp[jj][2])
#print nomn[0:j]
try:
if nomn[0:j+1]==newn+'_':
r1.append((jj))
#print 'ok'
else:
break
except:
break
boc=boc+len(r1)-1
#print tabp[r1[0]:r1[len(r1)-1]+1]
#raw_input()
#print len(r1)
nt=tabp[r1[0]:r1[len(r1)-1]+1]
#print nt
#print 'Start'
test=[]
testc3=[]
con=0
ampref=[]
amp=[]
newrt=[]
#print newn
for jj in range(len(nt)):
#print newn
#print jj
#print nt[jj][0],nt[jj][1]
#print nt[jj][0],nt[jj][1]
r,indi=lawij.aligrec(nt[jj][0],nt[jj][1],H,nt[jj][3])
#print r,indi,nt[jj][2],nt[jj][0]-r[0],nt[jj][1]-r[1]
#raw_input()
if indi==0 :
con=con+1
#print 'ok'
testc3.append((0))
test.append((0))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
#print congl
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#print ch
#plt.show()
ampref.append(float(nt[jj][3])*1.)
amp.append( H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
#print str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])])
newrt.append((nr[0],nr[1],nt[jj][2],str(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]),chg[0],chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
#print newrt
else:
if r[0]<100 :
if np.abs(r[0])==3 or np.abs(r[1])==5:
testc3.append((1))
else:
testc3.append((0))
con=con+1
test.append((1))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[(nt[jj][0]-r[0])-3:(nt[jj][0]-r[0])+4,(nt[jj][1]-r[1])-3:(nt[jj][1]-r[1])+4]*1.
nr=f.subpix2(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
zayneb=H[(nt[jj][0]-r[0])-9:(nt[jj][0]-r[0])+10,(nt[jj][1]-r[1])-9:(nt[jj][1]-r[1])+10]*1.
#nr=f.subpix(zayneb,(nt[jj][0]-r[0]),(nt[jj][1]-r[1]))
#print (nt[jj][0]-r[0]),(nt[jj][1]-r[1]),nr
chl=f.dephcl(zayneb)
chg=f.dephcg(zayneb)
#ch,congl=f.dephc(zayneb)
ch,congl=f.dephcaprio(zayneb,float(nt[jj][4]),float(nt[jj][5]),nt[jj][6])
ampref.append(float(nt[jj][3])*1.)
amp.append(H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.)
newrt.append((nr[0],nr[1],nt[jj][2],H[(nt[jj][0]-r[0]),(nt[jj][1]-r[1])]*1.,chg[0],chg[1],chl[0],chl[1],ch[0],ch[1],congl,r[0],r[1]))
else:
test.append((2))
testc3.append((2))
#fig = plt.figure()
#ax = fig.add_subplot(111)
zayneb=H[nt[jj][0]-7:(nt[jj][0])+8,(nt[jj][1])-7:(nt[jj][1])+8]*1.
ampref.append(float(nt[jj][3])*1.)
amp.append(0)
#cax = ax.imshow(zayneb, interpolation='nearest')
#plt.show()
#raw_input()
newrt.append((0,0,0,0,0,0,0,0,0,0,0,0,0))
#raw_input()
#print newn
#print ampref
#print amp,newn,testc3
#print nt
#print newrt
#raw_input()
#amptest=np.nonzero(amp>0)
        o=np.nonzero(np.array(testc3)==0)
vamp=np.array(amp)
ivamref=vamp[o]
o=np.nonzero(ivamref>100)
#print newn,'test'
if (float(len(o[0]))*1.000000001)/float(len(ivamref)*1.00000001)>0.4 or f.exp_hand(list_exp,newn)==1:
#print newn,'d5aal'
if len(nt)==con:
if len(nt)==1:
raxep.append(newrt[0])
#print 'accepted'
else:
artestc3=np.array(testc3)
olc3=np.nonzero(artestc3==1)
if | np.size(olc3) | numpy.size |
"""
imageset.py
Written by <NAME>, 2019
Apache License
Implements a dataset for images of a single resolution
"""
import math
import os
import os.path as p
import cv2
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import config.config as config
def createDataloader(cfg: config.Config,
is_train_dataloader: bool = False,
is_validation_dataloader: bool = False,
is_test_dataloader: bool = False,
downsampler_mode="bicubic") -> data.DataLoader:
cfg_d = None
tf = []
if is_train_dataloader:
cfg_d = cfg.dataset_train
if cfg_d.data_aug_flip:
tf.append(transforms.ToPILImage())
tf.append(transforms.RandomHorizontalFlip())
tf.append(transforms.RandomVerticalFlip())
if cfg_d.data_aug_rot:
pass
# Okay, so this is done as a rotation somewhere between -90 and +90 degrees
            # Which is not ideal as I'd like 0, 90, 180, or 270 deg exactly, nothing in between
#tf.append(transforms.RandomRotation(90))
elif is_validation_dataloader:
cfg_d = cfg.dataset_val
elif is_test_dataloader:
cfg_d = cfg.dataset_test
else:
raise ValueError("must specify if dataloader is for train/valid/test")
tf.append(transforms.ToTensor())
dataset = None
do_drop_last = True
tf = transforms.Compose(tf)
if cfg_d.mode == "downsampler":
dataset = DownsamplerImageset(cfg, cfg_d, tf, downsampling=downsampler_mode)
elif cfg_d.mode == "lr":
dataset = LRImageset(cfg, cfg_d, tf)
elif cfg_d.mode == "hrlr":
dataset = HRLRImageset(cfg, cfg_d, tf)
#if not is_train_dataloader:
# do_drop_last = False
dataloader = data.DataLoader( dataset,
batch_size=cfg_d.batch_size,
shuffle=cfg_d.data_aug_shuffle,
num_workers=cfg_d.n_workers,
drop_last=do_drop_last,
pin_memory=True)
return dataloader
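# Illustrative usage of createDataloader (assumes a populated config.Config instance):
#   train_loader = createDataloader(cfg, is_train_dataloader=True)
#   for batch in train_loader:
#       lr, hr = batch["LR"], batch["HR"]   # keys as returned by the downsampler dataset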
class DownsamplerImageset(data.Dataset):
"""
DownsamplerImageset
Takes one HR dataset, and yields (LR, HR) images from __getitem__ (in that order)
    LR images are created by downsampling, either bicubic or nearest neighbour
    (the nearest-neighbour option follows the SRDGAN paper).
    Also adds gaussian noise to LR images if the cfg flag is set.
args:
cfg: config for the run
cfg_d: config of the dataset you're using (passed since cfg may contain both train, val, test datasets)
"""
def __init__(self, cfg: config.Config, cfg_d: config.DatasetConfig, transforms,
downsampling: str = "bicubic"):
super(DownsamplerImageset, self).__init__()
self.cfg = cfg
self.cfg_d = cfg_d
self.transforms = transforms
if downsampling == "bicubic":
self.interp = cv2.INTER_CUBIC
elif downsampling == "nearest":
self.interp = cv2.INTER_NEAREST
else:
raise ValueError(f"interpolation {downsampling} har not been implemented")
self.hr_img_paths = [ p.join(cfg_d.dataroot_hr, f) for f in os.listdir( cfg_d.dataroot_hr) \
if p.isfile( p.join(cfg_d.dataroot_hr, f)) ]
if len(self.hr_img_paths) == 0:
raise ValueError(f"no image files in {cfg_d.dataroot_hr}")
np.random.seed()
def __getitem__(self, idx):
hr_sz = self.cfg_d.hr_img_size
lr_sz = hr_sz // self.cfg.scale
hr_path = self.hr_img_paths[idx]
hr_name = os.path.basename(hr_path)
hr_name = os.path.splitext(hr_name)[0]
hr = cv2.imread(hr_path,cv2.IMREAD_UNCHANGED)
# randomly crop if the image is larger than the target dataset h,w
h,w,c = hr.shape
if hr_sz < h and hr_sz < w:
hr = randomCrop(hr, hr_sz)
# training data
lr = cv2.resize(hr, (lr_sz, lr_sz), interpolation=self.interp)
if self.cfg_d.data_aug_gaussian_noise:
# std dev in cfg is for normalized [0,1] image repr, and cv2 image is uint8 [0,255]
var_normalized = self.cfg_d.gaussian_stddev ** 2
var_unnormalized = 255 * 255 * var_normalized
stddev_unnormalized = math.sqrt(var_unnormalized)
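            # e.g. gaussian_stddev=0.1 on the [0,1] scale corresponds to a stddev of 25.5 on the uint8 scale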
lr = lr + np.random.normal(loc=0, scale=stddev_unnormalized, size=lr.shape)
lr[lr < 0] = 0
lr[lr > 255] = 255
# ToTensor() in transforms normalizes the images to [0,1] as long as they are uint8
lr = lr.astype(np.uint8)
hr = hr.astype(np.uint8)
if self.transforms:
hr = self.transforms(hr)
lr = self.transforms(lr)
return {"LR": lr, "HR": hr, "hr_name": hr_name}
return None
def __len__(self):
return len(self.hr_img_paths)
class LRImageset(data.Dataset):
"""
Imageset
args:
cfg: config for the run
cfg_d: config of the dataset you're using (passed since cfg may contain both train, val, test datasets)
"""
def __init__(self, cfg: config.Config, cfg_d: config.DatasetConfig, transforms):
super(LRImageset, self).__init__()
self.cfg = cfg
self.cfg_d = cfg_d
self.transforms = transforms
self.lr_img_paths = [ p.join(cfg_d.dataroot_lr, f) for f in os.listdir( cfg_d.dataroot_lr) \
if p.isfile( p.join(cfg_d.dataroot_lr, f)) and not ".DS_Store" in f ]
if len(self.lr_img_paths) == 0:
raise ValueError(f"no image files in {cfg_d.dataroot_lr}")
| np.random.seed() | numpy.random.seed |
import numpy as np
from ordered_set import OrderedSet
from text_selection.common.ngram_extractor import get_count_array
def test_component():
result = get_count_array(
ngram_nrs=(1, 2, 3, 1, 3, 3, 4),
target_symbols_ordered=OrderedSet((1, 2, 3)),
)
np.testing.assert_array_equal(result, np.array([2, 1, 3]))
def test_empty_3_targets__returns_3_zero():
result = get_count_array(
ngram_nrs=tuple(),
target_symbols_ordered=OrderedSet((1, 2, 3)),
)
np.testing.assert_array_equal(result, np.array([0, 0, 0]))
def test_empty_0_targets__returns_empty():
result = get_count_array(
ngram_nrs=tuple(),
target_symbols_ordered=OrderedSet(),
)
np.testing.assert_array_equal(result, np.array([]))
def test_2_different_numbers_1_target__returns_1_target_count():
result = get_count_array(
ngram_nrs=(1, 2),
target_symbols_ordered=OrderedSet((2,)),
)
np.testing.assert_array_equal(result, | np.array([1]) | numpy.array |
# Practice sites
#https://www.machinelearningplus.com/python/101-numpy-exercises-python/
#http://www.cs.umd.edu/~nayeem/courses/MSML605/files/04_Lec4_List_Numpy.pdf
#https://www.gormanalysis.com/blog/python-numpy-for-your-grandma/
#https://nickmccullum.com/advanced-python/numpy-indexing-assignment/
##? 1. Import numpy as np and see the version
# Difficulty Level: L1
# Q. Import numpy as np and print the version number.
import numpy as np
print(np.__version__)
##? 2. How to create a 1D array?
# Difficulty Level: L1
# Q. Create a 1D array of numbers from 0 to 9
arr = np.arange(10)
arr
##? 3. How to create a boolean array?
# Difficulty Level: L1
# Q. Create a 3×3 numpy array of all True’s
arr = np.full((3,3), True, dtype=bool)
arr
##? 4. How to extract items that satisfy a given condition from 1D array?
# Difficulty Level: L1
# Q. Extract all odd numbers from arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]
##? 5. How to replace items that satisfy a condition with another value in numpy array?
# Difficulty Level: L1
# Q. Replace all odd numbers in arr with -1
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1] = -1
arr
##? 6. How to replace items that satisfy a condition without affecting the original array?
# Difficulty Level: L2
# Q. Replace all odd numbers in arr with -1 without changing arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#1 np.where
out = np.where(arr % 2 == 1, -1, arr)
out
#2 list comp
out = np.array([-1 if x % 2 == 1 else x for x in arr])
out
##? 7. How to reshape an array?
# Difficulty Level: L1
# Q. Convert a 1D array to a 2D array with 2 rows
arr = np.arange(10)
arr.reshape(2, -1)
# Setting the second dimension to -1 lets numpy infer the number of columns.
# Could do the same with
arr.reshape(2, 5)
##? 8. How to stack two arrays vertically?
# Difficulty Level: L2
# Q. Stack arrays a and b vertically
a = np.arange(10).reshape(2, -1)
b = | np.repeat(1, 10) | numpy.repeat |
'''
This is based on cnn35_64. This is after the first pilot.
Changes:
-don't filter out # in the tokenizer, tokenize both together. or save tokenizer https://stackoverflow.com/questions/45735070/keras-text-preprocessing-saving-tokenizer-object-to-file-for-scoring
-use 'number' w2v as representation for any digit
-shuffling problem should be checked before advancing: plot a random selection of conv1 layers. They should all be 14 or 15.
-tune hyperparameters.
'''
from sklearn.linear_model import LogisticRegression
import time
import datetime
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
import pickle
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from keras.utils import np_utils
from numpy.random import seed
seed(123)
import os
import data_helpers
import config
# Parameters
# =====================================================================
categories = config.categories
verbose = config.verbose
save_checkpoints = config.save_checkpoints
plot_RSA = config.plot_RSA
if config.local_or_cluster:
categories = categories[:3]
epochs=1
verbose=1
else:
epochs = config.epochs # it will probably need more.
# epochs = 6
#
save_checkpoints=False
print('running for '+str(epochs)+' epochs')
if config.local_or_cluster:
# directory_name = 'svm_gs0_test'
directory_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
file_name = 'logreg'
else:
# directory_name = 'svm_gs0_test'
directory_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
file_name = os.path.basename(__file__)
print('running '+directory_name+' '+file_name)
categories = ['University','Decoration','MilitaryConflict','MilitaryPerson','Politician', 'Monarch']
Xtrain, Ytrain = data_helpers.load_all_data(config.train_path,config.validation_path, categories, shuffle=False) # I changed this so it combines train and test
Xtest, Ytest = data_helpers.load_data(config.test_path, categories)
# Xtest_raw, Ytest_raw = data_helpers.load_data_raw(config.test_path, categories)
# X, y= data_helpers.load_whole_dataset(config.train_path, config.validation_path, config.test_path,categories,load_all=True, shuffle=False,one_hot=False)
# Remove Stopwords
# with open('stopwords-it2.txt', 'r') as f:
# sw = f.readlines()
#
# italian_stop_words = [n.replace('\n','') for n in sw]
#
# Xtrain2 = []
# for sentence in Xtrain:
# sentence_no_stopwords = ' '.join([word for word in sentence.split() if word not in italian_stop_words])
# Xtrain2.append(sentence_no_stopwords)
#
# Xtest2 = []
# for sentence in Xtest:
# sentence_no_stopwords = ' '.join([word for word in sentence.split() if word not in italian_stop_words])
# Xtest2.append(sentence_no_stopwords)
# #
# # ## Encode Ytrain
# # # =====================================================================================
# # one hot encode and integer encode
# # Ytrain_encoded = np_utils.to_categorical(Ytrain)
# # Ytrain_integer = np.array(Ytrain)
# # Ytest_encoded = np_utils.to_categorical(Ytest)
# # Ytest_integer = np.array(Ytest)
# #
# # # Zero pad (encode) Xtrain and Xtest
# # # ==================================================================================================
# tokenizer = Tokenizer(filters='') #depending on word embedding, set lower=False.
# tokenizer.fit_on_texts(np.append(np.array(Xtrain2), np.array(Xtest2)))
# sequences = tokenizer.texts_to_sequences(Xtrain2)
# sequences2 = tokenizer.texts_to_sequences(Xtest2)
# # sequences3 = tokenizer.texts_to_sequences(X)
# word_index = tokenizer.word_index
# print('Found %s unique tokens.' % len(word_index))
#
# # Xtrain_encoded = pad_sequences(sequences, maxlen=sequence_length, padding='post')
# # Xtest_encoded = pad_sequences(sequences2, maxlen=sequence_length, padding='post')
# # X_encoded = pad_sequences(sequences3, maxlen=sequence_length, padding='post')
#
# def load_obj(path_and_filename):
# with open(path_and_filename, 'rb') as f:
# return pickle.load(f)
#
# embeddings_index = load_obj(config.word_embeddings_path+'/gensim_it_w2v.pkl') #dictionary embeddings_index.get('è') returns word embedding
#
#
#
# number = np.random.normal(0., 0.23, 300)
#
# embedding_dim = 300
#
# embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim)) # this will be all embeddings for my vocabulary
#
# for word, i in word_index.items():
# embedding_vector = embeddings_index.get(word)
# if embedding_vector is not None:
# # words not found in embedding index will be all-zeros.
# embedding_matrix[i] = embedding_vector
# elif "#" in word:
# embedding_matrix[i] = number
# Create average sentence vectors
# Xtrain = []
# for sequence in sequences:
# all_word_embeddings = []
# for word_id in sequence:
# embedding = embedding_matrix[word_id]
# if np.sum(embedding)!=0.0:
# all_word_embeddings.append(embedding)
# mean_sentence_vector = list(pd.DataFrame(all_word_embeddings).mean())
# if len(mean_sentence_vector)==0:
# mean_sentence_vector = list(np.random.normal(0., 0.23, 300))
# Xtrain.append(mean_sentence_vector)
#
# Xtest = []
# for sequence in sequences2:
# all_word_embeddings = []
# for word_id in sequence:
# embedding = embedding_matrix[word_id]
# all_word_embeddings.append(embedding)
# mean_sentence_vector = list(pd.DataFrame(all_word_embeddings).mean())
# if len(mean_sentence_vector)==0:
# mean_sentence_vector = list(np.random.normal(0., 0.23, 300))
# Xtest.append(mean_sentence_vector)
path_to_dir = os.path.join(config.save_to, directory_name + '/')
try: os.makedirs(path_to_dir)
except: pass
print('directory_name: '+directory_name)
print('path_to_dir: '+path_to_dir)
# Xtrain = np.array(Xtrain)
# Xtest = np.array(Xtest)
#
# np.save(config.word_embeddings_path+'Xtrain_w2v_mean', Xtrain)
# np.save(config.word_embeddings_path+'Xtest_w2v_mean', Xtest)
if config.local_or_cluster:
amount_of_categories = len(categories)
Xtrain = np.load(config.word_embeddings_path + 'Xtrain_w2v_mean.npy')
Xtrain = [list(n) for n in Xtrain][:amount_of_categories*4800]
Xtest = np.load(config.word_embeddings_path + 'Xtest_w2v_mean.npy')
Xtest = [list(n) for n in Xtest][:amount_of_categories*1200]
else:
Xtrain = np.load(config.word_embeddings_path+'Xtrain_w2v_mean.npy')
Xtrain = [list(n) for n in Xtrain]
Xtest = np.load(config.word_embeddings_path+'Xtest_w2v_mean.npy')
Xtest = [list(n) for n in Xtest]
if len(categories)<64:
category_ids = [config.categories.index(n) for n in categories]
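    # the precomputed embedding arrays are assumed to be ordered by category,
    # with 4800 training and 1200 test sentence vectors per category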
# choose embeddings from the right sentences in Xtrain
sentence_index = []
for i in category_ids:
category_starts = i*4800
category_ends =category_starts +4800
sentence_index.append(list(range(category_starts,category_ends)))
sentence_index = [n for i in sentence_index for n in i]
Xtrain = list(np.array(Xtrain)[sentence_index])
# choose embeddings from the right sentences in Xtest
sentence_index1 = []
for i in category_ids:
category_starts = i * 1200
category_ends = category_starts + 1200
sentence_index1.append(list(range(category_starts, category_ends)))
sentence_index1 = [n for i in sentence_index1 for n in i]
Xtest= list(np.array(Xtest)[sentence_index1])
# Model
## ======================================================================================================
print("Creating Model...")
with open(path_to_dir + 'log.txt', 'a+') as f:
f.write(file_name + '\n')
f.write(directory_name+ '\n\n')
# Cs = [0.01, 0.1, 1, 10]
# kernels = ['linear', 'rbf']
# kernels = ['linear']
# max_features_all = [100000,None]
# stop_words = [italian_stop_words, None]
# Final
# Top1 and Top5 accuracy on test set.
# clf = LinearSVC(verbose=verbose)
if config.local_or_cluster:
Xtrain_toy = []
for i in range(0,len(Xtrain), 100):
Xtrain_toy.append(Xtrain[i])
Ytrain_toy = []
for i in range(0, len(Ytrain), 100):
Ytrain_toy.append(Ytrain[i])
Xtest_toy = []
for i in range(0,len(Xtest), 100):
Xtest_toy.append(Xtest[i])
Ytest_toy = []
for i in range(0, len(Ytest), 100):
Ytest_toy.append(Ytest[i])
start = time.time()
clf = LogisticRegression(verbose=verbose,n_jobs=-1)
clf.fit(Xtrain, Ytrain)
probs = clf.predict_proba(Xtest)
best_1 = np.argsort(probs, axis=1)
best_1 = [n[-1] for n in best_1]
top1_accuracy = np.round(np.sum(np.array(Ytest)==np.array(best_1))/len(Ytest),4)
best_2 = np.argsort(probs, axis=1)
best_2 = [n[-2:] for n in best_2]
top2_acc = []
for i in range(len(best_2)):
if Ytest[i] in best_2[i]:
top2_acc.append(1)
else:
top2_acc.append(0)
top2_accuracy = np.round(np.sum(top2_acc)/len(Ytest),4)
best_3 = np.argsort(probs, axis=1)
best_3 = [n[-3:] for n in best_3]
top3_acc = []
for i in range(len(best_3)):
if Ytest[i] in best_3[i]:
top3_acc.append(1)
else:
top3_acc.append(0)
top3_accuracy = np.round(np.sum(top3_acc)/len(Ytest),4)
#
# best_5 = np.argsort(probs, axis=1)
# best_5 = [n[-5:] for n in best_5]
# top5_acc = []
# for i in range(len(best_5)):
# if Ytest[i] in best_5[i]:
# top5_acc.append(1)
# else:
# top5_acc.append(0)
#
# top5_accuracy = np.round(np.sum(top5_acc)/len(Ytest),4)
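# The repeated top-k blocks above could be expressed with one helper.
# Sketch only (not called in this script); it mirrors the pattern used for top-1/2/3:
def top_k_accuracy(probs, y_true, k):
    """Fraction of samples whose true label is among the k highest-probability classes."""
    top_k_pred = np.argsort(probs, axis=1)[:, -k:]
    hits = [1 if y_true[i] in top_k_pred[i] else 0 for i in range(len(y_true))]
    return np.round(np.sum(hits) / len(y_true), 4)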
# Ypredict_encoded = np_utils.to_categorical(Ypredict.argmax(axis=-1))
# Ypredict_integer = Ypredict.argmax(axis=-1)
# Save outputs
np.save(path_to_dir + 'Ypredict_integer', best_1)
np.save(path_to_dir + 'accuracy_integer_top3', top3_acc)
# np.save(path_to_dir + 'accuracy_integer_top5', top5_acc)
np.save(path_to_dir + 'probability_output', probs)
| np.save(path_to_dir + 'log_reg_coefficients', clf.coef_) | numpy.save |
import requests
import re
import time
import random
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
class PaperSpider:
def __init__(self, paper_title_list, need_other_cited=True, need_cite_format=True):
self.google_domain = 'https://scholar.google.com.hk/'
self.paper_title_list = paper_title_list
        # Replace the cookie in the headers below with a valid cookie
self.headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6',
'cache-control': 'max-age=0',
'cookie': 'NID=204=PvOUsefBSG5QDsh7UadlmVOsRf7JllTq4z8PKvzTZ9yV9yMeZPsUAZgEe6IzPbeLpX9SU_z-QnCKAAiCt8lMKr3XDbHJHyIOyNfFKFlxEI0QaGqhwBUZokwcgrPwhHHkvPobSPfgORBTuMtT4UpvAFpmJoCd2I_CtmmIOEGrtEQ; GSP=A=Hul7MQ:CPTS=1593501177:LM=1593501177:S=7ePiEbiD7wWb4BcR',
'referer': 'https://scholar.google.com.hk/scholar?hl=zh-CN&as_sdt=1%2C5&as_vis=1&q=A+Min-max+Cut+for+Graph+Partition+and+Data+Clustering&btnG=',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
}
self.need_other_cited = need_other_cited
self.need_cite_format = need_cite_format
def run(self):
all_info_list = []
tmp_info_list = []
count = 0
columns_info = ['paper', 'title_for_check', 'authors', 'year', 'conference or journal', 'cited_num']
if self.need_other_cited:
columns_info.append('other_cited_num')
if self.need_cite_format:
columns_info.extend(['GB/T 7714', 'MLA', 'APA'])
for paper_title in self.paper_title_list:
paper_info = self.crawl_single_paper(paper_title)
tmp_info_list.append(paper_info)
all_info_list.append(paper_info)
print('paper \"{}\" finish\n'.format(paper_title))
count += 1
# save result every 10 paper
if count % 10 == 0:
df = pd.DataFrame(tmp_info_list, columns=columns_info)
df.to_csv('result{}.csv'.format(count), encoding="utf_8_sig")
tmp_info_list.clear()
df = pd.DataFrame(all_info_list, columns=columns_info)
        df.to_csv('all_result.csv', encoding="utf_8_sig")
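    # Illustrative usage (the title below is only an example):
    #   spider = PaperSpider(['A Min-max Cut for Graph Partition and Data Clustering'])
    #   spider.run()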
def crawl_single_paper(self, paper_title):
paper = []
print('crawling paper \"{}\"'.format(paper_title))
response_text = self.send_request('https://scholar.google.com.hk/scholar',
{'hl': 'zh-CN',
'as_sdt': '0,5',
'btnG': '',
'q': paper_title}, self.headers)
# the paper title crawled from google, you can use it to check if this is the paper you want
title_for_check = ''
# google scholar author id
author_ids = []
# number of citations
cited_num = 0
        # number of other citations; note that some authors may not have an id,
        # so this number may be larger than the true number of other citations
other_cited_num = 0
# cite format
GBT = ''
MLA = ''
APA = ''
possible_author_name = ''
soup = BeautifulSoup(response_text, 'html.parser')
paper_divs = soup.find_all('div', {'class': 'gs_r gs_or gs_scl'})
if len(paper_divs) > 0:
# we assume first result is we want
paper_div = paper_divs[0]
if self.need_cite_format:
paper_id = paper_div['data-cid']
GBT, MLA, APA = self.crawl_cite_format(paper_id)
# get it's title
h3 = paper_div.find('h3', {'class': 'gs_rt'})
title_for_check = h3.a.get_text()
title_for_check = str(title_for_check).replace('<b>', '').replace('</b>', '')
# get it's author ids
author_div = paper_div.find('div', {'class': 'gs_a'})
possible_author_name = author_div.get_text()
for a in author_div.find_all('a'):
searchObj = re.search(r'user=(.*)&hl=', a['href'], re.M | re.I)
author_id = searchObj.group(1)
author_ids.append(author_id)
# get it's number of citations
if self.need_other_cited:
cite_div = paper_div.find('div', {'class': 'gs_ri'}).find('div', {'class': 'gs_fl'})
cite_a = cite_div.find_all('a')[2]
if str(cite_a.string).find('被引用次数') != -1:
cited_num = int(str(cite_a.string).replace('被引用次数:', ''))
cited_url = cite_a['href']
if len(cited_url) != 0:
real_cited_num, other_cited_num = self.crawl_cited_papers(cited_url, cited_num, author_ids)
# get it's detail information from https://dblp.uni-trier.de/
author_name_list, publish_info, year_info, page_info = self.crawl_detail(paper_title)
author_info = ''
for authur_name in author_name_list:
author_info += authur_name + ','
author_info = author_info[:-1]
paper.append(paper_title)
paper.append(title_for_check)
if len(author_info) == 0:
paper.append(possible_author_name)
else:
paper.append(author_info)
paper.append(year_info)
paper.append(publish_info + ',' + page_info)
paper.append(cited_num)
if self.need_other_cited:
paper.append(other_cited_num)
if self.need_cite_format:
paper.append(GBT)
paper.append(MLA)
paper.append(APA)
return paper
def crawl_cited_papers(self, cited_url, cited_num, author_ids):
author_ids = | np.array(author_ids) | numpy.array |
import os
import tarfile
from Bio import SeqIO
import numpy as np
import math
import time
import random
import sys
import logging
#region Logging start
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
handler = logging.FileHandler('results/logger.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
#endregion
AA_list = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
# Custom dictionary, amino acids with similar properties are "close" to one another
AA_to_int = {'X': 0, 'R': 1, 'H': 2, 'K': 3, 'D': 4, 'E': 5, 'S': 6, 'T': 7, 'N': 8, 'Q': 9, 'C': 10,
'G': 11, 'P': 12, 'A': 13, 'V': 14, 'I': 15, 'L': 16, 'M': 17, 'F': 18, 'Y': 19, 'W': 20}
int_to_AA = {x: y for y, x in AA_to_int.items()}
def check_region(region):
if any(x in region for x in ['c','n','h','C']):
return np.array([1])
else:
return np.array([0])
def progress(file_counter, total_file_count, sample_counter):
s = (f"{file_counter} out of {total_file_count} files loaded, "
f"{sample_counter} samples loaded")
print(s, end='\r')
def one_hot_endcoding(vector):
# Stupid keras....
# TO FIGURE OUT THIS BULLSHIT TOOK SO LONG TIME, I FIRST THOUGHT IT WAS NUMPY BUT NOOOO....
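    # Example: one_hot_endcoding(list('AR')) turns 'A' into a 21-long vector with a 1.
    # at index 13 and 'R' into one with a 1. at index 1 (see AA_to_int); unknown
    # characters get a 1. at index 0 ('X'). Note: the input list is modified in place.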
for i,j in enumerate(vector):
_hot = [0]*len(AA_to_int.keys())
if j in AA_to_int.keys():
_hot[AA_to_int[j]] = 1. # Add 1. at correct index
else:
_hot[0] = 1. # Add X if unknown AA
vector[i] = _hot
return vector
def load_training(seq_length, data_folder, data_augmentation=False,
fix_samples='IGNORE', equalize_data=False, save_array=True,
use_ascii=False, vectorize=True):
"""Loads traning data into a numpy array.
Ignores files that starts with . since they are config files in ubuntu.
"""
print('Loading data...')
t = time.time()
cur_dir = os.getcwd() # Needed to reset working directory
os.chdir(data_folder) # Go to data folder
sample_counter = 0 # Just to count amount of data
file_counter = 0
total_file_count = 0 # Count total amount of files
for dirpath, dirnames, files in os.walk(os.getcwd(), topdown=True):
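        # modifying dirnames in place makes os.walk (topdown) skip hidden directories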
dirnames[:] = [x for x in dirnames if not x.startswith('.')]
total_file_count += len([x for x in files if not x.startswith('.')])
progress(file_counter, total_file_count, sample_counter)
big_seq_list = [] # FASTER
big_label_list = [] # FASTER
for (dirpath, dirnames, filenames) in os.walk(os.getcwd(), topdown=True): # Walks through all files and dirs
dirnames[:] = [x for x in dirnames if not x.startswith('.')]
for filename in filenames:
if filename.startswith('.'): # Ignore config files
continue
records = SeqIO.parse(dirpath + '/' + filename, 'fasta')
for record in records:
record = str(record.seq)
record = record.split('#')
full_seq = list(record[0])
# Discard bad data
if len(full_seq) < 2:
continue
# The first amino acid is usually M or not in signal peptide. Ignore it
full_seq = full_seq[1:]
# seqs = list in list
if not data_augmentation:
seqs = [full_seq[:seq_length]]
elif data_augmentation:
# Divide into smaller pieces
seqs = [full_seq[x:x + seq_length] for x in range(0, len(full_seq), seq_length)]
else:
                    print('No resample method has been chosen')
quit()
if fix_samples == 'LOOP_SEQ':
seqs = [list(x) + (full_seq*(math.ceil(seq_length/(len(full_seq)))))[:seq_length-len(x)]
if len(x) < seq_length
else x for x in seqs]
elif fix_samples == 'ZERO':
seqs = [list(x) + ['X']*(seq_length-len(x))
if len(x) < seq_length
else x for x in seqs]
elif fix_samples == 'IGNORE':
seqs = [x for x in seqs
if len(x) == seq_length]
if seqs == []: # Check for empty lists
continue
elif fix_samples == 'NOISE':
seqs = [x + random.choices(AA_list, k=(seq_length-len(x)))
if len(x) < seq_length
else x for x in seqs]
# Fix Y
if 'positive' in dirpath:
"""No region, assume the first bases are the signal peptide"""
for i in range(len(seqs)):
if i == 0:
big_label_list.append([1.])
else: # When doing data augmentation, this is needed
big_label_list.append([0.])
elif 'negative' in dirpath:
for i in range(len(seqs)):
big_label_list.append([0.])
else:
# Unknown
big_label_list.append([-1.])
# Fix X
if vectorize:
for i,j in enumerate(seqs):
seqs[i] = one_hot_endcoding(j)
elif use_ascii:
# Using ascii numbers, ord('A') = 65
"""Doing this sped up the process by 20 fold!"""
for i,j in enumerate(seqs):
seqs[i] = [float(ord(x)) - 65 for x in j]
elif not use_ascii:
                # Map amino acids to integers via the custom AA_to_int dictionary (unknown AAs -> 0)
"""Doing this sped up the process by 20 fold!"""
for i,j in enumerate(seqs):
seqs[i] = [float(AA_to_int[x])
if x in AA_to_int.keys()
else 0 # Fix unknown amino acids
for x in j]
for seq in seqs:
big_seq_list.append(seq) # Needed, since data aug breaks
sample_counter += len(seqs)
# Slows performance, but I still like it here
#progress(file_counter, total_file_count, sample_counter)
file_counter += 1
progress(file_counter, total_file_count, sample_counter)
"""Can be used in future to find which data was tm or not"""
#print(os.path.basename(dirpath))
"""For neg or pos"""
#print(os.path.basename(dirpath))
# Needs to flatten big_seq_list, since it is now a 3 matrix
print('')
logger.info(f'Loaded {sample_counter} samples')
#print('Flattening...')
#big_seq_list = sum(big_seq_list, []) # Flattens list, needed since the code needs list in lists for data aug
print('Converting to numpy array...')
X = np.array(big_seq_list, dtype=np.float32) # THIS DOES NOT WORK FOR VECTORIZATION, NEEDS MORE PROCESSING
Y = np.array(big_label_list, dtype=np.float32)
print('Flattening...')
X = np.squeeze(X) # WAY faster than using the sum flattening
if not vectorize:
X = X.reshape(X.shape[0], X.shape[1], 1) # Reshape, need 3d for CNN
os.chdir(cur_dir)
logger.info('Dataset is ' + str(X.nbytes / 1e6) + ' mb in memory') # X is [samples, time steps, features]
logger.info('{} positive samples and {} negative samples'.format(np.count_nonzero(Y), Y.shape[0]-np.count_nonzero(Y)))
logger.info('It took {0:.5f} seconds to load'.format(time.time()-t))
#print('Positive values starts at: ' + str(np.argmax(Y)))
t = time.time()
if equalize_data:
amount_positive = np.count_nonzero(Y)
amount_negative = Y.shape[0] - amount_positive
removed_samples = 0
amount_to_remove = 0
indices = []
if amount_positive > amount_negative:
amount_to_remove = amount_positive - amount_negative
# Removes random samples, to prevent bias. (it is read in order)
indices = random.sample(list(np.nonzero(Y)[0]), amount_to_remove) # np.where(Y == Y.argmax())[0] DID NOT WORK!!
logger.info(f'More positive than negative samples. Removing {amount_to_remove} positive samples')
elif amount_positive <= amount_negative:
amount_to_remove = amount_negative - amount_positive
indices = random.sample(list(np.where(Y == 0)[0]), amount_to_remove)
logger.info(f'More negative than positive samples. Removing {amount_to_remove} negative samples')
X = np.delete(X, list(indices), axis=0)
Y = np.delete(Y, list(indices), axis=0)
removed_samples = len(indices)
logger.info(f'Equalized, removed {removed_samples} samples')
logger.info('{} positive samples and {} negative samples'.format( | np.count_nonzero(Y) | numpy.count_nonzero |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Find the minimal mask for a given image and a trained neural network.
Given an image and a trained neural network, this code constructs an smt encoding
of the forward pass of the neural network and then employs the z3 solver to
learn a mask for the inputs given the weights. The smt constraints can be
formulated either by constraining the activations of the first hidden layer or
of the final layer.
"""
import collections
import math
import time
import numpy as np
from saliency.tf1 import integrated_gradients
from saliency.tf1 import xrai
import scipy.ndimage as scipy_ndimage
import tensorflow.compat.v1 as tf
import z3
from smug_saliency import utils
tf.disable_eager_execution()
class RunParams(
collections.namedtuple(
'_RunParams', [
'model_type', 'model_path', 'image_placeholder_shape', 'padding',
'strides', 'tensor_names', 'activations', 'pixel_range'
])):
"""Run parameters for a particular dataset and neural network model pair.
The named tuple contains:
model_type: str, type of the model for which a mask is being found.
model_path: str, path to the saved model.
image_placeholder_shape: tuple, input placeholder of the neural
network for a single image.
padding: tuple of length 2, takes the form (a, b) where a is the
number of 0 padded vectors on the top and left side of the image;
b is the number of 0 padded vectors on the bottom and right side of the
image.
strides: int, number of pixel shifts over the input matrix.
tensor_names: dict,
* input: str, name of input tensor in tf graph.
* first_layer: str, name of the first layer pre-relu activation
tensor in the tf graph.
* first_layer_relu: str, name of the first layer relu activation
tensor in the tf graph.
* logits: str, name of the logits tensor in the tf graph.
* softmax: str, name of the softmax tensor in the tf graph.
* weights_layer_1: str, name of the layer 1 fc / conv weights.
* biases_layer_1: str, name of the layer 1 biases.
* (text only) embedding: str, name of the embedding layer.
activations (full encoding only): list of str, activation functions of
all the hidden layers and the final layer. Each activation takes a
one of the following values {'relu', 'linear'}. The default value of
activations is set to None.
pixel_range: (images only) tuple of length 2, takes the form (a, b) where a
and b represent minimum and maximum values each pixel should take. This is
specific to the neural network model.
"""
def _encode_input(image, z3_mask):
"""Encodes the image pixels by multiplying them with masking variables.
Converts the pixels into z3.ExprRef by multiplying them with their
corresponding masking variable. For an image with pixels with the same
spatial dimensions are multiplied with the same masking variable.
Args:
image: float numpy array with shape
(image_edge_length, image_edge_length, image_channels), image.
z3_mask: list of z3.ExprRef with length (edge_length // window_size) ** 2,
unique masking variables.
Returns:
list of list of list of z3.ExprRef with dimensions
(image_channels, image_edge_length, image_edge_length), encoded input.
"""
image_edge_length, _, image_channels = image.shape
encoded_input = []
for channel in range(image_channels):
# Slicing the image across each channel
encoded_input_per_channel = []
for image_row in range(image_edge_length):
encoded_input_row = []
for image_column in range(image_edge_length):
index = image_row * image_edge_length + image_column
encoded_input_row.append(
z3.ToReal(z3_mask[index]) * image[image_row][image_column][channel])
encoded_input_per_channel.append(encoded_input_row)
encoded_input.append(encoded_input_per_channel)
return encoded_input
def _formulate_smt_constraints_final_layer(
z3_optimizer, smt_output, delta, label_index):
"""Formulates smt constraints using the logits in the final layer.
Generates constraints by setting the logit corresponding to label_index to be
more than the rest of the logits by an amount delta for the forward pass of a
masked image.
Args:
z3_optimizer: instance of z3.Optimizer, z3 optimizer.
smt_output: list of z3.ExprRef with length num_output.
delta: float, masked logit for the label is greater than the rest of the
masked logits by delta.
label_index: int, index of the label of the training image.
Returns:
z3 optimizer with added smt constraints.
"""
for i, output in enumerate(smt_output):
if i != label_index:
z3_optimizer.solver.add(smt_output[label_index] - output > delta)
return z3_optimizer
def _get_hidden_node_location(flattened_index, num_rows, num_columns):
"""Converts the flattened index of a hidden node to its index in the 3D array.
Converts the index of a hidden node in the first convolution layer (flattened)
into its location- row, column, and channel in the 3D activation map. The
3D activation map has dimensions: (num_channels, num_rows, num_columns).
Args:
flattened_index: int, index of a hidden node in the first convolution
layer after it is flattened.
num_rows: int, number of rows in the activation map produced by each
kernel.
num_columns: int, number of columns in the activation map produced by each
kernel.
Returns:
channel: int, channel number of the activation map to which the hidden node
belongs to.
row: int, row number of the hidden node in the activation map.
column: int, column number of the hidden node in the activation map.
"""
total = num_rows * num_columns
output_activation_map_row = (flattened_index % total) // num_columns
output_activation_map_column = (flattened_index % total) % num_columns
return (flattened_index // total,
output_activation_map_row,
output_activation_map_column)
def _formulate_smt_constraints_convolution_layer(
z3_optimizer, kernels, biases, padding, strides, gamma, chosen_indices,
output_activation_map_shape, conv_activations, input_activation_maps):
"""Formulates the smt constraints for a convolution layer.
Formulates the smt constraints by performing convolutions for the activations
whose indices are specified in chosen_indices.
Args:
z3_optimizer: instance of z3.Optimizer, z3 optimizer.
kernels: numpy array with shape
(output_channels, kernel_size, kernel_size, input_channels),
weights of the convolution layer.
biases: None if biases are missing else numpy array with shape
(output_channels,), biases of the convolution layer.
padding: tuple, number of layers 0 padded vectors on top/left side of the
image.
strides: int, number of pixel shifts over the input matrix.
gamma: float, masked activation is greater than gamma times the unmasked
activation. Its value is always between [0,1).
chosen_indices: list of int, indices (after flattening the activation maps)
of the hidden node activations for which the minimisation is being done.
output_activation_map_shape: tuple of length 2, shape of the activation
map of the form (num_rows, num_columns).
conv_activations: float numpy array, flattened convolution layer
activations.
input_activation_maps: list of z3.ExprRef of depth 3, padded_image.
Returns:
an instance of z3.Optimizer with added smt constraints.
"""
padded_input_activation_maps = []
for input_activation_map in input_activation_maps:
padded_input_activation_maps.append(
utils.zero_pad(activation_map=input_activation_map, padding=padding))
for index in chosen_indices:
(hidden_node_channel, hidden_node_row,
hidden_node_column) = _get_hidden_node_location(
flattened_index=index,
num_rows=output_activation_map_shape[0],
num_columns=output_activation_map_shape[1])
# Perform convolution
if biases is None:
smt_equation = 0
else:
smt_equation = biases[hidden_node_channel]
# kernels.shape[-1] represents the number of input channels in the kernel.
for image_channel in range(kernels.shape[-1]):
smt_equation += utils.dot_product(
input_activation_map=padded_input_activation_maps[image_channel],
# hidden_node_row * strides is the starting row of the convolution
# patch in the input image. Similarly, hidden_node_column * strides
# is the starting column of the convolution patch.
input_activation_map_row=hidden_node_row * strides,
input_activation_map_column=hidden_node_column * strides,
sliced_kernel=kernels[hidden_node_channel][:, :, image_channel])
# Add constraint to the solver
if conv_activations[index] > 0:
# we constrain only those nodes whose activations are positive.
# In future we might handle nodes with negative values as well.
z3_optimizer.solver.add(
smt_equation > gamma * conv_activations[index])
return z3_optimizer
def _formulate_smt_constraints_fully_connected_layer(
z3_optimizer, nn_first_layer, smt_first_layer, top_k, gamma):
"""Formulates smt constraints using first layer activations.
Generates constraints for the top_k nodes in the first hidden layer by setting
the masked activation to be greater than that of the unmasked activation.
Args:
z3_optimizer: instance of z3.Optimizer, z3 optimizer.
nn_first_layer: numpy array with shape (num_hidden_nodes_first_layer,)
smt_first_layer: list of z3.ExprRef with length
num_hidden_nodes_first_layer.
top_k: int, constrain the nodes with top k activations in the first hidden
layer.
gamma: float, masked activation is greater than gamma times the unmasked
activation. Its value is always between [0,1).
Returns:
z3 optimizer with added smt constraints.
"""
for index in nn_first_layer.argsort()[-top_k:]:
if nn_first_layer[index] > 0:
# we constrain only those nodes whose activations are positive.
# In future we might handle nodes with negative values as well.
z3_optimizer.solver.add(
smt_first_layer[index] > gamma * nn_first_layer[index])
return z3_optimizer
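# Sketch with made-up activations: for nn_first_layer = [0.2, -1.0, 3.5, 0.7],
# top_k = 2 and gamma = 0.5, argsort()[-2:] selects indices 3 and 2, both with
# positive activations, so the constraints added are
#   smt_first_layer[3] > 0.35
#   smt_first_layer[2] > 1.75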
def _verify_mask_dimensions(mask, model_type):
"""Checks if the mask dimensions are valid for a given model_type.
Args:
mask: For
* image - numpy array with shape (image_edge_length, image_edge_length),
binary mask of the image.
* text - numpy array with shape (num_words,), binary mask of the text.
model_type: str, type of the model for which a mask is being found.
Takes one of the following values: {'cnn', 'text_cnn',
'fully_connected'}.
Raises:
ValueError: If -
* model_type is 'text_cnn' and mask isn't a 1D numpy array, or
* model_type is 'fully_connected' or 'cnn' and mask isn't a 2D numpy array
with the number of rows equal to the number of columns.
"""
if model_type == 'text_cnn' and mask.ndim != 1:
raise ValueError('Invalid mask shape: {}. Expected a mask '
'with 1 dimension.'.format(mask.shape))
if model_type == 'cnn' or model_type == 'fully_connected':
if mask.ndim != 2 or mask.shape[0] != mask.shape[1]:
raise ValueError('Invalid mask shape: {}. Expected a mask '
'with 2 equal dimensions.'.format(mask.shape))
def _record_solution(result, mask, solver_output, image, session, run_params):
"""Stores the activations and logits of the masked and the inv-masked images.
Using the image and its mask, this function generates the masked image and
the inv-masked image. Then, it does a forward pass to find the pre-relu
activations of the first hidden layer, and the logits and then stores them
in the result dictionary.
Args:
result: defaultdict,
* image: float numpy array with shape
(image_edge_length * image_edge_length * image_channels,)
* combined_solver_runtime: float, time taken by the solver to find all
the solutions.
* unmasked_logits: float numpy array with shape (num_outputs,)
* unmasked_first_layer: float numpy array with shape
(num_hidden_nodes_first_layer,)
* masked_first_layer: list with length num_sols, contains float numpy
array with shape (num_hidden_nodes_first_layer,)
* inv_masked_first_layer: list with length num_sols, contains float numpy
array with shape (num_hidden_nodes_first_layer,)
* masks: list with length num_sols, contains float numpy array
with shape (image_edge_length ** 2,)
* masked_images: list with length num_sols, contains float numpy array
with shape (image_edge_length ** 2,)
* inv_masked_images: list with length num_sols, contains float numpy
array with shape (image_edge_length ** 2,)
* masked_logits: list with length num_sols, contains float numpy array
with shape (num_outputs,)
* inv_masked_logits: list with length num_sols, contains float numpy
array with shape (num_outputs,)
* solver_outputs: list with length num_sols, contains strings
corresponding to every sampled solution saying 'sat', 'unsat' or
'unknown'.
mask: For
* image - numpy array with shape (image_edge_length, image_edge_length),
binary mask of the image.
* text - numpy array with shape (num_words,), binary mask of the text.
solver_output: string, takes the value 'sat', 'unsat', or 'unknown'.
image: numpy array with shape (image_edge_length, image_edge_length,
image_channels), image for which the mask was found.
session: tensorflow session with loaded graph.
run_params: RunParams with model_type, image_placeholder_shape,
tensor_names.
"""
_verify_mask_dimensions(mask, run_params.model_type)
tensor_names = run_params.tensor_names
image_placeholder_shape = run_params.image_placeholder_shape
if run_params.model_type != 'text_cnn':
mask = np.repeat(mask[:, :, np.newaxis], image.shape[2], axis=2)
masked_image = image * mask
masked_predictions = session.run(
tensor_names,
feed_dict={
tensor_names['input']: masked_image.reshape(image_placeholder_shape)})
inv_masked_image = image * (1 - mask)
inv_masked_predictions = session.run(
tensor_names,
feed_dict={
tensor_names['input']:
inv_masked_image.reshape(image_placeholder_shape)})
result['masks'].append(mask.reshape(-1))
result['masked_images'].append(masked_image.reshape(-1))
result['masked_logits'].append(masked_predictions['logits'].reshape(-1))
# masked_first_layer is stored even in the case of full_encoding to study the
# first layer activations.
result['masked_first_layer'].append(
masked_predictions['first_layer'].reshape(-1))
result['solver_outputs'].append(solver_output.encode('utf-8'))
result['inv_masked_logits'].append(
inv_masked_predictions['logits'].reshape(-1))
result['inv_masked_images'].append(inv_masked_image.reshape(-1))
result['inv_masked_first_layer'].append(
inv_masked_predictions['first_layer'].reshape(-1))
def _verify_image_dimensions(image):
"""Verifies if the input image has the correct shape.
Args:
image: float numpy array with shape (image_edge_length, image_edge_length,
image_channels), image to be masked.
Raises:
ValueError: The input image should be of the shape- (height, width,
channels). Raises an error if the image doesn't have 3 dimensions,
or height != width, or if channels has a value other than
1 (black and white image) and 3 (rgb image).
"""
if np.ndim(image) != 3:
raise ValueError('The input image should have 3 dimensions. Shape of the '
'image: %s' % str(image.shape))
if image.shape[0] != image.shape[1]:
raise ValueError('The input image should have height == width. Shape of '
'the input image: %s' % str(image.shape))
if image.shape[2] != 1 and image.shape[2] != 3:
raise ValueError('The color channels of the input image has a value other '
'than 1 or 3. Shape of the image: %s' % str(image.shape))
def find_mask_full_encoding(image,
weights,
biases,
run_params,
window_size,
label_index,
delta=0,
timeout=600,
num_unique_solutions=1,
session=None):
"""Finds a binary mask for a given image and a trained Neural Network.
Args:
image: float numpy array with shape (image_edge_length, image_edge_length,
image_channels), image to be masked. For MNIST, the pixel values are
between [0, 1].
weights: list of num_layers float numpy arrays with shape
(output_dim, input_dim), weights of the neural network.
biases: list of num_layers float numpy arrays with shape (output_dim,),
biases of the neural network.
run_params: RunParams with model_type, model_path, image_placeholder_shape,
activations, tensor_names.
window_size: int, side length of the square mask.
label_index: int, index of the label of the training image.
delta: float, logit of the correct label is greater than the rest of the
logit by an amount delta. Its value is always >= 0. It is only used when
constrain_final_layer is True.
timeout: int, solver timeout in seconds.
num_unique_solutions: int, number of unique solutions you want to sample.
session: tf.Session, (default None) tensorflow session with the loaded
neural network.
Returns:
result: dictionary,
* image: float numpy array with shape
(image_edge_length * image_edge_length * image_channels,)
* combined_solver_runtime: float, time taken by the solver to find all
the solutions.
* unmasked_logits: float numpy array with shape (num_outputs,)
* unmasked_first_layer: float numpy array with shape
(num_hidden_nodes_first_layer,)
* masked_first_layer: list with length num_sols, contains float numpy
array with shape (num_hidden_nodes_first_layer,)
* inv_masked_first_layer: list with length num_sols, contains float numpy
array with shape (num_hidden_nodes_first_layer,)
* masks: list with length num_sols, contains float numpy array
with shape (image_edge_length ** 2,)
* masked_images: list with length num_sols, contains float numpy array
with shape (image_edge_length ** 2,)
* inv_masked_images: list with length num_sols, contains float numpy
array with shape (image_edge_length ** 2,)
* masked_logits: list with length num_sols, contains float numpy array
with shape (num_outputs,)
* inv_masked_logits: list with length num_sols, contains float numpy
array with shape (num_outputs,)
* solver_outputs: list with length num_sols, contains strings
corresponding to every sampled solution saying 'sat', 'unsat' or
'unknown'.
"""
_verify_image_dimensions(image)
image_placeholder_shape = run_params.image_placeholder_shape
tensor_names = run_params.tensor_names
# z3's timeout is in milliseconds
z3.set_option('timeout', timeout * 1000)
image_edge_length, _, _ = image.shape
num_masks_along_row = math.ceil(image_edge_length / window_size)
if not session:
session = utils.restore_model(run_params.model_path)
z3_mask = []
mask_id_to_var = {}
for row in range(image_edge_length):
for column in range(image_edge_length):
mask_id = (
num_masks_along_row * (row // window_size)) + (column // window_size)
if mask_id in mask_id_to_var.keys():
z3_var = mask_id_to_var[mask_id]
else:
mask_name = f'mask_{mask_id}'
z3_var = z3.Int(mask_name)
mask_id_to_var[mask_id] = z3_var
z3_mask.append(z3_var)
unmasked_predictions = session.run(
tensor_names,
feed_dict={
tensor_names['input']: image.reshape(image_placeholder_shape)})
smt_output, _ = utils.smt_forward(
features=utils.flatten_nested_lists(_encode_input(
image=image,
z3_mask=z3_mask)),
weights=weights,
biases=biases,
activations=run_params.activations)
z3_optimizer = _formulate_smt_constraints_final_layer(
z3_optimizer=utils.ImageOptimizer(
z3_mask=z3_mask,
window_size=window_size,
edge_length=image_edge_length),
smt_output=smt_output,
delta=delta,
label_index=label_index)
solver_start_time = time.time()
result = collections.defaultdict(list)
# All the masks found in each call of z3_optimizer.generator() are guaranteed
# to be unique since duplicate solutions are blocked. For more details
# refer to z3_optimizer.generator().
for mask, solver_output in z3_optimizer.generator(num_unique_solutions):
_record_solution(result=result,
mask=mask,
solver_output=solver_output,
image=image,
session=session,
run_params=run_params)
result.update({
'image': image.reshape(-1),
'combined_solver_runtime': time.time() - solver_start_time,
'unmasked_logits': np.squeeze(unmasked_predictions['logits'])
from .busSim.manager import managerFactory
from .result.searchResult import SearchResult
from .util import gen_start_time, transform
from .gtfs_edit import copy_with_edits
from .service.yelp import get_results
from .census import Census
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
from shapely.wkt import loads
from pyproj import Transformer
from zipfile import ZipFile
from io import TextIOWrapper
import os
from pathlib import Path
from math import ceil, floor
from collections import defaultdict
import time
class SCanalyzer:
def __init__(self, gtfs_path):
self.gtfs_path = gtfs_path
self.orig_gtfs_path = gtfs_path
self.base_out_path = self._get_out_path()
self.out_path = self.base_out_path
self._preprocess_gtfs()
def gtfs_edit(self, edit_fn, route, from_orig=True):
orig_gtfs_name = os.path.basename(self.orig_gtfs_path)
modified_gtfs_name = f"{edit_fn.__name__}-{route}-{orig_gtfs_name}"
modified_gtfs_path = os.path.join(
self.base_out_path, modified_gtfs_name)
from_path = self.orig_gtfs_path if from_orig else self.gtfs_path
copy_with_edits(from_path, modified_gtfs_path, edit_fn, route)
self.gtfs_path = modified_gtfs_path
def set_batch_label(self, label):
self.out_path = os.path.join(self.base_out_path, label)
Path(self.out_path).mkdir(parents=True, exist_ok=True)
def reset_batch_label(self):
self.out_path = self.base_out_path
def search(self, config, perf_df=None):
# prerun check
if not config.is_runnable():
raise Exception("The current config is not runnable")
# dynamically init a manager
manager = managerFactory.create(
config.get_run_env(), gtfs_path=self.gtfs_path, out_path=self.out_path, borders=self.borders)
result_df = manager.run_batch(config, perf_df)
return result_df
def load_census(self, cache=True):
"""
Looks for a stops.csv file in data/mmt_gtfs and queries the TigerWeb Census API to pull the census tracts
covered by the system, based on its center and radius. An optional buffer (1 km by default) is added to the
radius. For those tracts and a default set of demographic fields, the ACS 5-year 2019 dataset is queried to
get the demographics data for each tract, and a few summary statistics are computed. Returns a geodataframe
with all of this information and saves it to the output folder.
cache: default=True; if True, load and return a previously saved result when available.
"""
# Pull from Cache and return:
cache_path = os.path.join(self.base_out_path, "census.csv")
if cache and os.path.exists(cache_path):
census_df = pd.read_csv(cache_path)
return self._csvdf_to_gdf(census_df)
# Create the Geodataframe:
c = Census(gtfs_filename="../data/mmt_gtfs/stops.csv")
gdf_tracts = c.getCensusTracts()
demographic_data = c.getDemographicsData(
gdf_tracts, demographics=['Race', 'Vehicles'])
# Save output:
demographic_data.to_csv(cache_path, index=False)
return self._csvdf_to_gdf(demographic_data)
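# Usage sketch (paths are assumptions, not part of this module): the first call
# hits the TigerWeb / ACS endpoints and caches census.csv in the output folder;
# later calls with cache=True simply reload and convert the saved frame.
#   sc = SCanalyzer("../data/mmt_gtfs.zip")
#   census_gdf = sc.load_census(cache=True)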
def load_yelp(self, api_key, services=["banks", "clinics", "dentists", "hospitals", "supermarket"], cache=True):
cache_path = os.path.join(self.base_out_path, "services.csv")
if cache and os.path.exists(cache_path):
return pd.read_csv(cache_path)
dfs = [get_results(api_key, service, self.borders)
for service in services]
df = pd.concat(dfs)
df.to_csv(cache_path, index=False)
return df
def add_service_metrics(self, result_gdf, services_gdf, perf_df=None):
# load grid size from a map_identifier (pick the first one on result_gdf)
max_x, min_x, max_y, min_y, grid_size, x_num, y_num = self._load_grid_size(
result_gdf)
record_perf = (perf_df is not None)
def get_grid(df):
grid = np.zeros(x_num*y_num)
from decimal import Decimal
import numpy as np
from datetime import timedelta
import pprint
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import env_proc_trigger, ep_time_step, config_sim
from typing import Dict, List
pp = pprint.PrettyPrinter(indent=4)
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3)
# https://github.com/pymc-devs/resources/blob/master/Rethinking/Chp_02.ipynb
#%matplotlib inline
import pymc3 as pm
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
#%config InlineBackend.figure_format = 'retina'
plt.style.use(['seaborn-colorblind', 'seaborn-darkgrid'])
ways = np.array([0, 3, 8, 9, 0])
ways / ways.sum()
stats.binom.pmf(6, n=9, p=0.5)
def posterior_grid_approx(grid_points=5, success=6, tosses=9):
"""
"""
# define grid
p_grid = np.linspace(0, 1, grid_points)
# define prior
prior = np.repeat(5, grid_points)
# Chapter 2: More Image Transformation and Manipulation
# Author: <NAME>
###########################################
# ## Problems
# ### 1.0 Basics of Linear Geometric Transformations in 2D
# ### 1.1 Rotating an image with scipy.ndimage
from scipy.ndimage import rotate
from skimage.io import imread
import matplotlib.pylab as plt
im = imread('images/Img_02_04.jpg')
im = rotate(im, -45)
plt.figure(figsize=(5,5))
plt.imshow(im)
plt.axis('off') # stop showing the axes
plt.show()
# ### 1.2 Flipping and Flopping an image with *numpy*
import matplotlib.pyplot as plt
import numpy as np
im = plt.imread('images/Img_02_42.jpg')
im_flipped = np.flipud(im)
plt.figure(figsize=(10, 12))
plt.subplot(211), plt.imshow(im), plt.axis('off'), plt.title('original', size=20)
plt.subplot(212), plt.imshow(im_flipped), plt.axis('off'), plt.title('flipped', size=20) #np.fliplr(im)
plt.show()
im = plt.imread('images/Img_02_43.jpeg')
im_flipped = np.fliplr(im)
plt.figure(figsize=(15, 12))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original', size=20)
plt.subplot(122), plt.imshow(im_flipped), plt.axis('off'), plt.title('flopped', size=20) #np.fliplr(im)
plt.show()
# ### 1.3 Applying Affine Transformation with *scipy.ndimage*
from skimage.io import imread
from scipy.ndimage import affine_transform
import numpy as np
import matplotlib.pylab as plt
im = imread("images/Img_02_01.jpg")
rot_mat = np.array([[np.cos(np.pi/4),np.sin(np.pi/4), 0],[-np.sin(np.pi/4),np.cos(np.pi/4), 0], [0,0,1]])
shr_mat = np.array([[1, 0.45, 0], [0, 0.75, 0], [0, 0, 1]])
transformed = affine_transform(im, rot_mat@shr_mat, offset=[-im.shape[0]/4+25, im.shape[1]/2-50, 0], output_shape=im.shape)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(transformed), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ## 2. Implement Image Transformation with Warping / Inverse Warping using scikit-image and scipy.ndimage
# ### 2.1 Applying translation on an image using scikit-image warp
from skimage.io import imread
from skimage.transform import warp
import matplotlib.pylab as plt
def translate(xy, t_x, t_y):
xy[:, 0] -= t_y
xy[:, 1] -= t_x
return xy
im = imread('images/Img_02_01.jpg')
im = warp(im, translate, map_args={'t_x':-250, 't_y':200}) # create a dictionary for translation parameters
plt.imshow(im)
plt.title('Translated image', size=20)
plt.show()
# ### 2.2 Implementing the Swirl transformation using scikit-image warp
def swirl(xy, x0, y0, R):
r = np.sqrt((xy[:,1]-x0)**2 + (xy[:,0]-y0)**2)
a = np.pi*r / R
xy[:, 1] = (xy[:, 1]-x0)*np.cos(a) + (xy[:, 0]-y0)*np.sin(a) + x0
xy[:, 0] = -(xy[:, 1]-x0)*np.sin(a) + (xy[:, 0]-y0)*np.cos(a) + y0
return xy
im = imread('images/Img_02_02.jpg')
print(im.shape)
im1 = warp(im, swirl, map_args={'x0':220, 'y0':360, 'R':650})
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ### 2.3 Implementing Swirl Transform using *scipy.ndimage*
from scipy import ndimage as ndi
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pylab as plt, numpy as np
def apply_swirl(xy, x0, y0, R):
r = np.sqrt((xy[1]-x0)**2 + (xy[0]-y0)**2)
a = np.pi*r / R
return ((xy[1]-x0)*np.cos(a) + (xy[0]-y0)*np.sin(a) + x0, -(xy[1]-x0)*np.sin(a) + (xy[0]-y0)*np.cos(a) + y0)
im = rgb2gray(imread('images/Img_02_06.jpg'))
print(im.shape)
im1 = ndi.geometric_transform(im, apply_swirl, extra_arguments=(100, 100, 250))
plt.figure(figsize=(20,10))
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('Input image', size=20)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('Output image', size=20)
plt.show()
# ### 2.4 Implementing Elastic Deformation
import numpy as np
import matplotlib.pylab as plt
from skimage.color import rgb2gray
from scipy.ndimage import gaussian_filter, map_coordinates
def elastic_transform(image, alpha, sigma):
random_state = np.random.RandomState(None)
h, w = image.shape
dx = gaussian_filter((random_state.rand(*image.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*image.shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(w), np.arange(h))
indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))
import argparse
from mppca.mixture_ppca import MPPCA
from code.pytorch.LAMPO.core.imitation_learning import PPCAImitation, RunModel
from code.pytorch.LAMPO.core.task_interface import TaskInterface
from code.pytorch.LAMPO.core.plot import LampoMonitor
from code.pytorch.LAMPO.core.lampo import Lampo
import numpy as np
from code.pytorch.LAMPO.core.model import RLModel
from code.pytorch.LAMPO.core.config import config
import json
import matplotlib.pyplot as plt
def get_arguments_dict():
parser = argparse.ArgumentParser()
parser.add_argument("--folder_name",
help="Where you would like to save the experimental results and configuration.")
parser.add_argument("-t", "--task_name",
help="Task name.",
default="reacher_target")
parser.add_argument("-i", "--id",
help="Identifier of the process.",
type=int, default=10)
parser.add_argument("-b", "--batch_size",
help="How many episodes before improvement.",
type=int, default=10)
parser.add_argument("-l", "--imitation_learning",
help="How many episodes before improvement.",
type=int, default=200)
parser.add_argument("-p", "--plot",
help="Show real time plots.",
action="store_true")
parser.add_argument("-v", "--visualize_robot",
help="Show robotic behavior",
action="store_true")
parser.add_argument("-z", "--normalize",
help="Normalized Importance Sampling",
action="store_true")
parser.add_argument("-s", "--save",
help="Save the results in the experiment directory.",
action="store_true")
parser.add_argument("-d", "--load",
help="Load configuration from folder.",
action="store_true")
parser.add_argument("-r", "--slurm",
help="Don't look for CPU usage.",
action="store_true")
parser.add_argument("--il_noise",
help="Add noise on the context",
type=float,
default=0.03)
parser.add_argument("--dense_reward",
help="Use dense reward",
action="store_true")
parser.add_argument("-c", "--context_kl_bound",
help="Bound the context kl.",
type=float,
default=50.)
parser.add_argument("-k", "--kl_bound",
help="Bound the improvement kl.",
type=float,
default=0.2)
parser.add_argument("--context_reg",
help="Bound the improvement kl.",
type=float,
default=1E-4)
parser.add_argument("-f", "--forward",
help="Bound the improvement kl.",
action="store_true")
parser.add_argument("-m", "--max_iter",
help="Maximum number of iterations.",
type=int,
default=20)
parser.add_argument("-e", "--n_evaluations",
help="Number of the evaluation batch.",
type=int,
default=500)
parser.add_argument("--data_augment",
help="Number of artificially generated data (x times). (=1 means no data augmentation)",
type=int,
default=1)
args = parser.parse_args()
return args
class Objectview(object):
def __init__(self, d):
self.__dict__ = d
def process_parameters(parameters, n_samples, n_context, noise=0.03, augment=1):
parameters = parameters[:n_samples].copy()
data_list = []
for i in range(augment):
data_list.append(np.copy(parameters))
data_list[-1][:, :n_context] += noise * np.random.normal(size=parameters[:, :n_context].shape)
return np.concatenate(data_list, axis=0)
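# Usage sketch (shapes are assumptions): only the first n_context columns are
# perturbed, and `augment` noisy copies are stacked, so a (100, 12) parameter
# matrix with augment=3 comes back as (300, 12).
#   params = np.random.uniform(size=(100, 12))
#   out = process_parameters(params, n_samples=100, n_context=3, noise=0.03, augment=3)
#   out.shape  # -> (300, 12)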
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops for all atom representations."""
from typing import Dict, Text
from alphafold.common import residue_constants
from alphafold.model import geometry
from alphafold.model import utils
import jax
import jax.numpy as jnp
import numpy as np
def squared_difference(x, y):
return jnp.square(x - y)
def _make_chi_atom_indices():
"""Returns atom indices needed to compute chi angles for all residue types.
Returns:
A tensor of shape [residue_types=21, chis=4, atoms=4]. The residue types are
in the order specified in residue_constants.restypes + unknown residue type
at the end. For chi angles which are not defined on the residue, the
positions indices are by default set to 0.
"""
chi_atom_indices = []
for residue_name in residue_constants.restypes:
residue_name = residue_constants.restype_1to3[residue_name]
residue_chi_angles = residue_constants.chi_angles_atoms[residue_name]
atom_indices = []
for chi_angle in residue_chi_angles:
atom_indices.append(
[residue_constants.atom_order[atom] for atom in chi_angle])
for _ in range(4 - len(atom_indices)):
atom_indices.append([0, 0, 0, 0]) # For chi angles not defined on the AA.
chi_atom_indices.append(atom_indices)
chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue.
return np.array(chi_atom_indices)
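# Sanity-check sketch: the table covers the 20 standard residue types plus UNK,
# each with up to 4 chi angles defined by 4 atom indices (padded with zeros).
#   _make_chi_atom_indices().shape  # -> (21, 4, 4)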
def _make_renaming_matrices():
"""Matrices to map atoms to symmetry partners in ambiguous case."""
# As the atom naming is ambiguous for 7 of the 20 amino acids, provide
# alternative groundtruth coordinates where the naming is swapped
restype_3 = [
residue_constants.restype_1to3[res] for res in residue_constants.restypes
]
restype_3 += ['UNK']
# Matrices for renaming ambiguous atoms.
all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
correspondences = np.arange(14)
for source_atom_swap, target_atom_swap in swap.items():
source_index = residue_constants.restype_name_to_atom14_names[
resname].index(source_atom_swap)
target_index = residue_constants.restype_name_to_atom14_names[
resname].index(target_atom_swap)
correspondences[source_index] = target_index
correspondences[target_index] = source_index
renaming_matrix = np.zeros((14, 14), dtype=np.float32)
for index, correspondence in enumerate(correspondences):
renaming_matrix[index, correspondence] = 1.
all_matrices[resname] = renaming_matrix.astype(np.float32)
renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])
return renaming_matrices
def _make_restype_atom37_mask():
"""Mask of which atoms are present for which residue type in atom37."""
# create the corresponding mask
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
for restype, restype_letter in enumerate(residue_constants.restypes):
restype_name = residue_constants.restype_1to3[restype_letter]
atom_names = residue_constants.residue_atoms[restype_name]
for atom_name in atom_names:
atom_type = residue_constants.atom_order[atom_name]
restype_atom37_mask[restype, atom_type] = 1
return restype_atom37_mask
def _make_restype_atom14_mask():
"""Mask of which atoms are present for which residue type in atom14."""
restype_atom14_mask = []
for rt in residue_constants.restypes:
atom_names = residue_constants.restype_name_to_atom14_names[
residue_constants.restype_1to3[rt]]
restype_atom14_mask.append([(1. if name else 0.) for name in atom_names])
restype_atom14_mask.append([0.] * 14)
restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32)
return restype_atom14_mask
def _make_restype_atom37_to_atom14():
"""Map from atom37 to atom14 per residue type."""
restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14
for rt in residue_constants.restypes:
atom_names = residue_constants.restype_name_to_atom14_names[
residue_constants.restype_1to3[rt]]
atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
restype_atom37_to_atom14.append([
(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
for name in residue_constants.atom_types
])
restype_atom37_to_atom14.append([0] * 37)
restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32)
return restype_atom37_to_atom14
def _make_restype_atom14_to_atom37():
"""Map from atom14 to atom37 per residue type."""
restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37
for rt in residue_constants.restypes:
atom_names = residue_constants.restype_name_to_atom14_names[
residue_constants.restype_1to3[rt]]
restype_atom14_to_atom37.append([
(residue_constants.atom_order[name] if name else 0)
for name in atom_names
])
# Add dummy mapping for restype 'UNK'
restype_atom14_to_atom37.append([0] * 14)
restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32)
return restype_atom14_to_atom37
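# Consistency sketch relating the two lookup tables above: for every residue
# type r and every atom14 slot i that actually exists for that residue,
# mapping into atom37 and back recovers the original slot.
#   to37 = _make_restype_atom14_to_atom37()
#   to14 = _make_restype_atom37_to_atom14()
#   mask14 = _make_restype_atom14_mask()
#   assert all(to14[r, to37[r, i]] == i
#              for r in range(20) for i in range(14) if mask14[r, i])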
def _make_restype_atom14_is_ambiguous():
"""Mask which atoms are ambiguous in atom14."""
# create an ambiguous atoms mask. shape: (21, 14)
restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32)
for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
for atom_name1, atom_name2 in swap.items():
restype = residue_constants.restype_order[
residue_constants.restype_3to1[resname]]
atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index(
atom_name1)
atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index(
atom_name2)
restype_atom14_is_ambiguous[restype, atom_idx1] = 1
restype_atom14_is_ambiguous[restype, atom_idx2] = 1
return restype_atom14_is_ambiguous
def _make_restype_rigidgroup_base_atom37_idx():
"""Create Map from rigidgroups to atom37 indices."""
# Create an array with the atom names.
# shape (num_restypes, num_rigidgroups, 3_atoms): (21, 8, 3)
base_atom_names = np.full([21, 8, 3], '', dtype=object)
def test_import():
import pyromsobs
def test_merge():
import pyromsobs
from numpy import array
obs1 = pyromsobs.OBSstruct()
obs1.value = array([5.])
obs1.type = array([6])
obs1.error = array([0.5])
obs1.Xgrid = array([1.2])
obs1.Ygrid = array([1.4])
obs1.Zgrid = array([5.2])
obs1.time = array([15782.5])
obs1 = pyromsobs.utils.setDimensions(obs1)
obs2 = pyromsobs.OBSstruct()
obs2.value = array([8.])
obs2.type = array([6])
obs2.error = array([0.5])
obs2.Xgrid = array([1.6])
obs2.Ygrid = array([1.8])
obs2.Zgrid = array([5.4])
obs2.time = array([15782.5])
obs2 = pyromsobs.utils.setDimensions(obs2)
obs3 = pyromsobs.OBSstruct()
obs3.value = array([5., 8.])
obs3.type = array([6, 6])
obs3.error = array([0.5, 0.5])
obs3.Xgrid = array([1.2, 1.6])
obs3.Ygrid = array([1.4, 1.8])
obs3.Zgrid = array([5.2, 5.4])
obs3.time = array([15782.5, 15782.5] )
obs3 = pyromsobs.utils.setDimensions(obs3)
obs4 = pyromsobs.merge([obs2, obs1])
for var in ['value', 'error', 'Xgrid', 'Ygrid', 'Zgrid']:
assert all(getattr(obs4, var) == getattr(obs3, var))
def test_superob():
import pyromsobs
from numpy import array
obs1 = pyromsobs.OBSstruct()
obs1.value = array([5.])
obs1.type = array([6])
obs1.error = array([0.5])
obs1.Xgrid = array([1.2])
obs1.Ygrid = array([1.4])
obs1.Zgrid = array([5.2])
obs1.time = array([15782.5])
obs1 = pyromsobs.utils.setDimensions(obs1)
obs2 = pyromsobs.OBSstruct()
obs2.value = array([8.])
obs2.type = array([6])
obs2.error = array([0.5])
obs2.Xgrid = array([1.6])
obs2.Ygrid = array([1.8])
obs2.Zgrid = array([5.4])
obs2.time = array([15782.5])
obs2 = pyromsobs.utils.setDimensions(obs2)
obs3 = pyromsobs.merge([obs2, obs1])
obs3 = pyromsobs.superob(obs3)
for var in ['value', 'error', 'Xgrid', 'Ygrid', 'Zgrid']:
assert getattr(obs3, var) == (getattr(obs1, var) + getattr(obs2, var))/2.
def test_remove_duplicates():
import pyromsobs
from numpy import array
obs1 = pyromsobs.OBSstruct()
obs1.value = array([5.])
obs1.type = array([6])
obs1.error = array([0.5])
obs1.Xgrid = array([1.2])
obs1.Ygrid = array([1.4])
obs1.Zgrid = array([5.2])
obs1.time = array([15782.5])
obs1 = pyromsobs.utils.setDimensions(obs1)
obs2 = pyromsobs.OBSstruct()
obs2.value = array([5.])
obs2.type = array([6])
obs2.error = array([0.5])
obs2.Xgrid = array([1.2])
obs2.Ygrid = array([1.4])
obs2.Zgrid = array([5.2])
obs2.time = array([15782.5])
obs2 = pyromsobs.utils.setDimensions(obs2)
obs3 = pyromsobs.merge([obs2, obs1])
obs3 = pyromsobs.remove_duplicates(obs3)
assert obs3.Ndatum == 1
for var in ['value', 'error', 'Xgrid', 'Ygrid', 'Zgrid']:
assert getattr(obs3, var) == getattr(obs1, var)
def test_put():
import pyromsobs
from numpy import array, isin
obs3 = pyromsobs.OBSstruct()
obs3.value = array([5., 8.])
obs3.type = array([6, 6])
obs3.error = array([0.5, 0.5])
obs3.Xgrid = array([1.2, 1.6])
obs3.Ygrid = array([1.4, 1.8])
obs3.Zgrid = array([5.2, 5.4])
obs3.time = array([15782.5, 15782.5] )
obs3 = pyromsobs.utils.setDimensions(obs3)
obs_dict = {'value': 6, 'type': 3, 'error': 0.4, 'Xgrid': 10, 'Ygrid': 15, 'Zgrid': 35.2, 'time': 15782.0, 'provenance': 10}
obs = pyromsobs.OBSstruct()
obs.put(obs_dict, fill_value = -99999)
obs3.put(obs_dict, fill_value = -99999)
assert obs3.Ndatum == 3
assert any(isin(obs3.value, 6))
assert any(isin(obs3.provenance, 10))
assert any(isin(obs3.provenance, -99999))
assert obs.Ndatum == 1
assert obs.value == 6
assert obs.provenance == 10
def test_index():
import pyromsobs
from numpy import array, isin, where
obs3 = pyromsobs.OBSstruct()
obs3.value = array([5., 8.])
obs3.type = array([6, 6])
obs3.error = array([0.5, 0.5])
obs3.Xgrid = array([1.2, 1.6])
obs3.Ygrid = array([1.4, 1.8])
obs3.Zgrid = array([5.2, 5.4])
obs3.time = array([15782.5, 15782.5] )
obs3 = pyromsobs.utils.setDimensions(obs3)
obs_dict = {'value': 6, 'type': 3, 'error': 0.4, 'Xgrid': 10, 'Ygrid': 15, 'Zgrid': 35.2, 'time': 15782.0, 'provenance': 10}
obs = pyromsobs.OBSstruct()
obs.put(obs_dict, fill_value = -99999)
obs3.put(obs_dict, fill_value = -99999)
obs1 = obs3[0]
obs2 = obs3[-1]
obs4 = obs3[where(obs3.type == 6)]
assert obs1.Ndatum == 1
assert obs2.Ndatum == 1
assert obs4.Ndatum == 2
assert obs1.value == obs3.value[0]
assert obs2.value == obs3.value[-1]
assert all(isin(obs4.value, obs3.value[where(obs3.type == 6)]))
def test_adjust_survey():
import pyromsobs
import numpy as np
from datetime import datetime, timedelta
reftime = datetime(1970,1,1)
today = datetime.now()
obs1 = pyromsobs.OBSstruct()
obs1.value = np.array([5.])
obs1.type = np.array([6])
obs1.error = np.array([0.5])
obs1.lat = np.array([65.5])
obs1.lon = np.array([4])
obs1.depth = np.array([-10])
"""
Feature highlights for Matplotlib 3.4.0.
"""
import numpy as np
from mplslide import FONT, TITLE_COLOUR, new_slide, slide_heading, annotate_pr_author
CODE = dict(fontfamily='monospace', fontsize=40, verticalalignment='top',
alpha=0.7)
def example_plot(ax, fontsize=12, hide_labels=False):
pc = ax.pcolormesh(np.random.randn(30, 30))
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import sph_harm
from scipy.special import assoc_laguerre
def hydrogen_cloud(n,l,m):
x = np.linspace(-30, 30, 500)
y = 0 #### the plane locates at y = 0
z = np.linspace(-35, 35, 500)
X, Z = np.meshgrid(x, z)
rho = np.linalg.norm((X,y,Z), axis=0) / n
Lag = assoc_laguerre(2 * rho, n - l - 1, 2 * l + 1)
Ylm = sph_harm(m, l, np.arctan2(y,X), np.arctan2(np.linalg.norm((X,y), axis=0), Z))
Psi = np.exp(-rho) * np.power((2*rho), l)
"""
Purpose
-------
A Portfolio represents a collection of Aggregate objects. Applications include
* Model a book of insurance
* Model a large account with several sub lines
* Model a reinsurance portfolio or large treaty
"""
import collections
import json
import logging
from copy import deepcopy
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from pandas.io.formats.format import EngFormatter
import pypandoc
import scipy.stats as ss
from scipy.interpolate import interp1d
from IPython.core.display import HTML, display
from matplotlib.ticker import MultipleLocator, StrMethodFormatter, MaxNLocator, FixedLocator, \
FixedFormatter, AutoMinorLocator
from scipy import interpolate
import re
from pathlib import Path
from .distr import Aggregate, Severity
from .spectral import Distortion
from .utils import ft, \
ift, sln_fit, sgamma_fit, \
axiter_factory, AxisManager, html_title, \
suptitle_and_tight, \
MomentAggregator, Answer, subsets, round_bucket, report_time
# fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}
matplotlib.rcParams['legend.fontsize'] = 'xx-small'
logger = logging.getLogger('aggregate')
# debug
# info
# warning
# error
# critical
class Portfolio(object):
"""
Portfolio creates and manages a portfolio of Aggregate objects.
:param name: the name of the portfolio, no spaces or underscores
:param spec_list: a list of 1) dictionary: Aggregate object dictionary specifications or
2) Aggregate: An actual aggregate objects or
3) tuple (type, dict) as returned by uw['name'] or
4) string: Names referencing objects in the optionally passed underwriter
"""
def __init__(self, name, spec_list, uw=None):
self.name = name
self.agg_list = []
self.line_names = []
logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name}')
# logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name} at {super(Portfolio, self).__repr__()}')
ma = MomentAggregator()
max_limit = 0
for spec in spec_list:
if isinstance(spec, Aggregate):
# directly passed in an agg object
a = spec
agg_name = spec.name
elif isinstance(spec, str):
# look up object in uw return actual instance
# note here you could do uw.aggregate[spec] and get the dictionary def
# or uw(spec) to return the already-created (and maybe updated) object
# we go the latter route...if user wants they can pull off the dict item themselves
if uw is None:
raise ValueError(f'Must pass valid Underwriter instance to create aggs by name')
try:
a = uw(spec)
except Exception as e:
logger.error(f'Item {spec} not found in your underwriter')
raise e
agg_name = a.name
elif isinstance(spec, tuple):
# uw returns type, spec
assert spec[0] == 'agg'
a = Aggregate(**spec[1])
agg_name = spec[1]['name']
elif isinstance(spec, dict):
a = Aggregate(**spec)
agg_name = spec['name'][0] if isinstance(spec['name'], list) else spec['name']
else:
raise ValueError(f'Invalid type {type(spec)} passed to Portfolio, expect Aggregate, str or dict.')
self.agg_list.append(a)
self.line_names.append(agg_name)
self.__setattr__(agg_name, a)
ma.add_fs(a.report_ser[('freq', 'ex1')], a.report_ser[('freq', 'ex2')], a.report_ser[('freq', 'ex3')],
a.report_ser[('sev', 'ex1')], a.report_ser[('sev', 'ex2')], a.report_ser[('sev', 'ex3')])
max_limit = max(max_limit, np.max(np.array(a.limit)))
self.line_names_ex = self.line_names + ['total']
for n in self.line_names:
# line names cannot equal total
if n == 'total':
raise ValueError('Line names cannot equal total, it is reserved for...total')
# make a pandas data frame of all the statistics_df
temp_report = pd.concat([a.report_ser for a in self.agg_list], axis=1)
# max_limit = np.inf # np.max([np.max(a.get('limit', np.inf)) for a in spec_list])
temp = pd.DataFrame(ma.stats_series('total', max_limit, 0.999, remix=False))
self.statistics_df = pd.concat([temp_report, temp], axis=1)
# future storage
self.density_df = None
self.augmented_df = None
self.epd_2_assets = {}
self.assets_2_epd = {}
self.priority_capital_df = None
self.priority_analysis_df = None
self.audit_df = None
self.padding = 0
self.tilt_amount = 0
self._linear_quantile_function = None
self._cdf = None
self._pdf = None
self._tail_var = None
self._tail_var2 = None
self._inverse_tail_var = None
self.bs = 0
self.log2 = 0
self.ex = 0
self.last_update = 0
self.hash_rep_at_last_update = ''
self._distortion = None
self.sev_calc = ''
self._remove_fuzz = 0
self.approx_type = ""
self.approx_freq_ge = 0
self.discretization_calc = ''
# for storing the info about the quantile function
self.q_temp = None
self._renamer = None
self._line_renamer = None
self._tm_renamer = None
# if created by uw it stores the program here
self.program = ''
self.audit_percentiles = [.9, .95, .99, .996, .999, .9999, 1 - 1e-6]
self.dists = None
self.dist_ans = None
def __str__(self):
"""
Goal: readability
:return:
"""
# cannot use ex, etc. because object may not have been updated
if self.audit_df is None:
ex = self.statistics_df.loc[('agg', 'mean'), 'total']
empex = np.nan
isupdated = False
else:
ex = self.get_stat(stat="Mean")
empex = self.get_stat()
isupdated = True
# df = pd.DataFrame(columns=['Statistic', 'Value'])
# df = df.set_index('Statistic')
# df.loc['Portfolio Name', 'Value'] = self.name
# df.loc['Expected loss', 'Value'] = ex
# df.loc['Model loss', 'Value'] = empex
# df.loc['Error', 'Value'] = ex / empex - 1
# print(df)
s = f'Portfolio name {self.name:<15s}\n' \
f'Theoretic expected loss {ex:15,.1f}\n' \
f'Actual expected loss {empex:15,.1f}\n' \
f'Error {empex / ex - 1:15.6f}\n' \
f'Discretization size {self.log2:15d}\n' \
f'Bucket size {self.bs:15.2f}\n' \
f'{object.__repr__(self)}'
if not isupdated:
s += '\nNOT UPDATED!'
return s
@property
def distortion(self):
return self._distortion
def remove_fuzz(self, df=None, eps=0, force=False, log=''):
"""
remove fuzz at threshold eps. if not passed use np.finfo(np.float).eps.
Apply to self.density_df unless df is not None
Only apply if self.remove_fuzz or force
:param eps:
:param df: apply to dataframe df, default = self.density_df
:param force: do regardless of self.remove_fuzz
:return:
"""
if df is None:
df = self.density_df
if eps == 0:
eps = np.finfo(np.float).eps
if self._remove_fuzz or force:
logger.debug(f'Portfolio.remove_fuzz | Removing fuzz from {self.name} dataframe, caller {log}')
df[df.select_dtypes(include=['float64']).columns] = \
df.select_dtypes(include=['float64']).applymap(lambda x: 0 if abs(x) < eps else x)
def __repr__(self):
"""
Goal unmbiguous
:return:
"""
# return str(self.to_dict())
# this messes up when port = self has been enhanced...
if isinstance(self, Portfolio):
s = [super(Portfolio, self).__repr__(), f"{{ 'name': '{self.name}'"]
else:
s = [f'Non-Portfolio (enhanced) object {{ "name": "{self.name}"']
agg_list = [str({k: v for k, v in a.__dict__.items() if k in Aggregate.aggregate_keys})
for a in self.agg_list]
s.append(f"'spec': [{', '.join(agg_list)}]")
if self.bs > 0:
s.append(f'"bs": {self.bs}')
s.append(f'"log2": {self.log2}')
s.append(f'"padding": {self.padding}')
s.append(f'"tilt_amount": {self.tilt_amount}')
s.append(f'"distortion": "{repr(self._distortion)}"')
s.append(f'"sev_calc": "{self.sev_calc}"')
s.append(f'"remove_fuzz": {self._remove_fuzz}')
s.append(f'"approx_type": "{self.approx_type}"')
s.append(f'"approx_freq_ge": {self.approx_freq_ge}')
return ', '.join(s) + '}'
def _repr_html_(self):
s = [f'<h2>Portfolio object: {self.name}</h2>']
_n = len(self.agg_list)
_s = "" if _n <= 1 else "s"
s.append(f'Portfolio contains {_n} aggregate component{_s}')
summary_sl = (slice(None), ['mean', 'cv', 'skew'])
if self.audit_df is not None:
_df = pd.concat((self.statistics_df.loc[summary_sl, :],
self.audit_df[['Mean', 'EmpMean', 'MeanErr', 'CV', 'EmpCV', 'CVErr', 'P99.0']].T),
sort=True)
s.append(_df._repr_html_())
else:
s.append(self.statistics_df.loc[summary_sl, :]._repr_html_())
return '\n'.join(s)
def __hash__(self):
"""
hashing behavior
:return:
"""
return hash(repr(self.__dict__))
def __iter__(self):
"""
make Portfolio iterable: for each x in Portfolio
:return:
"""
return iter(self.agg_list)
def __getitem__(self, item):
"""
allow Portfolio[slice] to return bits of agg_list
:param item:
:return:
"""
if type(item) == str:
return self.agg_list[self.line_names.index(item)]
return self.agg_list[item]
@property
def audit(self):
"""
Renamed version of the audit dataframe
:return:
"""
if self.audit_df is not None:
return self.audit_df.rename(columns=self.renamer, index=self.line_renamer).T
@property
def density(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.density_df is not None:
return self.density_df.rename(columns=self.renamer)
@property
def augmented(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.augmented_df is not None:
return self.augmented_df.rename(columns=self.renamer)
@property
def statistics(self):
"""
Renamed version of the statistics dataframe
:return:
"""
return self.statistics_df.rename(columns=self.renamer)
def json(self, stream=None):
"""
write object as json
:param stream:
:return: stream or text
"""
args = dict()
args["bs"] = self.bs
args["log2"] = self.log2
args["padding"] = self.padding
args["tilt_amount"] = self.tilt_amount
args["distortion"] = repr(self._distortion)
args["sev_calc"] = self.sev_calc
args["remove_fuzz"] = self._remove_fuzz
args["approx_type"] = self.approx_type
args["approx_freq_ge"] = self.approx_freq_ge
args["last_update"] = str(self.last_update)
args["hash_rep_at_last_update"] = str(self.hash_rep_at_last_update)
d = dict()
# original
# d[self.name] = dict(args=args, spec=[a.spec for a in self.agg_list])
d['name'] = self.name
d['args'] = args
d['spec_list'] = [a._spec for a in self.agg_list]
logger.debug(f'Portfolio.json| dummping {self.name} to {stream}')
s = json.dumps(d) # , default_flow_style=False, indent=4)
logger.debug(f'Portfolio.json | {s}')
if stream is None:
return s
else:
return stream.write(s)
def save(self, filename='', mode='a'):
"""
persist to json in filename; if none save to user.json
:param filename:
:param mode: for file open
:return:
"""
if filename == "":
filename = Path.home() / 'agg/user.json'
filename.parent.mkdir(parents=True, exist_ok=True)
with filename.open(mode=mode, encoding='utf-8') as f:
self.json(stream=f)
logger.debug(f'Portfolio.save | {self.name} saved to {filename}')
def __add__(self, other):
"""
Add two portfolio objects INDEPENDENT sum (down road can look for the same severity...)
:param other:
:return:
"""
assert isinstance(other, Portfolio)
new_spec = []
for a in self.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
for a in other.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
return Portfolio(f'({self.name}) + ({other.name})', new_spec)
def __rmul__(self, other):
"""
new = other * self; treat as scale change
:param other:
:return:
"""
assert other > 0
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the severity
s = d['severity']
if 'mean' in s:
s['mean'] *= other
elif 'scale' in s:
s['scale'] *= other
else:
raise ValueError(f"Cannot adjust s['name'] for scale")
return Portfolio(f'{other} x {self.name}', new_spec)
def __mul__(self, other):
"""
new = self * other, other integer, sum of other independent copies
:param other:
:return:
"""
assert isinstance(other, int)
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the frequency
# TODO better freq dists; deal with Bernoulli where n=log<1
d['frequency']['n'] *= other
return Portfolio(f'Sum of {other} copies of {self.name}', new_spec)
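# Quick illustration of the two conventions above: `2 * port` is a scale change
# (severities are doubled, an inflation-style adjustment), whereas `port * 2`
# doubles the claim counts, i.e. the independent sum of two copies of the book.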
def snap(self, x):
"""
snap value x to the index of density_df
:param x:
:return:
"""
ix = self.density_df.index.get_loc(x, 'nearest')
return self.density_df.iat[ix, 0]
def audits(self, kind='all', **kwargs):
"""
produce audit plots to assess accuracy of outputs.
Currently only exeqa available
:param kind:
:param kwargs: passed to pandas plot, e.g. set xlim
:return:
"""
if kind == 'all':
kind = ['exeqa']
for k in kind:
if k == 'exeqa':
temp = self.density_df.filter(regex='exeqa_.*(?<!total)$').copy()
temp['sum'] = temp.sum(axis=1)
temp['err'] = temp['sum'] - temp.index
f, axs = plt.subplots(1, 2, figsize=(8, 3.75), constrained_layout=True)
ax = axs.flatten()
a = temp['err'].abs().plot(logy=True, title=f'Exeqa Sum Error', ax=ax[1], **kwargs)
a.plot(self.density_df.loss, self.density_df.p_total, label='p_total')
a.plot(self.density_df.loss, self.density_df.p_total * temp.err, label='prob wtd err')
a.grid('b')
a.legend(loc='lower left')
if 'xlim' in kwargs:
kwargs['ylim'] = kwargs['xlim']
temp.filter(regex='exeqa_.*(?<!total)$|sum').plot(title='exeqa and sum of parts', ax=ax[0],
**kwargs).grid('b')
f.suptitle(f'E[Xi | X=x] vs. Sum of Parts\nbs={self.bs}, log2={self.log2}, padding={self.padding}',
fontsize='x-large')
return f # for doc maker
def get_stat(self, line='total', stat='EmpMean'):
"""
Other analysis suggests that iloc and iat are about same speed but slower than ix
:param line:
:param stat:
:return:
"""
return self.audit_df.loc[line, stat]
def q(self, p, kind='lower'):
"""
return lowest quantile, appropriate for discrete bucketing.
quantile guaranteed to be in the index
nearest does not work because you always want to pick rounding up
Definition 2.1 (Quantiles)
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] ≥ α} is the lower α-quantile of X
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] > α} is the upper α-quantile of X.
We use the x-notation if the dependence on X is evident, otherwise the q-notion.
Acerbi and Tasche (2002)
:param p:
:param kind: allow upper or lower quantiles
:return:
"""
if self._linear_quantile_function is None:
# revised Dec 2019
self._linear_quantile_function = {}
self.q_temp = self.density_df[['loss', 'F']].groupby('F').agg({'loss': np.min})
self.q_temp.loc[1, 'loss'] = self.q_temp.loss.iloc[-1]
self.q_temp.loc[0, 'loss'] = 0
# revised Jan 2020
# F loss loss_s
# 0.000000 0.0 0.0
# 0.667617 0.0 4500.0
# a value here is V and ^ which is the same: correct
# 0.815977 4500.0 5500.0
# 0.937361 5500.0 9000.0
# upper and lower only differ at exact values of F where lower is loss and upper is loss_s
# in between must take the next value for lower and the previous value for next to get the same answer
self.q_temp = self.q_temp.sort_index()
# that q_temp left cts, want right continuous:
self.q_temp['loss_s'] = self.q_temp.loss.shift(-1)
self.q_temp.iloc[-1, 1] = self.q_temp.iloc[-1, 0]
# create interp functions
# old
# self._linear_quantile_function['upper'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# self._linear_quantile_function['lower'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# revised
self._linear_quantile_function['upper'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
fill_value='extrapolate')
self._linear_quantile_function['lower'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='next', bounds_error=False,
fill_value='extrapolate')
# change to using loss_s
self._linear_quantile_function['middle'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='linear', bounds_error=False,
fill_value='extrapolate')
l = float(self._linear_quantile_function[kind](p))
# because we are not interpolating the returned value must (should) be in the index...
assert kind == 'middle' or l in self.density_df.index
return l
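# Worked example using the small table in the comments above: between the jumps
# (say p = 0.70) the lower and upper quantiles agree and both return 4500.0; at
# a jump point such as p = 0.667617 the lower quantile is the loss column (0.0)
# while the upper quantile is the shifted loss_s column (4500.0). kind='middle'
# interpolates linearly between index points and need not land on the index.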
def cdf(self, x):
"""
distribution function
:param x:
:return:
"""
if self._cdf is None:
# Dec 2019: kind='linear' --> kind='previous'
self._cdf = interpolate.interp1d(self.density_df.loss, self.density_df.F, kind='previous',
bounds_error=False, fill_value='extrapolate')
return self._cdf(x)
def sf(self, x):
"""
survival function
:param x:
:return:
"""
return 1 - self.cdf(x)
def pdf(self, x):
"""
probability density function, assuming a continuous approximation of the bucketed density
:param x:
:return:
"""
if self._pdf is None:
self._pdf = interpolate.interp1d(self.density_df.loss, self.density_df.p_total, kind='linear',
bounds_error=False, fill_value='extrapolate')
return self._pdf(x) / self.bs
# # make some handy aliases; delete these go strictly with scipy.stats notation
# def F(self, x):
# """
# handy alias for distribution, CDF
# :param x:
# :return:
# """
# return self.cdf(x)
#
# def S(self, x):
# """
# handy alias for survival function, S
# :param x:
# :return:
# """
# return self.sf(x)
def var(self, p):
"""
value at risk = alias for quantile function
:param p:
:return:
"""
return self.q(p)
def tvar(self, p, kind='interp'):
"""
Compute the tail value at risk at threshold p
Really this function returns ES
Definition 2.6 (Tail mean and Expected Shortfall)
Assume E[X−] < ∞. Then
x¯(α) = TM_α(X) = α^{−1}E[X 1{X≤x(α)}] + x(α) (α − P[X ≤ x(α)])
is α-tail mean at level α the of X.
Acerbi and Tasche (2002)
We are interested in the right hand exceedence [?? note > vs ≥]
α^{−1}E[X 1{X > x(α)}] + x(α) (P[X ≤ x(α)] − α)
McNeil etc. p66-70 - this follows from def of ES as an integral
of the quantile function
:param p:
:param kind: 'interp' = interpolate exgta_total; 'tail' tail integral, 'body' NYI - (ex - body integral)/(1-p)+v
'inverse' from capital to p using interp method
:return:
"""
assert self.density_df is not None
if kind == 'tail':
# original
# _var = self.q(p)
# ex = self.density_df.loc[_var + self.bs:, ['p_total', 'loss']].product(axis=1).sum()
# pip = (self.density_df.loc[_var, 'F'] - p) * _var
# t_var = 1 / (1 - p) * (ex + pip)
# return t_var
# revised
if self._tail_var2 is None:
self._tail_var2 = self.density_df[['p_total', 'loss']].product(axis=1).iloc[::-1].cumsum().iloc[::-1]
_var = self.q(p)
ex = self._tail_var2.loc[_var + self.bs]
pip = (self.density_df.loc[_var, 'F'] - p) * _var
t_var = 1 / (1 - p) * (ex + pip)
return t_var
elif kind == 'interp':
# original implementation interpolated
if self._tail_var is None:
# make tvar function
sup = (self.density_df.p_total[::-1] > 0).idxmax()
if sup == self.density_df.index[-1]:
sup = np.inf
_x = self.density_df.F
_y = self.density_df.exgta_total
else:
_x = self.density_df.F.values[:self.density_df.index.get_loc(sup)]
_y = self.density_df.exgta_total.values[:self.density_df.index.get_loc(sup)]
p0 = self.density_df.at[0., 'F']
if p0 > 0:
ps = np.linspace(0, p0, 200, endpoint=False)
tempx = np.hstack((ps, _x))
tempy = np.hstack((self.ex / (1-ps), _y))
self._tail_var = interpolate.interp1d(tempx, tempy,
kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
else:
self._tail_var = interpolate.interp1d(_x, _y, kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
if type(p) in [float, np.float]:
return float(self._tail_var(p))
else:
return self._tail_var(p)
elif kind == 'inverse':
if self._inverse_tail_var is None:
# make tvar function
self._inverse_tail_var = interpolate.interp1d(self.density_df.exgta_total, self.density_df.F,
kind='linear', bounds_error=False,
fill_value='extrapolate')
if type(p) in [int, np.int, float, np.float]:
return float(self._inverse_tail_var(p))
else:
return self._inverse_tail_var(p)
else:
raise ValueError(f'Inadmissible kind passed to tvar; options are interp (default), inverse, or tail')
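# Worked sketch of the kind='tail' branch (numbers are made up): if q(0.99) = 100,
# the tail integral E[X 1{X > 100}] = 2.5 and F(100) = 0.992, then
# TVaR(0.99) = (2.5 + 100 * (0.992 - 0.99)) / (1 - 0.99) = 270.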
def tvar_threshold(self, p, kind):
"""
Find the value pt such that TVaR(pt) = VaR(p) using numerical Newton Raphson
"""
a = self.q(p, kind)
def f(p):
return self.tvar(p) - a
loop = 0
p1 = 1 - 2 * (1 - p)
fp1 = f(p1)
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
df1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / df1
fp1 = f(p1)
loop += 1
if loop == 10:
raise ValueError(f'Difficulty computing TVaR to match VaR at p={p}')
return p1
def equal_risk_var_tvar(self, p_v, p_t):
"""
solve for equal risk var and tvar: find pv and pt such that sum of
individual line VaR/TVaR at pv/pt equals the VaR(p) or TVaR(p_t)
these won't return elements in the index because you have to interpolate
hence using kind=middle
"""
# these two should obviously be the same
target_v = self.q(p_v, 'middle')
target_t = self.tvar(p_t)
def fv(p):
return sum([float(a.q(p, 'middle')) for a in self]) - target_v
def ft(p):
return sum([float(a.tvar(p)) for a in self]) - target_t
ans = np.zeros(2)
for i, f in enumerate([fv, ft]):
p1 = 1 - 2 * (1 - (p_v if i == 0 else p_t))
fp1 = f(p1)
loop = 0
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 10:
raise ValueError(f'Trouble finding equal risk {"TVaR" if i else "VaR"} at p_v={p_v}, p_t={p_t}. '
'No convergence after 10 iterations. ')
ans[i] = p1
return ans
def equal_risk_epd(self, a):
"""
determine the common epd threshold so sum sa equals a
"""
def f(p):
return sum([self.epd_2_assets[(l, 0)](p) for l in self.line_names]) - a
p1 = self.assets_2_epd[('total', 0)](a)
fp1 = f(p1)
loop = 0
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 10:
raise ValueError(f'Trouble finding equal risk EPD at a={a}. No convergence after 10 iterations. ')
return p1
def merton_perold(self, p, kind='lower'):
"""
compute Merton-Perold capital allocation at VaR(p) capital using VaR as risk measure
v = q(p)
TODO TVaR version of Merton-Perold
"""
# figure total assets
a = self.q(p, kind)
# shorthand abbreviation
df = self.density_df
loss = df.loss
ans = []
total = 0
for l in self.line_names:
q = self.density_df.loss.iloc[np.searchsorted(self.density_df[f'ημ_{l}'].cumsum(), p, side='right')]  # VaR(p) of the not-line (ημ) distribution
diff = a - q
ans.append(diff)
total += diff
ans.append(total)
return ans
def cotvar(self, p):
"""
make the p co-tvar asset allocation using ISA
Asset alloc = exgta = tail expected value, treating TVaR like a pricing variable
"""
av = self.q(p)
return self.density_df.loc[av, [f'exgta_{l}' for l in self.line_names_ex]].values
def as_severity(self, limit=np.inf, attachment=0, conditional=False):
"""
convert into a severity without recomputing
throws error if self not updated
:param limit:
:param attachment:
:param conditional:
:return:
"""
if self.density_df is None:
raise ValueError('Must update prior to converting to severity')
return Severity(sev_name=self, sev_a=self.log2, sev_b=self.bs,
exp_attachment=attachment, exp_limit=limit, sev_conditional=conditional)
def fit(self, approx_type='slognorm', output='agg'):
"""
returns a dictionary specification of the portfolio aggregate_project
if updated uses empirical moments, otherwise uses theoretic moments
:param approx_type: slognorm | sgamma
:param output: return a dict or agg language specification
:return:
"""
if self.audit_df is None:
# not updated
m = self.statistics_df.loc[('agg', 'mean'), 'total']
cv = self.statistics_df.loc[('agg', 'cv'), 'total']
skew = self.statistics_df.loc[('agg', 'skew'), 'total']
else:
# use statistics_df matched to computed aggregate_project
m, cv, skew = self.audit_df.loc['total', ['EmpMean', 'EmpCV', 'EmpSkew']]
name = f'{approx_type[0:4]}~{self.name[0:5]}'
agg_str = f'agg {name} 1 claim sev '
if approx_type == 'slognorm':
shift, mu, sigma = sln_fit(m, cv, skew)
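# sln_fit returns (shift, mu, sigma) of a shifted lognormal matching the
# input mean, cv and skewness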
# self.fzapprox = ss.lognorm(sigma, scale=np.exp(mu), loc=shift)
sev = {'sev_name': 'lognorm', 'sev_shape': sigma, 'sev_scale': np.exp(mu), 'sev_loc': shift}
agg_str += f'{np.exp(mu)} * lognorm {sigma} + {shift} '
elif approx_type == 'sgamma':
shift, alpha, theta = sgamma_fit(m, cv, skew)
# self.fzapprox = ss.gamma(alpha, scale=theta, loc=shift)
sev = {'sev_name': 'gamma', 'sev_a': alpha, 'sev_scale': theta, 'sev_loc': shift}
agg_str += f'{theta} * gamma {alpha} + {shift} '
else:
raise ValueError(f'Inadmissible approx_type {approx_type} passed to fit')
if output == 'agg':
agg_str += ' fixed'
return agg_str
else:
return {'name': name, 'note': f'frozen version of {self.name}', 'exp_en': 1, **sev, 'freq_name': 'fixed'}
def collapse(self, approx_type='slognorm'):
"""
returns new Portfolio with the fit
Deprecated...prefer uw(self.fit()) to go through the agg language approach
:param approx_type: slognorm | sgamma
:return:
"""
spec = self.fit(approx_type, output='dict')
logger.debug(f'Portfolio.collapse | Collapse created new Portfolio with spec {spec}')
logger.warning(f'Portfolio.collapse | Collapse is deprecated; use fit() instead.')
return Portfolio(f'Collapsed {self.name}', [spec])
def percentiles(self, pvalues=None):
"""
report_ser on percentiles and large losses
uses interpolation, audit_df uses nearest
:param pvalues: optional vector of percentile levels to use. If None, sensible defaults are provided
:return: DataFrame of percentiles indexed by line and percentile level (the ``log`` index level)
"""
df = pd.DataFrame(columns=['line', 'log', 'Agg Quantile'])
df = df.set_index(['line', 'log'])
# df.columns.name = 'perspective'
if pvalues is None:
pvalues = [0.5, 0.75, 0.8, 0.85, 0.9, 0.95, 0.98, 0.99, 0.994, 0.995, 0.999, 0.9999]
for line in self.line_names_ex:
q_agg = interpolate.interp1d(self.density_df[f'p_{line}'].cumsum(), self.density_df.loss,
kind='linear', bounds_error=False, fill_value='extrapolate')
for p in pvalues:
qq = q_agg(p)
df.loc[(line, p), :] = [float(qq)]
df = df.unstack(level=1)
return df
def recommend_bucket(self):
"""
data to help estimate a good bucket size
:return:
"""
df = pd.DataFrame(columns=['line', 'bs10'])
df = df.set_index('line')
for a in self.agg_list:
df.loc[a.name, :] = [a.recommend_bucket(10)]
df['bs11'] = df['bs10'] / 2
df['bs12'] = df['bs10'] / 4
df['bs13'] = df['bs10'] / 8
df['bs14'] = df['bs10'] / 16
df['bs15'] = df['bs10'] / 32
df['bs16'] = df['bs10'] / 64
df['bs17'] = df['bs10'] / 128
df['bs18'] = df['bs10'] / 256
df['bs19'] = df['bs10'] / 512
df['bs20'] = df['bs10'] / 1024
df.loc['total', :] = df.sum()
return df
def best_bucket(self, log2=16):
bs = sum([a.recommend_bucket(log2) for a in self])
return round_bucket(bs)
def update(self, log2, bs, approx_freq_ge=100, approx_type='slognorm', remove_fuzz=False,
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1, tilt_amount=0, epds=None,
trim_df=False, verbose=False, add_exa=True, aggregate_cession_function=None):
"""
create density_df, performs convolution. optionally adds additional information if ``add_exa=True``
for allocation and priority analysis
tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
(ASTIN 1999)
tilt x num buckets < 20 is recommended, p. 210
num buckets and max loss from bucket size
:param log2:
:param bs: bucket size
:param approx_freq_ge: use method of moments if frequency is larger than ``approx_freq_ge``
:param approx_type: type of method of moments approx to use (slognorm or sgamma)
:param remove_fuzz: remove machine noise elements from FFT
:param sev_calc: how to calculate the severity, discrete (point masses at xs) or continuous (uniform between xs points)
:param discretization_calc: survival or distribution (accurate on right or left tails)
:param normalize: if true, normalize the severity so sum probs = 1. This is generally what you want, but it can be turned off, e.g. when the severity is deliberately left unnormalized
:param padding: for fft 1 = double, 2 = quadruple
:param tilt_amount: for tilting methodology - see notes on density for suggested parameters
:param epds: epd points for priority analysis; if None-> sensible defaults
:param trim_df: remove unnecessary columns from density_df before returning
:param verbose: level of output
:param add_exa: run add_exa to append additional allocation information needed for pricing; if add_exa also add
epd info
:param aggregate_cession_function: function of Portfolio object that adjusts individual line densities; applied
after line aggs created but before creating not-lines; actual statistics do not reflect impact.
:return:
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.tilt_amount = tilt_amount
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = remove_fuzz
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
if self.hash_rep_at_last_update == hash(self):
logger.warning(f'Nothing has changed since last update at {self.last_update}')
return
self._linear_quantile_function = None
ft_line_density = {}
# line_density = {}
# not_line_density = {}
# add the densities
# tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
# (ASTIN 1999)
# tilt x num buckets < 20 recommended, p. 210
# num buckets and max loss from bucket size
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
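# discretized loss grid: N = 2**log2 buckets of width bs covering [0, N * bs)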
# make all the single line aggs
# note: looks like duplication but will all be references
# easier for add_exa to have as part of the portfolio module
# tilt
if self.tilt_amount != 0:
tilt_vector = np.exp(self.tilt_amount * np.arange(N))
else:
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update(xs, self.padding, tilt_vector, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize, verbose=verbose)
if verbose:
display(_a)
if aggregate_cession_function is not None:
aggregate_cession_function(agg, self.padding, tilt_vector)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
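# the independent total is the convolution of the line distributions: the
# product of their FFTs followed by a single inverse FFT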
# ft_line_density['total'] = ft_all
# make the not self.line_density = sum of all but the given line
# have the issue here that if you divide and the dist
# is symmetric then you get a div zero...
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
self.remove_fuzz(log='update')
# make audit statistics_df df
theoretical_stats = self.statistics_df.T.filter(regex='agg')
theoretical_stats.columns = ['EX1', 'EX2', 'EX3', 'Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']
theoretical_stats = theoretical_stats[['Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']]
# self.audit_percentiles = [0.9, 0.95, 0.99, 0.995, 0.996, 0.999, 0.9999, 1 - 1e-6]
self.audit_df = pd.DataFrame(
columns=['Sum probs', 'EmpMean', 'EmpCV', 'EmpSkew', "EmpKurt", 'EmpEX1', 'EmpEX2', 'EmpEX3'] +
['P' + str(100 * i) for i in self.audit_percentiles])
for col in self.line_names_ex:
sump = np.sum(self.density_df[f'p_{col}'])
t = self.density_df[f'p_{col}'] * self.density_df['loss']
ex1 = np.sum(t)
t *= self.density_df['loss']
ex2 = np.sum(t)
t *= self.density_df['loss']
ex3 = np.sum(t)
t *= self.density_df['loss']
ex4 = np.sum(t)
m, cv, s = MomentAggregator.static_moments_to_mcvsk(ex1, ex2, ex3)
# empirical kurtosis
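# fourth central moment mu4 = E[X^4] - 4 E[X^3] E[X] + 6 E[X^2] E[X]^2 - 3 E[X]^4,
# so excess kurtosis = mu4 / sigma^4 - 3 with sigma = m * cv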
kurt = (ex4 - 4 * ex3 * ex1 + 6 * ex1 ** 2 * ex2 - 3 * ex1 ** 4) / ((m * cv) ** 4) - 3
ps = np.zeros((len(self.audit_percentiles)))
temp = self.density_df[f'p_{col}'].cumsum()
for i, p in enumerate(self.audit_percentiles):
ps[i] = (temp > p).idxmax()
newrow = [sump, m, cv, s, kurt, ex1, ex2, ex3] + list(ps)
self.audit_df.loc[col, :] = newrow
self.audit_df = pd.concat((theoretical_stats, self.audit_df), axis=1, sort=True)
self.audit_df['MeanErr'] = self.audit_df['EmpMean'] / self.audit_df['Mean'] - 1
self.audit_df['CVErr'] = self.audit_df['EmpCV'] / self.audit_df['CV'] - 1
self.audit_df['SkewErr'] = self.audit_df['EmpSkew'] / self.audit_df['Skew'] - 1
# add exa details
if add_exa:
self.add_exa(self.density_df, details=True)
# default priority analysis
logger.debug('Adding EPDs in Portfolio.update')
if epds is None:
epds = np.hstack(
[np.linspace(0.5, 0.1, 4, endpoint=False)] +
[np.linspace(10 ** -n, 10 ** -(n + 1), 9, endpoint=False) for n in range(1, 7)])
epds = np.round(epds, 7)
self.priority_capital_df = pd.DataFrame(index=pd.Index(epds))
for col in self.line_names:
for i in range(3):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](
epds)
col = 'not ' + col
for i in range(2):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](epds)
self.priority_capital_df.columns = self.priority_capital_df.columns.str.split("_", expand=True)
self.priority_capital_df.sort_index(axis=1, level=1, inplace=True)
self.priority_capital_df.sort_index(axis=0, inplace=True)
else:
# at least want F and S to get quantile functions
self.density_df['F'] = np.cumsum(self.density_df.p_total)
self.density_df['S'] = 1 - self.density_df.F
self.ex = self.audit_df.loc['total', 'EmpMean']
self.last_update = np.datetime64('now')
self.hash_rep_at_last_update = hash(self)
if trim_df:
self.trim_df()
# invalidate stored functions
self._linear_quantile_function = None
self.q_temp = None
self._cdf = None
def update_efficiently(self, log2, bs, approx_freq_ge=100, approx_type='slognorm',
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1):
"""
runs stripped down versions of update and add_exa - bare bones
code copied from those routines and cleaned for comments etc.
:param log2:
:param bs:
:param approx_freq_ge:
:param approx_type:
:param sev_calc:
:param discretization_calc:
:param normalize:
:param padding:
:return:
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = True
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
ft_line_density = {}
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
# no tilt for efficient mode
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update_efficiently(xs, self.padding, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
# make the not self.line_density = sum of all but the given line
ft_nots = {}
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
ft_nots[line] = ft_not
self.remove_fuzz(log='update_efficiently')
# no audit statistics_df
# BEGIN add_exa ================================================================================================
# add exa details now in-line
# def add_exa(self, df, details, ft_nots=None):
# Call is self.add_exa(self.density_df, details=True)
# name in add_exa, keeps code shorter
df = self.density_df
cut_eps = np.finfo(np.float).eps
# sum of p_total is so important...we will rescale it...
if not np.all(df.p_total >= 0):
# have negative densities...get rid of them
first_neg = np.argwhere((df.p_total < 0).to_numpy()).min()
sum_p_total = df.p_total.sum()
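# note: only diagnostic quantities are computed in this branch; the densities themselves are not altered here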
df['F'] = np.cumsum(df.p_total)
df['S'] = \
df.p_total.shift(-1, fill_value=min(df.p_total.iloc[-1], max(0, 1. - (df.p_total.sum()))))[::-1].cumsum()[::-1]
# E(min(X, a))
# df['exa_total'] = self.cumintegral(df['S'])
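# E[min(X, a)] = integral_0^a S(x) dx, approximated by a left Riemann sum on the bs grid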
df['exa_total'] = df.S.shift(1, fill_value=0).cumsum() * self.bs
df['lev_total'] = df['exa_total']
df['exlea_total'] = \
(df.exa_total - df.loss * df.S) / df.F
n_ = df.shape[0]
if n_ < 1100:
mult = 1
elif n_ < 15000:
mult = 10
else:
mult = 100
loss_max = df[['loss', 'exlea_total']].query(' exlea_total>loss ').loss.max()
if np.isnan(loss_max):
loss_max = 0
else:
loss_max += mult * bs
# try nan in place of 0
df.loc[0:loss_max, 'exlea_total'] = np.nan
df['e_total'] = np.sum(df.p_total * df.loss)
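# E[X | X > a] = a + (E[X] - E[min(X, a)]) / S(a)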
df['exgta_total'] = df.loss + (df.e_total - df.exa_total) / df.S
df['exeqa_total'] = df.loss # E(X | X=a) = a(!) included for symmetry was exa
# FFT functions for use in exa calculations
# computing sums so minimal padding required
def loc_ft(x):
return ft(x, 1, None)
def loc_ift(x):
return ift(x, 1, None)
# where is S=0
Seq0 = (df.S == 0)
for col in self.line_names:
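# E[X_i | X = a]: convolve x * p_i with the not-i density via FFTs, then divide by p_total(a)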
df['exeqa_' + col] = \
np.real(loc_ift(loc_ft(df.loss * df['p_' + col]) *
ft_nots[col])) / df.p_total
df.loc[df.p_total < cut_eps, 'exeqa_' + col] = 0
df['exeqa_ημ_' + col] = \
np.real(loc_ift(loc_ft(df.loss * df['ημ_' + col]) *
loc_ft(df['p_' + col]))) / df.p_total
df.loc[df.p_total < cut_eps, 'exeqa_ημ_' + col] = 0
stemp = 1 - df['p_' + col].cumsum()
# df['lev_' + col] = self.cumintegral(stemp)
df['lev_' + col] = stemp.shift(1, fill_value=0).cumsum() * self.bs
stemp = 1 - df['ημ_' + col].cumsum()
df['lev_ημ_' + col] = stemp.shift(1, fill_value=0).cumsum() * self.bs
# EX_i | X<= a; temp is used in le and gt calcs
temp = np.cumsum(df['exeqa_' + col] * df.p_total)
df['exlea_' + col] = temp / df.F
df.loc[0:loss_max, 'exlea_' + col] = 0 # df.loc[0:loss_max, 'loss']
temp_not = np.cumsum(df['exeqa_ημ_' + col] * df.p_total)
df['exlea_ημ_' + col] = temp_not / df.F
df.loc[0:loss_max, 'exlea_ημ_' + col] = 0 # df.loc[0:loss_max, 'loss']
# constant value, helpful in calculations
# df['e_' + col] = np.sum(df['p_' + col] * df.loss)
# df['e_ημ_' + col] = np.sum(df['ημ_' + col] * df.loss)
#
# df['exgta_' + col] = (df['e_' + col] - temp) / df.S
# temp = df.loss.iloc[0] # loss
# df.loss.iloc[0] = 1 # avoid divide by zero
#
# # df['exi_x_' + col] = np.sum(
# # df['exeqa_' + col] * df.p_total / df.loss)
# temp_xi_x = np.cumsum(df['exeqa_' + col] * df.p_total / df.loss)
# df['exi_xlea_' + col] = temp_xi_x / df.F
# df.loc[0, 'exi_xlea_' + col] = 0 # df.F=0 at zero
# # more generally F=0 error: V
# df.loc[df.exlea_total == 0, 'exi_xlea_' + col] = 0
# # not version
# df['exi_x_ημ_' + col] = np.sum(
# df['exeqa_ημ_' + col] * df.p_total / df.loss)
# # as above
# temp_xi_x_not = np.cumsum(
# df['exeqa_ημ_' + col] * df.p_total / df.loss)
# df['exi_xlea_ημ_' + col] = temp_xi_x_not / df.F
# df.loc[0, 'exi_xlea_ημ_' + col] = 0 # df.F=0 at zero
# # more generally F=0 error:
# df.loc[df.exlea_total == 0, 'exi_xlea_ημ_' + col] = 0
# # put value back
# df.loss.iloc[0] = temp
# this is so important we will calculate it directly
df['exi_xgta_' + col] = ((df[f'exeqa_{col}'] / df.loss *
df.p_total).shift(-1)[
::-1].cumsum()) / df.S
# need this NOT to be nan otherwise exa won't come out correctly
df.loc[Seq0, 'exi_xgta_' + col] = 0.
df['exi_xgta_ημ_' + col] = ((df[f'exeqa_ημ_{col}'] / df.loss *
df.p_total).shift(-1)[
::-1].cumsum()) / df.S
df.loc[Seq0, 'exi_xgta_ημ_' + col] = 0.
df['exi_xeqa_' + col] = df['exeqa_' + col] / df['loss']
df.loc[0, 'exi_xeqa_' + col] = 0
df['exi_xeqa_ημ_' + col] = df['exeqa_ημ_' + col] / df['loss']
df.loc[0, 'exi_xeqa_ημ_' + col] = 0
df[f'exa_{col}'] = (df.S * df['exi_xgta_' + col]).shift(1, fill_value=0).cumsum() * self.bs
df['exa_ημ_' + col] = (df.S * df['exi_xgta_ημ_' + col]).shift(1, fill_value=0).cumsum() * self.bs
# END add_exa ==================================================================================================
self.last_update = np.datetime64('now')
# invalidate stored functions
self._linear_quantile_function = None
self.q_temp = None
self._cdf = None
def trim_df(self):
"""
trim out unwanted columns from density_df
epd used in graphics
:return:
"""
self.density_df = self.density_df.drop(
self.density_df.filter(regex='^e_|^exi_xlea|^[a-z_]+ημ').columns,
axis=1
)
def gradient(self, epsilon=1 / 128, kind='homog', method='forward', distortion=None, remove_fuzz=True,
extra_columns=None, do_swap=True):
"""
Compute the gradient of various quantities relative to a change in the volume of each
portfolio component.
Focus is on the quantities used in rate calculations: S, gS, p_total, exa, exag, exi_xgta, exi_xeqq,
exeqa, exgta etc.
homog:
inhomog:
:param epsilon: the increment to use; scale is 1+epsilon
:param kind: homog[ogeneous] or inhomog: homog computes impact of f((1+epsilon)X_i)-f(X_i). Inhomog
scales the frequency and recomputes. Note inhomog will have a slight scale issues with
E[Severity]
:param method: forward, central (using epsilon/2) or backwards
:param distortion: if included derivatives of statistics using the distortion, such as exag are also
computed
:param extra_columns: extra columns to compute derivatives of. Note there is virtually no overhead of adding additional
columns
:param do_swap: force the step to replace line with line+epsilon in all not line2's line2!=line1; whether you need
this or not depends on which variables you want differentiated. E.g. if you ask for exa_total only you don't need
to swap. But if you want exa_A, exa_B you do, otherwise the d/dA exa_B won't be correct. TODO: replace with code!
:return: DataFrame of gradients and audit_df in an Answer class
"""
if kind == 'inhomog' or kind[:7] == 'inhomog':
raise NotImplementedError(f'kind=={kind} not yet implemented')
if method == 'central':
raise NotImplementedError(f'method=={method} not yet implemented')
if method not in ('forward', 'backwards', 'central'):
raise ValueError('Inadmissible option passed to gradient.')
if self.tilt_amount:
raise ValueError('Gradients do not allow tilts')
# central = run this code forwards and backwards with epsilon / 2 and average?!
# Forwards or backwards
if method == 'forward':
delta = 1 + epsilon
dx = epsilon
pm = '+'
else:
delta = 1 - epsilon
dx = -epsilon
pm = '-'
# FFT functions for use in exa calculations; padding needs to be consistent with agg
def loc_ft(x):
return ft(x, self.padding, None)
def loc_ift(x):
return ift(x, self.padding, None)
# setup (compare self.update)
xs = self.density_df['loss'].values
tilt_vector = None
# (1+e)X computed for each line
agg_epsilon_df = pd.DataFrame(index=xs)
# compute the individual line (1+epsilon)X_i and then the revised total
new_aggs = {}
for base_agg in self.agg_list:
agg = base_agg.rescale(delta, kind)
new_aggs[base_agg.name] = agg
_a = agg.update(xs, self.padding, tilt_vector, 'exact' if agg.n < self.approx_freq_ge else self.approx_type,
self.sev_calc, self.discretization_calc, verbose=False)
agg_epsilon_df[f'p_{agg.name}'] = agg.agg_density
# the total with the line incremented
agg_epsilon_df[f'p_total_{agg.name}'] = \
np.real(loc_ift(agg.ftagg_density * loc_ft(self.density_df[f'ημ_{agg.name}'])))
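# total with only this line perturbed: convolve the rescaled line with the
# unchanged not-line (ημ) density via FFTs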
self.remove_fuzz(df=agg_epsilon_df, force=remove_fuzz, log='gradient')
percentiles = [0.9, 0.95, 0.99, 0.996, 0.999, 0.9999, 1 - 1e-6]
audit_df = pd.DataFrame(
columns=['Sum probs', 'EmpMean', 'EmpCV', 'EmpSkew', 'EmpEX1', 'EmpEX2', 'EmpEX3'] +
['P' + str(100 * i) for i in percentiles])
# 949 = epsilon 916 Delta
ep = chr(949)
D = chr(916)
for col in agg_epsilon_df.columns:
sump = np.sum(agg_epsilon_df[col])
t = agg_epsilon_df[col] * xs
ex1 = | np.sum(t) | numpy.sum |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfdbg module debug_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ParseNodeOrTensorNameTest(test_util.TensorFlowTestCase):
def testParseNodeName(self):
node_name, slot = debug_data.parse_node_or_tensor_name("namespace1/node_1")
self.assertEqual("namespace1/node_1", node_name)
self.assertIsNone(slot)
def testParseTensorName(self):
node_name, slot = debug_data.parse_node_or_tensor_name(
"namespace1/node_2:3")
self.assertEqual("namespace1/node_2", node_name)
self.assertEqual(3, slot)
class NodeNameChecksTest(test_util.TensorFlowTestCase):
def testIsCopyNode(self):
self.assertTrue(debug_data._is_copy_node("__copy_ns1/ns2/node3_0"))
self.assertFalse(debug_data._is_copy_node("copy_ns1/ns2/node3_0"))
self.assertFalse(debug_data._is_copy_node("_copy_ns1/ns2/node3_0"))
self.assertFalse(debug_data._is_copy_node("_copyns1/ns2/node3_0"))
self.assertFalse(debug_data._is_copy_node("__dbg_ns1/ns2/node3_0"))
def testIsDebugNode(self):
self.assertTrue(
debug_data._is_debug_node("__dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_data._is_debug_node("dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_data._is_debug_node("_dbg_ns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(
debug_data._is_debug_node("_dbgns1/ns2/node3:0_0_DebugIdentity"))
self.assertFalse(debug_data._is_debug_node("__copy_ns1/ns2/node3_0"))
class ParseDebugNodeNameTest(test_util.TensorFlowTestCase):
def testParseDebugNodeName_valid(self):
debug_node_name_1 = "__dbg_ns_a/ns_b/node_c:1_0_DebugIdentity"
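# expected format: __dbg_<node>:<output_slot>_<debug_op_index>_<DebugOp>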
(watched_node, watched_output_slot, debug_op_index,
debug_op) = debug_data._parse_debug_node_name(debug_node_name_1)
self.assertEqual("ns_a/ns_b/node_c", watched_node)
self.assertEqual(1, watched_output_slot)
self.assertEqual(0, debug_op_index)
self.assertEqual("DebugIdentity", debug_op)
def testParseDebugNodeName_invalidPrefix(self):
invalid_debug_node_name_1 = "__copy_ns_a/ns_b/node_c:1_0_DebugIdentity"
with self.assertRaisesRegexp(ValueError, "Invalid prefix"):
debug_data._parse_debug_node_name(invalid_debug_node_name_1)
def testParseDebugNodeName_missingDebugOpIndex(self):
invalid_debug_node_name_1 = "__dbg_node1:0_DebugIdentity"
with self.assertRaisesRegexp(ValueError, "Invalid debug node name"):
debug_data._parse_debug_node_name(invalid_debug_node_name_1)
def testParseDebugNodeName_invalidWatchedTensorName(self):
invalid_debug_node_name_1 = "__dbg_node1_0_DebugIdentity"
with self.assertRaisesRegexp(ValueError,
"Invalid tensor name in debug node name"):
debug_data._parse_debug_node_name(invalid_debug_node_name_1)
class HasNanOrInfTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dummy_datum = dummy_datum = debug_data.DebugTensorDatum(
"/foo", "bar_0_DebugIdentity_42")
def testNaN(self):
a = np.array([np.nan, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInf(self):
a = | np.array([np.inf, np.inf, 7.0]) | numpy.array |
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Script for converting Caffe (<= 1.0) models into the the simple state dict
format used by Detectron. For example, this script can convert the orignal
ResNet models released by MSRA.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cPickle as pickle
import numpy as np
import os
import sys
from caffe.proto import caffe_pb2
from caffe2.proto import caffe2_pb2
from caffe2.python import caffe_translator
from caffe2.python import utils
from google.protobuf import text_format
def parse_args():
parser = argparse.ArgumentParser(
description='Dump weights from a Caffe model'
)
parser.add_argument(
'--prototxt',
dest='prototxt_file_name',
help='Network definition prototxt file path',
default=None,
type=str
)
parser.add_argument(
'--caffemodel',
dest='caffemodel_file_name',
help='Pretrained network weights file path',
default=None,
type=str
)
parser.add_argument(
'--output',
dest='out_file_name',
help='Output file path',
default=None,
type=str
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def normalize_resnet_name(name):
if name.find('res') == 0 and name.find('res_') == -1:
# E.g.,
# res4b11_branch2c -> res4_11_branch2c
# res2a_branch1 -> res2_0_branch1
chunk = name[len('res'):name.find('_')]
name = (
'res' + chunk[0] + '_' + str(
int(chunk[2:]) if len(chunk) > 2 # e.g., "b1" -> 1
else ord(chunk[1]) - ord('a')
) + # e.g., "a" -> 0
name[name.find('_'):]
)
return name
def pickle_weights(out_file_name, weights):
blobs = {
normalize_resnet_name(blob.name): utils.Caffe2TensorToNumpyArray(blob)
for blob in weights.protos
}
with open(out_file_name, 'wb') as f:
pickle.dump(blobs, f, protocol=pickle.HIGHEST_PROTOCOL)
print('Wrote blobs:')
print(sorted(blobs.keys()))
def add_missing_biases(caffenet_weights):
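# Caffe convolution layers saved without a bias term have a single blob; append an
# all-zero bias blob so every conv layer exposes a (weight, bias) pair downstream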
for layer in caffenet_weights.layer:
if layer.type == 'Convolution' and len(layer.blobs) == 1:
num_filters = layer.blobs[0].shape.dim[0]
bias_blob = caffe_pb2.BlobProto()
bias_blob.data.extend(np.zeros(num_filters))
bias_blob.num, bias_blob.channels, bias_blob.height = 1, 1, 1
bias_blob.width = num_filters
layer.blobs.extend([bias_blob])
def remove_spatial_bn_layers(caffenet, caffenet_weights):
# Layer types associated with spatial batch norm
remove_types = ['BatchNorm', 'Scale']
def _remove_layers(net):
for i in reversed(range(len(net.layer))):
if net.layer[i].type in remove_types:
net.layer.pop(i)
# First remove layers from caffenet proto
_remove_layers(caffenet)
# We'll return these so we can save the batch norm parameters
bn_layers = [
layer for layer in caffenet_weights.layer if layer.type in remove_types
]
_remove_layers(caffenet_weights)
def _create_tensor(arr, shape, name):
t = caffe2_pb2.TensorProto()
t.name = name
t.data_type = caffe2_pb2.TensorProto.FLOAT
t.dims.extend(shape.dim)
t.float_data.extend(arr)
assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
return t
bn_tensors = []
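# Caffe spatial batch norm is stored as a BatchNorm layer (mean, variance) followed by a
# Scale layer (gamma, beta); pair them up and repackage each blob as a Caffe2 TensorProto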
for (bn, scl) in zip(bn_layers[0::2], bn_layers[1::2]):
assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
blob_out = 'res' + bn.name[len('bn'):] + '_bn'
bn_mean = np.asarray(bn.blobs[0].data)
bn_var = np.asarray(bn.blobs[1].data)
scale = np.asarray(scl.blobs[0].data)
bias = | np.asarray(scl.blobs[1].data) | numpy.asarray |
import unittest
from ancb import NumpyCircularBuffer
from ancb import ( # type: ignore
star_can_broadcast, can_broadcast
)
from numpy import array_equal, allclose, shares_memory
from numpy import array, zeros, arange, ndarray, ones, empty
from numpy.random import rand, randint
from numpy import fill_diagonal, roll
from itertools import zip_longest
from operator import (
matmul, add, sub, mul, truediv, mod, floordiv, pow,
rshift, lshift, and_, or_, xor, neg, pos, abs, inv, invert,
iadd, iand, ifloordiv, ilshift, imod, imul,
ior, ipow, irshift, isub, itruediv, ixor
)
class TestBroadcastability(unittest.TestCase):
def test_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
self.assertTrue(can_broadcast(x.shape, y.shape))
self.assertFalse(can_broadcast(x.shape, z.shape))
self.assertFalse(can_broadcast(y.shape, z.shape))
self.assertTrue(can_broadcast(x.shape, x.shape))
self.assertTrue(can_broadcast(y.shape, y.shape))
self.assertTrue(can_broadcast(z.shape, z.shape))
self.assertTrue(can_broadcast(w.shape, w.shape))
self.assertTrue(can_broadcast(x.shape, w.shape))
self.assertTrue(can_broadcast(y.shape, w.shape))
self.assertTrue(can_broadcast(z.shape, w.shape))
def test_star_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
starexpr = zip_longest(x.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, x.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, z.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(w.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
class OperatorTestFactory(type):
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
bin_operators = [
matmul, add, sub, mul, truediv, mod, floordiv, pow
]
un_operators = [neg, pos, abs, invert, inv]
bitbin_operators = [rshift, lshift, and_, or_, xor]
i_operators = [
iadd, ifloordiv, imul, ipow, isub, itruediv
]
bit_ioperators = [
ilshift, irshift, ior, iand, ixor, imod
]
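# each *_testcase factory below builds a test function for a single operator; the
# metaclass attaches them to the class as test_<operator name> methods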
def unop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = -arange(3, dtype=int)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(-1)
buffer.append(-2)
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # unfrag
buffer.append(-3)
test -= 1
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # frag
return f
def bitbinop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = arange(1, 4, dtype=int)
x = randint(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
return f
def binop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
test = arange(1, 4, dtype=float)
x = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
return f
def iop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
data2 = zeros(3, dtype=float)
test1 = arange(1, 4, dtype=float)
test2 = arange(2, 5, dtype=float)
x = rand(3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer2 + 0, test2))
return f
def bitiop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
data2 = zeros(3, dtype=int)
test1 = arange(1, 4, dtype=int)
test2 = arange(2, 5, dtype=int)
x = randint(low=1, high=100, size=3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(allclose(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(allclose(buffer2 + 0, test2))
return f
for op in bin_operators:
setattr(obj, 'test_{}'.format(op.__name__), binop_testcase(op))
for op in bitbin_operators:
setattr(obj, 'test_{}'.format(op.__name__), bitbinop_testcase(op))
for op in un_operators:
setattr(obj, 'test_{}'.format(op.__name__), unop_testcase(op))
for op in i_operators:
setattr(obj, 'test_{}'.format(op.__name__), iop_testcase(op))
for op in bit_ioperators:
setattr(obj, 'test_{}'.format(op.__name__), bitiop_testcase(op))
return(obj)
class TestNumpyCircularBuffer(
unittest.TestCase, metaclass=OperatorTestFactory
):
"""
NumpyCircularBuffer tests
"""
def test_init(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertTrue(array_equal(data, buffer))
def test_fragmentation(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertFalse(buffer.fragmented)
buffer.append(0)
self.assertFalse(buffer.fragmented)
buffer.append(1)
self.assertFalse(buffer.fragmented)
buffer.append(2)
self.assertFalse(buffer.fragmented)
buffer.append(3)
self.assertTrue(buffer.fragmented)
buffer.append(4)
self.assertTrue(buffer.fragmented)
buffer.append(5)
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
def test_matmul_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(buffer @ C[:1], arange(1) @ C[:1]))
buffer.append(1)
self.assertTrue(allclose(buffer @ C[:2], arange(2) @ C[:2]))
buffer.append(2)
self.assertTrue(allclose(buffer @ C, arange(3) @ C))
buffer.append(3)
self.assertTrue(allclose(buffer @ C, (arange(1, 4)) @ C))
buffer.append(4)
self.assertTrue(allclose(buffer @ C, (arange(2, 5)) @ C))
buffer.append(5)
self.assertTrue(allclose(buffer @ C, (arange(3, 6)) @ C))
buffer.append(6)
self.assertTrue(allclose(buffer @ C, (arange(4, 7)) @ C))
buffer.pop()
self.assertTrue(allclose(buffer @ C[1:], (arange(5, 7)) @ C[1:]))
buffer.pop()
self.assertTrue(allclose(buffer @ C[2:], (arange(6, 7)) @ C[2:]))
def test_matmul_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer @ A
res_b = buffer @ B
res_c = buffer @ C
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = C[:1] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = C[:2] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = C[1:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = C[2:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = A @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(A @ buffer, A @ array([0, 1, 2])))
buffer.append(3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
C = rand(12).reshape(4, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append([9, 10, 11])
test += 3
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_rmatmul_ndnd(self):
data = zeros((3, 3, 3))
A = zeros(27).reshape(3, 3, 3)
B = arange(27).reshape(3, 3, 3)
C = arange(3*8*3).reshape(3, 8, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append(filler + 27)
test += 9
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_matmul2_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(
buffer.matmul(C[:1], empty(1)), arange(1) @ C[:1]
)
)
buffer.append(1)
self.assertTrue(allclose(
buffer.matmul(C[:2], empty(2)), arange(2) @ C[:2]
)
)
buffer.append(2)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3) @ C
)
)
buffer.append(3)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(1, 4) @ C
)
)
buffer.append(4)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(2, 5) @ C
)
)
buffer.append(5)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3, 6) @ C
)
)
buffer.append(6)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(4, 7) @ C
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[1:], empty(2)), arange(5, 7) @ C[1:]
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[2:], empty(1)), arange(6, 7) @ C[2:]
)
)
def test_matmul2_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul2_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul2_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul2_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = buffer.rmatmul(C[:1], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = buffer.rmatmul(C[:2], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[1:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[2:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul2_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer.rmatmul(A, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(res_a, A @ array([0, 1, 2])))
buffer.append(3)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul2_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append( | arange(18, 27) | numpy.arange |
#------------------------------------------------------------------------------
# Image Classification Model Builder
# Copyright (c) 2019, scpepper All rights reserved.
#------------------------------------------------------------------------------
import os, shutil
import matplotlib.pyplot as plt
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
from datetime import datetime
from glob import glob
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import callbacks
from tensorflow.contrib import saved_model
from keras.preprocessing import image as keras_image
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import config as local_conf
from model import cnn_model, cnn_vgg16, cnn_w_dropout, cnn_w_batchnorm, resnet_v1, resnet_v2
#----------------------------------------------------------------
# Prepare environment
#----------------------------------------------------------------
# from config.py settings
#gdrive_base=local_conf.gdrive_base
#dataset_name=local_conf.dataset_name
#num_classes = local_conf.num_classes
#labels = local_conf.labels
#num_images = local_conf.num_images
#height= local_conf.height
#width= local_conf.width
#color= local_conf.color
#model_opt=local_conf.model_opt
#validate_rate=local_conf.validate_rate
#epochs=local_conf.epochs
#batch_size=local_conf.batch_size
def main(gdrive_base, dataset_name, num_classes, labels, num_images, width, height, color, model_opt, validate_rate=0.2, epochs=20, batch_size=4):
exec_date = datetime.now().strftime("%Y%m%d%H%M%S")
# Directory for TensorBorad Logs
log_dir=gdrive_base+'logs/'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Directory for Checkpoint and Froze Model
model_dir=gdrive_base+'models/'
if not os.path.exists(model_dir):
os.makedirs(model_dir)
#----------------------------------------------------------------
# Prepare Dataset
#----------------------------------------------------------------
# Prepare empty array
ary = np.zeros([num_classes, num_images, height, width, color], dtype=np.int)
counters = np.zeros(num_classes, dtype=np.int)
# Specify Dataset directory
# dir_name='datasets/'+dataset_name
dir_name='datasets/'
# Convert Image Data to Tensor
for file in glob(gdrive_base + dir_name + '/*.jpg'):
if color==1:
img = cv2.imread(file,cv2.IMREAD_GRAYSCALE)
else:
print(color)
img = cv2.imread(file,cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(len(labels)):
if "/"+labels[i] in file:
ary[i, counters[i]] = img
counters[i] += 1
# Save as npz
np.savez_compressed(f"{gdrive_base}{dir_name}np.npz", ary)
# Restore from npz
#ary = np.load(f"{gdrive_base}{dir_name}.npz")['arr_0']
# Sort train tensor for generating answer tensor
X_train = np.zeros([num_classes * num_images, height, width, color], dtype=np.int)
for i in range(num_classes):
for j in range(num_images):
X_train[(i * num_images) + j] = ary[i][j]
# Generate answer tensor
Y_train = np.repeat(np.arange(num_classes), num_images)
# Split the data
x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=validate_rate)
# Convert answer tensor to "one-hot"
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
# Convert the image shape
x_train = x_train.reshape(-1, height, width, color).astype(np.float32)
x_test = x_test.reshape(-1, height, width, color).astype(np.float32)
input_shape = (height, width, color)
#----------------------------------------------------------------
# Build Model
#----------------------------------------------------------------
# for resolve "Could not create cudnn handle: CUDNN_STATUS_ALLOC_FAILED" error.
tfconfig = tf.ConfigProto()
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
K.set_session(sess)
# Building model
if model_opt=="VGG16":
model = cnn_vgg16(input_shape=input_shape, num_classes=num_classes)
#elif model_opt=="RESNET1":
# model = resnet_v1(input_shape=input_shape, num_classes=num_classes)
elif model_opt=="RESNET":
model = resnet_v2(input_shape=input_shape, num_classes=num_classes)
else:
# model=cnn_model(input_shape=input_shape, num_classes=num_classes)
# model=cnn_w_dropout(input_shape=input_shape, num_classes=num_classes)
model=cnn_w_batchnorm(input_shape=input_shape, num_classes=num_classes)
# Compile Model
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.001, momentum=0.9), metrics=['accuracy'])
model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.001), metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=0.001), metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adagrad(lr=0.001), metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adadelta(lr=0.001), metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adamax(lr=0.001), metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizers.Nadam(lr=0.001), metrics=['accuracy'])
# Callback setting for TensorBoard
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1,write_images=1)
# Checkpoint setting
checkpoint_path = f"{model_dir}{dataset_name}_{model_opt}_{exec_date}" + "_cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Callback for checkpoint
cp_cb = callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1, period=5)
# Show model summary
model.summary()
# Restore learned parameters from checkpoint
#model.load_weights(f'{model_dir}run1/{model_structure}_{data_set}_cp-0010.ckpt')
#----------------------------------------------------------------
# Training the Model
#----------------------------------------------------------------
# Data generator parameter setting
params = {
'rotation_range': 20,
'zoom_range': 0.10,
'height_shift_range': 0.1,
'width_shift_range': 0.1
}
datagen = keras_image.ImageDataGenerator(**params)
datagen.fit(x_train)
from random import shuffle
from scipy import ndimage
def generator(x, y1, train):
while True:
if train:
keys = list(range(len(x)))
shuffle(keys)
else:
keys = list(range(len(y1)))
shuffle(keys)
inputs = []
label1 = []
for key in keys:
img = x[key]
if train:
                    # Rotate the image
rotate_rate = np.random.normal(0,0.5)*10
img = ndimage.rotate(x[key], rotate_rate)
img = cv2.resize(img,(width, height))
                    # Blur the image
if np.random.randint(0,2):
filter_rate = np.random.randint(0,6)
img = ndimage.gaussian_filter(img, sigma=filter_rate)
inputs.append(img)
label1.append(y1[key])
if len(inputs) == batch_size:
                    tmp_inputs = np.array(inputs)
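                    # Sketch (assumption, not part of the original code): a batch
                    # generator of this shape usually finishes by converting the labels,
                    # yielding the pair and resetting the buffers, e.g.
                    #   tmp_label1 = np.array(label1)
                    #   yield tmp_inputs, tmp_label1
                    #   inputs, label1 = [], []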
#!//anaconda/envs/py36/bin/python
#
# File name: kmc_pld.py
# Date: 2018/08/03 09:07
# Author: <NAME>
#
# Description:
#
import sys
import re
import random
import numpy as np
from itertools import product
from collections import Counter, defaultdict
#import events
from .events import EventTree
class KMCModel:
"""Class managing kmc moves and event modifications"""
def __init__(self, latt_type):
self.latt_type = latt_type
self.__setup_neighbors()
def __setup_neighbors(self):
"""Create lists of neighboring (nn & nnn) sites"""
nbrlist = []
if self.latt_type == 'fcc':
# NN
nbrlist.append(np.array([ 1, 1, 0]))
nbrlist.append(np.array([ 1,-1, 0]))
nbrlist.append(np.array([-1, 1, 0]))
nbrlist.append(np.array([-1,-1, 0]))
nbrlist.append(np.array([ 1, 0,-1]))
nbrlist.append(np.array([-1, 0,-1]))
nbrlist.append(np.array([ 0, 1,-1]))
nbrlist.append(np.array([ 0,-1,-1]))
nbrlist.append(np.array([ 1, 0, 1]))
nbrlist.append(np.array([-1, 0, 1]))
nbrlist.append(np.array([ 0, 1, 1]))
nbrlist.append(np.array([ 0,-1, 1]))
# NNN
nbrlist.append(np.array([ 2, 0, 0]))
nbrlist.append(np.array([-2, 0, 0]))
nbrlist.append(np.array([ 0, 2, 0]))
nbrlist.append(np.array([ 0,-2, 0]))
nbrlist.append(np.array([ 0, 0, 2]))
nbrlist.append(np.array([ 0, 0,-2]))
else:
raise ValueError(f'Chosen {self.latt_type} lattice. Currently only FCC lattice is supported.')
self.nbrlist = nbrlist
def make_lattice(self, xyz, box):
"""
Set up site lables on FCC lattice
"""
# create simulation box (fill with -1 denoting sites on on the FCC lattice
latt = -1*np.ones((tuple(box)), dtype=int)
# cycle through lattice sites and mark FCC sites with 0 (empty site)
for r in product(range(box[0]), range(box[1]), range(box[2])):
if sum(r) % 2 == 0:
latt[tuple(r)] = 0
# fill lattice sites with atom ids
for i, r in enumerate(xyz, start=1):
assert sum(r) % 2 == 0, f'{r} Atom not on FCC lattice!'
latt[tuple(r)] = i
self.latt = latt
self.box = box
self.xyz = xyz
self.nat = len(self.xyz)
# Set grain number of each substrate atom to 0
self.grain = [0 for _ in range(self.nat)]
def find_neighbors(self, ri):
"""
FInd neighbors and return ids usable in site_dict
"""
# search nearest neighbors
# to determine stable sites (needs at least 3)
neighbors = []
grain_numbers = []
for dr in self.nbrlist[0:12]:
rj = tuple((np.array(ri) + dr) % self.box)
iatom = self.latt[rj]
neighbors.append(rj)
if iatom > 0:
grain_numbers.append(self.grain[iatom-1])
return neighbors, grain_numbers
def get_grain(self, grain_numbers):
grain_counts = Counter(grain_numbers)
if 0 in grain_counts:
del grain_counts[0]
if any(grain_counts):
#g_number = sorted(grain_counts, key=grain_counts.__getitem__)[-1]
g_number = grain_counts.most_common(1)[0][0]
else:
g_number = max(self.grain) + 1
return g_number
def find_events(self, rj):
"""
Finds events for site j at rj.
Should be used in init_events
"""
ix, iy, iz = rj
iatom = self.latt[ix, iy, iz]
#print('rj', type(rj), rj, iatom)
events_found = []
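        # Events are encoded as 7-tuples (event_type, ix, iy, iz, jx, jy, jz):
        # type 0 is a deposition (source equals destination) and type 1 is a
        # diffusion jump from site (ix, iy, iz) to site (jx, jy, jz).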
# vacancy, test for possibility of deposition event
if iatom == 0:
# rj = np.array(rj, dtype=int)
# count number of atomic neighbors in the target position
_, grain_numbers_j = self.find_neighbors(rj)
# if 3 or more nearest neighbors with grain IDs, create a deposition event
if len(grain_numbers_j) > 2:
events_found.append((0, ix, iy, iz, ix, iy, iz))
# atom, find diffusion events
elif iatom > 0:
# search for possible diffusion events
# explore neighborhood of atom j
neighbors_j, grain_numbers_j = self.find_neighbors(rj)
for rk in neighbors_j:
# find if vacancy is a good destination spot
if self.latt[rk] == 0:
_, grain_numbers_k = self.find_neighbors(rk)
# if number of real atoms 3 or more, make vacancy available as
# a destination for deposition and diffusion
if len(grain_numbers_k)-1 > 2:
# do not diffuse upward
if rk[2] > rj[2]:
continue
events_found.append((1, ix, iy, iz, rk[0], rk[1], rk[2]))
return events_found
def init_events(self, rates):
rates = np.array(rates)
# structure to store event information list of sets, containing
# events dictionary keys
event_list = [set() for _ in range(rates.shape[0])]
# dictionary to store references to event_list
site_dict = defaultdict(list)
# Deposition event - find vacant sites available for deposition
for ix, iy in product(range(self.box[0]), range(self.box[1])):
# find z position
for iz in range(self.box[2]):
if self.latt[ix, iy, iz] == 0:
t_ri = (ix, iy, iz)
# count number of neighbors in the target position
neighbors, grain_numbers = self.find_neighbors(t_ri)
# if 3 or more nearest neighbors with grain IDs, create a deposition event
if len(grain_numbers) > 2:
event_tuple = (0, ix, iy, iz, ix, iy, iz)
event_list[0].add(event_tuple)
# add event information to the site
site_dict[t_ri].append(event_tuple)
break
# diffusion events for actual atoms (i.e., atom id > 0)
for i, ri in enumerate(self.xyz, start=1):
if ri[2] < 2: continue
# cycle over neighbor sites
for dr in self.nbrlist:
# do not diffuse upward
if dr[2] > 0: continue
rj = tuple((ri + dr) % self.box)
# is vacancy in the neighborhood of atom i?
if self.latt[rj] == 0:
# explore neighborhood of the target vacancy
neighbors, grain_numbers = self.find_neighbors(rj)
# if 3 or more nearest neighbors with grain IDs present, create a diffusion event
if len(grain_numbers) > 2:
event_tuple = (1, ri[0], ri[1], ri[2], rj[0], rj[1], rj[2])
event_list[1].add(event_tuple)
# add event information to the site
site_dict[tuple(ri)].append(event_tuple)
self.event_list = event_list
self.site_dict = site_dict
# Get dictionary of event type counts
n_events = np.array([len(e) for e in self.event_list])
print('Number of events:', n_events)
# Initiate event data structures
self.etree = EventTree(rates)
self.etree.update_events(n_events)
def move(self, event_type, event_number):
"""
Perform kmc move given by an event (deposition or diffusion)
and update event list
"""
# double check if there are some free spaces (just in case - should
# follow from zero remaining events)
if len(self.xyz) == self.box[0]*self.box[1]*self.box[2]/2:
raise ValueError(f'Lattice is full of atoms, no more events possible.')
# find a tuple containing information about the selected event
event = tuple(self.event_list[event_type])[event_number]
old_events = []
new_events = []
n_events = self.etree.n_events
print('# event:', event, 'ev#', [len(el) for el in self.event_list], end='')
print('at#',len(self.xyz), 'gr#', len(set(self.grain)), 'lxyz', self.xyz[-1])
for i in range(len(self.event_list)):
assert len(self.event_list[i]) == n_events[i], f'Start: Number of events of type {i} does not match: {len(self.event_list[i])} vs. {n_events[i]}'
# deposition event
if event_type == 0:
t_ri = event[4:7]
ri = np.array(t_ri)
# create a new atom
self.xyz.append(ri)
iatom = len(self.xyz)
# put it on a lattice
self.latt[t_ri] = iatom # id for the site properties with atom id and list of events
# search neighbors and grain numbers
neighbors, grain_numbers = self.find_neighbors(t_ri)
# assign a new grain number to the atom
self.grain.append(self.get_grain(grain_numbers))
# Identify old events for removal
# remove the current deposition event
old_events.extend(self.site_dict[t_ri])
# ... and the associated dictionary of site events
del self.site_dict[t_ri]
# find diffusion events of the deposited atom
events_found = self.find_events(t_ri)
new_events.extend(events_found)
# remove all old events of the new neighbors and add their new events
for t_rj in neighbors:
if t_rj == t_ri:
continue
# remove all current events of neighbor j
old_events.extend(self.site_dict[t_rj])
del self.site_dict[t_rj]
# add new events of neighbor j
events_found = self.find_events(t_rj)
new_events.extend(events_found)
elif event_type == 1: # diffusion
t_r0 = event[1:4] # initial position
t_ri = event[4:7] # final position
r0 = np.array(t_r0)
            ri = np.array(t_ri)
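            # Sketch (assumption, not part of the original code): the diffusion branch
            # would typically continue by relocating the atom and rebuilding events, e.g.
            #   iatom = self.latt[t_r0]
            #   self.latt[t_r0] = 0
            #   self.latt[t_ri] = iatom
            #   self.xyz[iatom - 1] = ri
            # followed by removing the old events around t_r0/t_ri and adding the new
            # ones, mirroring the deposition branch above.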
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as patches
import yaml
import pandas as pd
from kitti_util import *
from matplotlib.lines import Line2D
import cv2
###instance point cloud###
v_path = '6397_Car_0.bin'
num_features = 4
points_v = np.fromfile(v_path, dtype=np.float32, count=-1).reshape([-1, num_features])
points_v_ = points_v[:,0:3]
#### 5.2*5.2*2.4
#### center 2.6 2.6 1.2
a = np.arange(0.2, 5.2, 0.4)
b = np.arange(0.2, 5.2, 0.4)
c = np.arange(0.4, 2.4, 0.8)
all = a.shape[0] * b.shape[0] * c.shape[0]
pos = np.zeros(points_v_.shape, dtype = np.int)
for i in range(points_v_.shape[0]):
x = points_v_[i,0]
y = points_v_[i,1]
z = points_v_[i,2]
diff_x = np.abs(x + 2.6 - a)
x_pos = np.where(diff_x==np.min(diff_x))[0]
    diff_y = np.abs(y + 2.6 - b)
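    # Sketch (assumption, not part of the original code): the remaining axes would
    # typically be voxelised in the same way as x, e.g.
    #   y_pos = np.where(diff_y == np.min(diff_y))[0]
    #   diff_z = np.abs(z + 1.2 - c)
    #   z_pos = np.where(diff_z == np.min(diff_z))[0]
    #   pos[i] = [x_pos[0], y_pos[0], z_pos[0]]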
import matplotlib.pyplot as plt
import numpy as np
import os
import flopter.core.constants
import flopter.magnum.database
from flopter.magnum.magopter import Magopter
from flopter.core.lputils import MagnumProbes
import glob
import flopter.magnum.readfastadc as adc
from flopter.core import constants as c, normalise as nrm, fitters as f, lputils as lp
import flopter.magnum.database as mag
from scipy.interpolate import interp1d
import scipy.signal as sig
import concurrent.futures as cf
import pathlib as pth
import pandas as pd
# from tkinter.filedialog import askopenfilename
def main_magopter_analysis():
folders = ['2018-05-01_Leland/', '2018-05-02_Leland/', '2018-05-03_Leland/',
'2018-06-05_Leland/', '2018-06-06_Leland/', '2018-06-07_Leland/']
files = []
file_folders = []
for folder1 in folders:
os.chdir(Magopter.get_data_path() + folder1)
files.extend(glob.glob('*.adc'))
file_folders.extend([folder1] * len(glob.glob('*.adc')))
# files = [f.replace(' ', '_') for f in files]
files.sort()
# file = '2018-05-01_12h_55m_47s_TT_06550564404491814477.adc' # 8
# file = '2018-05-03_11h_31m_41s_TT_06551284859908422561.adc' # 82
files_of_interest = {
8: "First analysed",
82: "Higher Temp",
97: "Angular Sweep with different probes"
}
file_index = 82
# file = files[file_index]
file = files[-2]
ts_file = files[-1]
folder = file_folders[-2]
print(folder, file)
print(flopter.magnum.database.human_time_str(adc.get_magnumdb_timestamp(ts_file)))
print(ts_file)
magopter = Magopter(folder, ts_file)
# print(file, magopter.magnum_db.get_time_range(filename=file))
# plt.figure()
# plt.errorbar(magopter.ts_coords, magopter.ts_temp, yerr=magopter.ts_temp_d, label='Temperature')
# exit()
# length = len(magopter.t_file)
# for i in range(1, 20):
# split = int(length / i)
# plt.figure()
# plt.title('i = {}'.format(i))
# plt.log
# for j in range(i):
# plt.semilogy(magopter.t_file[j*split:j+1*split], label='j = {}'.format(j))
# plt.show()
dsr = 10
magopter.prepare(down_sampling_rate=dsr, plot_fl=True)
# magopter.trim(trim_end=0.82)
magopter.trim(trim_end=0.83)
fit_df_0, fit_df_1 = magopter.fit()
iv_data = fit_df_0.iloc[[125]]
plt.figure()
for iv_curve in magopter.iv_arrs[0]:
plt.plot(iv_curve.time, iv_curve.current)
plt.axvline(x=iv_data.index)
# Flush probe measurements
L_small = 3e-3 # m
a_small = 2e-3 # m
b_small = 3e-3 # m
g_small = 2e-3 # m
theta_f_small = np.radians(72)
L_large = 5e-3 # m
a_large = 4.5e-3 # m
b_large = 6e-3 # m
g_large = 1e-3 # m
theta_f_large = np.radians(73.3)
L_reg = 5e-3 # m
a_reg = 2e-3 # m
b_reg = 3.34e-3 # m
g_reg = 1e-3 # m
theta_f_reg = np.radians(75)
L_cyl = 4e-3 # m
g_cyl = 5e-4 # m
# T_e = 1.78 # eV
# n_e = 5.1e19 # m^-3
# fwhm = 14.3 # mm
# T_e = 0.67 # eV
# n_e = 2.3e19 # m^-3
# fwhm = 16 # mm
# T_e = 1.68
# n_e = 1.93e19
# fwhm = 16.8
# T_e = 0.75
# n_e = 1.3e20
# fwhm = 16.8
# T_e = 0.76
# n_e = 1.0e20
# fwhm = 16.8
T_e = 1.61
n_e = 1.41e20
fwhm = 12.4
deg_freedom = 3
gamma_i = (deg_freedom + 2) / 2
d_perp = 3e-4 # m
theta_p = np.radians(10)
theta_perp = np.radians(10)
probe_s = lp.AngledTipProbe(a_small, b_small, L_small, g_small, d_perp, theta_f_small, theta_p)
probe_l = lp.AngledTipProbe(a_large, b_large, L_large, g_large, d_perp, theta_f_large, theta_p)
probe_r = lp.AngledTipProbe(a_reg, b_reg, L_reg, g_reg, d_perp, theta_f_reg, theta_p)
probe_c = lp.FlushCylindricalProbe(L_cyl / 2, g_cyl, d_perp)
A_coll_s = lp.calc_probe_collection_area(a_small, b_small, L_small, g_small, d_perp, theta_perp, theta_p,
print_fl=False)
A_coll_l = lp.calc_probe_collection_area(a_large, b_large, L_large, g_large, d_perp, theta_perp, theta_p,
print_fl=False)
A_coll_r = lp.calc_probe_collection_area(a_reg, b_reg, L_reg, g_reg, d_perp, theta_perp, theta_p, print_fl=False)
A_coll_c = probe_c.get_collection_area(theta_perp)
print('Small area: {}, Large area: {}, Regular area: {}, Cylindrical area: {}'.format(A_coll_s, A_coll_l, A_coll_r,
A_coll_c))
# Plotting analytical IV over the top of the raw IVs
print(fit_df_0)
plt.figure()
# for iv_curve in magopter.iv_arr_coax_0:
# plt.plot(iv_curve.voltage, iv_curve.current)
plt.plot(iv_data[c.RAW_X].tolist()[0], iv_data[c.RAW_Y].tolist()[0], 'x', label='Raw IV')
plt.plot(iv_data[c.RAW_X].tolist()[0], iv_data[c.FIT_Y].tolist()[0], label='Fit IV')
iv_v_f = -10
I_s = lp.analytical_iv_curve(iv_data[c.RAW_X].tolist()[0], iv_v_f, T_e, n_e, theta_perp, A_coll_s, L=L_small,
g=g_small)
I_c = lp.analytical_iv_curve(iv_data[c.RAW_X].tolist()[0], iv_v_f, T_e, n_e, theta_perp, A_coll_c, L=L_small,
g=g_small)
plt.plot(iv_data[c.RAW_X].tolist()[0], I_s, label='Analytical', linestyle='dashed', linewidth=1, color='r')
# plt.plot(iv_data[c.RAW_X].tolist()[0], I_c, label='Analytical (c)', linestyle='dashed', linewidth=1, color='g')
plt.legend()
plt.title('Comparison of analytical to measured IV curves for the small area probe')
plt.xlabel('Voltage (V)')
plt.ylabel('Current (A)')
# A_coll_s = calc_probe_collection_A_alt(a_small, b_small, L_small, theta_perp, theta_p)
# A_coll_l = calc_probe_collection_A_alt(a_large, b_large, L_large, theta_perp, theta_p)
# A_coll_l = (26.25 * 1e-6) * np.sin(theta_perp + theta_p)
# print('Small area: {}, Large area: {}'.format(A_coll_s, A_coll_l))
c_s = np.sqrt((flopter.core.constants.ELEM_CHARGE * (T_e + gamma_i * T_e)) / flopter.core.constants.PROTON_MASS)
n_e_0 = fit_df_0[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s * A_coll_s)
n_e_1 = fit_df_1[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s * A_coll_c)
I_sat_0 = c_s * n_e * flopter.core.constants.ELEM_CHARGE * A_coll_s
I_sat_1 = c_s * n_e * flopter.core.constants.ELEM_CHARGE * A_coll_c
J_sat_0 = fit_df_0[c.ION_SAT] / A_coll_s
J_sat_1 = fit_df_1[c.ION_SAT] / A_coll_c
plt.figure()
plt.subplot(221)
plt.title('Electron Temperature Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$T_e$ (eV)')
plt.errorbar(fit_df_0.index, c.ELEC_TEMP, yerr=c.ERROR_STRING.format(c.ELEC_TEMP), data=fit_df_0, fmt='x',
label='Half area')
plt.errorbar(fit_df_1.index, c.ELEC_TEMP, yerr=c.ERROR_STRING.format(c.ELEC_TEMP), data=fit_df_1, fmt='x',
label='Cylinder area')
plt.axhline(y=T_e, linestyle='dashed', linewidth=1, color='r', label='TS')
plt.legend()
plt.subplot(222)
plt.title('Ion Saturation Current Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$I^+_{sat}$ (eV)')
plt.errorbar(fit_df_0.index, c.ION_SAT, yerr=c.ERROR_STRING.format(c.ION_SAT), data=fit_df_0, label='Half area',
fmt='x')
plt.errorbar(fit_df_1.index, c.ION_SAT, yerr=c.ERROR_STRING.format(c.ION_SAT), data=fit_df_1, label='Cylinder area',
fmt='x')
# for arc in magopter.arcs:
# plt.axvline(x=arc, linestyle='dashed', linewidth=1, color='r')
plt.axhline(y=I_sat_0, linestyle='dashed', linewidth=1, color='r', label='Expected I_sat (s)')
plt.legend()
# plt.figure()
# plt.subplot(223)
# plt.title('Current Density Measurements')
# plt.xlabel('Time (s)')
# plt.ylabel(r'$J_{sat}$ (Am$^{-2}$)')
# plt.plot(fit_df_0.index, J_sat_0, label='Half area')
# plt.plot(fit_df_1.index, J_sat_1, label='Cylinder area')
# for arc in magopter.arcs:
# plt.axvline(x=arc, linestyle='dashed', linewidth=1, color='r')
# plt.legend()
# plt.figure()
plt.subplot(223)
plt.title('Electron Density Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$n_e$ (m$^{-3}$)')
plt.plot(fit_df_0.index, n_e_0, 'x', label='Half Area')
plt.plot(fit_df_1.index, n_e_1, 'x', label='Cylinder Area')
plt.axhline(y=n_e, linestyle='dashed', linewidth=1, color='r', label='TS')
plt.legend()
a_s = lp.calc_sheath_expansion_param(T_e, n_e, L_small, g_small, theta_perp)
a_c = lp.calc_sheath_expansion_param(T_e, n_e, L_cyl, g_cyl, theta_perp)
print(a_s, a_c)
plt.subplot(224)
plt.title('Sheath Expansion Coefficient Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$a$')
plt.errorbar(fit_df_0.index, c.SHEATH_EXP, yerr=c.ERROR_STRING.format(c.SHEATH_EXP), data=fit_df_0, fmt='x',
label='Half Area')
plt.errorbar(fit_df_1.index, c.SHEATH_EXP, yerr=c.ERROR_STRING.format(c.SHEATH_EXP), data=fit_df_1, fmt='x',
label='Cylinder Area')
plt.axhline(y=a_s, linestyle='dashed', linewidth=1, color='r', label='Expected - small')
plt.axhline(y=a_c, linestyle='dashed', linewidth=1, color='b', label='Expected - cyl')
plt.legend()
plt.show()
def integrated_analysis(probe_coax_0, probe_coax_1, folder, file, ts_file=None):
magopter = Magopter(folder, file, ts_filename=ts_file)
dsr = 1
magopter.prepare(down_sampling_rate=dsr, roi_b_plasma=True, crit_freq=4000, crit_ampl=None)
# magopter.trim(trim_end=0.83)
fit_df_0, fit_df_1 = magopter.fit()
theta_perp = np.radians(10)
A_coll_0 = probe_coax_0.get_collection_area(theta_perp)
A_coll_1 = probe_coax_1.get_collection_area(theta_perp)
if magopter.ts_temp is not None:
temps = [np.max(temp) / flopter.core.constants.ELEM_CHARGE for temp in magopter.ts_temp[mag.DATA]]
denss = [np.max(dens) for dens in magopter.ts_dens[mag.DATA]]
T_e = np.mean(temps)
d_T_e = np.std(temps) / np.sqrt(len(temps))
n_e = np.mean(denss)
d_n_e = np.std(denss) / np.sqrt(len(denss))
print('T = {}+-{}, n = {}+-{}'.format(T_e, d_T_e, n_e, d_n_e))
else:
T_e = 1.61
d_T_e = 0.01
n_e = 1.41e20
d_n_e = 0.01e20
fwhm = 12.4
# t_0 = -0.35
t_0 = 0
target_pos_t, target_pos_x = magopter.magnum_data[mag.TARGET_POS]
# target_pos_t, target_pos_x = magopter.magnum_db.pad_continuous_variable(magopter.magnum_data[mag.TARGET_POS])
target_pos_t = np.array(target_pos_t)
target_voltage_t = np.array(magopter.magnum_data[mag.TARGET_VOLTAGE][0])
target_voltage_x = np.array(magopter.magnum_data[mag.TARGET_VOLTAGE][1])
deg_freedom = 2
# gamma_i = (deg_freedom + 2) / 2
gamma_i = 1
c_s_0 = np.sqrt((flopter.core.constants.ELEM_CHARGE * (fit_df_0[c.ELEC_TEMP] + gamma_i * fit_df_0[c.ELEC_TEMP])) / flopter.core.constants.PROTON_MASS)
    c_s_1 = np.sqrt((flopter.core.constants.ELEM_CHARGE * (fit_df_1[c.ELEC_TEMP] + gamma_i * fit_df_1[c.ELEC_TEMP])) / flopter.core.constants.PROTON_MASS)
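    # Sketch (assumption, not part of the original code): densities would typically be
    # recovered from the ion saturation current as in main_magopter_analysis(), e.g.
    #   n_e_0 = fit_df_0[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s_0 * A_coll_0)
    #   n_e_1 = fit_df_1[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s_1 * A_coll_1)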
"""
Multiple Scattering code, By Dr <NAME>
For more information see:
<NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2015). Acta Cryst. A71, 20-25.
http://dx.doi.org/10.5281/zenodo.12866
Example:
xtl = dif.Crystal('Diamond.cif')
mslist = run_calcms(xtl, [0,0,3], [0,1,0], [1,0], [2.83, 2.85], plot=True)
Created from python package "calcms"
Version 1.0
12/12/2019
-------------------------------------------
Copyright 2014 Diamond Light Source Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Dr <NAME>, <EMAIL> Tel: +44 1235 778786
www.diamond.ac.uk
Diamond Light Source, Chilton, Didcot, Oxon, OX11 0DE, U.K.
"""
import numpy as np
import matplotlib.pyplot as plt
import itertools
__version__ = '1.0'
def run_calcms(xtl, hkl, azir=[0, 0, 1], pv=[1, 0], energy_range=[7.8, 8.2], numsteps=60,
full=False, pv1=False, pv2=False, sfonly=True, pv1xsf1=False):
"""
Run the multiple scattering code
mslist = run_calcms(xtl, [0,0,1])
:param xtl: Crystal structure from Dans_Diffraction
:param hkl: [h,k,l] principle reflection
:param azir: [h,k,l] reference of azimuthal 0 angle
:param pv: [s,p] polarisation vector
:param energy_range: [min, max] energy range in keV
:param numsteps: int: number of calculation steps from energy min to max
:param full: True/False: calculation type: full
:param pv1: True/False: calculation type: pv1
:param pv2: True/False: calculation type: pv2
:param sfonly: True/False: calculation type: sfonly *default
:param pv1xsf1: True/False: calculation type: pv1xsf1?
:return: array
"""
# ===============================================================================
# DMS Calculation
# ===============================================================================
mslist = [[np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN]]
# ================= Generate Reflist from Cif ===================================
sf, reflist, lattice, structure = loadcif(xtl, energy_range[-1])
refindex = ~np.isnan(Vfind(reflist, np.round(hkl) - reflist).vindex())
sf = sf[refindex]
reflist = reflist[refindex]
sf2 = sf[Vfind(reflist, np.round(hkl) - reflist).vindex()]
loopnum = 1
# ------------------------------------------------------------------------------
if pv1 + pv2 + sfonly + full + pv1xsf1 > 1:
print('Choose only one intensity option')
print('full=%s, pv1=%s, pv2=%s, sfonly=%s, pv1xsf1=%s' % (full, pv1, pv2, sfonly, pv1xsf1))
return None
elif pv1 + pv2 + sfonly + full + pv1xsf1 == 0:
print('Geometry Only')
mslist = [[np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN, np.NAN]]
for enval in np.linspace(energy_range[0], energy_range[1], numsteps):
print(str(loopnum) + ' of ' + str(numsteps))
# ===========================================================================
# SF0*Gauss*SF1*SF2*PV2
# ===========================================================================
if full:
#print('full calculation: SF1*SF2*PV2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2) # [:,[3,4,5]]
polfull = ms.polfull(pv)
mslist = np.concatenate((mslist, ms.polfull(pv)), 0)
# ===========================================================================
# PV1 only
# ===========================================================================
elif pv1:
#print('pv1 calculation: PV1')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.pol1only(pv)), 0)
# ===========================================================================
# PV2 only
# ===========================================================================
elif pv2:
#print('pv2 calculation: PV2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.pol2only(pv)), 0)
# ===========================================================================
# SF only
# ===========================================================================
elif sfonly:
#print('sfonly calculation: SF1*SF2')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf, sf2)
mslist = np.concatenate((mslist, ms.sfonly()), 0)
# ===========================================================================
# SF only
# ===========================================================================
elif pv1xsf1:
#print('pv1xsf1 calculation: SF1*PV1')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir, sf)
mslist = np.concatenate((mslist, ms.pv1xsf1(pv)), 0)
# ===========================================================================
# Geometry only - no structure factors
# ===========================================================================
else:
print('Geometry Only')
ms = Calcms(lattice, hkl, hkl, reflist, enval, azir)
mslist = np.concatenate((mslist, ms.geometry()), 0)
loopnum = loopnum + 1
keepindex = np.where([~np.isnan(mslist).any(1)])[1]
mslist = np.array(mslist[keepindex, :])
return mslist
########################################################################################################################
############################################### Ancillary Functions ##################################################
########################################################################################################################
def loadcif(xtl, energy_kev):
"""
New loadcif from Dans_Diffraction
returns:
intensity: Structure factor^2. I = sf x sf*
reflist: array of [h,k,l] reflections
lattice: [a,b,c,alpha,beta,gamma]
sf: complex structure factors
"""
lattice = xtl.Cell.lp()
reflist = xtl.Cell.all_hkl(energy_kev)
reflist = xtl.Cell.sort_hkl(reflist)
reflist = reflist[1:]
old_sf = xtl.Scatter._return_structure_factor
xtl.Scatter._return_structure_factor = True
sf = xtl.Scatter.intensity(reflist) # complex structure factor
xtl.Scatter._return_structure_factor = old_sf
intensity = np.real(sf * np.conj(sf))
print('MS Reflections: %d' % len(reflist))
return intensity, reflist, lattice, sf
class Bmatrix(object):
""" Convert to Cartesian coordinate system. Returns the Bmatrix and the metric tensors in direct and reciprocal spaces"""
def __init__(self, lattice):
self.lattice = lattice
lattice = self.lattice
a = lattice[0]
b = lattice[1]
c = lattice[2]
alph = lattice[3]
bet = lattice[4]
gamm = lattice[5]
alpha1 = alph * np.pi / 180.0
alpha2 = bet * np.pi / 180.0
alpha3 = gamm * np.pi / 180.0
beta1 = np.arccos((np.cos(alpha2) * np.cos(alpha3) - np.cos(alpha1)) / (np.sin(alpha2) * np.sin(alpha3)))
beta2 = np.arccos((np.cos(alpha1) * np.cos(alpha3) - np.cos(alpha2)) / (np.sin(alpha1) * np.sin(alpha3)))
beta3 = np.arccos((np.cos(alpha1) * np.cos(alpha2) - np.cos(alpha3)) / (np.sin(alpha1) * np.sin(alpha2)))
b1 = 1. / (a * np.sin(alpha2) * np.sin(beta3))
b2 = 1. / (b * np.sin(alpha3) * np.sin(beta1))
b3 = 1. / (c * np.sin(alpha1) * np.sin(beta2))
c1 = b1 * b2 * np.cos(beta3)
c2 = b1 * b3 * np.cos(beta2)
c3 = b2 * b3 * np.cos(beta1)
self.bmatrix = np.matrix([[b1, b2 * np.cos(beta3), b3 * np.cos(beta2)],
[0.0, b2 * np.sin(beta3), -b3 * np.sin(beta2) * np.cos(alpha1)], [0.0, 0.0, 1. / c]])
def bm(self):
return self.bmatrix
def ibm(self):
return self.bmatrix.I
def mt(self):
return self.bmatrix.I * self.bmatrix.transpose().I
def rmt(self):
mt = self.bmatrix.I * self.bmatrix.transpose().I
return mt.I
class Rotxyz(object):
"""Example p = Rotxyz(initial_vector, vectorrotateabout, angle)"""
def __init__(self, u, angle):
self.u = u
self.angle = angle
u = np.matrix(self.u) / np.linalg.norm(np.matrix(self.u))
e11 = u[0, 0] ** 2 + (1 - u[0, 0] ** 2) * np.cos(angle * np.pi / 180.0)
e12 = u[0, 0] * u[0, 1] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 2] * np.sin(angle * np.pi / 180.0)
e13 = u[0, 0] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 1] * np.sin(angle * np.pi / 180.0)
e21 = u[0, 0] * u[0, 1] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 2] * np.sin(angle * np.pi / 180.0)
e22 = u[0, 1] ** 2 + (1 - u[0, 1] ** 2) * np.cos(angle * np.pi / 180.0)
e23 = u[0, 1] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 0] * np.sin(angle * np.pi / 180.0)
e31 = u[0, 0] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) - u[0, 1] * np.sin(angle * np.pi / 180.0)
e32 = u[0, 1] * u[0, 2] * (1 - np.cos(angle * np.pi / 180.0)) + u[0, 0] * np.sin(angle * np.pi / 180.0)
e33 = u[0, 2] ** 2 + (1 - u[0, 2] ** 2) * np.cos(angle * np.pi / 180.0)
self.rotmat = np.matrix([[e11, e12, e13], [e21, e22, e23], [e31, e32, e33]])
def rmat(self):
return self.rotmat
class Dhkl(object):
"""calculate d-spacing for reflection from reciprocal metric tensor
d = Dhkl(lattice,HKL)
lattice = [a b c alpha beta gamma] (angles in degrees)
HKL: list of hkl. size(HKL) = n x 3 or 3 x n
!!! if size(HKL) is 3 x 3, HKL must be in the form:
HKL = [h1 k1 l1 ; h2 k2 l2 ; h3 k3 l3]
"""
def __init__(self, lattice, hkl):
self.lattice = lattice
self.hkl = np.matrix(hkl)
def d(self):
hkl = self.hkl
if np.shape(hkl)[0] == 3 and np.shape(hkl)[1] != 3:
hkl = hkl.transpose()
T = 1
else:
T = 0
G = Bmatrix(self.lattice).mt()
d = 1. / np.sqrt(np.diagonal(hkl * (G.I * hkl.transpose())))
# d = 1/np.sqrt(hkl*G.I*hkl.T)
if T == 1:
d = d.transpose()
return d
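# Quick check with hypothetical values (not from the original source): for a 4 A cubic
# cell, Dhkl([4, 4, 4, 90, 90, 90], [[1, 0, 0], [1, 1, 0]]).d() should give d-spacings
# of 4.0 A for (1 0 0) and 4/sqrt(2) ~ 2.83 A for (1 1 0).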
class Interplanarangle(object):
def __init__(self, lattice, hkl1, hkl2):
""" calculates interplanar angles in degrees for reflections using the metric tensor
Example Interplanarangle(lattice,hkl,hkl2) where hkl and hkl2 must have the same column length
Interplanarangle([3,3,3,90,90,120],[[1,2,3],[1,2,3]],[[1,1,3],[1,2,3]])
"""
self.lattice = lattice
if len(hkl1) != len(hkl2):
hkl1 = np.zeros((len(hkl2), 3)) + hkl1
self.hkl1 = np.matrix(hkl1)
self.hkl2 = np.matrix(hkl2)
def ang(self):
G = Bmatrix(self.lattice).mt()
dhkl1 = Dhkl(self.lattice, self.hkl1).d()
dhkl2 = Dhkl(self.lattice, self.hkl2).d()
term1 = np.diagonal(self.hkl1 * (G.I * self.hkl2.transpose()))
return np.arccos(np.multiply((term1 * dhkl1), dhkl2)) * 180 / np.pi
class Bragg(object):
def __init__(self, lattice, hkl, energy):
""" returns Bragg angle of a reflection
theta = Bragg(lattice,hkl,energy)
"""
self.lattice = lattice
self.hkl = hkl
self.energy = energy
def th(self):
keV2A = 12.3984187
wl = keV2A / self.energy
d = Dhkl(self.lattice, self.hkl).d()
# if wl/2.0/d <= 1:
theta = 180 / np.pi * np.arcsin(wl / 2.0 / d)
# else:
# theta = np.NAN;
return theta
class Hklgen(object):
def __init__(self, depth):
self.depth = depth
def v(self):
depth = self.depth
        reflist = np.zeros((((2 * depth) + 1) ** 3) * 3).reshape(((((2 * depth) + 1) ** 3) * 3) // 3, 3)
list1 = [x + 1 for x in range(-depth - 1, depth)]
clist = itertools.cycle(list1)
for hh in range(depth, (((2 * depth) + 1) ** 3) - depth, (2 * depth + 1)): # 2 times depth +1
reflist[[hh + x + 1 for x in range(-depth - 1, depth)], 0] = [x + 1 for x in range(-depth - 1, depth)]
for kk in range(depth, (((2 * depth) + 1) ** 3) - depth, (2 * depth + 1)): # 2 times depth +1
            reflist[[kk + x + 1 for x in range(-depth - 1, depth)], 1] = next(clist)
for kk in range(depth, (((2 * depth) + 1) ** 3) - depth, (2 * depth + 1)): # 2 times depth +1
            reflist[[kk + x + 1 for x in range(-depth - 1, depth)], 2] = next(clist)
reflist[:, 2].sort()
return reflist.astype(int)
class Vfind(object):
def __init__(self, vlist, v):
# result1=list(np.where(vlist-v==0)[0])
# self.refindex=[x if result1.count(x) >= 3 else np.NAN for x in result1]
v = np.array(v)
refindex2 = []
for i1 in range(v.shape[0]):
result1 = list(np.where(vlist - v[i1, :] == 0)[0])
try:
refindex = [x for x in result1 if result1.count(x) >= 3][0]
except:
refindex = np.NAN
refindex2.append(refindex)
self.refindex = refindex2
def vindex(self):
return self.refindex
########################################################################################################################
##################################################### Calcms #########################################################
########################################################################################################################
class Calcms(object):
def __init__(self, lattice, hkl, hklint, hkl2, energy, azir, F=[], F2=[]):
self.F = np.matrix(F)
self.F2 = np.matrix(F2)
self.lattice = lattice
self.hkl = np.matrix(hkl)
self.hkl2 = np.matrix(hkl2)
self.hkl3 = hklint - self.hkl2
self.energy = energy
self.azir = np.matrix(azir)
bm = Bmatrix(self.lattice).bm()
# Convert primary hkl and reduced hkl2 list to orthogonal coordinate system
hklnotlist = (bm * self.hkl.transpose()).transpose()
self.hklrlv = hklnotlist
azir2 = (bm * self.azir.transpose()).transpose()
zref = (bm * np.matrix([0, 0, 1]).transpose()).transpose()
# Determin transformation to align primary reflection to the z direction
alignangle = Interplanarangle(self.lattice, [0, 0, 1], self.hkl).ang()
realvecthkl = (bm * self.hkl2.transpose()).transpose()
realvecthkl3 = (bm * self.hkl3.transpose()).transpose()
rotvect = np.cross(zref, hklnotlist)
if np.abs(rotvect[0][0]) + np.abs(rotvect[0][1]) + np.abs(rotvect[0][2]) >= 0.0001:
realvecthkl = realvecthkl * Rotxyz(rotvect, alignangle[0]).rmat()
self.tvprime = hklnotlist * Rotxyz(rotvect, alignangle[0]).rmat()
else:
self.tvprime = hklnotlist
# Build Ewald Sphere
brag1 = np.empty(self.hkl2.shape[0]) * 0 + 1.0 * Bragg(self.lattice, self.hkl, self.energy).th()
self.brag1 = brag1
keV2A = 12.398
ko = (self.energy / keV2A)
self.ko = ko
# height dependent radius of ewald slice in the hk plane
rewl = ko * np.cos((np.arcsin(
((ko * np.sin(-brag1 * np.pi / 180.0)) + (realvecthkl[:, 2])) / ko) * 180.0 / np.pi) * np.pi / 180.0)
rhk = np.sqrt(np.square(realvecthkl[:, 0]) + np.square(realvecthkl[:, 1]))
# Origin of intersecting circle
orighk = np.empty(self.hkl2.shape[0]) * 0 + (ko * np.cos(brag1[0] * np.pi / 180.))
####################### MS Calculation %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if np.abs(rotvect[0][0]) + np.abs(rotvect[0][1]) + np.abs(rotvect[0][2]) > 0.001:
azir2 = azir2 * Rotxyz(rotvect, alignangle[0]).rmat()
            azirangle = np.arctan2(azir2[0, 0], azir2[0, 1])
import sys
sys.path.append('../')
import numpy as np
import tensorflow as tf  # needed below for tf.train.Saver
#################################### args
import argparse
parser = argparse.ArgumentParser()
# model
parser.add_argument("--configuration", default='L1', nargs='?', type=str)
parser.add_argument("--mode", default='IWAE', nargs='?', type=str)
parser.add_argument("--M", default=128, nargs='?', type=int)
parser.add_argument("--likelihood_variance", default=1e-2, nargs='?', type=float)
parser.add_argument("--num_IW_samples", default=5, nargs='?', type=int)
# training
parser.add_argument("--minibatch_size", default=512, nargs='?', type=int)
parser.add_argument("--iterations", default=5000, nargs='?', type=int)
parser.add_argument("--gamma", default=1e-2, nargs='?', type=float)
parser.add_argument("--gamma_decay", default=0.98, nargs='?', type=float)
parser.add_argument("--lr", default=5e-3, nargs='?', type=float)
parser.add_argument("--lr_decay", default=0.98, nargs='?', type=float)
parser.add_argument("--fix_linear", default=True, nargs='?', type=bool)
parser.add_argument("--num_predict_samples", default=2000, nargs='?', type=int)
parser.add_argument("--predict_batch_size", default=1000, nargs='?', type=int) ## was 10 for experiments
# data
parser.add_argument("--dataset", default='kin8nm', nargs='?', type=str)
parser.add_argument("--split", default=0, nargs='?', type=int)
parser.add_argument("--seed", default=0, nargs='?', type=int)
parser.add_argument("--results_path", default='havasi_results', nargs='?', type=str)
ARGS = parser.parse_args()
#################################### paths
if ARGS.split == 0:
file_name = '{}_{}_{}'.format(ARGS.dataset, ARGS.configuration, ARGS.mode)
else:
file_name = '{}_{}_{}_{}'.format(ARGS.dataset, ARGS.configuration, ARGS.mode, ARGS.split)
print(file_name)
import os
tensorboard_path_base = os.path.join(ARGS.results_path, 'tensorboard')
checkpoints_path_base = os.path.join(ARGS.results_path, 'checkpoints')
figs_path_base = os.path.join(ARGS.results_path, 'figs')
tensorboard_path = os.path.join(tensorboard_path_base, file_name)
checkpoint_path = os.path.join(checkpoints_path_base, file_name)
figs_path = os.path.join(figs_path_base, file_name+'.png')
results_path = os.path.join(ARGS.results_path, 'results.db')
for p in [ARGS.results_path, tensorboard_path_base, checkpoints_path_base, figs_path_base]:
if not os.path.isdir(p):
os.mkdir(p)
#################################### data
from bayesian_benchmarks.data import get_regression_data
data = get_regression_data(ARGS.dataset)
data.X_test = data.X_test[:10000]
data.Y_test = data.Y_test[:10000]
#################################### model
from build_models import build_model
model = build_model(ARGS, data.X_train, data.Y_train)
#################################### init
sess = model.enquire_session()
model.init_op(sess)
#################################### monitoring
import gpflow.training.monitor as mon
print_freq = 1000
saving_freq = 500
tensorboard_freq = 500
print_task = mon.PrintTimingsTask() \
.with_name('print') \
.with_condition(mon.PeriodicIterationCondition(print_freq))
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
checkpoint_task = mon.CheckpointTask(checkpoint_dir=checkpoint_path, saver=saver) \
.with_name('checkpoint') \
.with_condition(mon.PeriodicIterationCondition(saving_freq)) \
.with_exit_condition(True)
writer = mon.LogdirWriter(tensorboard_path)
tensorboard_task = mon.ModelToTensorBoardTask(writer, model) \
.with_name('tensorboard') \
.with_condition(mon.PeriodicIterationCondition(tensorboard_freq))
monitor_tasks = [print_task, tensorboard_task, checkpoint_task]
#################################### training
with mon.Monitor(monitor_tasks, sess, model.global_step, print_summary=True) as monitor:
try:
mon.restore_session(sess, checkpoint_path)
except ValueError:
pass
iterations_to_go = max([ARGS.iterations - sess.run(model.global_step), 0])
print('Already run {} iterations. Running {} iterations'.format(sess.run(model.global_step), iterations_to_go))
for it in range(iterations_to_go):
monitor()
model.train_op(sess)
model.anchor(sess)
#################################### evaluation
from sklearn.neighbors import KernelDensity
from scipy.stats import norm, shapiro
res = {}
if 'SGHMC' == ARGS.mode:
spacing = 5
posterior_samples = model.sghmc_optimizer.collect_samples(sess, ARGS.num_predict_samples, spacing)
logp = np.empty(len(data.X_test))
rmse = np.empty(len(data.X_test))
shapiro_W = np.empty(len(data.X_test))
Xs_batch = np.array_split(data.X_test, max(1, int(len(data.X_test)/ARGS.predict_batch_size)))
for i, (x, y) in enumerate(zip(data.X_test, data.Y_test)):
if 'SGHMC' == ARGS.mode:
samples = np.empty((ARGS.num_predict_samples, 1, 1))
for j, s in enumerate(posterior_samples):
samples[j] = model.predict_y_samples(x.reshape(1, -1), 1, feed_dict=s)[0]
else:
samples = model.predict_y_samples(x.reshape(1, -1), ARGS.num_predict_samples)
Ss = samples[:, :, 0]
    bandwidth = 1.06 * np.std(Ss)
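    # Sketch (assumption, not part of the original code): with Silverman's rule the
    # bandwidth would also carry the sample-size factor, and the per-point metrics
    # declared above would typically be filled from a KDE over the predictive samples:
    #   bandwidth = 1.06 * np.std(Ss) * len(Ss) ** (-1.0 / 5.0)
    #   kde = KernelDensity(bandwidth=float(bandwidth)).fit(Ss.reshape(-1, 1))
    #   logp[i] = float(kde.score(y.reshape(1, -1)))
    #   rmse[i] = float(np.sqrt(np.mean((np.mean(Ss) - y) ** 2)))
    #   shapiro_W[i] = float(shapiro((Ss - np.mean(Ss)) / np.std(Ss))[0])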
import numpy as np
from queue import Queue
import time
class Quoridor(object):
HORIZONTAL = 1
VERTICAL = -1
def __init__(self, safe=False):
self.safe = safe
#self.action_space = 140 # 140 64+64 + 4 + 8possible actions in total
self.action_space = 84 # 84 36+36 + 4 + 8 possible actions in total
self.n_players = 2
        self.players = [1, 2]  # the two players
self.reset()
    # TODO: needs revision
def load(self, p1, p2):
self.player1 = p1
self.player2 = p2
    # Get the current player
def get_current_player(self):
return self.current_player
def reset(self):
self.current_player = 1
self.last_player = -1
# Initialize Tiles
        # self.tiles = np.zeros(81)  # 9*9 tiles (board squares)
self.tiles = np.zeros(49) #7*7
# Initialize Player Locations
        self._positions = {  # starting positions
            1: 3,   # needs changing here
            2: 45   # needs changing here
}
self._DIRECTIONS = {
'N': 0, 'S': 1, 'E': 2, 'W': 3,
'NN': 4, 'SS': 5, 'EE': 6, 'WW': 7,
'NE': 8, 'NW': 9, 'SE': 10, 'SW': 11
}
# self.N_DIRECTIONS = 12
# self.N_TILES = 81
# self.N_ROWS = 9
# self.N_INTERSECTIONS = 64
        self.N_DIRECTIONS = 12  # total number of possible move choices
        self.N_TILES = 49  # total number of tiles, 7*7
        self.N_ROWS = 7  # number of rows
        self.N_INTERSECTIONS = 36  # 6*6 intersections where walls can be placed
# There are 36 possible intersection
# Horizontal Walls - 1
# No Wall - 0
# Vertical Wall - -1
        self._intersections = np.zeros(36)  # wall intersections (one point per four tiles): 1 = horizontal, -1 = vertical
        self._player1_walls_remaining = 10  # needs checking: how many walls exactly?
        self._player2_walls_remaining = 10  # needs checking
def state(self):
"""Returns a set of 7x7 planes that represent the game state.
        1. The current player position
        2. The opponent position
        3. Vertical Walls
        4. Horizontal Walls
        5 - 14. Number of walls remaining for current player
        15 - 24. Number of walls remaining for opponent
        25. Whose turn it is (0 for player 1, 1 for player 2)
"""
player1_position_plane = self.tiles.copy()
player1_position_plane[self._positions[1]] = 1
player1_position_plane = player1_position_plane.reshape([7, 7])
player2_position_plane = self.tiles.copy()
player2_position_plane[self._positions[2]] = 1
player2_position_plane = player2_position_plane.reshape([7, 7])
player1_walls_plane = np.zeros([10, 7, 7])
player2_walls_plane = np.zeros([10, 7, 7])
player1_walls_plane[self._player1_walls_remaining - 1, :, :] = 1
player2_walls_plane[self._player2_walls_remaining - 1, :, :] = 1
# Set the wall planes
vertical_walls = np.pad(
np.int8(self._intersections == -1).reshape([6, 6]),
(0, 1),
mode='constant',
constant_values=0
)
horizontal_walls = np.pad(
np.int8(self._intersections == 1).reshape([6, 6]),
(0, 1),
mode='constant',
constant_values=0
)
no_walls = np.pad(
np.int8(self._intersections == 0).reshape([6, 6]),
(0, 1),
mode='constant',
constant_values=0
)
        # Order the planes differently depending on the current player
if self.current_player == 1:
state = np.stack([
no_walls,
vertical_walls,
horizontal_walls,
player1_position_plane,
player2_position_plane,
])
# print('Shape is {shape}'.format(shape=state.shape))
current_player_plane = np.zeros([1, 7, 7])
            state = np.vstack([state, player1_walls_plane, player2_walls_plane, current_player_plane])
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import cv2
import os
class TLClassifier(object):
def __init__(self, graph_source):
graph_file = graph_source + '/frozen_inference_graph.pb'
self.TL_COLOR_GREEN = 1
self.TL_COLOR_RED = 2
self.TL_COLOR_YELLOW = 3
self.TL_COLOR_UNKNOWN = 4
self.confidence_cutoff = 0.8
wd = os.path.dirname(os.path.realpath(__file__))
# load frozen graph
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(wd + graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.image_tensor = graph.get_tensor_by_name('image_tensor:0')
self.detection_scores = graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = graph.get_tensor_by_name('detection_classes:0')
self.detection_boxes = graph.get_tensor_by_name('detection_boxes:0')
# create session only once
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
self.session = tf.Session(graph=graph, config=config)
# front-load tf initialization
        dummy_image = np.empty([1, 300, 400, 3], dtype=np.uint8)
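        # Sketch (assumption, not part of the original code): the dummy image is
        # usually pushed through the graph once so the first real classification is
        # not penalised by lazy TensorFlow initialisation, e.g.
        #   self.session.run(
        #       [self.detection_scores, self.detection_classes, self.detection_boxes],
        #       feed_dict={self.image_tensor: dummy_image})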
#!/usr/bin/env python
"""
MagPy-General: Standard pymag package containing the following classes:
Written by <NAME>, <NAME> 2011/2012/2013/2014
Written by <NAME>, <NAME>, <NAME> 2015/2016
Version 0.3 (starting May 2016)
License:
https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import logging
import os
import sys
import tempfile
# ----------------------------------------------------------------------------
# Part 1: Import routines for packages
# ----------------------------------------------------------------------------
logpygen = '' # temporary logger variable
badimports = [] # List of missing packages
nasacdfdir = "c:\CDF Distribution\cdf33_1-dist\lib"
# Logging
# ---------
# Select the user's home directory (platform independent) or environment path
if "MAGPY_LOG_PATH" in os.environ:
path_to_log = os.environ["MAGPY_LOG_PATH"]
if not os.path.exists(path_to_log):
os.makedirs(path_to_log)
else:
path_to_log = tempfile.gettempdir()
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
# Check file permission/existance
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
except:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.addHandler(console)
return logger
# Package loggers to identify info/problem source
logger = setup_logger(__name__)
# DEPRECATED: replaced by individual module loggers, delete these when sure they're no longer needed:
loggerabs = logging.getLogger('abs')
loggertransfer = logging.getLogger('transf')
loggerdatabase = logging.getLogger('db')
loggerstream = logging.getLogger('stream')
loggerlib = logging.getLogger('lib')
loggerplot = logging.getLogger('plot')
# Special loggers for event notification
stormlogger = logging.getLogger('stream')
logger.info("Initiating MagPy...")
from magpy.version import __version__
logger.info("MagPy version "+str(__version__))
magpyversion = __version__
# Standard packages
# -----------------
try:
import csv
import pickle
import types
import struct
import re
import time, string, os, shutil
#import locale
import copy as cp
import fnmatch
import dateutil.parser as dparser
from tempfile import NamedTemporaryFile
import warnings
from glob import glob, iglob, has_magic
from itertools import groupby
import operator # used for stereoplot legend
from operator import itemgetter
# The following packages are not identically available for python3
try: # python2
import copy_reg as copyreg
except ImportError: # python3
import copyreg as copyreg
# Python 2 and 3: alternative 4
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request, ProxyHandler, install_opener, build_opener
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, ProxyHandler, install_opener, build_opener
"""
try: # python2
import urllib2
except ImportError: # python3
import urllib.request
"""
try: # python2
import thread
except ImportError: # python3
import _thread
try: # python2
from StringIO import StringIO
pyvers = 2
except ImportError: # python 3
from io import StringIO
pyvers = 3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: standard packages.\n"
badimports.append(e)
# operating system
try:
PLATFORM = sys.platform
logger.info("Running on platform: {}".format(PLATFORM))
except:
PLATFORM = 'unkown'
# Matplotlib
# ----------
try:
import matplotlib
gui_env = ['TKAgg','GTKAgg','Qt4Agg','WXAgg','Agg']
try:
if not os.isatty(sys.stdout.fileno()): # checks if stdout is connected to a terminal (if not, cron is starting the job)
logger.info("No terminal connected - assuming cron job and using Agg for matplotlib")
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
matplotlib.use('Agg') # For using cron
except:
logger.warning("Problems with identfying cron job - windows system?")
pass
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: problem with matplotlib.\n"
badimports.append(e)
try:
version = matplotlib.__version__.replace('svn', '')
try:
version = map(int, version.replace("rc","").split("."))
MATPLOTLIB_VERSION = list(version)
except:
version = version.strip("rc")
MATPLOTLIB_VERSION = version
logger.info("Loaded Matplotlib - Version %s" % str(MATPLOTLIB_VERSION))
for gui in gui_env:
try:
logger.info("Testing backend {}".format(gui))
try: # will be important from matplotlib3.3 onwards
matplotlib.use(gui, force=True)
except:
matplotlib.use(gui, warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
logger.info("Using backend: {}".format(matplotlib.get_backend()))
from matplotlib.colors import Normalize
from matplotlib.widgets import RectangleSelector, RadioButtons
#from matplotlib.colorbar import ColorbarBase
from matplotlib import mlab
from matplotlib.dates import date2num, num2date
import matplotlib.cm as cm
from pylab import *
from datetime import datetime, timedelta
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError with matplotlib package. Please install to proceed.\n"
logpygen += " ... if installed please check the permissions on .matplotlib in your homedirectory.\n"
badimports.append(e)
# Numpy & SciPy
# -------------
try:
logger.info("Loading Numpy and SciPy...")
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy import stats
from scipy import signal
from scipy.interpolate import UnivariateSpline
from scipy.ndimage import filters
import scipy.optimize as op
import math
except ImportError as e:
logpygen += "CRITICAL MagPy initiation ImportError: Python numpy-scipy required - please install to proceed.\n"
badimports.append(e)
# NetCDF
# ------
try:
#print("Loading Netcdf4 support ...")
from netCDF4 import Dataset
except ImportError as e:
#logpygen += "MagPy initiation ImportError: NetCDF not available.\n"
#logpygen += "... if you want to use NetCDF format support please install a current version.\n"
#badimports.append(e)
pass
# NASACDF - SpacePy
# -----------------
def findpath(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return root
try:
logger.info("Loading SpacePy package cdf support ...")
try:
# check for windows
nasacdfdir = findpath('libcdf.dll','C:\CDF_Distribution') ## new path since nasaCDF3.6
if not nasacdfdir:
nasacdfdir = findpath('libcdf.dll','C:\CDF Distribution')
if nasacdfdir:
os.environ["CDF_LIB"] =str(nasacdfdir)
logger.info("Using CDF lib in %s" % nasacdfdir)
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
else:
# create exception and try linux
x=1/0
except:
os.putenv("CDF_LIB", "/usr/local/cdf/lib")
logger.info("using CDF lib in /usr/local/cdf")
### If files (with tt_2000) have been generated with an outdated leapsecondtable
### an exception will occur - to prevent that:
### 1. make sure to use a actual leapsecond table - update cdf regularly
### 2. temporarly set cdf_validate environment variable to no
# This is how option 2 is included TODO -- add this to initialization options
# as an update of cdf is the way to go and not just deactivating the error message
os.putenv("CDF_VALIDATE", "no")
logger.info("... deactivating cdf validation")
try:
import spacepy.pycdf as cdf
logger.info("... success")
except KeyError as e:
# Probably running at boot time - spacepy HOMEDRIVE cannot be detected
badimports.append(e)
except:
logger.info("... Could not import spacepy")
pass
except ImportError as e:
logpygen += "MagPy initiation ImportError: NASA cdf not available.\n"
logpygen += "... if you want to use NASA CDF format support please install a current version.\n"
badimports.append(e)
if logpygen == '':
logpygen = "OK"
else:
logger.info(logpygen)
logger.info("Missing packages:")
for item in badimports:
logger.info(item)
logger.info("Moving on anyway...")
### Some Python3/2 compatibility code
### taken from http://www.rfk.id.au/blog/entry/preparing-pyenchant-for-python-3/
try:
unicode = unicode
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
# Storing function - http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods#edit2155350
# by <NAME>
# Used here to pickle baseline functions from header and store it in a cdf key.
# Not really a transparent method but working nicely. Underlying functional parameters to reconstruct the fit
# are stored as well but would require a link to the absolute data.
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
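# With the reduce handlers registered above, bound methods (such as the fitted
# baseline functions stored in stream headers) can be serialised with pickle.dumps()
# like ordinary objects and stored, e.g., in a CDF key.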
# ----------------------------------------------------------------------------
# Part 2: Define Dictionaries
# ----------------------------------------------------------------------------
# Keys available in DataStream Object:
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
# Empty key values at initiation of stream:
KEYINITDICT = {'time':0,'x':float('nan'),'y':float('nan'),'z':float('nan'),'f':float('nan'),
't1':float('nan'),'t2':float('nan'),'var1':float('nan'),'var2':float('nan'),
'var3':float('nan'),'var4':float('nan'),'var5':float('nan'),'dx':float('nan'),
'dy':float('nan'),'dz':float('nan'),'df':float('nan'),'str1':'-','str2':'-',
'str3':'-','str4':'-','flag':'0000000000000000-','comment':'-','typ':'xyzf',
'sectime':float('nan')}
FLAGKEYLIST = KEYLIST[:16]
# KEYLIST[:8] # only primary values with time
# KEYLIST[1:8] # only primary values without time
# Formats supported by MagPy read function:
PYMAG_SUPPORTED_FORMATS = {
'IAGA':['rw','IAGA 2002 text format'],
'WDC':['rw','World Data Centre format'],
'IMF':['rw', 'Intermagnet Format'],
'IAF':['rw', 'Intermagnet archive Format'],
'BLV':['rw','Baseline format Intermagnet'],
'IYFV':['rw','Yearly mean format Intermagnet'],
'DKA':['rw', 'K value format Intermagnet'],
'DIDD':['rw','Output format from MinGeo DIDD'],
'GSM19':['r', 'Output format from GSM19 magnetometer'],
'COVJSON':['rw', 'Coverage JSON'],
'JSON':['rw', 'JavaScript Object Notation'],
'LEMIHF':['r', 'LEMI text format data'],
'LEMIBIN':['r','Current LEMI binary data format'],
'LEMIBIN1':['r','Deprecated LEMI binary format at WIC'],
'OPT':['r', 'Optical hourly data from WIK'],
'PMAG1':['r','Deprecated ELSEC from WIK'],
'PMAG2':['r', 'Current ELSEC from WIK'],
'GDASA1':['r', 'GDAS binary format'],
'GDASB1':['r', 'GDAS text format'],
'RMRCS':['r', 'RCS data output from Richards perl scripts'],
'RCS':['r', 'RCS raw output'],
'METEO':['r', 'Winklbauer METEO files'],
'NEIC':['r', 'WGET data from USGS - NEIC'],
'LNM':['r', 'Thies Laser-Disdrometer'],
'IWT':['r', 'IWT Tiltmeter data'],
'LIPPGRAV':['r', 'Lippmann Tiltmeter data'],
'GRAVSG':['r', 'GWR TSF data'],
'CR800':['r', 'CR800 datalogger'],
'IONO':['r', 'IM806 Ionometer'],
'RADON':['r', 'single channel analyser gamma data'],
'USBLOG':['r', 'USB temperature logger'],
#'SERSIN':['r', '?'],
#'SERMUL':['r', '?'],
'PYSTR':['rw', 'MagPy full ascii'],
                'AUTODIF':['r', 'Deprecated - AutoDIF output data'],
'AUTODIF_FREAD':['r', 'Deprecated - Special format for AutoDIF read-in'],
'PYBIN':['r', 'MagPy own binary format'],
'PYASCII':['rw', 'MagPy basic ASCII'],
'POS1TXT':['r', 'POS-1 text format output data'],
'POS1':['r', 'POS-1 binary output at WIC'],
'PMB':['r', 'POS pmb file'],
'QSPIN':['r', 'QSPIN ascii output'],
                #'PYNC':['r', 'MagPy NetCDF variant (to be developed)'],
#'DTU1':['r', 'ASCII Data from the DTUs FGE systems'],
#'BDV1':['r', 'Budkov GDAS data variant'],
'GFZTMP':['r', 'GeoForschungsZentrum ascii format'],
'GFZKP':['r', 'GeoForschungsZentrum KP-Index format'],
'PHA':['r', 'Potentially Hazardous Asteroids (PHAs) from the International Astronomical Unions Minor Planet Center, (json, incomplete)'],
'PREDSTORM':['r','PREDSTORM space weather prediction data format'],
'CSV':['rw','comma-separated CSV data'],
'IMAGCDF':['rw','Intermagnet CDF Format'],
'PYCDF':['rw', 'MagPy CDF variant'],
'NOAAACE':['r', 'NOAA ACE satellite data format'],
'NETCDF':['r', 'NetCDF4 format, NOAA DSCOVR satellite data archive format'],
'LATEX':['w','LateX data'],
'CS':['r','Cesium G823'],
#'SFDMI':['r', 'San Fernando variometer'],
#'SFGSM':['r', 'San Fernando GSM90'],
'UNKOWN':['-','Unknown']
}
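# Hedged sketch: each entry above maps a format key to [read/write mode, description].
# Listing, for example, all formats MagPy can write could look like this:
# >>> writable = [key for key, val in PYMAG_SUPPORTED_FORMATS.items() if 'w' in val[0]]
# >>> 'IAGA' in writable
# True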
"""
PYMAG_SUPPORTED_FORMATS = {
'IAGA':'rw', # IAGA 2002 text format
'WDC':'rw', # World Data Centre format
'IMF':'rw', # Intermagnet Format
'IAF':'rw', # Intermagnet archive Format
'IMAGCDF', # Intermagnet CDF Format
'BLV', # Baseline format Intermagnet
'IYFV', # Yearly mean format Intermagnet
'DKA', # K value format Intermagnet
'DIDD', # Output format from DIDD
'GSM19', # Output format from GSM19 magnetometer
'COVJSON', # Coverage JavaScript Object Notation
'JSON', # JavaScript Object Notation
'LEMIHF', # LEMI text format data
'LEMIBIN', # Current LEMI binary data format at WIC
'LEMIBIN1', # Deprecated LEMI binary format at WIC
'OPT', # Optical hourly data from WIK
'PMAG1', # Deprecated ELSEC from WIK
'PMAG2', # Current ELSEC from WIK
'GDASA1', # ?
'GDASB1', # ?
'RMRCS', # RCS data output from Richards perl scripts
'RCS', # RCS data output from Richards perl scripts
'METEO', # RCS data output in METEO files
'NEIC', # WGET data from USGS - NEIC
'LNM', # LaserNiederschlagsMonitor files
'IWT', # Tiltmeter data files at cobs
'LIPPGRAV', # Lippmann Tiltmeter data files at cobs
'CR800', # Data from the CR800 datalogger
'IONO', # Data from IM806 Ionometer
'RADON', # ?
'USBLOG', # ?
'SERSIN', # ?
'SERMUL', # ?
'PYSTR', # MagPy full ascii
    'AUTODIF', # AutoDIF output data
'AUTODIF_FREAD',# Special format for AutoDIF read-in
'PYCDF', # MagPy CDF variant
'PYBIN', # MagPy own format
'PYASCII', # MagPy basic ASCII
'POS1TXT', # POS-1 text format output data
'POS1', # POS-1 binary output at WIC
'PMB', # POS pmb output
'QSPIN', # QSpin output
    'PYNC', # MagPy NetCDF variant (to be developed)
'DTU1', # ASCII Data from the DTU's FGE systems
'SFDMI', # ?
'SFGSM', # ?
'BDV1', # ?
'GFZKP', # GeoForschungsZentrum KP-Index format
'NOAAACE', # NOAA ACE satellite data format
'PREDSTORM' # PREDSTORM space weather prediction data format
'CSV', # comma-separated CSV data with isoformat date in first column
'LATEX', # LateX data
'CS', # ?
'UNKOWN' # 'Unknown'?
}
"""
# ----------------------------------------------------------------------------
# Part 3: Example files for easy access and tests
# ----------------------------------------------------------------------------
from pkg_resources import resource_filename
example1 = resource_filename('magpy', 'examples/example1.zip') #Zip compressed IAGA02
example2 = resource_filename('magpy', 'examples/example2.cdf') #MagPy CDF with F
example3 = resource_filename('magpy', 'examples/example3.txt') #PyStr Baseline
example4 = resource_filename('magpy', 'examples/example4.cdf') #MagPy CDF
example5 = resource_filename('magpy', 'examples/example5.sec') #Imag CDF
example6a = resource_filename('magpy', 'examples/example6a.txt') #DI file
example6b = resource_filename('magpy', 'examples/example6b.txt') #DI file
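# Hedged usage sketch: the example paths above can be handed directly to the read()
# function defined later in this module (paths depend on the local installation):
# >>> from magpy.stream import read, example1
# >>> data = read(example1)             # zip-compressed IAGA02 data
# >>> data.length()[0] > 0
# True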
# ----------------------------------------------------------------------------
# Part 4: Main classes -- DataStream, LineStruct and
# PyMagLog (To be removed)
# ----------------------------------------------------------------------------
class DataStream(object):
"""
Creates a list object from input files /url data
data is organized in columns
keys are column identifier:
key in keys: see KEYLIST
A note on headers:
ALWAYS INITIATE STREAM WITH >>> stream = DataStream([],{}).
All available methods:
----------------------------
- stream.ext(self, columnstructure): # new version of extend function for column operations
- stream.add(self, datlst):
- stream.clear_header(self):
- stream.extend(self,datlst,header):
- stream.union(self,column):
- stream.findtime(self,time):
- stream._find_t_limits(self):
- stream._print_key_headers(self):
- stream._get_key_headers(self,**kwargs):
- stream.sorting(self):
- stream._get_line(self, key, value):
- stream._remove_lines(self, key, value):
- stream._remove_columns(self, keys):
- stream._get_column(self, key):
- stream._put_column(self, column, key, **kwargs):
- stream._move_column(self, key, put2key):
- stream._clear_column(self, key):
- stream._reduce_stream(self, pointlimit=100000):
- stream._aic(self, signal, k, debugmode=None):
- stream._get_k(self, **kwargs):
- stream._get_k_float(self, value, **kwargs):
- stream._get_max(self, key, returntime=False):
- stream._get_min(self, key, returntime=False):
- stream._gf(self, t, tau):
- stream._hf(self, p, x):
- stream._residual_func(self, func, y):
- stream._tau(self, period):
- stream._convertstream(self, coordinate, **kwargs):
- stream._det_trange(self, period):
- stream._is_number(self, s):
- stream._normalize(self, column):
- stream._testtime(self, time):
- stream._drop_nans(self, key):
- stream.aic_calc(self, key, **kwargs):
- stream.baseline(self, absolutestream, **kwargs):
- stream.bindetector(self,key,text=None,**kwargs):
- stream.calc_f(self, **kwargs):
- stream.cut(self,length,kind=0,order=0):
- stream.dailymeans(self):
- stream.date_offset(self, offset):
- stream.delta_f(self, **kwargs):
- stream.dict2stream(self,dictkey='DataBaseValues')
- stream.differentiate(self, **kwargs):
- stream.eventlogger(self, key, values, compare=None, stringvalues=None, addcomment=None, debugmode=None):
- stream.extract(self, key, value, compare=None, debugmode=None):
- stream.extrapolate(self, start, end):
- stream.filter(self, **kwargs):
- stream.fit(self, keys, **kwargs):
- stream.flag_outlier(self, **kwargs):
- stream.flag_stream(self, key, flag, comment, startdate, enddate=None, samplingrate):
- stream.func2stream(self,function,**kwargs):
- stream.func_add(self,function,**kwargs):
- stream.func_subtract(self,function,**kwargs):
- stream.get_gaps(self, **kwargs):
- stream.get_sampling_period(self):
- stream.samplingrate(self, **kwargs):
- stream.integrate(self, **kwargs):
- stream.interpol(self, keys, **kwargs):
- stream.k_fmi(self, **kwargs):
- stream.mean(self, key, **kwargs):
- stream.multiply(self, factors):
- stream.offset(self, offsets):
- stream.randomdrop(self, percentage=None, fixed_indicies=None):
- stream.remove(self, starttime=starttime, endtime=endtime):
- stream.remove_flagged(self, **kwargs):
- stream.resample(self, keys, **kwargs):
- stream.rotation(self,**kwargs):
- stream.scale_correction(self, keys, scales, **kwargs):
- stream.smooth(self, keys, **kwargs):
- stream.steadyrise(self, key, timewindow, **kwargs):
- stream.stream2dict(self,dictkey='DataBaseValues')
- stream.stream2flaglist(self, userange=True, flagnumber=None, keystoflag=None, sensorid=None, comment=None)
- stream.trim(self, starttime=None, endtime=None, newway=False):
- stream.variometercorrection(self, variopath, thedate, **kwargs):
- stream.write(self, filepath, **kwargs):
Application methods:
----------------------------
- stream.aic_calc(key) -- returns stream (with !var2! filled with aic values)
- stream.baseline() -- calculates baseline correction for input stream (datastream)
    - stream.dailymeans() -- for DI stream - obtains variometer corrected means for basevalues
    - stream.date_offset() -- Corrects the time column of the selected stream by the offset
- stream.delta_f() -- Calculates the difference of x+y+z to f
- stream.differentiate() -- returns stream (with !dx!,!dy!,!dz!,!df! filled by derivatives)
- stream.extrapolate() -- read absolute stream and extrapolate the data
- stream.fit(keys) -- returns function
- stream.filter() -- returns stream (changes sampling_period; in case of fmi ...)
- stream.find_offset(stream_a, stream_b) -- Finds offset of two data streams. (Not optimised.)
- stream.flag_stream() -- Add flags to specific times or time ranges
- stream.func2stream() -- Combine stream and function (add, subtract, etc)
- stream.func_add() -- Add a function to the selected values of the data stream
- stream.func_subtract() -- Subtract a function from the selected values of the data stream
- stream.get_gaps() -- Takes the dominant sample frequency and fills non-existing time steps
- stream.get_sampling_period() -- returns the dominant sampling frequency in unit ! days !
- stream.integrate() -- returns stream (integrated vals at !dx!,!dy!,!dz!,!df!)
- stream.interpol(keys) -- returns function
- stream.k_fmi() -- Calculating k values following the fmi approach
    - stream.linestruct2ndarray() -- converts linestruct data to ndarray. should be avoided
    - stream.mean() -- Calculates mean values for the specified key; NaNs are taken into account
- stream.offset() -- Apply constant offsets to elements of the datastream
- stream.plot() -- plot keys from stream
- stream.powerspectrum() -- Calculating the power spectrum following the numpy fft example
- stream.remove_flagged() -- returns stream (removes data from stream according to flags)
- stream.resample(period) -- Resample stream to given sampling period.
- stream.rotation() -- Rotation matrix for rotating x,y,z to new coordinate system xs,ys,zs
- stream.selectkeys(keys) -- ndarray: remove all data except for provided keys (and flag/comment)
- stream.smooth(key) -- smooth the data using a window with requested size
- stream.spectrogram() -- Creates a spectrogram plot of selected keys
- stream.stream2flaglist() -- make flaglist out of stream
- stream.trim() -- returns stream within new time frame
- stream.use_sectime() -- Swap between primary and secondary time (if sectime is available)
- stream.variometercorrection() -- Obtain average DI values at certain timestep(s)
- stream.write() -- Writing Stream to a file
Supporting INTERNAL methods:
----------------------------
A. Standard functions and overrides for list like objects
- self.clear_header(self) -- Clears headers
- self.extend(self,datlst,header) -- Extends stream object
- self.sorting(self) -- Sorts object
B. Internal Methods I: Line & column functions
- self._get_column(key) -- returns a numpy array of selected columns from Stream
- self._put_column(key) -- adds a column to a Stream
- self._move_column(key, put2key) -- moves one column to another key
- self._clear_column(key) -- clears a column to a Stream
    - self._get_line(self, key, value) -- returns a LineStruct element corresponding to the first occurrence of value within the selected key
- self._reduce_stream(self) -- Reduces stream below a certain limit.
- self._remove_lines(self, key, value) -- removes lines with value within the selected key
- self.findtime(self,time) -- returns index and line for which time equals self.time
B. Internal Methods II: Data manipulation functions
    - self._aic(self, signal, k, debugmode=None) -- returns float -- determines Akaike Information Criterion for a specific index k
- self._get_k(self, **kwargs) -- Calculates the k value according to the Bartels scale
- self._get_k_float(self, value, **kwargs) -- Like _get_k, but for testing single values and not full stream keys (used in filtered function)
- self._gf(self, t, tau): -- Gauss function
- self._hf(self, p, x) -- Harmonic function
- self._residual_func(self, func, y) -- residual of the harmonic function
- self._tau(self, period) -- low pass filter with -3db point at period in sec (e.g. 120 sec)
B. Internal Methods III: General utility & NaN handlers
- self._convertstream(self, coordinate, **kwargs) -- Convert coordinates of x,y,z columns in stream
- self._det_trange(self, period) -- starting with coefficients above 1%
- self._find_t_limits(self) -- return times of first and last stream data points
- self._testtime(time) -- returns datetime object
- self._get_min(key) -- returns float
- self._get_max(key) -- returns float
- self._normalize(column) -- returns list,float,float -- normalizes selected column to range 0,1
- nan_helper(self, y) -- Helper to handle indices and logical indices of NaNs
- self._print_key_headers(self) -- Prints keys in datastream with variable and unit.
- self._get_key_headers(self) -- Returns keys in datastream.
- self._drop_nans(self, key) -- Helper to drop lines with NaNs in any of the selected keys.
- self._is_number(self, s) -- ?
Supporting EXTERNAL methods:
----------------------------
Useful functions:
- array2stream -- returns a data stream -- converts a list of arrays to a datastream
- linestruct2ndarray -- returns a data ndarray -- converts a old linestruct format
    - denormalize -- returns list -- (column,startvalue,endvalue) denormalizes selected column from range 0,1 to sv,ev
- find_nearest(array, value) -- find point in array closest to value
- maskNAN(column) -- Tests for NAN values in array and usually masks them
- nearestPow2(x) -- Find power of two nearest to x
*********************************************************************
Standard function description format:
DEFINITION:
Description of function purpose and usage.
PARAMETERS:
Variables:
- variable: (type) Description.
Kwargs:
- variable: (type) Description.
RETURNS:
- variable: (type) Description.
EXAMPLE:
>>> alldata = mergeStreams(pos_stream, lemi_stream, keys=['<KEY>'])
APPLICATION:
Code for simple application.
*********************************************************************
Standard file description format:
Path: *path* (magpy.acquisition.pos1protocol)
Part of package: *package* (acquisition)
Type: *type* (type of file/package)
PURPOSE:
Description...
CONTAINS:
*ThisClass: (Class)
What is this class for?
thisFunction: (Func) Description
DEPENDENCIES:
List all non-standard packages required for file.
+ paths of all MagPy package dependencies.
CALLED BY:
Path to magpy packages that call this part, e.g. magpy.bin.acquisition
*********************************************************************
"""
KEYLIST = [ 'time', # Timestamp (date2num object)
'x', # X or I component of magnetic field (float)
'y', # Y or D component of magnetic field (float)
'z', # Z component of magnetic field (float)
'f', # Magnetic field strength (float)
't1', # Temperature variable (e.g. ambient temp) (float)
't2', # Secondary temperature variable (e.g. sensor temp) (float)
'var1', # Extra variable #1 (float)
'var2', # Extra variable #2 (float)
'var3', # Extra variable #3 (float)
'var4', # Extra variable #4 (float)
'var5', # Extra variable #5 (float)
'dx', # Errors in X (float)
'dy', # Errors in Y (float)
'dz', # Errors in Z (float)
'df', # Errors in F (float)
'str1', # Extra string variable #1 (str)
'str2', # Extra string variable #2 (str)
'str3', # Extra string variable #3 (str)
'str4', # Extra string variable #4 (str)
'flag', # Variable for flags. (str='0000000000000000-')
'comment', # Space for comments on flags (str)
'typ', # Type of data (str='xyzf')
'sectime' # Secondary time variable (date2num)
]
NUMKEYLIST = KEYLIST[1:16]
def __init__(self, container=None, header={},ndarray=None):
if container is None:
container = []
self.container = container
if ndarray is None:
ndarray = np.array([np.asarray([]) for elem in KEYLIST])
self.ndarray = ndarray ## Test this! -> for better memory efficiency
#if header is None:
# header = {'Test':'Well, it works'}
#header = {}
self.header = header
#for key in KEYLIST:
# setattr(self,key,np.asarray([]))
#self.header = {'Test':'Well, it works'}
self.progress = 0
# ------------------------------------------------------------------------
# A. Standard functions and overrides for list like objects
# ------------------------------------------------------------------------
def ext(self, columnstructure): # new version of extend function for column operations
"""
the extend and add functions must be replaced in case of
speed optimization
"""
for key in KEYLIST:
self.container.key = np.append(self.container.key, columnstructure.key, 1)
def add(self, datlst):
#try:
assert isinstance(self.container, (list, tuple))
self.container.append(datlst)
#except:
# print list(self.container).append(datlst)
def length(self):
#try:
if len(self.ndarray[0]) > 0:
ll = [len(elem) for elem in self.ndarray]
return ll
else:
try: ## might fail if LineStruct is empty (no time)
if len(self) == 1 and np.isnan(self[0].time):
return [0]
else:
return [len(self)]
except:
return [0]
def replace(self, datlst):
# Replace in stream
# - replace value with existing data
# Method was used by K calc - replaced by internal method there
newself = DataStream()
assert isinstance(self.container, (list, tuple))
ti = list(self._get_column('time'))
try:
ind = ti.index(datlst.time)
except ValueError:
self = self.add(datlst)
return self
except:
return self
li = [elem for elem in self]
del li[ind]
del ti[ind]
li.append(datlst)
return DataStream(li,self.header)
def copy(self):
"""
DESCRIPTION:
method for copying content of a stream to a new stream
APPLICATION:
for non-destructive methods
"""
#print self.container
#assert isinstance(self.container, (list, tuple))
co = DataStream()
#co.header = self.header
newheader = {}
for el in self.header:
newheader[el] = self.header[el]
array = [[] for el in KEYLIST]
if len(self.ndarray[0])> 0:
for ind, key in enumerate(KEYLIST):
liste = []
for val in self.ndarray[ind]: ## This is necessary to really copy the content
liste.append(val)
array[ind] = np.asarray(liste)
co.container = [LineStruct()]
else:
for el in self:
li = LineStruct()
for key in KEYLIST:
if key == 'time':
li.time = el.time
else:
#exec('li.'+key+' = el.'+key)
elkey = getattr(el,key)
setattr(li, key, elkey)
co.add(li)
return DataStream(co.container,newheader,np.asarray(array, dtype=object))
def __str__(self):
return str(self.container)
def __repr__(self):
return str(self.container)
def __getitem__(self, var):
try:
if var in NUMKEYLIST:
return self.ndarray[self.KEYLIST.index(var)].astype(np.float64)
else:
return self.ndarray[self.KEYLIST.index(var)]
except:
return self.container.__getitem__(var)
def __setitem__(self, var, value):
self.ndarray[self.KEYLIST.index(var)] = value
def __len__(self):
return len(self.container)
def clear_header(self):
"""
Remove header information
"""
self.header = {}
def extend(self,datlst,header,ndarray):
array = [[] for key in KEYLIST]
self.container.extend(datlst)
self.header = header
# Some initial check if any data set except timecolumn is contained
datalength = len(ndarray)
#t1 = datetime.utcnow()
if pyvers and pyvers == 2:
ch1 = '-'.encode('utf-8') # not working with py3
ch2 = ''.encode('utf-8')
else:
ch1 = '-'
ch2 = ''
try:
test = []
for col in ndarray:
col = np.array(list(col))
#print (np.array(list(col)).dtype)
if col.dtype in ['float64','float32','int32','int64']:
try:
x = np.asarray(col)[~np.isnan(col)]
except: # fallback 1 -> should not needed any more
#print ("Fallback1")
x = np.asarray([elem for elem in col if not np.isnan(elem)])
else:
#y = np.asarray(col)[col!='-']
#x = np.asarray(y)[y!='']
y = np.asarray(col)[col!=ch1]
x = np.asarray(y)[y!=ch2]
test.append(x)
test = np.asarray(test,dtype=object)
except:
# print ("Fallback -- pretty slowly")
#print ("Fallback2")
test = [[elem for elem in col if not elem in [ch1,ch2]] for col in ndarray]
#t2 = datetime.utcnow()
#print (t2-t1)
emptycnt = [len(el) for el in test if len(el) > 0]
if self.ndarray.size == 0:
self.ndarray = ndarray
elif len(emptycnt) == 1:
print("Tyring to extend with empty data set")
#self.ndarray = np.asarray((list(self.ndarray)).extend(list(ndarray)))
else:
for idx,elem in enumerate(self.ndarray):
if len(ndarray[idx]) > 0:
if len(self.ndarray[idx]) > 0 and len(self.ndarray[0]) > 0:
array[idx] = np.append(self.ndarray[idx], ndarray[idx]).astype(object)
#array[idx] = np.append(self.ndarray[idx], ndarray[idx],1).astype(object)
elif len(self.ndarray[0]) > 0: # only time axis present so far but no data within this elem
fill = ['-']
key = KEYLIST[idx]
if key in NUMKEYLIST or key=='sectime':
fill = [float('nan')]
nullvals = np.asarray(fill * len(self.ndarray[0]))
#array[idx] = np.append(nullvals, ndarray[idx],1).astype(object)
array[idx] = np.append(nullvals, ndarray[idx]).astype(object)
else:
array[idx] = ndarray[idx].astype(object)
self.ndarray = np.asarray(array, dtype=object)
def union(self,column):
seen = set()
seen_add = seen.add
return [ x for x in column if not (x in seen or seen_add(x))]
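    # Hedged sketch: union() keeps the first occurrence of every value and preserves
    # the original order (unlike set(), which would reorder):
    # >>> DataStream().union([3, 1, 3, 2, 1])
    # [3, 1, 2]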
def removeduplicates(self):
"""
DESCRIPTION:
Identify duplicate time stamps and remove all data.
            Lines with the first occurrence are kept.
"""
# get duplicates in time column
def list_duplicates(seq):
seen = set()
seen_add = seen.add
return [idx for idx,item in enumerate(seq) if item in seen or seen_add(item)]
if not len(self.ndarray[0]) > 0:
print ("removeduplicates: works only with ndarrays")
return
duplicateindicies = list_duplicates(self.ndarray[0])
array = [[] for key in KEYLIST]
for idx, elem in enumerate(self.ndarray):
if len(elem) > 0:
newelem = np.delete(elem, duplicateindicies)
array[idx] = newelem
return DataStream(self, self.header, np.asarray(array,dtype=object))
def start(self, dateformt=None):
st,et = self._find_t_limits()
return st
def end(self, dateformt=None):
st,et = self._find_t_limits()
return et
def findtime(self,time,**kwargs):
"""
DEFINITION:
Find a line within the container which contains the selected time step
or the first line following this timestep (since 0.3.99 using mode 'argmax')
VARIABLES:
startidx (int) index to start search with (speeding up)
endidx (int) index to end search with (speeding up)
mode (string) define search mode (fastest would be 'argmax')
RETURNS:
The index position of the line and the line itself
"""
startidx = kwargs.get('startidx')
endidx = kwargs.get('endidx')
mode = kwargs.get('mode')
#try:
# from bisect import bisect
#except ImportError:
# print("Import error")
st = date2num(self._testtime(time))
if len(self.ndarray[0]) > 0:
if startidx and endidx:
ticol = self.ndarray[0][startidx:endidx]
elif startidx:
ticol = self.ndarray[0][startidx:]
elif endidx:
ticol = self.ndarray[0][:endidx]
else:
ticol = self.ndarray[0]
try:
if mode =='argmax':
## much faster since 0.3.99 (used in flag_stream)
indexes = [np.argmax(ticol>=st)]
else:
## the following method is used until 0.3.98
indexes = [i for i,x in enumerate(ticol) if x == st] ### FASTER
# Other methods
# #############
#indexes = [i for i,x in enumerate(ticol) if np.allclose(x,st,rtol=1e-14,atol=1e-17)] # if the two time equal within about 0.7 milliseconds
#indexes = [bisect(ticol, st)] ## SELECTS ONLY INDEX WHERE VALUE SHOULD BE inserted
#indexes = [ticol.index(st)]
#print("findtime", indexes)
if not len(indexes) == 0:
if startidx:
retindex = indexes[0] + startidx
else:
retindex = indexes[0]
#print("Findtime index:",retindex)
return retindex, LineStruct()
else:
return 0, []
#return list(self.ndarray[0]).index(st), LineStruct()
except:
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
for index, line in enumerate(self):
if line.time == st:
return index, line
logger.warning("findtime: Didn't find selected time - returning 0")
return 0, []
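    # Hedged usage sketch for the 'argmax' fast path described above ('data' is a
    # hypothetical DataStream in ndarray form covering the requested timestamp):
    # >>> idx, line = data.findtime('2018-06-01T12:00:00', mode='argmax')
    # >>> data.ndarray[0][idx] >= date2num(data._testtime('2018-06-01T12:00:00'))
    # True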
def _find_t_limits(self):
"""
DEFINITION:
Find start and end times in stream.
RETURNS:
Two datetime objects, start and end.
"""
if len(self.ndarray[0]) > 0:
t_start = num2date(np.min(self.ndarray[0].astype(float))).replace(tzinfo=None)
t_end = num2date(np.max(self.ndarray[0].astype(float))).replace(tzinfo=None)
else:
try: # old type
t_start = num2date(self[0].time).replace(tzinfo=None)
t_end = num2date(self[-1].time).replace(tzinfo=None)
except: # empty
t_start,t_end = None,None
return t_start, t_end
def _print_key_headers(self):
print("%10s : %22s : %28s" % ("MAGPY KEY", "VARIABLE", "UNIT"))
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
except:
header = None
try:
unit = self.header['unit-col-'+key]
except:
unit = None
print("%10s : %22s : %28s" % (key, header, unit))
def _get_key_headers(self,**kwargs):
"""
DEFINITION:
get a list of existing numerical keys in stream.
PARAMETERS:
kwargs:
            - limit: (int) limit the length of the list
- numerical: (bool) if True, select only numerical keys
RETURNS:
- keylist: (list) a list like ['x','y','z']
EXAMPLE:
>>> data_stream._get_key_headers(limit=1)
"""
limit = kwargs.get('limit')
numerical = kwargs.get('numerical')
if numerical:
TESTLIST = FLAGKEYLIST
else:
TESTLIST = KEYLIST
keylist = []
"""
for key in FLAGKEYLIST[1:]:
try:
header = self.header['col-'+key]
try:
unit = self.header['unit-col-'+key]
except:
unit = None
keylist.append(key)
except:
header = None
"""
if not len(keylist) > 0: # e.g. Testing ndarray
for ind,elem in enumerate(self.ndarray): # use the long way
if len(elem) > 0 and ind < len(TESTLIST):
if not TESTLIST[ind] == 'time':
keylist.append(TESTLIST[ind])
if not len(keylist) > 0: # e.g. header col-? does not contain any info
#for key in FLAGKEYLIST[1:]: # use the long way
for key in TESTLIST[1:]: # use the long way
col = self._get_column(key)
if len(col) > 0:
#if not len(col) == 1 and not ( # maybe add something to prevent reading empty LineStructs)
if len(col) == 1:
if col[0] in ['-',float(nan),'']:
pass
else:
keylist.append(key)
if limit and len(keylist) > limit:
keylist = keylist[:limit]
return keylist
def _get_key_names(self):
"""
DESCRIPTION:
get the variable names for each key
APPLICATION:
keydict = self._get_key_names()
"""
keydict = {}
for key in KEYLIST:
kname = self.header.get('col-'+key)
keydict[kname] = key
return keydict
def dropempty(self):
"""
DESCRIPTION:
Drop empty arrays from ndarray and store their positions
"""
if not len(self.ndarray[0]) > 0:
return self.ndarray, np.asarray([])
newndarray = []
indexarray = []
for ind,elem in enumerate(self.ndarray):
if len(elem) > 0:
newndarray.append(np.asarray(elem).astype(object))
indexarray.append(ind)
keylist = [el for ind,el in enumerate(KEYLIST) if ind in indexarray]
return np.asarray(newndarray), keylist
def fillempty(self, ndarray, keylist):
"""
DESCRIPTION:
Fills empty arrays into ndarray at all position of KEYLIST not provided in keylist
"""
if not len(ndarray[0]) > 0:
return self
        if len(self.ndarray) == len(KEYLIST):
            # nothing was dropped, so there is nothing to fill in
            return self.ndarray
lst = list(ndarray)
for i,key in enumerate(KEYLIST):
if not key in keylist:
lst.insert(i,[])
newndarray = np.asarray(lst,dtype=object)
return newndarray
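    # Hedged sketch of the dropempty()/fillempty() round trip (used by sorting()):
    # dropempty() strips empty columns and remembers the remaining keys, and
    # fillempty() re-inserts empty placeholders so there is again one entry per
    # KEYLIST key ('data' is a hypothetical DataStream in ndarray form):
    # >>> reduced, keys = data.dropempty()
    # >>> restored = data.fillempty(reduced, keys)
    # >>> len(restored) == len(KEYLIST)
    # True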
def sorting(self):
"""
Sorting data according to time (maybe generalize that to some key)
"""
try: # old LineStruct part
liste = sorted(self.container, key=lambda tmp: tmp.time)
except:
pass
if len(self.ndarray[0]) > 0:
self.ndarray, keylst = self.dropempty()
#self.ndarray = self.ndarray[:, np.argsort(self.ndarray[0])] # does not work if some rows have a different length)
ind = np.argsort(self.ndarray[0])
for i,el in enumerate(self.ndarray):
if len(el) == len(ind):
self.ndarray[i] = el[ind]
else:
#print("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("Sorting: key %s has the wrong length - replacing row with NaNs" % KEYLIST[i])
logger.warning("len(t-axis)=%d len(%s)=%d" % (len(self.ndarray[0]), KEYLIST[i], len(self.ndarray[i])))
self.ndarray[i] = np.empty(len(self.ndarray[0])) * np.nan
self.ndarray = self.fillempty(self.ndarray,keylst)
for idx,el in enumerate(self.ndarray):
self.ndarray[idx] = np.asarray(self.ndarray[idx]).astype(object)
else:
self.ndarray = self.ndarray
return DataStream(liste, self.header, self.ndarray)
# ------------------------------------------------------------------------
# B. Internal Methods: Line & column functions
# ------------------------------------------------------------------------
def _get_line(self, key, value):
"""
        returns a LineStruct element corresponding to the first occurrence of value within the selected key
e.g.
st = st._get_line('time',734555.3442) will return the line with time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lines = [elem for elem in self if eval('elem.'+key) == value]
return lines[0]
def _take_columns(self, keys):
"""
DEFINITION:
            extract selected columns of the given keys (Old LineStruct format - deprecated)
"""
resultstream = DataStream()
for elem in self:
line = LineStruct()
line.time = elem.time
resultstream.add(line)
resultstream.header = {}
for key in keys:
if not key in KEYLIST:
pass
elif not key == 'time':
col = self._get_column(key)
#print key, len(col)
try:
resultstream.header['col-'+key] = self.header['col-'+key]
except:
pass
try:
resultstream.header['unit-col-'+key] = self.header['unit-col-'+key]
except:
pass
resultstream = resultstream._put_column(col,key)
return resultstream
def _remove_lines(self, key, value):
"""
removes lines with value within the selected key
e.g.
        st = st._remove_lines('time',734555.3442) will return a stream without the line(s) at time 7...
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
lst = [elem for elem in self if not eval('elem.'+key) == value]
return DataStream(lst, self.header)
def _get_column(self, key):
"""
Returns a numpy array of selected column from Stream
Example:
columnx = datastream._get_column('x')
"""
if not key in KEYLIST:
raise ValueError("Column key not valid")
# Speeded up this technique:
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
col = self[key]
except:
col = self.ndarray[ind]
return col
# Check for initialization value
#testval = self[0][ind]
# if testval == KEYINITDICT[key] or isnan(testval):
# return np.asarray([])
try:
col = np.asarray([row[ind] for row in self])
#get the first ten elements and test whether nan is there -- why ??
"""
try: # in case of string....
novalfound = True
for ele in col[:10]:
if not isnan(ele):
novalfound = False
if novalfound:
return np.asarray([])
except:
return col
"""
return col
except:
return np.asarray([])
def _put_column(self, column, key, **kwargs):
"""
DEFINITION:
adds a column to a Stream
PARAMETERS:
column: (array) single list with data with equal length as stream
key: (key) key to which the data is written
Kwargs:
columnname: (string) define a name
columnunit: (string) define a unit
RETURNS:
- DataStream object
EXAMPLE:
>>> stream = stream._put_column(res, 't2', columnname='Rain',columnunit='mm in 1h')
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
columnname = kwargs.get('columnname')
columnunit = kwargs.get('columnunit')
if not key in KEYLIST:
raise ValueError("Column key not valid")
if len(self.ndarray[0]) > 0:
ind = KEYLIST.index(key)
self.ndarray[ind] = np.asarray(column)
else:
if not len(column) == len(self):
raise ValueError("Column length does not fit Datastream")
for idx, elem in enumerate(self):
setattr(elem, key, column[idx])
if not columnname:
try: # TODO correct that
if eval('self.header["col-%s"]' % key) == '':
exec('self.header["col-%s"] = "%s"' % (key, key))
except:
pass
else:
exec('self.header["col-%s"] = "%s"' % (key, columnname))
if not columnunit:
try: # TODO correct that
if eval('self.header["unit-col-%s"]' % key) == '':
exec('self.header["unit-col-%s"] = "arb"' % (key))
except:
pass
else:
exec('self.header["unit-col-%s"] = "%s"' % (key, columnunit))
return self
def _move_column(self, key, put2key):
'''
DEFINITION:
Move column of key "key" to key "put2key".
Simples.
PARAMETERS:
Variables:
- key: (str) Key to be moved.
- put2key: (str) Key for 'key' to be moved to.
RETURNS:
- stream: (DataStream) DataStream object.
EXAMPLE:
>>> data_stream._move_column('f', 'var1')
'''
if not key in KEYLIST:
logger.error("_move_column: Column key %s not valid!" % key)
if key == 'time':
logger.error("_move_column: Cannot move time column!")
if not put2key in KEYLIST:
logger.error("_move_column: Column key %s (to move %s to) is not valid!" % (put2key,key))
if len(self.ndarray[0]) > 0:
col = self._get_column(key)
self =self._put_column(col,put2key)
return self
try:
for i, elem in enumerate(self):
exec('elem.'+put2key+' = '+'elem.'+key)
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
try:
exec('self.header["col-%s"] = self.header["col-%s"]' % (put2key, key))
exec('self.header["unit-col-%s"] = self.header["unit-col-%s"]' % (put2key, key))
exec('self.header["col-%s"] = None' % (key))
exec('self.header["unit-col-%s"] = None' % (key))
except:
logger.error("_move_column: Error updating headers.")
logger.info("_move_column: Column %s moved to column %s." % (key, put2key))
except:
logger.error("_move_column: It's an error.")
return self
def _drop_column(self,key):
"""
remove a column of a Stream
"""
ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
try:
self.ndarray[ind] = np.asarray([])
except:
# Some array don't allow that, shape error e.g. PYSTRING -> then use this
                array = [np.asarray(el) if idx != ind else np.asarray([]) for idx,el in enumerate(self.ndarray)]
self.ndarray = np.asarray(array,dtype=object)
colkey = "col-%s" % key
colunitkey = "unit-col-%s" % key
try:
self.header.pop(colkey, None)
self.header.pop(colunitkey, None)
except:
print("_drop_column: Error while dropping header info")
else:
print("No data available or LineStruct type (not supported)")
return self
def _clear_column(self, key):
"""
        clear the contents of a column of a Stream
"""
#init = kwargs.get('init')
#if init>0:
# for i in range init:
# self.add(float('NaN'))
if not key in KEYLIST:
raise ValueError("Column key not valid")
for idx, elem in enumerate(self):
if key in NUMKEYLIST:
setattr(elem, key, float("NaN"))
#exec('elem.'+key+' = float("NaN")')
else:
setattr(elem, key, "-")
#exec('elem.'+key+' = "-"')
return self
def _reduce_stream(self, pointlimit=100000):
"""
DEFINITION:
Reduces size of stream by picking for plotting methods to save memory
when plotting large data sets.
Does NOT filter or smooth!
This function purely removes data points (rows) in a
periodic fashion until size is <100000 data points.
(Point limit can also be defined.)
PARAMETERS:
Kwargs:
- pointlimit: (int) Max number of points to include in stream. Default is 100000.
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
EXAMPLE:
>>> lessdata = ten_Hz_data._reduce_stream(pointlimit=500000)
"""
size = len(self)
div = size/pointlimit
divisor = math.ceil(div)
count = 0.
lst = []
if divisor > 1.:
for elem in self:
if count%divisor == 0.:
lst.append(elem)
count += 1.
else:
logger.warning("_reduce_stream: Stream size (%s) is already below pointlimit (%s)." % (size,pointlimit))
return self
logger.info("_reduce_stream: Stream size reduced from %s to %s points." % (size,len(lst)))
return DataStream(lst, self.header)
def _remove_nancolumns(self):
"""
DEFINITION:
            Remove any columns solely filled with nan values
APPLICATION:
called by plot methods in mpplot
RETURNS:
- DataStream: (DataStream) New stream reduced to below pointlimit.
"""
array = [[] for key in KEYLIST]
if len(self.ndarray[0]) > 0:
for idx, elem in enumerate(self.ndarray):
if len(self.ndarray[idx]) > 0 and KEYLIST[idx] in NUMKEYLIST:
lst = list(self.ndarray[idx])
#print KEYLIST[idx],lst[0]
if lst[1:] == lst[:-1] and np.isnan(float(lst[0])):
array[idx] = np.asarray([])
else:
array[idx] = self.ndarray[idx]
else:
array[idx] = self.ndarray[idx]
else:
pass
return DataStream(self,self.header,np.asarray(array,dtype=object))
# ------------------------------------------------------------------------
# B. Internal Methods: Data manipulation functions
# ------------------------------------------------------------------------
    def _aic(self, signal, k, debugmode=None):
        aicval = float('nan')
        try:
            aicval = (k-1)* np.log(np.var(signal[:k]))+(len(signal)-k-1)*np.log(np.var(signal[k:]))
        except:
            if debugmode:
                logger.debug('_AIC: could not evaluate AIC at index position %i' % (k))
            pass
        return aicval
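    # The expression above evaluates the Akaike Information Criterion of splitting
    # 'signal' at index k, i.e. AIC(k) = (k-1)*ln(var(signal[:k])) + (N-k-1)*ln(var(signal[k:])).
    # Hedged sketch with a synthetic step signal (the minimum falls near the change point):
    # >>> sig = np.concatenate((np.random.normal(0, 0.1, 100), np.random.normal(5, 0.1, 100)))
    # >>> aics = [DataStream()._aic(sig, k) for k in range(2, 198)]
    # >>> 90 < 2 + int(np.argmin(aics)) < 110
    # True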
def harmfit(self,nt, val, fitdegree):
        # method for harmonic fit according to Phil McFadden's fortran program
"""
DEFINITION:
Method for harmonic fit according to <NAME>en's fortran program
Used by k-value determination
PARAMETERS:
Kwargs:
- nt: (list) Normalized time array.
- val: (list) Value list.
            - fitdegree: (int) harmonic degree, default is 5.
RETURNS:
- newval: (array) an array with fitted values of length(val).
EXAMPLE:
>>> f_fit = self.harmfit(nt,val, 5)
"""
N = len(nt)
coeff = (val[-1]-val[0]) /(nt[-1]-nt[0])
newval = [elem-coeff*(nt[i]-nt[0]) for i, elem in enumerate(val)]
ReVal = []
ImVal = []
for h in range(0,fitdegree):
ReVal.append(newval[0])
ImVal.append(0.0)
angle = -h*(2.0*np.pi/N)
for i in range(1,len(newval)):
si = np.sin(i*angle)
co = np.cos(i*angle)
ReVal[h] = ReVal[h] + newval[i]*co
ImVal[h] = ImVal[h] + newval[i]*si
#print "Parameter:", len(newval)
#print len(ReVal), ReVal
angle = 2.0*np.pi*(float(N-1)/float(N))/(nt[-1]-nt[0])
harmval = []
for i,elem in enumerate(newval):
harmval.append(ReVal[0])
angle2 = (nt[i]-nt[0])*angle
for h in range(1,fitdegree):
si = np.sin(h*angle2)
co = np.cos(h*angle2)
harmval[i] = harmval[i]+(2.0*(ReVal[h]*co-ImVal[h]*si))
harmval[i] = harmval[i]/float(N)+coeff*(nt[i]-nt[0])
return np.asarray(harmval)
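    # Hedged sketch: harmonic fit of one simulated day of minute values, with the
    # time axis normalized as expected by harmfit (illustrative data only):
    # >>> nt = np.linspace(0.0, 1.0, 1440)
    # >>> val = list(10.0*np.sin(2*np.pi*nt) + 0.5*nt)
    # >>> fitted = DataStream().harmfit(nt, val, 5)
    # >>> fitted.shape
    # (1440,)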
def _get_max(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmax(self.ndarray[key_ind].astype(float))
ind = np.nanargmax(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = max(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_min(self, key, returntime=False):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
t_ind = KEYLIST.index('time')
if len(self.ndarray[0]) > 0:
result = np.nanmin(self.ndarray[key_ind].astype(float))
ind = np.nanargmin(self.ndarray[key_ind].astype(float))
tresult = self.ndarray[t_ind][ind]
else:
elem = min(self, key=lambda tmp: eval('tmp.'+key))
result = eval('elem.'+key)
tresult = elem.time
if returntime:
return result, tresult
else:
return result
def _get_variance(self, key):
if not key in KEYLIST[:16]:
raise ValueError("Column key not valid")
key_ind = KEYLIST.index(key)
if len(self.ndarray[0]) > 0:
result = np.nanvar(self.ndarray[key_ind].astype(float))
return result
def amplitude(self,key):
"""
DESCRIPTION:
            calculates the maximum-minimum difference of the key's timeseries
REQUIRES:
_get_column()
RETURNS:
            float: difference between maximum and minimum value in time range
APPLICATION
amp = stream.amplitude('x')
"""
ts = self._get_column(key).astype(float)
ts = ts[~np.isnan(ts)]
maxts = np.max(ts)
mints = np.min(ts)
return maxts-mints
def _gf(self, t, tau):
"""
Gauss function
"""
return np.exp(-((t/tau)*(t/tau))/2)
def _hf(self, p, x):
"""
Harmonic function
"""
hf = p[0]*cos(2*pi/p[1]*x+p[2]) + p[3]*x + p[4] # Target function
return hf
def _residual_func(self, func, y):
"""
residual of the harmonic function
"""
return y - func
def _tau(self, period, fac=0.83255461):
"""
low pass filter with -3db point at period in sec (e.g. 120 sec)
        1. convert period from seconds to days, the unit of the time column
2. return tau (in unit "day")
- The value of 0.83255461 is obtained for -3db (see IAGA Guide)
"""
per = period/(3600*24)
return fac*per/(2*np.pi)
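    # Hedged worked example: for a -3 dB period of 120 seconds (as in the docstring),
    # tau = 0.83255461 * (120/86400) / (2*pi) ~ 1.84e-4 days, i.e. about 15.9 seconds,
    # which _det_trange() below scales up to the full filter window.
    # >>> DataStream()._tau(120) * 86400        # ~ 15.9 seconds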
# ------------------------------------------------------------------------
# B. Internal Methods: General utility & NaN handlers
# ------------------------------------------------------------------------
def _convertstream(self, coordinate, **kwargs):
"""
DESCRIPTION:
Convert coordinates of x,y,z columns in other
coordinate system:
- xyz2hdz
- xyz2idf
- hdz2xyz
- idf2xyz
        Helper method which calls the transformation routines
APPLICATION:
used by k_fmi, variocorrection
"""
ext = ''
if len(self.ndarray[4]) > 0:
ext = 'F'
if len(self.ndarray[KEYLIST.index('df')]) > 0:
ext = 'G'
if len(self.ndarray[0]) > 0:
if coordinate == 'xyz2hdz':
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'xyz2idf':
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
elif coordinate == 'hdz2xyz':
self = self.hdz2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2xyz':
self = self.idf2xyz()
self.header['DataComponents'] = 'XYZ'+ext
elif coordinate == 'idf2hdz':
self = self.idf2xyz()
self = self.xyz2hdz()
self.header['DataComponents'] = 'HDZ'+ext
elif coordinate == 'hdz2idf':
self = self.hdz2xyz()
self = self.xyz2idf()
self.header['DataComponents'] = 'IDF'+ext
else:
print("_convertstream: unkown coordinate transform")
return self
keep_header = kwargs.get('keep_header')
outstream = DataStream()
for elem in self:
row=LineStruct()
exec('row = elem.'+coordinate+'(unit="deg")')
row.typ = ''.join((list(coordinate))[4:])+'f'
outstream.add(row)
if not keep_header:
outstream.header['col-x'] = (list(coordinate))[4]
outstream.header['col-y'] = (list(coordinate))[5]
outstream.header['col-z'] = (list(coordinate))[6]
if (list(coordinate))[4] in ['i','d']:
outstream.header['unit-col-x'] = 'deg'
else:
outstream.header['unit-col-x'] = 'nT'
if (list(coordinate))[5] in ['i','d']:
outstream.header['unit-col-y'] = 'deg'
else:
outstream.header['unit-col-y'] = 'nT'
if (list(coordinate))[6] in ['i','d']:
outstream.header['unit-col-z'] = 'deg'
else:
outstream.header['unit-col-z'] = 'nT'
return DataStream(outstream,outstream.header)
def _delete(self,index):
"""
DESCRIPTION:
Helper method to delete all values at a specific index or range of indicies
from the ndarray
APPLICTAION:
Used by k_fmi with individual indicies
"""
for i,array in enumerate(self.ndarray):
if isinstance( index, (int) ): # removed long (not necessary for python3, error in win)
if len(array) > index:
self.ndarray[i] = np.delete(self.ndarray[i],index)
else:
self.ndarray[i] = np.delete(self.ndarray[i],index)
return self
def _append(self,stream):
"""
DESCRIPTION:
Helper method to append values from another stream to
a ndarray. Append only to columns already filled in self.
        APPLICATION:
Used by k_fmi
"""
        for i,array in enumerate(self.ndarray):
if len(array) > 0:
self.ndarray[i] = np.append(self.ndarray[i],stream.ndarray[i])
return self
def _det_trange(self, period):
"""
starting with coefficients above 1%
is now returning a timedelta object
"""
return np.sqrt(-np.log(0.01)*2)*self._tau(period)
def _is_number(self, s):
"""
Test whether s is a number
"""
if str(s) in ['','None',None]:
return False
try:
float(s)
return True
except ValueError:
return False
def _normalize(self, column):
"""
normalizes the given column to range [0:1]
"""
normcol = []
column = column.astype(float)
maxval = np.max(column)
minval = np.min(column)
for elem in column:
normcol.append((elem-minval)/(maxval-minval))
return normcol, minval, maxval
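    # Hedged sketch: _normalize() maps a column onto [0,1] and returns the original
    # minimum and maximum, so the external denormalize() helper can undo it:
    # >>> norm, cmin, cmax = DataStream()._normalize(np.asarray([2.0, 4.0, 6.0]))
    # >>> norm[1], cmin, cmax            # -> 0.5, 2.0, 6.0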
def _testtime(self, time):
"""
Check the date/time input and returns a datetime object if valid:
! Use UTC times !
- accepted are the following inputs:
1) absolute time: as provided by date2num
2) strings: 2011-11-22 or 2011-11-22T11:11:00
3) datetime objects by datetime.datetime e.g. (datetime(2011,11,22,11,11,00)
"""
if isinstance(time, float) or isinstance(time, int):
try:
timeobj = num2date(time).replace(tzinfo=None)
except:
raise TypeError
elif isinstance(time, str): # test for str only in Python 3 should be basestring for 2.x
try:
timeobj = datetime.strptime(time,"%Y-%m-%d")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%dT%H:%M:%S.%f")
except:
try:
timeobj = datetime.strptime(time,"%Y-%m-%d %H:%M:%S")
except:
try:
# Not happy with that but necessary to deal
# with old 1000000 micro second bug
timearray = time.split('.')
if timearray[1] == '1000000':
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")+timedelta(seconds=1)
else:
# This would be wrong but leads always to a TypeError
timeobj = datetime.strptime(timearray[0],"%Y-%m-%d %H:%M:%S")
except:
try:
timeobj = num2date(float(time)).replace(tzinfo=None)
except:
raise TypeError
elif not isinstance(time, datetime):
raise TypeError
else:
timeobj = time
return timeobj
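    # Hedged sketch: the accepted inputs listed in the docstring all resolve to the
    # same datetime object:
    # >>> ds = DataStream()
    # >>> ds._testtime('2011-11-22')
    # datetime.datetime(2011, 11, 22, 0, 0)
    # >>> ds._testtime(datetime(2011, 11, 22)) == ds._testtime(date2num(datetime(2011, 11, 22)))
    # True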
def _drop_nans(self, key):
"""
DEFINITION:
Helper to drop all lines when NaNs or INFs are found within the selected key
RETURNS:
- DataStream: (DataStream object) a new data stream object with out identified lines.
EXAMPLE:
>>> newstream = stream._drop_nans('x')
APPLICATION:
used for plotting and fitting of data
"""
array = [np.asarray([]) for elem in KEYLIST]
if len(self.ndarray[0]) > 0 and key in NUMKEYLIST:
ind = KEYLIST.index(key)
#indicieslst = [i for i,el in enumerate(self.ndarray[ind].astype(float)) if np.isnan(el) or np.isinf(el)]
ar = np.asarray(self.ndarray[ind]).astype(float)
indicieslst = []
for i,el in enumerate(ar):
if np.isnan(el) or np.isinf(el):
indicieslst.append(i)
searchlist = ['time']
searchlist.extend(NUMKEYLIST)
for index,tkey in enumerate(searchlist):
if len(self.ndarray[index])>0: # Time column !!! -> index+1
array[index] = np.delete(self.ndarray[index], indicieslst)
#elif len(self.ndarray[index+1])>0:
# array[index+1] = self.ndarray[index+1]
newst = [LineStruct()]
else:
newst = [elem for elem in self if not isnan(eval('elem.'+key)) and not isinf(eval('elem.'+key))]
return DataStream(newst,self.header,np.asarray(array,dtype=object))
def _select_keys(self, keys):
"""
DESCRIPTION
Non-destructive method to select provided keys from Data stream.
APPLICATION:
streamxy = streamyxzf._select_keys(['x','y'])
"""
result = self.copy()
try:
if not len(keys) > 0:
return self
except:
return self
"""
print ("sel", keys)
if not 'time' in keys:
keys.append('time')
print ("sel", keys)
"""
ndarray = [[] for key in KEYLIST]
ndarray = np.asarray([np.asarray(elem) if KEYLIST[idx] in keys or KEYLIST[idx] == 'time' else np.asarray([]) for idx,elem in enumerate(result.ndarray)])
return DataStream([LineStruct()],result.header,ndarray)
def _select_timerange(self, starttime=None, endtime=None, maxidx=-1):
"""
DESCRIPTION
Non-destructive method to select a certain time range from a stream.
Similar to trim, leaving the original stream unchanged however.
APPLICATION:
Used by write
"""
ndarray = [[] for key in KEYLIST]
# Use a different technique
# copy all data to array and then delete everything below and above
#t1 = datetime.utcnow()
#ndarray = self.ndarray
startindices = []
endindices = []
if starttime:
starttime = self._testtime(starttime)
if self.ndarray[0].size > 0: # time column present
if maxidx > 0:
idx = (np.abs(self.ndarray[0][:maxidx]-date2num(starttime))).argmin()
else:
idx = (np.abs(self.ndarray[0]-date2num(starttime))).argmin()
# Trim should start at point >= starttime, so check:
if self.ndarray[0][idx] < date2num(starttime):
idx += 1
startindices = list(range(0,idx))
if endtime:
endtime = self._testtime(endtime)
if self.ndarray[0].size > 0: # time column present
#print "select timerange", maxidx
if maxidx > 0: # truncate the ndarray
#print maxidx
#tr = self.ndarray[0][:maxidx].astype(float)
                    idx = 1 + (np.abs(self.ndarray[0][:maxidx].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
else:
                    idx = 1 + (np.abs(self.ndarray[0].astype(float)-date2num(endtime))).argmin() # get the nearest index to endtime and add 1 (to get lengths correctly)
if idx >= len(self.ndarray[0]): ## prevent too large idx values
idx = len(self.ndarray[0]) # - 1
try: # using try so that this test is passed in case of idx == len(self.ndarray)
endnum = date2num(endtime)
#print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
if self.ndarray[0][idx] > endnum and self.ndarray[0][idx-1] < endnum:
# case 1: value at idx is larger, value at idx-1 is smaller -> use idx
pass
elif self.ndarray[0][idx] == endnum:
# case 2: value at idx is endnum -> use idx
pass
elif not self.ndarray[0][idx] <= endnum:
# case 3: value at idx-1 equals endnum -> use idx-1
idx -= 1
#print ("Value now b", idx, self.ndarray[0][idx], date2num(endtime))
#if not self.ndarray[0][idx] <= date2num(endtime):
# # Make sure that last value is either identical to endtime (if existing or one index larger)
# # This is important as from this index on, data is removed
# idx -= 1
# print ("Value now", idx, self.ndarray[0][idx], date2num(endtime))
# print ("Value now", idx, self.ndarray[0][idx+1], date2num(endtime))
except:
pass
endindices = list(range(idx,len(self.ndarray[0])))
indices = startindices + endindices
#t2 = datetime.utcnow()
#print "_select_timerange - getting t range needed:", t2-t1
if len(startindices) > 0:
st = startindices[-1]+1
else:
st = 0
if len(endindices) > 0:
ed = endindices[0]
else:
ed = len(self.ndarray[0])
for i in range(len(self.ndarray)):
ndarray[i] = self.ndarray[i][st:ed] ## This is the correct length
#t3 = datetime.utcnow()
#print "_select_timerange - deleting :", t3-t2
return np.asarray(ndarray,dtype=object)
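    # Hedged usage sketch: non-destructive trimming as used by write(); 'data' is a
    # hypothetical DataStream covering several days:
    # >>> nd = data._select_timerange(starttime='2018-06-01', endtime='2018-06-02')
    # >>> day = DataStream([LineStruct()], data.header, nd)
    # >>> day._find_t_limits()          # both limits now lie within the selected day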
# ------------------------------------------------------------------------
# C. Application methods
# (in alphabetical order)
# ------------------------------------------------------------------------
def aic_calc(self, key, **kwargs):
"""
DEFINITION:
Picking storm onsets using the Akaike Information Criterion (AIC) picker
- extract one dimensional array from DataStream (e.g. H) -> signal
- take the first k values of the signal and calculates variance and log
- plus the rest of the signal (variance and log)
NOTE: Best results come from evaluating two data series - one with original
data, one of same data with AIC timerange offset by timerange/2 to cover
any signals that may occur at the points between evaluations.
PARAMETERS:
Variables:
- key: (str) Key to check. Needs to be an element of KEYLIST.
Kwargs:
- timerange: (timedelta object) defines the length of the time window
examined by the aic iteration. (default: timedelta(hours=1).)
- aic2key: (str) defines the key of the column where to save the aic values
(default = var2).
- aicmin2key: (str) defines the key of the column where to save the aic minimum val
(default: key = var1.)
- aicminstack: (bool) if true, aicmin values are added to previously present column values.
RETURNS:
- self: (DataStream object) Stream with results in default var1 + var2 keys.
EXAMPLE:
>>> stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
APPLICATION:
from magpy.stream import read
stream = read(datapath)
stream = stream.aic_calc('x',timerange=timedelta(hours=0.5))
stream = stream.differentiate(keys=['var2'],put2keys=['var3'])
stream_filt = stream.extract('var1',200,'>')
            stream_new = stream_filt.eventlogger('var3',[30,40,60],'>',addcomment=True)
stream = mergeStreams(stream,stream_new,key='comment')
"""
timerange = kwargs.get('timerange')
aic2key = kwargs.get('aic2key')
aicmin2key = kwargs.get('aicmin2key')
aicminstack = kwargs.get('aicminstack')
if not timerange:
timerange = timedelta(hours=1)
if not aic2key:
aic2key = 'var2'
if not aicmin2key:
aicmin2key = 'var1'
t = self._get_column('time')
signal = self._get_column(key)
#Clear the projected results column
array = []
aic2ind = KEYLIST.index(aic2key)
self = self._clear_column(aic2key)
if len(self.ndarray[0]) > 0.:
self.ndarray[aic2ind] = np.empty((len(self.ndarray[0],)))
self.ndarray[aic2ind][:] = np.NAN
# get sampling interval for normalization - need seconds data to test that
sp = self.get_sampling_period()*24*60
        # correct approach
iprev = 0
iend = 0
while iend < len(t)-1:
istart = iprev
ta, iend = find_nearest(np.asarray(t), date2num(num2date(t[istart]).replace(tzinfo=None) + timerange))
if iend == istart:
iend += 60 # approx for minute files and 1 hour timedelta (used when no data available in time range) should be valid for any other time range as well
else:
currsequence = signal[istart:iend]
aicarray = []
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# CALCULATE AIC
aicval = self._aic(currsequence, idx)/timerange.seconds*3600 # *sp Normalize to sampling rate and timerange
if len(self.ndarray[0]) > 0:
self.ndarray[aic2ind][idx+istart] = aicval
else:
exec('self[idx+istart].'+ aic2key +' = aicval')
if not isnan(aicval):
aicarray.append(aicval)
# store start value - aic: is a measure for the significance of information change
#if idx == 2:
# aicstart = aicval
#self[idx+istart].var5 = aicstart-aicval
maxaic = np.max(aicarray)
# determine the relative amplitude as well
cnt = 0
for idx, el in enumerate(currsequence):
if idx > 1 and idx < len(currsequence):
# TODO: this does not yet work with ndarrays
try:
if aicminstack:
if not eval('isnan(self[idx+istart].'+aicmin2key+')'):
exec('self[idx+istart].'+ aicmin2key +' += (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
else:
exec('self[idx+istart].'+ aicmin2key +' = (-aicarray[cnt] + maxaic)')
exec('self[idx+istart].'+ aicmin2key +' = maxaic')
cnt = cnt+1
except:
msg = "number of counts does not fit usually because of nans"
iprev = iend
self.header['col-var2'] = 'aic'
return self
def baseline(self, absolutedata, **kwargs):
"""
DESCRIPTION:
calculates baseline correction for input stream (datastream)
Uses available baseline values from the provided absolute file
Special cases:
1) Absolute data covers the full time range of the stream:
-> Absolute data is extrapolated by duplicating the last and first entry at "extradays" offset
-> desired function is calculated
            2) No Absolute data for the end of the stream:
                -> like 1: Absolute data is extrapolated by duplicating the last entry at "extradays" offset or end of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg then a warning will be sent
            3) No Absolute data for the beginning of the stream:
                -> like 2: Absolute data is extrapolated by duplicating the first entry at "extradays" offset or beginning of stream
                -> an info message is created; if the time difference exceeds the "extradays" arg then a warning will be sent
VARIABLES:
required:
absolutedata (DataStream) containing DI basevalues - usually obtained by absolutes.absoluteAnalysis()
keywords:
plotbaseline (bool/string) will plot a baseline plot (to file if a valid path is provided,
otherwise to screen - requires mpplot)
extradays (int) days by which the absolute data is extended before start and after end time
##plotfilename (string) if plotbaseline is selected, the output plot is sent to this file
fitfunc (string) see fit
fitdegree (int) see fit
knotstep (int) see fit
keys (list) keys which contain the basevalues (default is ['dx','dy','dz'])
APPLICATION:
func = data.baseline(didata,knotstep=0.1,plotbaseline=True)
# fixed time range
func = data.baseline(didata,startabs='2015-02-01',endabs='2015-08-24',extradays=0)
OR:
funclist = []
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-01-01',endabs='2009-03-22'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='poly',
fitdegree=1,startabs='2009-03-22',endabs='2009-06-27'))
funclist.append(rawdata.baseline(basevalues, extradays=0, fitfunc='spline',
knotstep=0.2,startabs='2009-06-27',endabs='2010-02-01'))
stabilitytest (bool)
"""
keys = kwargs.get('keys')
fitfunc = kwargs.get('fitfunc')
fitdegree = kwargs.get('fitdegree')
knotstep = kwargs.get('knotstep')
extradays = kwargs.get('extradays',15)
plotbaseline = kwargs.get('plotbaseline')
plotfilename = kwargs.get('plotfilename')
startabs = kwargs.get('startabs')
endabs = kwargs.get('endabs')
orgstartabs = None
orgendabs = None
#if not extradays:
# extradays = 15
if not fitfunc:
fitfunc = self.header.get('DataAbsFunc')
if not fitfunc:
fitfunc = 'spline'
if not fitdegree:
fitdegree = self.header.get('DataAbsDegree')
if not fitdegree:
fitdegree = 5
if not knotstep:
knotstep = self.header.get('DataAbsKnots')
if not knotstep:
knotstep = 0.3
if not keys:
keys = ['dx','dy','dz']
if len(self.ndarray[0]) > 0:
ndtype = True
starttime = np.min(self.ndarray[0])
endtime = np.max(self.ndarray[0])
else:
starttime = self[0].time
endtime = self[-1].time
fixstart,fixend = False,False
if startabs:
startabs = date2num(self._testtime(startabs))
orgstartabs = startabs
fixstart = True
if endabs:
endabs = date2num(self._testtime(endabs))
orgendabs = endabs
fixend = True
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataAcquisitionReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
#self.header['DataAbsFunc'] = fitfunc
#self.header['DataAbsDegree'] = fitdegree
#self.header['DataAbsKnots'] = knotstep
#self.header['DataAbsDate'] = datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S')
usestepinbetween = False # for better extrapolation
logger.info(' --- Start baseline-correction at %s' % str(datetime.now()))
absolutestream = absolutedata.copy()
#print("Baseline", absolutestream.length())
absolutestream = absolutestream.remove_flagged()
#print("Baseline", absolutestream.length())
#print("Baseline", absolutestream.ndarray[0])
absndtype = False
if len(absolutestream.ndarray[0]) > 0:
#print ("HERE1: adopting time range absolutes - before {} {}".format(startabs, endabs))
absolutestream.ndarray[0] = absolutestream.ndarray[0].astype(float)
absndtype = True
if not np.min(absolutestream.ndarray[0]) < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream.ndarray[0]
if not startabs or startabs < np.min(absolutestream.ndarray[0]):
startabs = np.min(absolutestream.ndarray[0])
if not endabs or endabs > np.max(absolutestream.ndarray[0]):
endabs = np.max(absolutestream.ndarray[0])
else:
# 1) test whether absolutes are in the selected absolute data stream
if absolutestream[0].time == 0 or isnan(absolutestream[0].time):
raise ValueError ("Baseline: Input stream needs to contain absolute data ")
# 2) check whether enddate is within abs time range or larger:
if not absolutestream[0].time-1 < endtime:
logger.warning("Baseline: Last measurement prior to beginning of absolute measurements ")
abst = absolutestream._get_column('time')
startabs = absolutestream[0].time
endabs = absolutestream[-1].time
# Initialize orgstartabs and orgendabs if not yet provided: orgabs values will be added to DataAbsInfo
if not orgstartabs:
orgstartabs = startabs
if not orgendabs:
orgendabs = endabs
#print ("HERE2a: Time range absolutes - {} {} {} {}".format(startabs, endabs, num2date(startabs), num2date(endabs)))
#print ("HERE2b: Time range datastream - {} {}".format(starttime, endtime))
# 3) check time ranges of stream and absolute values:
if startabs > starttime:
#print ('HERE2c: First absolute value measured after beginning of stream')
#logger.warning('Baseline: First absolute value measured after beginning of stream - duplicating first abs value at beginning of time series')
#if fixstart:
#
#absolutestream.add(absolutestream[0])
#absolutestream[-1].time = starttime
#absolutestream.sorting()
logger.info('Baseline: %d days without absolutes at the beginning of the stream' % int(np.floor(np.min(abst)-starttime)))
if endabs < endtime:
logger.info("Baseline: Last absolute measurement before end of stream - extrapolating baseline")
if num2date(endabs).replace(tzinfo=None) + timedelta(days=extradays) < num2date(endtime).replace(tzinfo=None):
usestepinbetween = True
if not fixend:
logger.warning("Baseline: Well... thats an adventurous extrapolation, but as you wish...")
starttime = num2date(starttime).replace(tzinfo=None)
endtime = num2date(endtime).replace(tzinfo=None)
# 4) get standard time range of one year and extradays at start and end
# test whether absstream covers this time range including extradays
# ###########
# get boundaries
# ###########
extrapolate = False
# upper
if fixend:
#absolutestream = absolutestream.trim(endtime=endabs) # should I trim here already - leon ??
# time range long enough
baseendtime = endabs+extradays
if baseendtime < orgendabs:
baseendtime = orgendabs
extrapolate = True
else:
baseendtime = date2num(endtime+timedelta(days=1))
extrapolate = True
#if endabs >= date2num(endtime)+extradays:
# # time range long enough
# baseendtime = date2num(endtime)+extradays
# lower
if fixstart:
#absolutestream = absolutestream.trim(starttime=startabs) # should I trim here already - leon ??
basestarttime = startabs-extradays
if basestarttime > orgstartabs:
basestarttime = orgstartabs
extrapolate = True
else:
# not long enough
#basestarttime = date2num(starttime)
basestarttime = startabs-extradays
extrapolate = True
if baseendtime - (366.+2*extradays) > startabs:
# time range long enough
basestarttime = baseendtime-(366.+2*extradays)
baseendtime = num2date(baseendtime).replace(tzinfo=None)
basestarttime = num2date(basestarttime).replace(tzinfo=None)
#print ("HERE3a: basestart and end", basestarttime, baseendtime)
# Don't use trim here
#bas = absolutestream.trim(starttime=basestarttime,endtime=baseendtime)
basarray = absolutestream._select_timerange(starttime=basestarttime,endtime=baseendtime)
bas = DataStream([LineStruct()],absolutestream.header,basarray)
#print ("HERE3b: length of selected absolutes: ", bas.length()[0])
if extrapolate: # and not extradays == 0:
bas = bas.extrapolate(basestarttime,baseendtime)
#keys = ['dx','dy','dz']
try:
print ("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
print (keys, fitfunc, fitdegree, knotstep)
logger.info("Fitting Baseline between: {a} and {b}".format(a=str(num2date(np.min(bas.ndarray[0]))),b=str(num2date(np.max(bas.ndarray[0])))))
#print ("Baseline", bas.length(), keys)
#for elem in bas.ndarray:
# print elem
func = bas.fit(keys,fitfunc=fitfunc,fitdegree=fitdegree,knotstep=knotstep)
except:
print ("Baseline: Error when determining fit - Enough data point to satisfy fit complexity?")
logger.error("Baseline: Error when determining fit - Not enough data point to satisfy fit complexity? N = {}".format(bas.length()))
return None
#if len(keys) == 3:
# ix = KEYLIST.index(keys[0])
# iy = KEYLIST.index(keys[1])
# iz = KEYLIST.index(keys[2])
# get the function in some readable equation
#self.header['DataAbsDataT'] = [bas.ndarray[0],bas.ndarray[ix],bas.ndarray[iy],bas.ndarray[iz]]
if plotbaseline:
#check whether plotbaseline is valid path or bool
try:
try:
import magpy.mpplot as mp
except ImportError:
print ("baseline: Could not load package mpplot")
if plotfilename:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data',outfile=plotfilename)
else:
mp.plot(bas,variables=['dx','dy','dz'],padding = [5,0.005,5], symbollist = ['o','o','o'],function=func,plottitle='Absolute data')
except:
print("using the internal plotting routine requires mpplot to be imported as mp")
keystr = '_'.join(keys)
pierlong = absolutedata.header.get('DataAcquisitionLongitude','')
pierlat = absolutedata.header.get('DataAcquisitionLatitude','')
pierel = absolutedata.header.get('DataElevation','')
pierlocref = absolutedata.header.get('DataLocationReference','')
pierelref = absolutedata.header.get('DataElevationRef','')
if not pierlong == '' and not pierlat == '' and not pierel == '':
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr,pierlong,pierlat,pierlocref,pierel,pierelref]))
else:
absinfostring = '_'.join(map(str,[orgstartabs,orgendabs,extradays,fitfunc,fitdegree,knotstep,keystr]))
existingabsinfo = self.header.get('DataAbsInfo','').replace(', EPSG',' EPSG').split(',')
if not existingabsinfo[0] == '':
existingabsinfo.append(absinfostring)
else:
existingabsinfo = [absinfostring]
# Get minimum and maximum times out of existing absinfostream
minstarttime=100000000.0
maxendtime=0.0
for el in existingabsinfo:
ele = el.split('_')
mintime = float(ele[0])
maxtime = float(ele[1])
if minstarttime > mintime:
minstarttime = mintime
if maxendtime < maxtime:
maxendtime = maxtime
exabsstring = ','.join(existingabsinfo)
self.header['DataAbsInfo'] = exabsstring # 735582.0_735978.0_0_spline_5_0.3_dx_dy_dz
#print ("HERE5a:", minstarttime, maxendtime, absolutestream.length()[0])
bas2save = absolutestream.trim(starttime=minstarttime,endtime=maxendtime)
tmpdict = bas2save.stream2dict()
#print ("HERE5b:", bas2save.length()[0])
self.header['DataBaseValues'] = tmpdict['DataBaseValues']
# Get column heads of dx,dy and dz
# default is H-base[nT],D-base[deg],Z-base[nT]
basecomp = "HDZ"
try:
basecomp = "{}{}{}".format(absolutestream.header.get('col-dx')[0],absolutestream.header.get('col-dy')[0],absolutestream.header.get('col-dz')[0])
except:
pass
if not basecomp == "HDZ":
print (" -> basevalues correspond to components {}".format(basecomp))
self.header['DataBaseComponents'] = basecomp
#self.header['DataAbsMinTime'] = func[1] #num2date(func[1]).replace(tzinfo=None)
#self.header['DataAbsMaxTime'] = func[2] #num2date(func[2]).replace(tzinfo=None)
#self.header['DataAbsFunctionObject'] = func
logger.info(' --- Finished baseline-correction at %s' % str(datetime.now()))
return func
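# Hedged usage sketch: baseline() returns the adopted-baseline fit function and stores
# its metadata in DataAbsInfo/DataBaseValues. In MagPy the correction is usually applied
# afterwards with the stream's baseline-correction method (assumed here to be bc()):
#   func = data.baseline(basevalues, extradays=0, fitfunc='poly', fitdegree=1)
#   corrected = data.bc()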
def stream2dict(self, keys=['dx','dy','dz'], dictkey='DataBaseValues'):
"""
DESCRIPTION:
Method to convert stream contents into a list and assign this to a dictionary.
You can use this method to directly store magnetic basevalues along with
data time series (e.g. using NasaCDF). Multilayer storage as supported by NetCDF
might provide better options to combine both data sets in one file.
PARAMETERS:
stream (DataStream) data containing e.g. basevalues
keys (list of keys) keys which are going to be stored
dictkey (string) name of the dictionaries key
RETURNS:
dict (dictionary) with name dictkey
APPLICATION:
>>> d = absdata.stream2dict(['dx','dy','dz'],'DataBaseValues')
>>> d = neicdata.stream2dict(['f','str3'],'Earthquakes')
"""
if not self.length()[0] > 0:
return {}
if not len(keys) > 0:
return {}
d = {}
keylst = ['time']
keylst.extend(keys)
array,headline,addline = [],[],[]
for key in keylst:
try:
pos = KEYLIST.index(key)
except ValueError:
pos = -1
if pos in range(0,len(KEYLIST)):
headline.append(key)
if not key == 'time':
addline.append(self.header.get('col-'+key))
else:
addline.append(self.header.get('DataID'))
column = self.ndarray[pos]
array.append(column)
rowlst = np.transpose( | np.asarray(array) | numpy.asarray |
import unittest
from collections import defaultdict
import numpy as np
import enstat.mean
class Test_mean(unittest.TestCase):
"""
tests
"""
def test_scalar(self):
"""
Basic test of "mean" and "std" using a random sample.
"""
average = enstat.scalar()
average.add_sample(np.array(1.0))
self.assertFalse(np.isnan(average.mean()))
self.assertTrue(np.isnan(average.std()))
average.add_sample(np.array(1.0))
self.assertFalse(np.isnan(average.mean()))
self.assertFalse(np.isnan(average.std()))
def test_scalar_division(self):
"""
Check for zero division.
"""
average = enstat.scalar()
a = np.random.random(50 * 20).reshape(50, 20)
for i in range(a.shape[0]):
average.add_sample(a[i, :])
self.assertTrue(np.isclose(average.mean(), np.mean(a)))
self.assertTrue(np.isclose(average.std(), np.std(a), rtol=1e-3))
def test_static(self):
"""
Basic test of "mean" and "std" using a random sample.
"""
average = enstat.static()
a = np.random.random(35 * 50 * 20).reshape(35, 50, 20)
for i in range(a.shape[0]):
average.add_sample(a[i, :, :])
self.assertTrue(np.allclose(average.mean(), np.mean(a, axis=0)))
self.assertTrue(np.allclose(average.std(), np.std(a, axis=0), rtol=5e-1, atol=1e-3))
self.assertTrue(average.shape() == a.shape[1:])
self.assertTrue(average.size() == np.prod(a.shape[1:]))
def test_static_ravel(self):
"""
Like :py:func:`test_static` but with a test of `ravel`.
"""
arraylike = enstat.static()
scalar = enstat.scalar()
a = np.random.random(35 * 50 * 20).reshape(35, 50, 20)
for i in range(a.shape[0]):
arraylike.add_sample(a[i, :, :])
scalar.add_sample(a[i, :, :])
flat = arraylike.ravel()
self.assertTrue(np.allclose(flat.mean(), np.mean(a)))
self.assertTrue(np.allclose(flat.std(), np.std(a), rtol=5e-1, atol=1e-3))
self.assertTrue(np.allclose(flat.mean(), scalar.mean()))
self.assertTrue(np.allclose(flat.std(), scalar.std(), rtol=5e-1, atol=1e-3))
def test_static_division(self):
"""
Check for zero division.
"""
average = enstat.static()
average.add_sample(np.array([1.0]))
self.assertFalse(np.isnan(average.mean()))
self.assertTrue(np.isnan(average.std()))
average.add_sample(np.array([1.0]))
self.assertFalse(np.isnan(average.mean()))
self.assertFalse(np.isnan(average.std()))
def test_static_mask(self):
average = enstat.static()
a = np.random.random(35 * 50 * 20).reshape(35, 50, 20)
m = np.random.random(35 * 50 * 20).reshape(35, 50, 20) > 0.8
for i in range(a.shape[0]):
average.add_sample(a[i, :, :], m[i, :, :])
self.assertTrue(
np.isclose(
np.sum(average.first()) / np.sum(average.norm()),
np.mean(a[np.logical_not(m)]),
)
)
self.assertTrue(
np.isclose(
np.sum(average.first()) / np.sum(average.norm()),
np.mean(a[np.logical_not(m)]),
)
)
self.assertTrue(np.all(np.equal(average.norm(), np.sum(np.logical_not(m), axis=0))))
def test_dynamic1d(self):
average = enstat.dynamic1d()
average.add_sample(np.array([1, 2, 3]))
average.add_sample(np.array([1, 2, 3]))
average.add_sample(np.array([1, 2]))
average.add_sample(np.array([1]))
self.assertTrue(np.allclose(average.mean(), np.array([1, 2, 3])))
self.assertTrue(np.allclose(average.std(), np.array([0, 0, 0])))
self.assertEqual(average.shape(), (3,))
self.assertEqual(average.size(), 3)
class Test_defaultdict(unittest.TestCase):
"""
functionality
"""
def test_scalar(self):
average = defaultdict(enstat.scalar)
a = np.random.random(50 * 20).reshape(50, 20)
b = np.random.random(52 * 21).reshape(52, 21)
for i in range(a.shape[0]):
average["a"].add_sample(a[i, :])
for i in range(b.shape[0]):
average["b"].add_sample(b[i, :])
self.assertTrue(np.isclose(average["a"].mean(), | np.mean(a) | numpy.mean |
import numbers
from scipy.stats import mode
import pandas as pd
import numpy as np
import datetime
from mlapp.utils.exceptions.framework_exceptions import UnsupportedFileType
class ClassificationFeatureEngineering(object):
def drop_features(self, data_df, features_to_drop=None):
"""
Dropping requested features
:param data_df: the DataFrame
:param features_to_drop: list of feature names to drop
:return: data_df after dropping requested features
"""
if not features_to_drop:
features_to_drop = []
original_columns = data_df.columns
filtered_columns_to_drop = list(filter(lambda x: x in original_columns, features_to_drop))
return data_df.drop(filtered_columns_to_drop, axis=1)
def bin_continuous_features(self, data_df, features_to_bin=None):
"""
Bin continuous features by the configuration in 'features_to_bin'
:param data_df: the DataFrame
:param features_to_bin: configuration of bin
example:
"features_to_bin":[
{"name": "feature_name_1", "bins": [5, 15]},
{"name": "feature_name_2", "bins": [15, 23]}
]
:return: the DataFrame with requested features transformed
"""
if not features_to_bin:
features_to_bin = []
for feature_to_bin in features_to_bin:
if feature_to_bin['name'] in data_df.columns:
full_bins = [data_df[feature_to_bin['name']].min() - 1] + feature_to_bin['bins'] + [data_df[feature_to_bin['name']].max() + 1]
data_df[feature_to_bin['name']] = pd.cut(
data_df[feature_to_bin['name']],
bins=full_bins,
labels=range(len(full_bins) - 1)).astype(float)
return data_df
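# Worked example (hypothetical column name) of the binning performed above:
#   fe = ClassificationFeatureEngineering()
#   df = pd.DataFrame({"age": [12, 34, 58]})
#   df = fe.bin_continuous_features(df, [{"name": "age", "bins": [18, 40]}])
#   # full_bins becomes [11, 18, 40, 59], so "age" is replaced by the codes 0.0, 1.0, 2.0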
def handle_y_variable(self, data_df, variable_to_predict, options):
"""
Transform variable to predict by options given in config
:param data_df: the DataFrame containing all features and variable to predict
:param variable_to_predict: the variable to predict columns name
:param options: options containing the configuration of the transformation for the variable to predict
example:
"y_variable": {
"type": "binary", # binary/multi/continuous - string
"categories_labels": ["LABEL_1", "LABEL_2"], # category labels - list
"continuous_to_category_bins": [-0.5, 0.5, 1.5], # bins values - list
"label_to_predict": ["LABEL_1"] # target label to predict - list
},
:return: 'data_df' - without the variable to predict, 'final_y' - the variable to predict after transformation
"""
# y variable configurations
y_df = data_df[variable_to_predict]
final_y = pd.DataFrame()
y_variable_type = options['type']
target_label = options['label_to_predict']
# y variable is binary OR one vs all
if y_variable_type == 'binary' or (y_variable_type == 'multi' and len(target_label) == 1):
y_dummies = pd.get_dummies(y_df)
final_y = y_dummies[target_label[0]]
# y variable is multi class
elif y_variable_type == 'multi' and len(target_label) < len(y_df.unique()):
final_y = y_df.apply(lambda x: x if x in target_label else "other")
# Example for indexing the labels
# labels_y = final_y.copy()
# for i in range(len(target_model)):
# labels_y = labels_y.apply(lambda x: i + 1 if x == target_model[i] else x)
# final_y = labels_y.apply(lambda x: 0 if not type(x)==int else x)
elif y_variable_type == 'continuous':
bins = options["continuous_to_category_bins"]
labels = options["categories_labels"]
final_y = pd.cut(y_df, bins=bins, labels=labels)
else:
final_y = y_df
data_df = data_df.drop(variable_to_predict, axis=1)
return data_df, final_y
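# Minimal sketch of the binary case handled above (hypothetical labels):
#   options = {"type": "binary", "label_to_predict": ["LABEL_1"]}
#   X, y = fe.handle_y_variable(df, "target", options)
#   # y is the 0/1 indicator column for LABEL_1 taken from pd.get_dummies(df["target"])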
def transform_and_split_features_to_categorical_and_continuous(self, data, dates_format=None, auto_bin_continuous_features=False, max_categories_num=10):
"""
Transforming DataFrame features by their value types
:param data: the DataFrame
:param dates_format: date formats expected in the DataFrame
:param auto_bin_continuous_features: whether to bin continuous features automatically
:param max_categories_num: max unique values in a feature before deciding to auto bin
:return: the DataFrame with transformed date columns, lists of features by their type, and binned features
"""
if dates_format is None:
dates_format = ["%d/%m/%Y", "%Y-%m-%d"]
data_types = data.dtypes
today = datetime.datetime.now()
continuous_columns = []
continuous_bins = {}
categorical_columns = []
binary_columns = []
for feature, curr_type in data_types.items():
mysql_type, date_format = self._convert_text_to_date_type(curr_type, feature, data, dates_format)
if mysql_type == "DATETIME": # converting features from datetime to time_passed_from_date
data[feature] = data[feature].apply(
lambda x: x if self._is_nan(x) else self._elapsed_time_from_date(x, today, date_format))
if auto_bin_continuous_features:
continuous_bins[feature] = np.sort(list(
{
min(data[feature]) - 1,
np.quantile(data[feature].dropna(), 0.2),
np.quantile(data[feature].dropna(), 0.4),
np.quantile(data[feature].dropna(), 0.6),
np.quantile(data[feature].dropna(), 0.8),
max(data[feature]) + 1
}))
else:
continuous_columns += [feature]
elif mysql_type == 'NUMERIC':
unique_values = data[feature].dropna().unique()
if len(unique_values) == 1:
data = data.drop(feature, axis=1)
elif len(unique_values) == 2:
binary_columns += [feature]
elif (2 < len(unique_values) <= max_categories_num) and auto_bin_continuous_features:
categorical_columns += [feature]
elif auto_bin_continuous_features:
continuous_bins[feature] = np.sort(list(
{
min(data[feature]) - 1,
np.quantile(data[feature].dropna(), 0.2),
np.quantile(data[feature].dropna(), 0.4),
np.quantile(data[feature].dropna(), 0.6),
np.quantile(data[feature].dropna(), 0.8),
max(data[feature]) + 1
}))
else:
continuous_columns += [feature]
else: # mysql_type == TEXT
categorical_columns += [feature]
return data, categorical_columns, continuous_columns, binary_columns, continuous_bins
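# Note on the auto-binning above: with auto_bin_continuous_features=True a numeric or
# date-derived column ends up in continuous_bins with the edge list
#   [min - 1, q20, q40, q60, q80, max + 1]
# built from its non-NaN quantiles, while columns with exactly 2 unique values go to
# binary_columns and those with up to max_categories_num values go to categorical_columns.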
def combine_categorical_features(self, data_df, evaluated_df, sep='_|_'):
"""
Combining categories for each feature
:param data_df: original DataFrame
:param evaluated_df: calculated evaluated DataFrame for each category for each feature
:param sep: separation string
:return: DataFrame with combined categories
"""
features_mapping = {}
results_df = pd.DataFrame()
groups = pd.DataFrame.groupby(evaluated_df, 'feature_original_name')
for feature_original_name, group in groups:
if group.shape[0] > 1:
# feature_dummies_df = pd.get_dummies(data_df[feature_original_name])
filtered_feature_dummies_df = data_df[group['feature']]
combined_feature = filtered_feature_dummies_df.sum(axis=1)
# preparing feature output name
categorical_values = group['feature'].apply(lambda x: x.replace(feature_original_name + "_", ""))
categorical_values = categorical_values.astype(data_df.columns.dtype)
feature_output_name = feature_original_name + "_"
for val in categorical_values:
feature_output_name += "_" + str(val)
# adds combined feature to results DataFrame
results_df[feature_output_name] = combined_feature
else:
# save features mappings
custom_feature_full_name = group['feature'].iloc[0]
_, new_feature_value = custom_feature_full_name.split(sep)
features_mapping[feature_original_name] = [{
"name": custom_feature_full_name,
"categories": [new_feature_value]
}]
results_df[group['feature']] = data_df[group['feature']]
return results_df, features_mapping
def fillna_features(self, data, features_handling, default_filling=0, missing_values=None):
"""
Feature handling with filling missing values strategies
:param data: DataFrame
:param features_handling: configuration of how to handle each feature
:return: updated DataFrame with the requested filling
"""
if missing_values is None:
    missing_values = {}
methods = {
"mean": lambda a: np.mean(a),
"median": lambda a: | np.median(a) | numpy.median |
import numpy as np
import colorsys
def get_colors(n):
hue = np.arange(0., 360., 360. / n) / 360
lightness = (50 + 10 * np.random.rand(n)) / 100.0  # completion assumed: HLS lightness in [0, 1]
# 2D electromagnetic PIC
import numpy as np
import py_platypus as pla
from py_platypus.models.pic_2d import PIC_2D as PIC_2D
from py_platypus.utils.charge_step import ChargeStep as ChargeStep
from py_platypus.utils.charge_step import ChargeStepDivider as ChargeStepDivider
from py_platypus.utils import math_utils, constants
import matplotlib.pyplot as plt
class PIC_2D_EM(PIC_2D):
def __init__(self, params):
super(PIC_2D_EM, self).__init__(params)
# magnetic field on cell corners
self.bz = np.zeros(self.cells)
# value to update magnetic field each half step
self.delta_bz = np.zeros(self.cells)
self.jx = np.zeros(self.cells) # current density in positive x direction
self.jy = np.zeros(self.cells) # current density in positive y direction
# vertical cell boundaries holding Ex values
self.ex_edges = np.zeros(self.cells)
# horizontal cell boundaries holding Ey values
self.ey_edges = np.zeros(self.cells)
# variables pointing to the current and previous electron positions
self.electron_x_last = np.zeros(self.n_particles)
self.electron_y_last = np.zeros(self.n_particles)
# electric and magnetic field interpolated to each particle
# (ex, ey)
self.e_particle = np.zeros((self.n_particles, self.dimensions))
self.b_particle = | np.zeros(self.n_particles) | numpy.zeros |
import os
import pandas as pd
import numpy as np
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Convolution2D
from keras.layers import Flatten, Dense, Dropout,BatchNormalization, Activation, Lambda
from keras.applications import InceptionV3
from keras.regularizers import l2
from keras.layers import Input, Concatenate, concatenate
import keras.backend as K
import tensorflow as tf
from keras.models import Model,load_model
from keras.callbacks import ReduceLROnPlateau
from keras.utils import plot_model,np_utils
from keras import regularizers
from pprint import pprint
import cv2
DATA_FORMAT='channels_last' # Theano:'channels_first' Tensorflow:'channels_last'
WEIGHT_DECAY=0.0005
LRN2D_NORM=False
USE_BN=True
IM_WIDTH=299
IM_HEIGHT=299
batch_num = 16
#inception_weights = 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
#normalization
def conv2D_lrn2d(x,filters,kernel_size,strides=(1,1),padding='same',data_format=DATA_FORMAT,dilation_rate=(1,1),activation='relu',use_bias=True,kernel_initializer='glorot_uniform',bias_initializer='zeros',kernel_regularizer=None,bias_regularizer=None,activity_regularizer=None,kernel_constraint=None,bias_constraint=None,lrn2d_norm=LRN2D_NORM,weight_decay=WEIGHT_DECAY,name=None):
#l2 normalization
if weight_decay:
kernel_regularizer=regularizers.l2(weight_decay)
bias_regularizer=regularizers.l2(weight_decay)
else:
kernel_regularizer=None
bias_regularizer=None
x=Conv2D(filters=filters,kernel_size=kernel_size,strides=strides,padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint,name=name)(x)
if lrn2d_norm:
#batch normalization
x=BatchNormalization()(x)
return x
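# Hedged usage sketch (hypothetical tensor): a 3x3/64 convolution built with the helper
# above, with weight decay and the optional batch-norm branch enabled.
#   inp = Input(shape=(IM_HEIGHT, IM_WIDTH, 3))
#   x = conv2D_lrn2d(inp, filters=64, kernel_size=(3, 3), lrn2d_norm=True)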
def inception_module(x,params,concat_axis,padding='same',data_format=DATA_FORMAT,dilation_rate=(1,1),activation='relu',use_bias=True,kernel_initializer='glorot_uniform',bias_initializer='zeros',kernel_regularizer=None,bias_regularizer=None,activity_regularizer=None,kernel_constraint=None,bias_constraint=None,weight_decay=None):
(branch1,branch2,branch3,branch4)=params
if weight_decay:
kernel_regularizer=regularizers.l2(weight_decay)
bias_regularizer=regularizers.l2(weight_decay)
else:
kernel_regularizer=None
bias_regularizer=None
#1x1
if branch1[1]>0:
pathway1=Conv2D(filters=branch1[1],kernel_size=(1,1),strides=branch1[0],padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(x)
#1x1->3x3
pathway2=Conv2D(filters=branch2[0],kernel_size=(1,1),strides=1,padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(x)
pathway2=Conv2D(filters=branch2[1],kernel_size=(3,3),strides=branch1[0],padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(pathway2)
#1x1->5x5
pathway3=Conv2D(filters=branch3[0],kernel_size=(1,1),strides=1,padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(x)
pathway3=Conv2D(filters=branch3[1],kernel_size=(5,5),strides=branch1[0],padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(pathway3)
#3x3->1x1
pathway4=MaxPooling2D(pool_size=(3,3),strides=branch1[0],padding=padding,data_format=DATA_FORMAT)(x)
if branch4[0]>0:
pathway4=Conv2D(filters=branch4[0],kernel_size=(1,1),strides=1,padding=padding,data_format=data_format,dilation_rate=dilation_rate,activation=activation,use_bias=use_bias,kernel_initializer=kernel_initializer,bias_initializer=bias_initializer,kernel_regularizer=kernel_regularizer,bias_regularizer=bias_regularizer,activity_regularizer=activity_regularizer,kernel_constraint=kernel_constraint,bias_constraint=bias_constraint)(pathway4)
if branch1[1]>0:
return concatenate([pathway1,pathway2,pathway3,pathway4],axis=concat_axis)
else:
return concatenate([pathway2, pathway3, pathway4], axis=concat_axis)
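# Params layout assumed by inception_module (inferred from the branch unpacking above):
#   branch1 = (stride, n_1x1)        # 1x1 path, skipped when n_1x1 == 0
#   branch2 = (n_3x3_reduce, n_3x3)  # 1x1 -> 3x3 path
#   branch3 = (n_5x5_reduce, n_5x5)  # 1x1 -> 5x5 path
#   branch4 = (n_pool_proj,)         # 3x3 max-pool -> optional 1x1 projection
# e.g. x = inception_module(x, params=((1, 64), (96, 128), (16, 32), (32,)), concat_axis=CONCAT_AXIS)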
def conv_block(input, nb_filter, dropout_rate=None, weight_decay=1E-4):
x = Activation('relu')(input)
x = Convolution2D(nb_filter, (3, 3), kernel_initializer="glorot_uniform", padding="same", use_bias=False,
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate is not None:
x = Dropout(dropout_rate)(x)
return x
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
concat_axis = 1 if K.image_dim_ordering() == "th" else -1
feature_list = [x]
for i in range(nb_layers):
x = conv_block(x, growth_rate, dropout_rate, weight_decay)
feature_list.append(x)
x = Concatenate(axis=concat_axis)(feature_list)
nb_filter += growth_rate
return x, nb_filter
def l2_norm(x):
x = x**2
x = K.sum(x, axis=1)
x = K.sqrt(x)
return x
def triplet_loss(y_true, y_pred):
batch = batch_num
ref1 = y_pred[0:batch,:]
pos1 = y_pred[batch:batch+batch,:]
neg1 = y_pred[batch+batch:3*batch,:]
dis_pos = K.sum(K.square(ref1 - pos1), axis=1, keepdims=True)
dis_neg = K.sum(K.square(ref1 - neg1), axis=1, keepdims=True)
#dis_pos = K.sqrt(dis_pos)
#dis_neg = K.sqrt(dis_neg)
alpha = 0.2
d1 = K.maximum(0.0,(dis_pos-dis_neg)+alpha)
d2 = K.maximum(0.0,(dis_pos-dis_neg)+alpha)
d = d1 + d2
return K.mean(d)
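# Hedged sketch: triplet_loss above assumes anchors, positives and negatives are stacked
# along the batch axis in three consecutive blocks of batch_num samples each.
# _stack_triplet_batch is a hypothetical helper illustrating that layout.
def _stack_triplet_batch(anchors, positives, negatives):
    # each argument: np.ndarray of shape (batch_num, IM_HEIGHT, IM_WIDTH, 3)
    return np.concatenate([anchors, positives, negatives], axis=0)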
def create_model():
#Data format:tensorflow,channels_last;theano,channels_last
if DATA_FORMAT=='channels_first':
INP_SHAPE=(3,299,299)
img_input=Input(shape=INP_SHAPE)
CONCAT_AXIS=1
elif DATA_FORMAT=='channels_last':
INP_SHAPE=(299,299,3)
img_input=Input(shape=INP_SHAPE)
CONCAT_AXIS=3
else:
raise Exception('Invalid Dim Ordering')
base_model = InceptionV3(weights='imagenet', include_top=False)
base_model.summary()
for layer in base_model.layers:
layer.trainable = False
x = base_model.get_layer('mixed7').output
x = Convolution2D(512, (1, 1), kernel_initializer="glorot_uniform", padding="same", name="DenseNet_initial_conv2D", use_bias=False,
kernel_regularizer=l2(WEIGHT_DECAY))(x)
x = BatchNormalization()(x)
x, nb_filter = dense_block(x, 5, 512, growth_rate=64,dropout_rate=0.5)
x = AveragePooling2D(pool_size=(7, 7), strides=1, padding='valid', data_format=DATA_FORMAT)(x)
x = Dense(512, activation='relu')(x)
#x = Dropout(0.5)(x)
x = Dense(16)(x)
x = Lambda(lambda x:tf.nn.l2_normalize(x))(x)
model = Model(inputs=base_model.input, outputs=x)
return model
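# Hedged usage sketch: wiring the embedding model to the custom loss defined above.
#   model = create_model()
#   model.compile(optimizer='adam', loss=triplet_loss)
#   # y_true is unused by triplet_loss, so dummy targets of shape (3 * batch_num, 1) suffice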
def load_triplet_images(csvpath,target_size):
data = pd.read_csv(csvpath,error_bad_lines=False)
trainX = []
print(data)
trainX1 = []
trainX2 = []
trainX3 = []
for i in range(0,int(target_size/3)):
mode = data.iloc[i, 5]
#print(mode)
img1 = cv2.imread(data.iloc[i, 1])
img2 = cv2.imread(data.iloc[i, 2])
img3 = cv2.imread(data.iloc[i, 3])
#print(img1)
if img1 is None or img2 is None or img3 is None:
continue
if mode == 1:
trainX1.append(np.array(img2))
trainX2.append(np.array(img3))
trainX3.append(np.array(img1))
elif mode == 2:
trainX1.append(np.array(img3))
trainX2.append(np.array(img1))
trainX3.append(np.array(img2))
elif mode == 3:
trainX1.append(np.array(img1))
trainX2.append(np.array(img2))
trainX3.append( | np.array(img3) | numpy.array |
"""
CNN model for raw audio classification
Model contributed by: MITRE Corporation
Adapted from: https://github.com/mravanelli/SincNet
"""
import logging
from art.classifiers import PyTorchClassifier
import numpy as np
import torch
from torch import nn
# Load model from MITRE external repo: https://github.com/hkakitani/SincNet
# This needs to be defined in your config's `external_github_repo` field to be
# downloaded and placed on the PYTHONPATH
from SincNet import dnn_models
logger = logging.getLogger(__name__)
# NOTE: Underlying dataset sample rate is 16 kHz. SincNet uses this SAMPLE_RATE to
# determine internal filter high cutoff frequency.
SAMPLE_RATE = 8000
WINDOW_STEP_SIZE = 375
WINDOW_LENGTH = int(SAMPLE_RATE * WINDOW_STEP_SIZE / 1000)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def numpy_random_preprocessing_fn(batch):
"""
Standardize, then normalize sound clips
Then generate a random cut of the input
"""
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float32)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# make a pseudorandom cut of size equal to WINDOW_LENGTH
# (from SincNet's create_batches_rnd)
signal_length = len(signal)
np.random.seed(signal_length)
signal_start = int(
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch)
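# With the constants above (SAMPLE_RATE = 8000 Hz, WINDOW_STEP_SIZE = 375 ms),
# WINDOW_LENGTH = 8000 * 375 / 1000 = 3000 samples, so the array returned by
# numpy_random_preprocessing_fn has shape (len(batch), 3000).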
def numpy_all_preprocessing_fn(batch):
"""
Input is comprised of one or more clips, where each clip i
is given as an ndarray with shape (n_i,).
Preprocessing normalizes each clip and breaks each clip into an integer number
of non-overlapping segments of length WINDOW_LENGTH.
Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH)
"""
if len(batch) != 1:
raise NotImplementedError(
"Requires ART variable length input capability for batch size != 1"
)
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float64)
signal = signal / np.max(np.abs(signal))
# break into a number of chunks of equal length
num_chunks = int(len(signal) / WINDOW_LENGTH)
signal = signal[: num_chunks * WINDOW_LENGTH]
signal = np.reshape(signal, (num_chunks, WINDOW_LENGTH), order="C")
processed_batch.append(signal)
# remove outer batch (of size 1)
processed_batch = processed_batch[0]
return np.array(processed_batch)
def torch_random_preprocessing_fn(x):
"""
Standardize, then normalize sound clips
"""
if x.shape[0] != 1:
raise ValueError(f"Shape of batch x {x.shape[0]} != 1")
if x.dtype != torch.float32:
raise ValueError(f"dtype of batch x {x.dtype} != torch.float32")
if x.max() > 1.0:
raise ValueError(f"batch x max {x.max()} > 1.0")
if x.min() < -1.0:
raise ValueError(f"batch x min {x.min()} < -1.0")
x = x.squeeze(0)
# Signal normalization
x = x / x.abs().max()
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(x)
| np.random.seed(signal_length) | numpy.random.seed |
"""Equirectangular projection module."""
import numpy as np
from matplotlib.path import Path
from .__main__ import GroundProjection
class Equirectangular(GroundProjection):
"""Equirectangular projection object.
a.k.a. `Plate Carrée` and `Equidistant Cylindrical`.
Parameters
----------
lon_w_0: float, optional
Center west longitude.
lat_0: float, optional
Center latitude (North Pole by default).
lat_ts: float, optional
Latitude of true scale.
target: str or pyvims.planets.Planet
Planet name.
radius: float, optional
Planet radius [km]. Use the target mean radius if
the target is a `Planet` object.
Source
------
https://proj.org/operations/projections/eqc.html
https://github.com/proj4js/proj4js/blob/master/lib/projections/eqc.js
"""
DEFAULT_RADIUS_KM = 180e-3 / np.pi # Unitary degree representation
PROJ4 = 'eqc' # Proj4 projection key
def __init__(self, lon_w_0=180, lat_0=0, lat_ts=0, target=None, radius=None):
self.lon_w_0 = lon_w_0
self.lat_0 = lat_0
self.target = target
self.radius = radius
self.lat_ts = lat_ts
@property
def lat_ts(self):
"""Latitude of true scale [degree]."""
return self.__lat_ts
@lat_ts.setter
def lat_ts(self, value):
"""Set latitude of true scale value."""
self.__lat_ts = value
self.__rc = np.cos(np.radians(value))
self.__xc = np.pi * self.r * self.__rc
self.__yc = np.pi / 2 * self.r
@property
def rc(self):
"""Cosine of latitude of origin."""
return self.__rc
@property
def xc(self):
"""Projected x crossing meridian value."""
return self.__xc
@property
def yc(self):
"""Projected y pole value."""
return self.__yc
@property
def proj4(self):
"""Proj4 definition."""
return ' '.join([
f'+proj={self.PROJ4}',
f'+lat_0={self.lat_0}',
f'+lon_0={self.lon_0}',
f'+lat_ts={self.lat_ts}',
'+x_0=0',
'+y_0=0',
f'+a={self.r}',
f'+b={self.r}',
'+units=m',
'+no_defs',
])
@property
def wkt(self):
"""WKT definition."""
return (
f'PROJCS["PROJCS_{self.target}_{self}",'
f'GEOGCS["GCS_{self.target}",'
f'DATUM["D_{self.target}",'
f'SPHEROID["{self.target}_Mean_Sphere", {int(self.r)}, 0]],'
'PRIMEM["Greenwich",0],'
'UNIT["Degree",0.017453292519943295]],'
f'PROJECTION["{self}"],'
'PARAMETER["false_easting", 0],'
'PARAMETER["false_northing", 0],'
f'PARAMETER["standard_parallel_1", {self.lat_ts}],'
f'PARAMETER["central_meridian", {self.lon_0}],'
f'PARAMETER["latitude_of_origin", {self.lat_0}],'
'UNIT["Meter", 1]]'
)
def xy(self, lon_w, lat):
"""Convert latitude/longitude coordinates in map coordinates.
Parameters
----------
lon_w: float or array
West longitude [degree].
lat: float or array
Latitude [degree].
Returns
-------
float or array, float or array
X-Y map coordinates.
"""
dlon = np.radians((np.subtract(self.lon_w_0, lon_w) + 180) % 360 - 180)
dlat = np.radians(np.subtract(lat, self.lat_0))
if np.ndim(lon_w) == 0 and (self.lon_w_0 - lon_w) == 180:
dlon = np.pi
elif np.ndim(lon_w) > 0:
dlon[np.equal(lon_w, self.lon_w_0 - 180)] = np.pi
x = self.r * dlon * self.rc
y = self.r * dlat
if np.ndim(x) == 0 and np.ndim(y) > 0:
x = np.broadcast_to(x, y.shape)
elif np.ndim(x) > 0 and np.ndim(y) == 0:
y = np.broadcast_to(y, x.shape)
return x, y
def lonlat(self, x, y):
"""Convert map coordinates in latitude/longitude coordinates.
Parameters
----------
x: float or array
X-coordinate on the map [m].
y: float or array
Y-coordinate on the map [m].
Returns
-------
float or array, float or array
West longitude and latitude [degree].
"""
lon_w = (-np.degrees(np.divide(x, self.r * self.rc)) - self.lon_w_0) % 360
lat = np.degrees(np.divide(y, self.r)) + self.lat_0
if np.ndim(x) == 0 and np.abs(lon_w - 360) < 1e-5:
lon_w = 0
elif np.ndim(x) > 0:
lon_w[np.abs(lon_w - 360) < 1e-5] = 0
if np.ndim(lon_w) == 0 and np.ndim(lat) > 0:
lon_w = np.broadcast_to(lon_w, lat.shape)
elif np.ndim(lon_w) > 0 and np.ndim(y) == 0:
lat = np.broadcast_to(lat, lon_w.shape)
return lon_w, lat
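# Round-trip sketch for the default projection (lon_w_0=180, lat_0=0, lat_ts=0):
# with the unitary DEFAULT_RADIUS_KM the map coordinates equal degrees, assuming the
# parent class exposes the radius in metres as self.r.
#   proj = Equirectangular()
#   x, y = proj.xy(90, 45)    # ~ (90.0, 45.0)
#   proj.lonlat(x, y)         # ~ (90.0, 45.0)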
def xy_path(self, path):
"""Convert path vertices in map coordinates.
Parameters
----------
path: matplotlib.path.Path
Matplotlib path in west-longitude and latitude coordinates.
Returns
-------
matplotlib.path.Path
Path in map coordinates.
Raises
------
ValueError
If the polygon cross more than 2 times the anti-meridian.
"""
if path is None:
return None
vertices, codes = self._vc(path)
x, y = vertices.T
cross = np.abs(x[1:] - x[:-1]) > self.xc # [i + 1] - [i]
n_cross = np.sum(cross)
if n_cross == 1:
vertices, codes = self._cross_pole(x, y, cross)
elif n_cross == 2:
vertices, codes = self._cross_antimeridian(x, y)
elif n_cross > 2:
raise ValueError('Path vertices cross more than 2 time the anti-meridian.')
return Path(vertices, codes)
def _cross_pole(self, x, y, cross):
"""Redraw vertices path around the North/South Pole.
Parameters
----------
x: [float]
Map x coordinate.
y: [float]
Map y coordinate.
cross: [bool]
Bool list if the vertices crossed the anti-meridian.
Returns
-------
matplotlib.path.Path
New vertice surrounding the pole.
"""
pole = self.yc if y[np.argmax(np.abs(y))] >= 0 else -self.yc
verts = [[x[0], y[0]]]
for i in range(len(cross)):
if cross[i]:
if x[i] > 0:
_x1, _x2 = self.xc, -self.xc # Right cross
_f = (self.xc - x[i]) / (x[i + 1] + 2 * self.xc - x[i])
else:
_x1, _x2 = -self.xc, self.xc # Left cross
_f = (self.xc + x[i]) / (x[i] - x[i + 1] + 2 * self.xc)
_y = (y[i + 1] - y[i]) * _f + y[i]
verts.append([_x1, _y])
verts.append([_x1, pole])
verts.append([_x2, pole])
verts.append([_x2, _y])
verts.append([x[i + 1], y[i + 1]])
codes = [Path.MOVETO] + [Path.LINETO] * (len(verts) - 2) + [Path.CLOSEPOLY]
return verts, codes
def _cross_antimeridian(self, x, y):
"""Redraw vertices path around the anti-meridian.
Parameters
----------
x: [float]
Map x coordinate.
y: [float]
Map y coordinate.
Returns
-------
matplotlib.path.Path
New vertice (in 2 pieces) splitted by the anti-meridian.
"""
npt = len(x) - 1
# Right polygon
rv = []
_xr = x % (2 * self.xc)
for i in range(npt):
if _xr[i] <= self.xc:
rv.append([_xr[i], y[i]])
if (_xr[i] <= self.xc and _xr[i + 1] > self.xc) \
or (_xr[i] > self.xc and _xr[i + 1] <= self.xc):
_f = (self.xc - _xr[i]) / (_xr[i + 1] - _xr[i])
_y = (y[i + 1] - y[i]) * _f + y[i]
rv.append([self.xc, _y])
rv.append(rv[0])
# Left polygon
lv = []
_xl = _xr - 2 * self.xc
for i in range(npt):
if _xl[i] >= -self.xc:
lv.append([_xl[i], y[i]])
if (_xl[i] >= -self.xc and _xl[i + 1] < -self.xc) \
or (_xl[i] < -self.xc and _xl[i + 1] >= -self.xc):
_f = (-self.xc - _xl[i]) / (_xl[i + 1] - _xl[i])
_y = (y[i + 1] - y[i]) * _f + y[i]
lv.append([-self.xc, _y])
lv.append(lv[0])
# Create codes
codes = ([Path.MOVETO] + [Path.LINETO] * (len(lv) - 2) + [Path.CLOSEPOLY]
+ [Path.MOVETO] + [Path.LINETO] * (len(rv) - 2) + [Path.CLOSEPOLY])
return | np.vstack([lv, rv]) | numpy.vstack |
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
from numpy.core.numeric import identity
import pytest
from onnx_graphsurgeon.ir.graph import Graph
from onnx_graphsurgeon.ir.node import Node
from onnx_graphsurgeon.ir.tensor import Constant, Variable
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.util.exception import OnnxGraphSurgeonException
from onnx_graphsurgeon.util.misc import SynchronizedList
G_LOGGER.severity = G_LOGGER.ULTRA_VERBOSE
@Graph.register()
def shape(self, inp):
return self.layer(op="Shape", inputs=[inp], outputs=["shape_out"])[0]
@Graph.register()
def constant(self, values):
return self.layer(op="Constant", inputs=[], outputs=["constant_out"], attrs={"value": Constant("values", values)})[0]
@Graph.register()
def identity(self, inp):
out = self.layer(op="Identity", inputs=[inp], outputs=["identity_out"])[0]
out.dtype = inp.dtype
return out
@Graph.register()
def add(self, a, b, name=None):
outputs = [Variable(name=name)] if name else ["add_out"]
out = self.layer(op="Add", inputs=[a, b], outputs=outputs)[0]
out.dtype = a.dtype or b.dtype
return out
# A fake op that can be used to ensure things work even when there is an invalid
# node present in the model.
@Graph.register()
def fake(self, inp, name=None):
outputs = [Variable(name=name)] if name else ["fake_out"]
out = self.layer(op="Fake", inputs=[inp], outputs=outputs)[0]
out.dtype = inp.dtype
return out
# Generates a graph where an outer node has no outputs except
# within the subgraph. ONNX-GS should recognize that the node
# is being used, and should not remove it during cleanup().
@pytest.fixture
def nested_graph():
inp = Variable("input")
id_out = Variable("id_out")
identity = Node(op="Identity", inputs=[inp], outputs=[id_out])
# Subgraph outputs come from the parent node, but nodes in the subgraph
# can use nodes from the outer graphs too.
subgraph_inputs = [Variable("subgraph_inp")]
subgraph_id_out = Variable("subgraph_id_out")
subgraph_outputs = [Variable("subgraph_out")]
subgraph_identity0 = Node(op="Identity", inputs=[id_out], outputs=[subgraph_id_out])
subgraph_identity1 = Node(op="Identity", inputs=[subgraph_id_out], outputs=subgraph_outputs)
subgraph = Graph(nodes=[subgraph_identity0, subgraph_identity1],
inputs=subgraph_inputs, outputs=subgraph_outputs)
nested_out = Variable("nested_out")
nested_node = Node(op="Nested", attrs={"body": subgraph}, inputs=[inp], outputs=[nested_out])
yield Graph(nodes=[identity, nested_node], inputs=[inp], outputs=[nested_out])
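# Structure of the nested_graph fixture above (same diagram convention as the toposort
# fixtures further down):
#   input -> Identity -> id_out -> Nested(body=subgraph) -> nested_out
#   subgraph: id_out -> Identity -> subgraph_id_out -> Identity -> subgraph_out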
class TestBasic(object):
def test_generate_name(self):
graph = Graph()
names = set()
num_names = 100
# This function should not return the same name more than once
for idx in range(num_names):
names.add(graph._generate_name("name"))
assert len(names) == 100
class TestRegister(object):
def test_register(self):
@Graph.register()
def fake_add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])
graph = Graph()
[output] = graph.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
def test_register_opset(self):
@Graph.register(opsets=[11])
def fake_add(self, a, b):
return self.layer(op="Add", inputs=[a, b], outputs=["add_out"])
@Graph.register(opsets=[10])
def fake_add(self, a, b):
return self.layer(op="Add-10", inputs=[a, b], outputs=["add_out"])
graph = Graph()
[output] = graph.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
graph_opset10 = Graph(opset=10)
[output] = graph_opset10.fake_add("a", "b")
assert "add_out" in output.name
assert len(graph_opset10.nodes) == 1
assert graph_opset10.nodes[-1].op == "Add-10"
class TestLayer(object):
def test_layer_with_attrs(self):
graph = Graph()
outputs = graph.layer(op="Add", name="node", attrs={"fake_attr": 0})
assert len(graph.nodes) == 1
assert graph.nodes[-1].op == "Add"
assert graph.nodes[-1].name == "node"
assert graph.nodes[-1].attrs["fake_attr"] == 0
def test_layer_with_tensors(self):
x0 = Variable("x0")
x1 = Variable("x1")
y0 = Variable("y0")
y1 = Variable("y1")
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert outputs == [y0, y1]
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs == [x0, x1]
assert graph.nodes[-1].outputs == outputs
def test_layer_with_strings(self):
x0 = "x0"
x1 = "x1"
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert len(graph.nodes) == 1
assert all(prefix in tensor.name for prefix, tensor in zip([x0, x1], graph.nodes[-1].inputs))
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert graph.nodes[-1].outputs == outputs
def test_layer_with_arrays(self):
x0 = np.array([1])
x1 = np.array([1])
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs[0].values == x0
assert graph.nodes[-1].inputs[1].values == x1
assert graph.nodes[-1].outputs == outputs
def test_layer_with_iterables(self):
x0 = [1]
x1 = (1, )
y0 = "y0"
y1 = "y1"
graph = Graph()
outputs = graph.layer(op="Fake", inputs=[x0, x1], outputs=[y0, y1])
assert all(prefix in tensor.name for prefix, tensor in zip([y0, y1], graph.nodes[-1].outputs))
assert len(graph.nodes) == 1
assert graph.nodes[-1].inputs[0].values == x0
assert graph.nodes[-1].inputs[1].values == x1
assert graph.nodes[-1].outputs == outputs
def tensors_linear_graph():
inputs = [Variable(name="x")]
intermediate0 = Variable(name="intermediate0")
intermediate1 = Variable(name="intermediate1")
intermediate2 = Variable(name="intermediate2")
outputs = [Variable(name="y")]
tensors = inputs + [intermediate0, intermediate1, intermediate2] + outputs
tensors = {tensor.name: tensor for tensor in tensors}
# Nodes are NOT in topo order.
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), nodes, tensors
class TestTensors(object):
# Calling `tensors()` should not modify tensors in the graph.
def test_tensors_does_not_modify_tensors(self):
graph, _, _ = tensors_linear_graph()
graph_tensors = graph.tensors()
# Generate a new graph to compare against
_, _, tensors = tensors_linear_graph()
assert set(tensors.keys()) == set(graph_tensors.keys())
for name, tensor in tensors.items():
graph_tensor = graph_tensors[name]
assert tensor == graph_tensor
assert tensor.inputs == graph_tensor.inputs
assert tensor.outputs == graph_tensor.outputs
# Check that tensors includes tensors not attached to nodes
def test_tensors_includes_non_node_tensors(self):
X = Constant("X", values=np.ones(shape=(64, 64), dtype=np.float32))
graph = Graph(inputs=[], outputs=[X])
tensor_map = graph.tensors()
assert "X" in tensor_map
assert tensor_map["X"] == X
def test_tensors_check_duplicates(self):
inputs = [Variable(name="x")]
outputs = [Variable(name="x")] # Distinct tensors with the same name
nodes = [
Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
]
graph = Graph(nodes=nodes, inputs=inputs, outputs=outputs)
with pytest.raises(OnnxGraphSurgeonException):
graph.tensors(check_duplicates=True)
def toposort_linear_graph():
inputs = [Variable(name="x")]
intermediate0 = Variable(name="intermediate0")
intermediate1 = Variable(name="intermediate1")
intermediate2 = Variable(name="intermediate2")
outputs = [Variable(name="y")]
# Nodes are NOT in topo order.
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate0]),
Node(op="Add", name="Test2", inputs=[intermediate1], outputs=[intermediate2]),
Node(op="Add", name="Test3", inputs=[intermediate2], outputs=outputs),
Node(op="Add", name="Test1", inputs=[intermediate0], outputs=[intermediate1]),
]
expected_node_order = [nodes[0], nodes[3], nodes[1], nodes[2]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
# Graph structure:
# x
# |
# Test0 -> out0 (graph output)
# |
# out0
# |
# Test1 -> out1 (graph output)
# |
# out1
# |
# Test2 -> out2 (graph_output)
def toposort_multi_tier_output_graph():
inputs = [Variable(name="x")]
outputs = [Variable(name="out0"), Variable(name="out1"), Variable(name="out2")]
out0, out1, out2 = outputs
nodes = [
Node(op="Add", name="Test2", inputs=[out1], outputs=[out2]),
Node(op="Add", name="Test0", inputs=inputs, outputs=[out0]),
Node(op="Add", name="Test1", inputs=[out0], outputs=[out1]),
]
expected_node_order = [nodes[1], nodes[2], nodes[0]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
# Graph structure:
# x2 x1
# | |
# Test0
# |
# int0 x0
# | /
# Test1
# |
# int1 x3
# | /
# Test2 -> out (graph_output)
def toposort_multi_tier_input_graph():
inputs = [Variable(name="x0"), Variable(name="x1"), Variable(name="x2"), Variable(name="x3")]
int0, int1 = [Variable(name="intermediate0"), Variable(name="intermediate1")]
outputs = [Variable(name="out")]
x0, x1, x2, x3 = inputs
nodes = [
Node(op="Add", name="Test2", inputs=[int1, x3], outputs=outputs),
Node(op="Add", name="Test0", inputs=[x2, x1], outputs=[int0]),
Node(op="Add", name="Test1", inputs=[int0, x0], outputs=[int1]),
]
expected_node_order = [nodes[1], nodes[2], nodes[0]]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs), expected_node_order
TOPOSORT_TEST_CASES = [
toposort_linear_graph,
toposort_multi_tier_output_graph,
toposort_multi_tier_input_graph,
]
class TestToposort(object):
@pytest.mark.parametrize("toposort_test_case", TOPOSORT_TEST_CASES)
def test_topologically_sort(self, toposort_test_case):
graph, expected_node_order = toposort_test_case()
assert graph.nodes != expected_node_order
graph.toposort()
assert graph.nodes == expected_node_order
@pytest.mark.parametrize("toposort_test_case", TOPOSORT_TEST_CASES)
def test_toposort_nested(self, toposort_test_case):
subgraph, expected_node_order = toposort_test_case()
assert subgraph.nodes != expected_node_order
# Wrap the graph within a subgraph
inp = Variable("input")
id_out = Variable("id_out")
identity = Node(op="Identity", inputs=[inp], outputs=[id_out])
# Make the subgraph take an input from the outer graph node
# If toposort tries to take the node id, it'll fault.
subgraph.nodes[0].inputs.append(id_out)
out = Variable("output")
nested = Node(op="Nested", inputs=[id_out], outputs=[out], attrs={"subgraph": subgraph})
graph = Graph(nodes=[identity, nested], inputs=[inp], outputs=[out])
graph.toposort(recurse_subgraphs=True)
assert subgraph.nodes == expected_node_order
def build_basic_graph():
inputs = [Variable(name="x")]
outputs = [Variable(name="y")]
nodes = [
Node(op="Add", name="Test", inputs=inputs, outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
def build_two_layer_graph():
inputs = [Variable(name="x")]
intermediate_tensor = Variable(name="intermediate")
outputs = [Variable(name="y")]
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate_tensor]),
Node(op="Add", name="Test1", inputs=[intermediate_tensor], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
def build_two_layer_graph_multiple_io():
inputs = [Variable(name="x0"), Variable(name="x1")]
intermediate_tensor = Variable(name="intermediate")
outputs = [Variable(name="y0"), Variable(name="y1")]
nodes = [
Node(op="Add", name="Test0", inputs=inputs, outputs=[intermediate_tensor]),
Node(op="Add", name="Test1", inputs=[intermediate_tensor], outputs=outputs),
]
return Graph(nodes=nodes, inputs=inputs, outputs=outputs)
CLEANUP_TEST_CASES = [
build_basic_graph(),
build_two_layer_graph(),
build_two_layer_graph_multiple_io(),
]
class TestCleanup(object):
@pytest.mark.parametrize("graph", CLEANUP_TEST_CASES)
def test_get_used_node_ids(self, graph):
graph_used_nodes = copy.copy(graph.nodes)
graph_used_tensors = copy.copy(list(graph.tensors().values()))
unused_tensor = Variable(name="Unused")
unused_node = Node(op="Unused", inputs=[graph.inputs[0]], outputs=[unused_tensor])
graph.nodes.append(unused_node)
with graph.node_ids():
used_node_ids, used_tensors = graph._get_used_node_ids()
assert len(used_node_ids) == len(graph.nodes) - 1
assert all([node.id in used_node_ids for node in graph_used_nodes])
assert unused_node.id not in used_node_ids
assert unused_tensor not in used_tensors
assert all([used_tensor in used_tensors for used_tensor in graph_used_tensors])
def test_cleanup_multi_tier(self):
graph, _ = toposort_multi_tier_output_graph()
tensor = graph.outputs.pop()
unused_node = tensor.inputs[0]
graph.cleanup() # Should remove just the Test2 node as out1 is still an output.
assert unused_node not in graph.nodes
assert len(graph.nodes) == 2
assert len(graph.outputs) == 2
tensor_map = graph.tensors()
assert tensor.name not in tensor_map
def test_cleanup_remove_unused_node_outputs(self):
graph, _ = toposort_linear_graph()
graph.toposort()
graph_output = graph.outputs[0]
dummy = Variable("dummy")
# Add unused tensor to a node in the middle of the graph.
# Since it does not contribute to graph outputs, it should be removed.
graph.nodes[1].outputs.append(dummy)
graph.cleanup(remove_unused_node_outputs=True)
assert dummy not in graph.nodes[1].outputs
assert graph.outputs[0] == graph_output # Graph outputs will never be removed
def test_cleanup_graph_input_producers(self):
graph, _ = toposort_linear_graph()
tensor_map = graph.tensors()
assert "x" in tensor_map
graph.inputs = [tensor_map["intermediate0"]]
graph.cleanup()
cleaned_tensor_map = graph.tensors()
assert "x" not in cleaned_tensor_map
def test_cleanup_independent_path(self):
graph, _ = toposort_linear_graph()
# Build out a path totally unrelated to the rest of the graph
indep0 = Variable(name="indep0")
indep1 = Variable(name="indep1")
node = Node(op="IndepTest", inputs=[indep0], outputs=[indep1])
graph.inputs.append(indep0) # Unused inputs should be removed as well
graph.nodes.append(node)
graph.cleanup()
assert indep0 not in graph.inputs
assert node not in graph.nodes
tensor_map = graph.tensors()
assert indep0.name not in tensor_map
assert indep1.name not in tensor_map
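# Note (not in the original excerpt): `nested_graph` is a pytest fixture defined
# elsewhere in the full test file. Judging from its use here and in TestCopy below,
# it is an outer graph whose second node holds an inner Graph in attrs["body"] and
# consumes an "id_out" tensor produced by the first (Identity-style) node.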
def test_cleanup_nested_graph(self, nested_graph):
nested_node = nested_graph.nodes[1]
nested_inp = nested_node.inputs[0]
nested_out = nested_node.outputs[0]
subgraph = nested_node.attrs["body"]
assert "id_out" in nested_graph.tensors()
nested_graph.cleanup(recurse_subgraphs=True)
# Cleanup should not remove a tensor whose only consumer is the subgraph node.
assert "id_out" in nested_graph.tensors()
# Cleanup should not modify the nested node's inputs or outputs
assert nested_node.inputs == [nested_inp]
assert nested_node.outputs == [nested_out]
# Next we'll clean up the subgraph by recursing from the top-level
assert subgraph.nodes
subgraph.outputs.clear()
nested_graph.cleanup(recurse_subgraphs=True)
assert not subgraph.nodes
class TestCopy(object):
def test_copy(self):
def make_graph():
graph, _ = toposort_multi_tier_output_graph()
graph.outputs.pop()
# Deep copy should work with empty tensors
graph.nodes[0].inputs.append(Variable.empty())
graph.nodes[0].outputs.append(Variable.empty())
return graph
graph = make_graph()
new_graph = graph.copy()
assert graph == new_graph
# Running cleanup on the first graph should not affect the copy
graph.cleanup()
assert graph != new_graph
assert new_graph == make_graph()
def test_copy_with_subgraph(self, nested_graph):
new_graph = nested_graph.copy()
assert new_graph == nested_graph
new_subgraph = new_graph.nodes[1].attrs["body"]
new_subgraph.nodes[0].outputs.clear()
new_subgraph.nodes[1].inputs.clear()
subgraph = nested_graph.nodes[1].attrs["body"]
assert subgraph.nodes[0].outputs
assert subgraph.nodes[1].inputs
new_graph.outputs.clear()
new_graph.cleanup()
assert nested_graph.outputs
assert len(nested_graph.nodes) == 2
assert len(subgraph.nodes) == 2
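# Note (not in the original excerpt): the constant-folding fixtures below call
# graph.add(...) and graph.fake(...). In the full test file these are small helper
# ops registered on Graph. A rough sketch of how such a helper could look (the
# exact names and bodies are assumptions, not taken from the original):
#
# @Graph.register()
# def add(self, a, b, name=None):
#     out = Variable(name=name) if name is not None else Variable.empty()
#     return self.layer(op="Add", inputs=[a, b], outputs=[out])[0]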
@pytest.fixture
def simple_foldable():
# Graph:
# c = (a + b)
# output = input + c
# Should fold to:
# output = input + c
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
out = graph.add(inp, c)
graph.inputs = [inp]
graph.outputs = [out]
yield graph
@pytest.fixture
def one_hop_foldable():
# Graph:
# c = (a + b)
# e = (c + d)
# output = input + e
# Should fold to:
# output = input + e
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
e = graph.add(c, weights, name="e")
out = graph.add(inp, e)
graph.inputs = [inp]
graph.outputs = [out]
yield graph
@pytest.fixture
def foldable_with_invalid_node():
# Graph
# c = (a + b)
# e = fake(d)
# f = (e + c)
# out = inp + f
#
# c should be folded even though e is the output of an
# invalid node.
weights = np.ones(shape=(1, 3), dtype=np.float32)
graph = Graph()
inp = Variable("input", shape=(1, 3), dtype=np.float32)
c = graph.add(weights, weights, name="c")
e = graph.fake(weights, name="e")
f = graph.add(e, c, name="f")
out = graph.add(inp, f, name="output")
graph.inputs = [inp]
graph.outputs = [out]
yield graph
class TestFoldConstants(object):
@pytest.mark.parametrize("partitioning", [None, "basic", "recursive"])
def test_basic(self, simple_foldable, partitioning):
inp = simple_foldable.inputs[0]
simple_foldable.fold_constants(partitioning=partitioning).cleanup()
# Extra node should be removed
assert len(simple_foldable.nodes) == 1
assert simple_foldable.nodes[0].inputs[0] == inp
assert simple_foldable.nodes[0].inputs[1].name == "c"
# Value should be computed correctly
assert np.all(simple_foldable.nodes[0].inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 2)
def test_one_hop(self, one_hop_foldable):
inp = one_hop_foldable.inputs[0]
one_hop_foldable.fold_constants().cleanup()
# Extra nodes should be removed
assert len(one_hop_foldable.nodes) == 1
assert one_hop_foldable.nodes[0].inputs[0] == inp
assert one_hop_foldable.nodes[0].inputs[1].name == "e"
# Value should be computed correctly
assert np.all(one_hop_foldable.nodes[0].inputs[1].values == np.ones(shape=(1, 3), dtype=np.float32) * 3)
import numpy as np, matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
# computes the relic density of freeze-out dark matter
# parameters: the WIMP mass m
#             the thermally averaged annihilation cross-section <sigma v> into SM particles
#             (we call this just sigma in the code)
# everything is in units of MeV
# Y = n / s
# x = m / T
# https://arxiv.org/pdf/1606.07494.pdf
log10_T_in_MeV, g_rho, g_rho_over_g_s = np.array(
((0.00, 10.71, 1.00228),
(0.50, 10.74, 1.00029),
(1.00, 10.76, 1.00048),
(1.25, 11.09, 1.00505),
(1.60, 13.68, 1.02159),
(2.00, 17.61, 1.02324),
(2.15, 24.07, 1.05423),
(2.20, 29.84, 1.07578),
(2.40, 47.83, 1.06118),
(2.50, 53.04, 1.04690),
(3.00, 73.48, 1.01778),
(4.00, 83.10, 1.00123),
(4.30, 85.56, 1.00389),
(4.60, 91.97, 1.00887),
(5.00, 102.17, 1.00750),
(5.45, 104.98, 1.00023),)).T
g_rho_interp = interp1d(log10_T_in_MeV, g_rho, kind="cubic", bounds_error=False, fill_value=(g_rho[0], g_rho[-1]))
g_rho_over_g_s_interp = interp1d(log10_T_in_MeV, g_rho_over_g_s, kind="cubic", bounds_error=False, fill_value=(g_rho_over_g_s[0], g_rho_over_g_s[-1]))
g_s_interp = lambda log10_T: g_rho_interp(log10_T) / g_rho_over_g_s_interp(log10_T)
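# g_rho_interp returns the effective relativistic degrees of freedom in energy
# density and g_s_interp those in entropy density, both as functions of
# log10(T / MeV), interpolated from the table above. Illustrative check (not part
# of the original snippet): near the electroweak scale, T ~ 100 GeV, i.e.
# log10(T/MeV) = 5, they give roughly g_rho ~ 102 and g_s ~ 101.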
g = 2 # dofs of the wimp
M_pl = 2.435e18 * 1e3 # [MeV] # https://en.m.wikiversity.org/wiki/Physics/Essays/Fedosin/Planck_mass
def calc_entropy_density(T):
# s = 2pi**2/45 * g_(*,s)(T) * T**3
return 2*np.pi**2/45 * g_s_interp(np.log10(T)) * T**3
def calc_H(x):
# 3*M_pl^2*H^2 = pi^2/30 g_*(T) T^4
T = m / x
return np.pi / np.sqrt(45) * np.sqrt(g_rho_interp(np.log10(T)) / 2) * T**2 / M_pl  # H = pi * sqrt(g_*(T)/90) * T^2 / M_pl
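# --- Not part of the original snippet: a minimal sketch of the freeze-out ODE ---
# With s(T) and H from above, the standard Boltzmann equation for the comoving
# yield Y = n/s, written in x = m/T and assuming dx/dt ~ H*x (T ~ 1/a, slowly
# varying entropy dofs), reads
#     dY/dx = - <sigma v> * s(T) / (H(x) * x) * (Y^2 - Y_eq^2)
# The helper names below (calc_Y_eq, boltzmann_rhs) and the example parameter
# values are illustrative assumptions, not taken from the original file.

def calc_Y_eq(x):
    # non-relativistic equilibrium yield: n_eq = g * (m*T/(2*pi))**(3/2) * exp(-x)
    T = m / x
    n_eq = g * (m * T / (2 * np.pi))**1.5 * np.exp(-x)
    return n_eq / calc_entropy_density(T)

def boltzmann_rhs(x, Y, sigma):
    # dY/dx = - sigma * s(T) / (H * x) * (Y^2 - Y_eq^2)
    T = m / x
    return -sigma * calc_entropy_density(T) / (calc_H(x) * x) * (Y**2 - calc_Y_eq(x)**2)

# example usage (hypothetical parameter values):
# m = 100e3        # [MeV] a 100 GeV WIMP
# sigma = 2.6e-15  # [MeV^-2] roughly the canonical thermal <sigma v>
# sol = solve_ivp(boltzmann_rhs, (10.0, 1000.0), [calc_Y_eq(10.0)], args=(sigma,),
#                 method="Radau", rtol=1e-8, atol=1e-30)
# plt.loglog(sol.t, sol.y[0]); plt.xlabel("x = m/T"); plt.ylabel("Y"); plt.show()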