#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 11:54:39 2018
@author: esteban
"""
# Libraries
import numpy as np
import solver as sol
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy.linalg import norm
from matplotlib.gridspec import GridSpec
# Numbers size in graphs
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['font.size'] = label_size
mpl.rcParams['agg.path.chunksize'] = 10000
# Predefined-time stabilizing function
def tau0(Sr, q, ga, r1):
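# sol.vec_pow is assumed (from the local solver module) to apply an element-wise signed power, sign(x)*|x|**p, so the fractional exponent below stays well defined for negative entries of Sr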
return -np.pi / (2 * q * r1) * (1 + ga**(2 * q) * norm(Sr, axis=0)**(4 * q)) * ga**(1 - q) * sol.vec_pow(Sr, 1 - 2 * q)
# System
def system(t, x):
# Positions and velocities
q1, q2, dq1, dq2 = x
q = np.array([[q1], [q2]])
dq = np.array([[dq1], [dq2]])
# Robot parameters
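# parameters of a two-link manipulator (an assumption based on the structure of H and C below): link masses m1, m2 and link lengths l1, l2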
m1, m2, l1, l2 = 1.5, 0.7, 0.15, 0.07
# Matrices
H11 = l1**2 * (m1 + m2) + 2 * (l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)) - l2**2 * m2
H12 = l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)
H21 = l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)
H22 = l2**2 * m2
h = l1 * l2 * m2 * np.sin(q2)
C11 = -h * dq2
C12 = -h * (dq1 + dq2)
C21 = h * dq1
C22 = 0
H = np.array([[H11, H12],
[H21, H22]])
C = np.array([[C11, C12],
[C21, C22]])
g = np.array([[0], [0]])
detH = H11 * H22 - H12 * H21
invH = np.array([[H22, -H12],
[-H21, H11]]) / detH
# Perturbations
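# velocity-dependent disturbance: a viscous term plus 0.1*tanh(1000*dq), a smooth approximation of 0.1*sign(dq) (Coulomb-like friction)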
d = 0.2 * dq + 0.1 * np.tanh(1000 * dq)
# Control parameters
r0, r1, r2, r3 = 10, 1, 10, 0.3
ga = 0.3
qp = 0.1
# Reference
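# desired trajectory: 5-term truncated Fourier series of a square wave and its first two time derivatives, scaled per joint by [1, 2]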
i = np.array([list(range(5))]).T
qd = 4 / np.pi * (np.sin((2 * i + 1) * t) / (2 * i + 1)).sum(axis=0) * np.array([[1], [2]])
dqd = 4 / np.pi * (np.cos((2 * i + 1) * t)).sum(axis=0) * np.array([[1], [2]])
d2qd = -4 / np.pi * ((2 * i + 1) * np.sin((2 * i + 1) * t)).sum(axis=0) * np.array([[1], [2]])
dqr = dqd - r0 * (q - qd)
d2qr = d2qd - r0 * (dq - dqd)
# Controller
Sr = dq - dqr
tau = tau0(Sr, qp, ga, r1) - r2 * Sr / (norm(Sr, axis=0) + r3)
# Model
d2q = invH.dot(tau + d - C.dot(dq) + g)
# Term Yr
Yr = H.dot(d2qr) + C.dot(dqr) + g - d
return np.concatenate((dq, d2q), axis=0).T[0]
# Simulation
t0, tf, h = 0, 10, 1e-4
x0 = np.array([0, 0, 0, 0])
# Simulation
t, x = sol.ode1(system, x0, t0, tf, h)
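# sol.ode1 is assumed to be a fixed-step forward-Euler integrator from the local
# solver module, returning the time grid and the state history stacked row-wise
# (shape (4, N)). A minimal sketch consistent with that assumption (hypothetical,
# not the actual module):
# def ode1(f, x0, t0, tf, h):
#     t = np.arange(t0, tf + h, h)
#     x = np.zeros((len(x0), len(t)))
#     x[:, 0] = x0
#     for k in range(len(t) - 1):
#         x[:, k + 1] = x[:, k] + h * f(t[k], x[:, k])
#     return t, x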
# Positions and velocities
q1, q2, dq1, dq2 = x
q = np.array([q1, q2])
dq = np.array([dq1, dq2])
# Robot parameters
m1, m2, l1, l2 = 1.5, 0.7, 0.15, 0.07
# Matrices
H11 = l1**2 * (m1 + m2) + 2 * (l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)) - l2**2 * m2
H12 = l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)
H21 = l2**2 * m2 + l1 * l2 * m2 * np.cos(q2)
H22 = l2**2 * m2
h = l1 * l2 * m2 * np.sin(q2)
C11 = -h * dq2
C12 = -h * (dq1 + dq2)
C21 = h * dq1
C22 = 0
# Perturbations
d = 0.2 * dq + 0.1 * np.tanh(1000 * dq)
# Control parameters
r0, r1, r2, r3 = 10, 1, 10, 0.3
ga = 0.3
qp = 0.1
# Reference
i = np.array([list(range(5))]).T
qd = 4 / np.pi * (np.sin((2 * i + 1) * t) / (2 * i + 1)).sum(axis=0) * np.array([[1], [2]])
dqd = 4 / np.pi * (np.cos((2 * i + 1) * t)).sum(axis=0) * np.array([[1], [2]])
d2qd = -4 / np.pi * ((2 * i + 1) * np.sin((2 * i + 1) * t)).sum(axis=0) * np.array([[1], [2]])
dqr = dqd - r0 * (q - qd)
d2qr = d2qd - r0 * (dq - dqd)
# Controller
Sr = dq - dqr
tau = tau0(Sr, qp, ga, r1) - r2 * Sr / (norm(Sr, axis=0) + r3)
# Term Yr
Yr = np.array([H11 * d2qr[0] + H12 * d2qr[1],
H21 * d2qr[0] + H22 * d2qr[1]]) + \
np.array([C11 * dqr[0] + C12 * dqr[1],
C21 * dqr[0] + C22 * dqr[1]]) - d
# Norm of Sr
plt.figure(num=1)
plt.subplots_adjust(hspace=0.5)
gs1 = GridSpec(4, 1)
plt.subplot(gs1[:3,0])
plt.plot(t, norm(Sr, axis=0), lw = 2, color=0*np.ones(3))
plt.ylabel('$||S_r(t)||$')
plt.text(1.2, 6, r'$T_c=\rho_1=1$')
plt.axvline(x=1, ymin=0, ymax=15, linestyle='dashed', color = 0.6*np.ones(3))
plt.grid()
plt.subplot(gs1[3,0])
plt.plot(t, norm(Sr, axis=0), lw = 2, color=0*np.ones(3))
plt.axvline(x=1, ymin=0, ymax=15, linestyle='dashed', color = 0.6*np.ones(3))
plt.axhline(y=0.2, xmin=0, xmax=10, linestyle='dashed', color = 0.3*np.ones(3))
plt.text(2, 0.25, r'$b=0.2$')
plt.grid()
plt.xlabel('$t$')
plt.ylim(0, 0.5)
plt.savefig('figures/norm_sr.eps', bbox_inches='tight', format='eps', dpi=1500)
## Tracking
plt.figure(num=2)
plt.plot(t, qd[0], lw = 3, color=0.7*np.ones(3))
from numba import jit
import time
from project_code.misc_functions import sub_matrix, combine_sets
from project_code.classes import Result_IF, Result_IF_generators
import numpy as np
import logging
def compute_IFs(branches, setI, setT, setR, LODF, PATL, PTDF):
t0 = time.clock()
results = []
sizeI = len(setI)
sizeT = len(setT)
current_ring = 1
setR_this_ring = [branch for branch in setR if branch.ring == current_ring]
while len(setR_this_ring) > 0:
sizeR = len(setR_this_ring)
logging.info(f"Assessing IF for ring # {current_ring} with {sizeR} elements.")
set_size_RIT = np.array([sizeR, sizeI, sizeT], dtype=np.int32)
vPTDF_I = [i.PTDF for i in setI]
vPTDF_R = [r.PTDF for r in setR_this_ring]
mxPTDF_IR = sub_matrix(setI, setR_this_ring, PTDF)
mxPTDF_IT = sub_matrix(setI, setT, PTDF)
mxPTDF_RI = sub_matrix(setR_this_ring, setI, PTDF)
mxPTDF_RT = sub_matrix(setR_this_ring, setT, PTDF)
res_T = np.zeros((sizeI, sizeR), dtype=np.int32) # Most influenced t element in N-i-r
res_IF = np.zeros((sizeI, sizeR)) # IF of the most influenced t element in N-i-r situation
set_IR = combine_sets(setI, setR_this_ring) # elms i in R set to avoid i = r situation
set_RT = combine_sets(setR_this_ring, setT) # elms r in T set to avoid r = t situation
set_TI = combine_sets(setT, setI)
mxPATL_RT = sub_matrix(setR_this_ring, setT, PATL)
res_norm_T = np.zeros((sizeI, sizeR), dtype=np.int32) # same but normalized
res_norm_IF = np.zeros((sizeI, sizeR)) # same but normalized
res_norm_IF_non_norm = np.zeros((sizeI, sizeR))
res_T_max = np.zeros(sizeR, dtype=np.int32) # most influenced t element
res_norm_T_max = np.zeros(sizeR, dtype=np.int32) # same but normalized
res_I_max = np.zeros(sizeR, dtype=np.int32)
res_norm_I_max = np.zeros(sizeR, dtype=np.int32)
res_IF_max = np.zeros(sizeR)
res_norm_IF_max = np.zeros(sizeR)
res_norm_IF_non_norm_max = np.zeros(sizeR)
LODF_RT = sub_matrix(setR_this_ring, setT, LODF)
LODFn_RT = LODF_RT * sub_matrix(setR_this_ring, setT, PATL)
compute_IF_CPU(set_size_RIT,
vPTDF_I, vPTDF_R, mxPTDF_IR, mxPTDF_IT, mxPTDF_RI, mxPTDF_RT,
res_T, res_IF, set_IR, set_RT, set_TI,
mxPATL_RT, res_norm_IF, res_norm_T, res_norm_IF_non_norm)
get_max_results(res_T, res_IF, res_norm_T, res_norm_IF, res_norm_IF_non_norm,
res_T_max, res_norm_T_max, res_I_max, res_norm_I_max, res_IF_max,
res_norm_IF_max, res_norm_IF_non_norm_max)
for idx in range(len(setR_this_ring)):
# Template : "name,N-1 IF, N-1 nIF,IF,i,t,nIF,i,t,NNnIF"
r = setR_this_ring[idx]
IF_1 = max(np.absolute(LODF_RT[:, idx]))
import os
import sys
import glob
import colorsys
sys.path.insert(0, './')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pyvista as pv
import tensorflow as tf
from scene_network.model import ObjDetectorModel
from utils import helpers, tf_utils, losses
def plot_scene(scene, scene_pts, scene_ext, gt_pts, gt_ext):
pts = scene[:, 0:3]
cols = scene[:, 3:6]
cols = cols[pts[:, 2] < np.max(pts[:, 2])-1.5]
pts = pts[pts[:, 2] < np.max(pts[:, 2])-1.5]
plot = pv.Plotter()
plot.set_background('white')
plot.add_points(pts, scalars=cols, rgb=True, opacity=1, render_points_as_spheres=True, point_size=10)
if scene_pts.shape[0] > 0:
ext_hwd = scene_ext[:, :3]
ext_theta = scene_ext[:, 3:5]
boxes_min = scene_pts - (ext_hwd / 2)
boxes_max = scene_pts + (ext_hwd / 2)
boxes = np.hstack((boxes_min, boxes_max))
box_pts = helpers.rotate_boxes(boxes, scene_pts, ext_theta)
classes = np.linspace(0, 1, box_pts.shape[0]+1)
rgb_classes = np.array([colorsys.hsv_to_rgb(c, 0.8, 0.8) for c in classes])
for i, box in enumerate(box_pts):
lines = helpers.make_lines(box)
[plot.add_mesh(l, color=rgb_classes[i], line_width=4) for l in lines]
plot.view_xy()
plot.show()
def parse_room():
model = ObjDetectorModel(1, config['n_pred'])
model(tf.zeros((1, config['n_pts'], 3), tf.float32))
model.load_weights(config['weights'])
if config['dataset'] == 's3d':
room = 'Area_' + str(config['area']) + '_' + config['room'] + '.npy'
scene = np.load(os.path.join(config['dataset_dir'], 'processed', room))
scene_extent = [
np.min(scene[:, 0]), np.min(scene[:, 1]),
# -*- coding: utf-8 -*-
import unittest
import env
import hiisi
import h5py
import numpy as np
import uuid
import os
class Test(unittest.TestCase):
def setUp(self):
self.unique_attr_path = None
self.unique_attr_value = None
self.reoccuring_attr_paths = []
self.reoccuring_attr_items = []
self.dataset_paths = []
self.group_paths = ['/']
self.data_filename = 'hiisi_test_data.h5'
self.create_hdf5_test_data()
self.h5file = hiisi.HiisiHDF(self.data_filename, 'r')
print('run setUp')
def create_hdf5_test_data(self):
"""Creates random hdf5 file for testing
"""
n_branches = 3
n_datasets = 3
unique_attr = uuid.uuid1().hex
reoccuring_attr = [uuid.uuid1().hex for x in range(n_branches)]
dataset_data = np.zeros((3,3))
h5f = h5py.File(self.data_filename, 'w')
for i in range(n_branches):
group_path = '/branch{}'.format(i)
self.group_paths.append(group_path)
branch = h5f.create_group(group_path)
branch.attrs['reoccuring_attr'] = reoccuring_attr[i]
self.reoccuring_attr_paths.append(branch.name)
self.reoccuring_attr_items.append((branch.name, reoccuring_attr[i]))
for j in range(n_datasets):
dataset_name='/branch{}/data{}/dataset'.format(i, j)
self.group_paths.append('/branch{}/data{}'.format(i, j))
dataset = h5f.create_dataset(dataset_name, data=np.int8(dataset_data), dtype='int8')
self.dataset_paths.append(dataset.name)
if i==1 and j==1:
dataset.attrs['unique_attr'] = unique_attr
self.unique_attr_path = dataset.name
self.unique_attr_value = unique_attr
h5f.close()
def tearDown(self):
os.remove(self.data_filename)
def test_is_unique_attribute_true(self):
self.assertTrue(self.h5file.is_unique_attr('unique_attr'))
def test_is_unique_attribute_false(self):
self.assertFalse(self.h5file.is_unique_attr('reoccuring_attr'))
self.assertFalse(self.h5file.is_unique_attr('not_existing_attr'))
def test_attr_exists_true(self):
self.assertTrue(self.h5file.attr_exists('unique_attr'))
def test_attr_exists_false(self):
self.assertFalse(self.h5file.attr_exists('not_existing_attr'))
def test_datasets(self):
assert list(self.h5file.datasets()) == self.dataset_paths
def test_datasets_no_datasets_found(self):
with hiisi.HiisiHDF('tmp.h5', 'w') as h5f:
assert list(h5f.datasets()) == []
os.remove('tmp.h5')
def test_groups(self):
assert list(self.h5file.groups()) == self.group_paths
def test_groups_no_groups_found(self):
with hiisi.HiisiHDF('tmp.h5', 'w') as h5f:
assert h5f.groups() == ['/']
os.remove('tmp.h5')
def test_attr_gen(self):
attr_gen = self.h5file.attr_gen('reoccuring_attr')
attr_items = []
for i in attr_gen:
attr_items.append((i.path, i.value))
assert attr_items == self.reoccuring_attr_items
def test_attr_gen_no_match(self):
attr_gen = self.h5file.attr_gen('not_existing_attr')
with self.assertRaises(StopIteration):
next(attr_gen)
def test_create_from_filedict_new_file(self):
filename = 'create_from_filedict_test.h5'
with hiisi.HiisiHDF(filename, 'w') as h5f:
file_dict = {}
file_dict['/'] = {'A':1, 'B':2}
file_dict['/dataset1/data1/data'] = {'DATASET':np.arange(9).reshape((3,3)), 'C':'c'}
file_dict['/dataset1/data1/what'] = {'D':123}
h5f.create_from_filedict(file_dict)
with hiisi.HiisiHDF(filename, 'r') as h5f:
assert h5f['/'].attrs['A'] == 1
assert h5f['/'].attrs['B'] == 2
assert h5f['/dataset1/data1/data'].attrs['C'] == 'c'
np.testing.assert_array_equal(h5f['/dataset1/data1/data'][:], np.arange(9).reshape((3,3)))
assert h5f['/dataset1/data1/what'].attrs['D'] == 123
os.remove(filename)
def test_create_from_filedict_append_new_group(self):
filename = './create_from_filedict_test.h5'
# Create the file
with hiisi.HiisiHDF(filename, 'w') as h5f:
file_dict = {}
file_dict['/'] = {'A':1, 'B':2}
file_dict['/dataset1/data1/data'] = {'DATASET': np.arange(9).reshape((3,3)), 'C':'c'}
'''
python functions to do various useful data processing/manipulation
'''
import numpy as np
from scipy.special import erf
import fitsio
import glob
import os
import astropy.io.fits as fits
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import desimodel.footprint
import desimodel.focalplane
from random import random
from desitarget.io import read_targets_in_tiles
from desitarget.sv3 import sv3_targetmask
from LSS.Cosmo import distance
def tile2rosette(tile):
if tile < 433:
return (tile-1)//27
else:
if tile >= 433 and tile < 436:
return 13
if tile >= 436 and tile < 439:
return 14
if tile >= 439 and tile < 442:
return 15
if tile >= 442 and tile <=480:
return (tile-442)//3
if tile > 480:
return tile//30
return 999999 #shouldn't be any more?
def calc_rosr(rosn,ra,dec):
#given rosette number and ra,dec, calculate the angular distance from its center
roscen = {0:(150.100,2.182),1:(179.6,0),2:(183.1,0),3:(189.9,61.8),4:(194.75,28.2)\
,5:(210.0,5.0),6:(215.5,52.5),7:(217.8,34.4),8:(216.3,-0.6),9:(219.8,-0.6)\
,10:(218.05,2.43),11:(242.75,54.98),12:(241.05,43.45),13:(245.88,43.45),14:(252.5,34.5)\
,15:(269.73,66.02),16:(194.75,24.7),17:(212.8,-0.6),18:(269.73,62.52),19:(236.1,43.45)}
ra = ra*np.pi/180.
dec = dec*np.pi/180.
rac,decc = roscen[rosn]
rac = rac*np.pi/180.
decc = decc*np.pi/180.
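# angular separation between (ra, dec) and the rosette center via the spherical law of cosines, converted to degrees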
cd = np.sin(dec)*np.sin(decc)+np.cos(dec)*np.cos(decc)*np.cos(rac-ra)
ad = np.arccos(cd)*180./np.pi
if ad > 2.5:
print(rosn,ra,dec,rac,decc)
return ad
def combtile_spec(tiles,outf='',rel='daily'):
s = 0
n = 0
if os.path.isfile(outf):
specd = Table.read(outf)
s = 1
tdone = np.unique(specd['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile,zdate in zip(tiles[tmask]['TILEID'],tiles[tmask]['LASTNIGHT']):
zdate = str(zdate)
tspec = combspecdata(tile,zdate,rel=rel)
tspec['TILEID'] = tile
if s == 0:
specd = tspec
s = 1
else:
specd = vstack([specd,tspec],metadata_conflicts='silent')
specd.sort('TARGETID')
kp = (specd['TARGETID'] > 0)
specd = specd[kp]
n += 1
print(tile,n,len(tiles[tmask]),len(specd))
specd.write(outf,format='fits', overwrite=True)
def combspecdata(tile,zdate,specroot='/global/cfs/cdirs/desi/spectro/redux/',rel='daily' ):
#put data from different spectrographs together, one table for fibermap, other for z
coaddir=specroot+rel+'/tiles/cumulative/'
specs = []
#find out which spectrographs have data
for si in range(0,10):
try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
fitsio.read(ff)
specs.append(si)
except:
print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
print('spectrographs with data:')
print(specs)
if len(specs) == 0:
return None
tspec = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tn = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='ZBEST')
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tspec = vstack([tspec,tn],metadata_conflicts='silent')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
#tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBER','FIBERSTATUS','PRIORITY','FA_TARGET','FA_TYPE',\
#'OBJTYPE','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','NIGHT','EXPID','MJD','SV3_DESI_TARGET','SV3_BGS_TARGET'])
tspec = join(tspec,tf,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
tspec = join(tspec,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
print(len(tspec),len(tf))
#tspec['LOCATION'] = tf['LOCATION']
#tspec['FIBERSTATUS'] = tf['FIBERSTATUS']
#tspec['PRIORITY'] = tf['PRIORITY']
return tspec
def combfibmap(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrographs have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
return tf
def combfibmap_and_scores(tile,zdate,coaddir='/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/' ):
#put data from different spectrographs together, one table for fibermap, other for z
specs = []
#find out which spectrographs have data
for si in range(0,10):
#try:
ff = coaddir+str(tile)+'/'+zdate+'/zbest-'+str(si)+'-'+str(tile)+'-thru'+zdate+'.fits'
if os.path.isfile(ff):
#fitsio.read(ff)
specs.append(si)
#except:
# print('no spectrograph '+str(si)+ ' for tile '+str(tile))
#print(ff)
#print('spectrographs with data:')
#print(specs)
if len(specs) == 0:
return None
tf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
ts = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[0])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
for i in range(1,len(specs)):
tnf = Table.read(coaddir+str(tile)+'/'+zdate+'/zbest-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='FIBERMAP')
tf = vstack([tf,tnf],metadata_conflicts='silent')
try:
tns = Table.read(coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits',hdu='SCORES')
ts = vstack([ts,tns],metadata_conflicts='silent')
except:
print('did not find '+coaddir+str(tile)+'/'+zdate+'/coadd-'+str(specs[i])+'-'+str(tile)+'-thru'+zdate+'.fits')
tf = unique(tf,keys=['TARGETID'])
tf.keep_columns(['FIBERASSIGN_X','FIBERASSIGN_Y','TARGETID','LOCATION','FIBERSTATUS','PRIORITY','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE'])
tf = join(tf,ts,keys=['TARGETID'],join_type='left',metadata_conflicts='silent')
return tf
def goodlocdict(tf):
'''
Make a dictionary to map between location and priority
tf should come from combspecdata above
'''
wloc = tf['FIBERSTATUS'] == 0
print(str(len(tf[wloc])) + ' locations with FIBERSTATUS 0')
goodloc = tf[wloc]['LOCATION']
pdict = dict(zip(tf['LOCATION'], tf['PRIORITY'])) #to be used later for randoms
return pdict,goodloc
def cutphotmask(aa,bits):
print(str(len(aa)) +' before imaging veto' )
keep = (aa['NOBS_G']>0) & (aa['NOBS_R']>0) & (aa['NOBS_Z']>0)
for biti in bits:
keep &= ((aa['MASKBITS'] & 2**biti)==0)
aa = aa[keep]
print(str(len(aa)) +' after imaging veto' )
return aa
def combtiles_wdup(tiles,mdir='',fout='',tarcol=['RA','DEC','TARGETID','SV3_DESI_TARGET','SV3_BGS_TARGET','SV3_MWS_TARGET','SUBPRIORITY','PRIORITY_INIT','TARGET_STATE','TIMESTAMP','ZWARN','PRIORITY']):
s = 0
n = 0
if os.path.isfile(fout):
tarsn = Table.read(fout)
s = 1
tdone = np.unique(tarsn['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ts = str(tile).zfill(6)
faf = '/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz'
fht = fitsio.read_header(faf)
wt = tiles['TILEID'] == tile
#tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'])
tars = read_targets_in_tiles(mdir,tiles[wt],mtl=True,isodate=fht['MTLTIME'],columns=tarcol)
#tars.keep_columns(tarcols)
#tars = tars[[b for b in tarcol]]
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
tars = join(tars,tt,keys=['TARGETID'])
tars['TILEID'] = tile
tars['ZWARN'].name = 'ZWARN_MTL'
if s == 0:
tarsn = tars
s = 1
else:
tarsn = vstack([tarsn,tars],metadata_conflicts='silent')
tarsn.sort('TARGETID')
n += 1
print(tile,n,len(tiles[tmask]),len(tarsn))
tarsn.write(fout,format='fits', overwrite=True)
def gettarinfo_type(faf,tars,goodloc,pdict,tp='SV3_DESI_TARGET'):
#get target info
#in current files on SVN, TARGETS has all of the necessary info on potential assignments
#no more, so commented out
#tt = Table.read(faf,hdu='TARGETS')
#tt.keep_columns(['TARGETID','FA_TARGET','FA_TYPE','PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
tt = Table.read(faf,hdu='POTENTIAL_ASSIGNMENTS')
#if len(tt) != len(tfa):
# print('!!!mismatch between targets and potential assignments, aborting!!!')
# return None
#tt = join(tt,tfa,keys=['TARGETID'])
wgt = (np.isin(tt['LOCATION'],goodloc))
print(str(len(np.unique(tt[wgt]['LOCATION']))) + ' good locations')
print('comparison of number targets, number of targets with good locations')
print(len(tt),len(tt[wgt]))
tt = tt[wgt]
tt = join(tt,tars,keys=['TARGETID'],table_names = ['_AVAIL', ''], uniq_col_name='{col_name}{table_name}')
#Mark targets that actually got assigned fibers
tfall = Table.read(faf,hdu='FIBERASSIGN')
tfall.keep_columns(['TARGETID','LOCATION','PRIORITY'])
tt = join(tt,tfall,keys=['TARGETID'],join_type='left',table_names = ['', '_ASSIGNED'], uniq_col_name='{col_name}{table_name}')
wal = tt['LOCATION_ASSIGNED']*0 == 0
tt['LOCATION'][wal] = tt['LOCATION_ASSIGNED'][wal]
tt['LOCATION_AVAIL'][wal] = tt['LOCATION_ASSIGNED'][wal]
#print('differences between assigned locations')
#print(np.unique(tt['LOCATION_AVAIL'][wal]-tt['LOCATION_ASSIGNED'][wal]))
#print(tt.columns)
tt = unique(tt,keys=['TARGETID']) #cut to unique target ids
#print(tarf)
#tars = Table.read(tarf)
#tars.remove_columns(['Z','ZWARN'])#,'PRIORITY','SUBPRIORITY','OBSCONDITIONS'])
#we want to get these from the zbest file that is specific to the tile and thus when it was observed
#tfa = unique(tfa[wgt],keys=['TARGETID'])
#wtype = ((tt[tp] & 2**tarbit) > 0) #don't cut by type here any more
#tt = tt[wtype]
#tfa = join(tfa,tt,keys=['TARGETID'])
#tft = join(tft,tt,keys=['TARGETID'])
#print(str(len(tfa)) +' unique targets with good locations and at '+str(len(np.unique(tfa['LOCATION'])))+' unique locations and '+str(len(tft))+ ' total unique targets at '+str(len(np.unique(tft['LOCATION']))) +' unique locations ')
#wgl = np.isin(tfa['LOCATION_ASSIGNED'],goodloc)
#wtype = ((tfa[tp] & 2**tarbit) > 0)
#wtfa = wgl & wtype
#print('number of assigned fibers at good locations '+str(len(tfa[wtfa])))
wal = tt['LOCATION_ASSIGNED']*0 == 0
print('number of assigned fibers '+str(len(tt[wal])))
print('number of unique target id '+str(len(np.unique(tt[wal]['TARGETID']))))
print('max priority of assigned '+str(np.max(tt[wal]['PRIORITY_ASSIGNED'])))
#tt[wal]['LOCATION'] = tt[wal]['LOCATION_ASSIGNED']
#tt[wal]['LOCATION_AVAIL'] = tt[wal]['LOCATION_ASSIGNED']
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#print('are location_avail and location_assigned the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION_ASSIGNED'], tt[wal]['LOCATION_AVAIL']))
tt['LOCATION_ASSIGNED'] = np.zeros(len(tt),dtype=int)
tt['LOCATION_ASSIGNED'][wal] = 1
wal = tt['LOCATION_ASSIGNED'] == 1
print('number of assigned fibers '+str(len(tt[wal]))+' (check to match agrees with above)')
wal = tt['LOCATION']*0 == 0
print('number of locations from z file '+str(len(tt[wal]))+' (check to match agrees with above)')
#print('are location and location_avail the same for assigned targets?')
#print(np.array_equal(tt[wal]['LOCATION'], tt[wal]['LOCATION_AVAIL']))
#tt['PRIORITY_ASSIGNED'] = np.vectorize(pdict.__getitem__)(tt['LOCATION'])
return tt
def find_znotposs(dz):
dz.sort('TARGETID')
tidnoz = []
tids = np.unique(dz['TARGETID'])
ti = 0
i = 0
print('finding targetids that were not observed')
while i < len(dz):
za = 0
while dz[i]['TARGETID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tidnoz.append(tids[ti])
if ti%30000 == 0:
print(ti)
ti += 1
selnoz = np.isin(dz['TARGETID'],tidnoz)
tidsb = np.unique(dz[selnoz]['TILELOCID'])
#dz = dz[selnoz]
dz.sort('TILELOCID')
tids = np.unique(dz['TILELOCID'])
print('number of targetids with no obs '+str(len(tidnoz)))
tlidnoz = []
lznposs = []
ti = 0
i = 0
while i < len(dz):
za = 0
while dz[i]['TILELOCID'] == tids[ti]:
if dz[i]['ZWARN'] != 999999:
za = 1
#break
i += 1
if i == len(dz):
break
if za == 0:
tlidnoz.append(tids[ti])
#if np.isin(tids[ti],tidsb):
# lznposs.append(tids[ti])
if ti%30000 == 0:
print(ti,len(tids))
ti += 1
#the ones to veto are now the join of the two
wtbtlid = np.isin(tlidnoz,tidsb)
tlidnoz = np.array(tlidnoz)
lznposs = tlidnoz[wtbtlid]
print('number of locations where assignment was not possible because of priorities '+str(len(lznposs)))
return lznposs
def count_tiles_better(fs,dr,pd,rann=0,specrel='daily',fibcol='COADD_FIBERSTATUS'):
'''
from files with duplicates that have already been sorted by targetid, quickly go
through and get the multi-tile information
dr is either 'dat' or 'ran'
returns a table with columns TARGETID, NTILE, TILES, TILELOCIDS
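Example (hypothetical inputs): tc = count_tiles_better(fs, 'dat', 'dark', specrel='daily')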
'''
#fs = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_specwdup_Alltiles.fits')
#wf = fs['FIBERSTATUS'] == 0
wf = fs[fibcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
if dr == 'dat':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/datcomb_'+pd+'_tarspecwdup_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/datcomb_'+pd+'ntileinfo.fits'
if dr == 'ran':
fj = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/'+specrel+'/rancomb_'+str(rann)+pd+'wdupspec_Alltiles.fits')
#outf = '/global/cfs/cdirs/desi/survey/catalogs/SV3/LSS/random'+str(rann)+'/rancomb_'+pd+'ntileinfo.fits'
wg = np.isin(fj['TILELOCID'],gtl)
fjg = fj[wg]
tids = np.unique(fjg['TARGETID'])
nloc = []#np.zeros(len(np.unique(f['TARGETID'])))
nt = []
tl = []
tli = []
ti = 0
i = 0
while i < len(fjg):
tls = []
tlis = []
nli = 0
while fjg[i]['TARGETID'] == tids[ti]:
nli += 1
tls.append(fjg[i]['TILEID'])
tlis.append(fjg[i]['TILELOCID'])
i += 1
if i == len(fjg):
break
nloc.append(nli)
tlsu = np.unique(tls)
tlisu = np.unique(tlis)
nt.append(len(tlsu))
tl.append("-".join(tlsu.astype(str)))
tli.append("-".join(tlisu.astype(str)))
if ti%100000 == 0:
print(ti)
ti += 1
tc = Table()
tc['TARGETID'] = tids
tc['NTILE'] = nt
tc['TILES'] = tl
tc['TILELOCIDS'] = tli
return tc
def count_tiles(tiles,catdir,pd,ttp='ALL',imask=False):
'''
For list of tileids, simply track the tiles a target shows up as available in
pd is dark or bright
just output targetid and tiles, meant to be matched to other processing
don't worry about what was assigned, purpose is to just count tile overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun.keep_columns(['TARGETID','TILELOCID'])
print(len(fgun),len(np.unique(fgun['TARGETID'])))
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILES'] = aa
ai = np.chararray(len(fgun),unicode=True,itemsize=300)
tlids = np.copy(fgun['TILELOCID']).astype('<U300')
fgun['TILELOCIDS'] = tlids
if s == 0:
fgu = fgun
s =1
else:
fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu = unique(fgu,keys='TARGETID')#,keep='last')
#I think this works when the ordering is the same; things got messed up other places with sorts
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles),len(fgu))
cnt += 1
fu = fgu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#np.unique()#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print(np.unique(fu['TILES']))
fu.write(catdir+'Alltiles_'+pd+'_tilelocs.dat.fits',format='fits', overwrite=True)
def combtiles(tiles,catdir,tp,tmask,tc='SV3_DESI_TARGET',ttp='ALL',imask=False):
'''
For list of tileids, combine data generated per tile , taking care of overlaps
'''
s = 0
cnt = 0
for tile in tiles:
fl = catdir+ttp+str(tile)+'_full.dat.fits'
fgun = Table.read(fl)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
if tp != 'dark' and tp != 'bright':
wt = (fgun[tc] & tmask[tp]) > 0
fgun = fgun[wt]
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION_AVAIL']
fgun['TILELOCID_ASSIGNED'] = np.zeros(len(fgun))
wm = fgun['LOCATION_ASSIGNED'] == 1
fgun['TILELOCID_ASSIGNED'][wm] = fgun['TILELOCID'][wm]
nl,nla = countloc(fgun)
fgun['ZPOSS'] = np.zeros(len(fgun)).astype(int)
if tp != 'dark' and tp != 'bright':
#fgun['LOC_NOTBLOCK'] = np.zeros(len(fgun)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
was = ~np.isin(fgun['LOCATION_AVAIL'],locsna)
#fgun['LOC_NOTBLOCK'][was] = 1
wg = was
fgun['ZPOSS'][wg] = 1
#fgun.sort('ZPOSS')
#aa = np.chararray(len(fgun),unicode=True,itemsize=100)
#aa[:] = str(tile)
fgun['TILE'] = int(tile)
#fgun['TILES'] = aa
#tlids = np.copy(fgun['TILELOCID']).astype('<U300')
#fgun['TILELOCIDS'] = tlids
#print('sum of assigned,# of unique TILELOCID (should match)')
#print(np.sum(fgun['LOCATION_ASSIGNED'] == 1),len(np.unique(fgun['TILELOCID'])))
#ai = np.chararray(len(fgun),unicode=True,itemsize=300)
#
#
if s == 0:
fgu = fgun
s =1
else:
#fgo = fgu.copy()
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
#wn = fgu['PRIORITY_ASSIGNED']*0 != 0
#wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
#fgu[wn]['PRIORITY_ASSIGNED'] = 0
#fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED'] #create this column so assigned always show up in order of highest priority
#wa = fgu['LOCATION_ASSIGNED'] == 1
#wa &= fgu['PRIORITY_ASSIGNED'] >= 2000 #this was put SV2 to ignore BGS repeats
#fa = fgu[wa]
#print(len(fa),len(np.unique(fa['TARGETID'])))
#fgu.sort('sort')
#fgu = unique(fgu,keys='TARGETID',keep='last')
#dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
#didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
#fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if tp != 'dark' and tp != 'bright':
# fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
# fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
#aa[:] = '-'+str(tile)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
#fgu['TILES'][didsc] = ms #add the tile info
#aa = np.copy(fgun[dids]['TILELOCIDS'])#np.chararray(len(fgu['TILELOCIDS']),unicode=True,itemsize=100)
#aa[:] = np.core.defchararray.add('-',aa)
#rint(aa)
#ms = np.core.defchararray.add(fgu['TILELOCIDS'][didsc],aa)
#print(ms)
#fgu['TILELOCIDS'][didsc] = ms #add the tile info
print(tile,cnt,len(tiles))#,np.sum(fgu['LOCATION_ASSIGNED']),len(fgu),len(np.unique(fgu['TILELOCID'])),np.sum(fgu['ZPOSS']))#,np.unique(fgu['TILELOCIDS'])
cnt += 1
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#tlids = np.copy(fgu['TILELOCID']).astype('<U300')
#fgu['TILELOCIDS'] = tlids
tsnrcol = 'TSNR2_'+tp
if tp == 'ELG_HIP':
tsnrcol = 'TSNR2_ELG'
if tp == 'BGS_ANY':
tsnrcol = 'TSNR2_BGS'
wt = (fgu[tsnrcol] == 1e20) | (fgu[tsnrcol]*0 != 0)
print('number with bad tsnrcol is '+str(len(fgu[wt])))
fgu[tsnrcol][wt] = 0
wn = fgu['PRIORITY_ASSIGNED']*0 != 0
wn |= fgu['PRIORITY_ASSIGNED'] == 999999
#print(len(fgu[~wn]),np.max(fgu[~wn]['PRIORITY_ASSIGNED']),'max priority assigned')
fgu[wn]['PRIORITY_ASSIGNED'] = 0
fgu['sort'] = -1.*fgu['LOCATION_ASSIGNED']*fgu['PRIORITY_ASSIGNED']*fgu[tsnrcol] #create this column so assigned always show up in order of highest priority
if tp != 'dark' and tp != 'bright':
#wa = fgu['LOCATION_ASSIGNED'] == 1
#print('ZPOSS for LOCATION_ASSIGNED = 1:')
#print(np.unique(fgu[wa]['ZPOSS']))
fgu['sort'] = fgu['sort']*fgu['ZPOSS']-fgu['ZPOSS']
wa = fgu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fgu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fgu['ZPOSS'] == 1
natloc = ~np.isin(fgu[wp]['TILELOCID'],loclz)
print('number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
fgu.sort('sort')
#fgu.sort('ZPOSS')
fu = unique(fgu,keys='TARGETID')#,keep='last')
tidsu = fu['TARGETID']#[wp][natloc]
tids = fgu['TARGETID']
if tp != 'dark' and tp != 'bright':
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
nalz = ~np.isin(fu['TILELOCID'],loclz)
natloc = wp & nalz#~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
tlocs = fgu['TILELOCID']
ntl = []
ch = 0
bl = 0
print(len(tidsu),len(natloc))
for ii in range(0,len(tidsu)):
#if wp[ii] & natloc[ii]:
if natloc[ii]:
bl += 1
tid = tidsu[ii]
wt = tids == tid
tls = tlocs[wt]
s = 0
for tl in tls:
if s == 0:
if np.isin(tl,loclz):
#wu = fu['TARGETID'] == tid
fu[ii]['TILELOCID'] = tl
#ntl.append(tl)
ch += 1
s = 1
if ii%10000 == 0:
print(ii,len(tidsu),ch,bl)
wa = fu['LOCATION_ASSIGNED'] == 1
#wp = fgu['ZPOSS']
loclz,nloclz = np.unique(fu[wa]['TILELOCID_ASSIGNED'],return_counts=True)
wp = fu['ZPOSS'] == 1
natloc = ~np.isin(fu[wp]['TILELOCID'],loclz)
print('after cutting to unique and reassignment, number of zposs with tilelocid not showing up in tilelocid_assigned:')
print(np.sum(natloc))
#print(len(np.unique(fgu['TARGETID'])),np.sum(fgu['LOCATION_ASSIGNED']))
# tiles = fgu['TILES']
# tilesu = fu['TILES']
# tlids = fgu['TILELOCIDS']
# tlidsu = fu['TILELOCIDS']
#
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# otl = tlidsu[ii]
# tt = tiles[wt]
# tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# for ti in tti:
# if ti != otl:
# tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
# fu['TILELOCIDS'] = tlidsu
#
# #wa = fu['LOCATION_ASSIGNED'] == 1
# #wa &= fu['PRIORITY_ASSIGNED'] >= 2000
print(np.sum(fu['LOCATION_ASSIGNED']))
#need to resort tile string
# fl = np.chararray(len(fu),unicode=True,itemsize=100)
# for ii in range(0,len(fu)):
# tl = fu['TILES'][ii]
# tls = tl.split('-')#.astype('int')
# tli = tls[0]
# if len(tls) > 1:
# #tls = tls.astype('int')
# tls.sort()
# tli = tls[0]
# for i in range(1,len(tls)):
# tli += '-'+tls[i]
# #else:
# # tli = tls
# #print(tli)
# fl[ii] = tli
#
# fu['TILES'] = fl
#print(np.unique(fu['TILES']))
# print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
#fu.write(catdir+tp+'Alltiles_'+pd+'_full.dat.fits',format='fits', overwrite=True)
fu.write(catdir+'/datcomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def countloc(aa):
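# count, for each fiber LOCATION value, how many rows list it as available (nl) and how many of those rows were assigned (nla)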
locs = aa['LOCATION_AVAIL']
locsa = aa['LOCATION_ASSIGNED']
la = np.max(locs)+1
nl = np.zeros(la)
nla = np.zeros(la)
for i in range(0,len(aa)):
nl[locs[i]] += 1
nla[locs[i]] += locsa[i]
return nl,nla
def combran_wdup(tiles,rann,randir,tp,sv3dir,specf,keepcols=[]):
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
outf = randir+str(rann)+'/rancomb_'+tp+'wdup_Alltiles.fits'
if os.path.isfile(outf):
fgu = Table.read(outf)
#tarsn.keep_columns(['RA','DEC','TARGETID''LOCATION','FIBER','TILEID'])
s = 1
tdone = np.unique(fgu['TILEID'])
tmask = ~np.isin(tiles['TILEID'],tdone)
else:
tmask = np.ones(len(tiles)).astype('bool')
for tile in tiles[tmask]['TILEID']:
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fa = Table.read(ffa,hdu='FAVAIL')
ffna = Table.read(ffna)
fgun = join(fa,ffna,keys=['TARGETID'])
#fgun.remove_columns(delcols)
td += 1
fgun['TILEID'] = int(tile)
fgun.keep_columns(['RA','DEC','TARGETID','LOCATION','FIBER','TILEID'])
if s == 0:
fgu = fgun
s = 1
else:
fgu = vstack([fgu,fgun],metadata_conflicts='silent')
fgu.sort('TARGETID')
print(tile,td, len(tiles), len(fgun),len(fgu))
else:
print('did not find '+ffa)
if len(tiles[tmask]['TILEID']) > 0:
fgu.write(outf,format='fits', overwrite=True)
#specf = Table.read(sv3dir+'datcomb_'+tp+'_specwdup_Alltiles.fits')
specf['TILELOCID'] = 10000*specf['TILEID'] +specf['LOCATION']
specf.keep_columns(keepcols)
#specf.keep_columns(['ZWARN','LOCATION','TILEID','TILELOCID','FIBERSTATUS','FIBERASSIGN_X','FIBERASSIGN_Y','PRIORITY','DELTA_X','DELTA_Y','EXPTIME','PSF_TO_FIBER_SPECFLUX','TSNR2_ELG_B','TSNR2_LYA_B','TSNR2_BGS_B','TSNR2_QSO_B','TSNR2_LRG_B','TSNR2_ELG_R','TSNR2_LYA_R','TSNR2_BGS_R','TSNR2_QSO_R','TSNR2_LRG_R','TSNR2_ELG_Z','TSNR2_LYA_Z','TSNR2_BGS_Z','TSNR2_QSO_Z','TSNR2_LRG_Z','TSNR2_ELG','TSNR2_LYA','TSNR2_BGS','TSNR2_QSO','TSNR2_LRG'])
fgu = join(fgu,specf,keys=['LOCATION','TILEID','FIBER'])
fgu.sort('TARGETID')
outf = sv3dir+'/rancomb_'+str(rann)+tp+'wdupspec_Alltiles.fits'
print(outf)
fgu.write(outf,format='fits', overwrite=True)
def combran(tiles,rann,randir,ddir,tp,tmask,tc='SV3_DESI_TARGET',imask=False):
s = 0
td = 0
#tiles.sort('ZDATE')
print(len(tiles))
delcols = ['DESI_TARGET','BGS_TARGET','MWS_TARGET','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT',\
'NUMOBS_INIT','SCND_TARGET','NUMOBS_MORE','NUMOBS','Z','ZWARN','TARGET_STATE','TIMESTAMP','VERSION','PRIORITY']
for tile,zdate in zip(tiles['TILEID'],tiles['ZDATE']):
tspec = combfibmap_and_scores(tile,zdate)
pdict,gloc = goodlocdict(tspec)
tspec.keep_columns(['LOCATION','FIBERSTATUS','DELTA_X','DELTA_Y','PSF_TO_FIBER_SPECFLUX','EXPTIME','OBJTYPE','TSNR2_ELG','TSNR2_LRG','TSNR2_QSO','TSNR2_BGS'])
dt = ddir+'ALL'+str(tile)+'_full.dat.fits'
ffa = randir+str(rann)+'/fba-'+str(tile).zfill(6)+'.fits'
ffna = randir+str(rann)+'/tilenofa-'+str(tile)+'.fits'
if os.path.isfile(ffa):
fd = Table.read(dt)
# print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
#gloc = np.unique(fd['LOCATION_AVAIL']) #bad locations already removed from this files
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd),len(gloc))
if tp != 'dark' and tp != 'bright':
wt = (fd[tc] & tmask[tp]) > 0
fd = fd[wt]
#print(np.sum(fd['LOCATION_ASSIGNED']),len(fd))
nl,nla = countloc(fd)
#commenting out zfailure stuff, not vetoing randoms based on that
#wzf = fd['ZWARN'] != 0
#wzf &= fd['ZWARN'] != 999999
#wzf &= fd['ZWARN']*0 == 0
#loc_fail = np.unique(fd[wzf]['LOCATION'])
#print('number of zfail locations',len(loc_fail))
#
#print(np.sum(fd['LOCATION_ASSIGNED']),len(np.unique(fd['LOCATION_AVAIL'])),np.sum(nla),np.sum(nl))
#
#find the locations that were requested by type but not assigned
fa = Table.read(ffa,hdu='FAVAIL')
wg = np.isin(fa['LOCATION'],gloc)
fa = fa[wg]
fa = join(fa,tspec,keys=['LOCATION'],join_type='left')
#fa['FIBER_GOOD'] = np.zeros(len(fa)).astype(int)
#fa['FIBER_GOOD'][wg] = 1
#fa['Z_NOTBAD'] = np.zeros(len(fa)).astype(int)
#wnzf = ~np.isin(fa['LOCATION'],loc_fail)
#fa['Z_NOTBAD'][wnzf] = 1
fa['ZPOSS'] = np.zeros(len(fa)).astype(int)
#fa['ZPOSSNOTBAD'] = np.zeros(len(fa)).astype(int)
if tp != 'dark' and tp != 'bright':
#fa['LOC_NOTBLOCK'] = np.zeros(len(fa)).astype(int)
locsna = []
for i in range(0,len(nla)):
if nla[i] == 0 and nl[i] > 0:
locsna.append(i)
print('number of unassigned locations',len(locsna))
ntloc = len(gloc)-len(locsna)#-len(loc_fail)
print('total number of assignable positions',ntloc)
was = ~np.isin(fa['LOCATION'],locsna)
#fa['LOC_NOTBLOCK'][was] = 1
#wg &= was
fa['ZPOSS'][was] = 1
#fa['ZPOSSNOTBAD'][was&wnzf] = 1
#if maskzfail:
# wg &= wnzf
#wzt = wpr & ~wzf & ~wna
#fg = fa[wg]
#print(len(fa),np.sum(fa['ZPOSSNOTBAD']))
#fg = fa
#print('before,after vetoing locations:')
#print(len(fa),len(fg))
#if tp != 'dark' and tp != 'bright':
# fa.sort('ZPOSS')
#else:
# fg.sort('FIBER_GOOD')
fgun = unique(fa,keys=['TARGETID'],keep='last')
ffna = Table.read(ffna)
fgun = join(fgun,ffna,keys=['TARGETID'])
fgun.remove_columns(delcols)
if imask:
wm = fgun['MASKBITS'] == 0
fgun = fgun[wm]
print(tile,td, len(tiles), str(len(fgun))+' unique new randoms')
td += 1
aa = np.chararray(len(fgun),unicode=True,itemsize=100)
aa[:] = str(tile)
fgun['TILE'] = int(tile)
fgun['TILES'] = aa
fgun['TILELOCID'] = 10000*tile +fgun['LOCATION']
if s == 0:
fgu = fgun
s = 1
else:
fv = vstack([fgu,fgun],metadata_conflicts='silent')
fgo = fgu.copy()
fgu = unique(fv,keys='TARGETID')#,keep='last')
dids = np.isin(fgun['TARGETID'],fgo['TARGETID']) #get the rows with target IDs that were duplicates in the new file
didsc = np.isin(fgu['TARGETID'],fgun['TARGETID'][dids]) #get the row in the concatenated table that had dup IDs
#print(len(fgu),len(fgo),len(fgun),len(fgu[didsc]),len(fgun[dids]))
fgu['TILELOCID'][didsc] = fgun['TILELOCID'][dids] #give the repeats the new tilelocids, since those are the most likely to be available to low priority targets
#if this works, can save vetoing until the end
fgu['TSNR2_ELG'][didsc] = np.maximum(fgu['TSNR2_ELG'][didsc],fgun['TSNR2_ELG'][dids])
fgu['TSNR2_QSO'][didsc] = np.maximum(fgu['TSNR2_QSO'][didsc],fgun['TSNR2_QSO'][dids])
fgu['TSNR2_BGS'][didsc] = np.maximum(fgu['TSNR2_BGS'][didsc],fgun['TSNR2_BGS'][dids])
fgu['TSNR2_LRG'][didsc] = np.maximum(fgu['TSNR2_LRG'][didsc],fgun['TSNR2_LRG'][dids])
if tp != 'dark' and tp != 'bright':
#fgu['FIBER_GOOD'][didsc] = np.maximum(fgu['FIBER_GOOD'][didsc],fgun['FIBER_GOOD'][dids])
#fgu['LOC_NOTBLOCK'][didsc] = np.maximum(fgu['LOC_NOTBLOCK'][didsc],fgun['LOC_NOTBLOCK'][dids])
#fgu['Z_NOTBAD'][didsc] = np.maximum(fgu['Z_NOTBAD'][didsc],fgun['Z_NOTBAD'][dids])
fgu['ZPOSS'][didsc] = np.maximum(fgu['ZPOSS'][didsc],fgun['ZPOSS'][dids])
#fgu['ZPOSSNOTBAD'][didsc] = np.maximum(fgu['ZPOSSNOTBAD'][didsc],fgun['ZPOSSNOTBAD'][dids])
aa = np.chararray(len(fgu['TILES']),unicode=True,itemsize=20)
aa[:] = '-'+str(tile)
#rint(aa)
ms = np.core.defchararray.add(fgu['TILES'][didsc],aa[didsc])
#print(ms)
fgu['TILES'][didsc] = ms #add the tile info
print(str(len(fgu))+' unique total randoms')
else:
print('did not find '+ffa)
#fgu.sort('ZPOSS')
#fgu['TILES'] = np.copy(fgu['TILE']).astype('<U100')
#fu = unique(fgu,keys=['TARGETID'])#,keep='last')
fu = fgu
#fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
#return True
# tiles = fgu['TILES']
# tilesu = fu['TILES']
#tlids = fgu['TILELOCIDS']
#tlidsu = fu['TILELOCIDS']
# for ii in range(0,len(tidsu)): #this takes a long time and something more efficient will be necessary
# tid = tidsu[ii]#fu[ii]['TARGETID']
# wt = tids == tid
# ot = tilesu[ii]
# #otl = tlidsu[ii]
# tt = tiles[wt]
# #tti = tlids[wt]
# for tl in tt:
# if tl != ot:
# tilesu[ii] += '-'+str(tl)
# #for ti in tti:
# # if ti != otl:
# # tlidsu[ii] += '-'+str(ti)
# if ii%1000 == 0:
# print(ii)
# fu['TILES'] = tilesu
#fu['TILELOCIDS'] = tlidsu
fl = np.chararray(len(fu),unicode=True,itemsize=100)
for ii in range(0,len(fu)):
tl = fu['TILES'][ii]
tls = tl.split('-')#.astype('int')
tli = tls[0]
if len(tls) > 1:
#tls = tls.astype('int')
tls.sort()
tli = tls[0]
for i in range(1,len(tls)):
tli += '-'+tls[i]
#else:
# tli = tls
#print(tli)
fl[ii] = tli
fu['TILES'] = fl
print('number of unique tiles configurations '+str(len(np.unique(fu['TILES']))))
NT = np.zeros(len(fgu))
ros = np.zeros(len(fgu))
print('counting tiles and finding rosette')
for ii in range(0,len(fu['TILES'])): #not sure why, but this only works when using loop for Table.read but array option works for fitsio.read
NT[ii] = np.char.count(fu['TILES'][ii],'-')+1
ti = int(fu['TILES'][ii].split('-')[0])
ros[ii] = tile2rosette(ti)
fu['NTILE'] = NT
fu['rosette_number'] = ros
print(np.unique(fu['rosette_number'],return_counts=True))
fu.write(randir+str(rann)+'/rancomb_'+tp+'_Alltiles.fits',format='fits', overwrite=True)
def mkfullran(fs,indir,rann,imbits,outf,tp,pd,bit,desitarg='SV3_DESI_TARGET',tsnr= 'TSNR2_ELG',notqso='',qsobit=4,fbcol='COADD_FIBERSTATUS'):
'''
indir is directory with inputs
rann is the random file number (0-17)
imbits are the maskbits for the imaging veto mask
outf is the name (including full path) of the output file
tp is the target type
pd is the program, dark or bright
bit is the bit to use to select to the target type
randir doesn't get used anymore
desitarg is the column to use to select the target type
tsnr is the tsnr2 used for this sample
'''
#first, need to find locations to veto based on data
#the same is done in mkfulldat
#fs = fitsio.read(indir+'datcomb_'+pd+'_specwdup_Alltiles.fits')
wf = fs[fbcol] == 0
stlid = 10000*fs['TILEID'] +fs['LOCATION']
gtl = np.unique(stlid[wf])
import os, sys
from math import sqrt, copysign
import pandas as pd
import numpy as np
import helpers as nhp
from helpers import rotmat_dict, rotmats
from LatticeModel import LatticeModel
from cached_property import cached_property
import random
from itertools import combinations
import plotly as py
import plotly.graph_objs as go
from Bio.PDB import PDBParser
from Bio.PDB.QCPSuperimposer import QCPSuperimposer
pdb_parser = PDBParser()
imposer = QCPSuperimposer()
neighbor_mods = np.array([
[2, 2, 2],[-2, -2, -2],
[-2, 2, 2],[2, -2, 2],[2, 2, -2],
[-2, -2, 2],[-2, 2, -2],[2, -2, -2]
])
cubic_neighbor_mods = np.array([
[0,0,4], [0,4,0], [4,0,0],
[0,0,-4],[0,-4,0], [-4,0,0],
])
neighbor_mods_d2 = np.unique(np.vstack([nm + neighbor_mods for nm in neighbor_mods]), axis=0)
neighbor_mods2 = np.vstack((neighbor_mods, neighbor_mods_d2))
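# neighbor_mods holds the 8 nearest-neighbour steps of the body-centred cubic lattice (all sign combinations of (±2, ±2, ±2)); neighbor_mods_d2 collects the distinct two-step displacements and neighbor_mods2 stacks both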
mod2mod_dict = {nmi: np.argwhere(nhp.inNd(neighbor_mods2, nm1 * 2))[0,0] for nmi, nm1 in enumerate(neighbor_mods)}
tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in neighbor_mods]
# test: allow cubic paths for tags
cubic_tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in cubic_neighbor_mods]
tag_mods_single.extend(cubic_tag_mods_single)
tag_mods_bulk = []
for tm in tag_mods_single:
tmb = np.unique(np.vstack([tms + neighbor_mods2 for tmi, tms in enumerate(tm) if tmi > 1]), axis=0)
tmb = tmb[np.invert(nhp.inNd(tmb, tm))]
tag_mods_bulk.append(tmb)
tag_mods = list(zip(tag_mods_single, tag_mods_bulk))
quad_neighbor_mods_abs = np.array([
[0, 0, 4],
[0, 4, 0],
[4, 0, 0]
])
helix_array = np.array([[0, 0, 0],
[2, -2, 2],
[4, 0, 4],
[2, 2, 6],
[0, 0, 8]])
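# template trace of a five-residue helix on the lattice; its rotated copies below are compared against five-residue coordinate windows in check_helicity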
rotated_helix_array_list = [np.matmul(helix_array, rot) for rot in rotmats]
# mirror_dims = list(combinations([0,1,2], 2)) + [tuple([i]) for i in range(3)] + [(0, 1, 2)]
# mirrored_rotated_helix_array_list = [rhm for rhm in rotated_helix_array_list]
# helix_mod = np.array([[2, -2, 2],
# [2, 2, 2],
# [-2, 2, 2],
# [-2, -2, 2]])
# helix with equidistant neighbors
helix_v_truth = np.array([[6, 2, -2, 2],
[-6, -2, 2, -2]])
helix_h_truth = np.array([0, 0, 8])
# helix with 1 quad face transition
# helix_h_truth = np.array([0, 0, 6])
#
# helix_v_truth = np.array([[6, 0, -2, 2],
# [-6, 0, 2, -2]])
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
from: https://stackoverflow.com/questions/45142959/calculate-rotation-matrix-to-align-two-vectors-in-3d-space
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
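# Minimal usage sketch (hypothetical vectors, not part of the original script):
# R = rotation_matrix_from_vectors(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
# np.allclose(R.dot([1., 0., 0.]), [0., 1., 0.])  # -> True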
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Lattice(LatticeModel):
"""Class containing all that pertains to a particular type of lattice (initialization, allowed moves etc.)
lattice type: body-centered cubic (bcc)
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pdb_id = kwargs.get('pdb_id', 'unknown')
self.experimental_mode = kwargs['experimental_mode']
self.no_regularization = kwargs.get('no_regularization', False)
self.ca_dist = 3.8 # actual CA distance
self.lat_dist = sqrt((0.5 * self.ca_dist) ** 2 / 3) # distance of lattice edge
self.linker_dist = 21 # Distance tagged CA to dye
self.linker_dist_lat = sqrt(self.linker_dist ** 2 / 3)
self.n1_dist = 1.48 # estimate of distance N to CA
self.pairs_mat = kwargs['pairs_mat']
self.ss_df = kwargs['secondary_structure']
# self.sheet_series = nhp.list_sheet_series(self.ss_sequence)
self.coords = kwargs.get('coords', None)
self.prev_coords = self.coords.copy()
self.branch_rotation_idx_list = list(range(len(rotmats)))
self.cm_coords = kwargs.get('cm_coords', None)
self.finetune_structure = kwargs.get('finetune_structure', False)
def coords_are_valid(self):
"""
For testing purposes!
"""
for i, c in enumerate(self.coords[:-1]):
if not np.all(np.abs(self.coords[i+1] - c) == 2): return False
return True
@property
def cm_coords(self):
return self._cm_coords
@cm_coords.setter
def cm_coords(self, coords):
"""
Translate cm coords to unit lattice
"""
if coords is None:
self._cm_coords = None
return
self._cm_coords = (coords - coords[0]) / self.lat_dist
@cached_property
def sheet_block_dict(self):
out_dict = {}
cur_block_idx = 0
in_block = False
for si, ss in enumerate(self.ss_sequence):
if ss == 'S':
if not in_block:
cur_block_idx += 1
in_block = True
out_dict[si] = cur_block_idx
else:
in_block = False
return out_dict
@property
def ss_df(self):
return self._ss_df
@ss_df.setter
def ss_df(self, df):
df.loc[self.tagged_resi, :] = 00, 4, 4
df.loc[:, 'L'] = 0
df[df > 0] = 0
self._ss_df = df
# --- mutations ---
def apply_n_steps(self, n):
global_fun_list = [
# self.apply_crankshaft_move,
self.apply_branch_rotation,
self.apply_corner_flip,
# self.apply_pull_move # screws up helices, can't get it right
]
for _ in range(n):
random.shuffle(global_fun_list)
if global_fun_list[0](): pass
elif global_fun_list[1](): pass
# elif global_fun_list[2](): pass
# elif global_fun_list[3](): pass
else: return False
self.set_hash_list()
self.__dict__.pop('e_matrix', None)
return True
def check_helicity(self):
# test: see if helices are still in place
for ci, ss in self.ss_df.iterrows():
if ss.H >= 0: continue
helix_candidate = self.coords[ci:ci + 5] - self.coords[ci]
hel_dists = [np.linalg.norm(helix_candidate - hel) for hel in rotated_helix_array_list]
if not np.any(np.array(hel_dists) == 0):
return ci
@property
def branch_rotation_idx_list(self):
random.shuffle(self._branch_rotation_idx_list)
return self._branch_rotation_idx_list
@branch_rotation_idx_list.setter
def branch_rotation_idx_list(self, bri_list):
self._branch_rotation_idx_list = bri_list
def apply_branch_rotation(self):
mutations = list(range(-3, 4))
mutations.remove(0)
random.shuffle(mutations)
idx_list = list(range(self.seq_length - 1))
idx_list = np.array(idx_list)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This modules handles synthetic photometry data formats."""
# STDLIB
import os
import warnings
# THIRD-PARTY
import numpy as np
# ASTROPY
from astropy import log
from astropy import units as u
from astropy.io import ascii, fits
from astropy.utils.data import get_readable_fileobj
from astropy.utils.exceptions import AstropyUserWarning
# LOCAL
from . import exceptions, units
from synphot import __version__
__all__ = ['read_remote_spec', 'read_spec', 'read_ascii_spec',
'read_fits_spec', 'write_fits_spec']
def read_remote_spec(filename, encoding='binary', cache=True,
show_progress=True, **kwargs):
"""Read FITS or ASCII spectrum from a remote location.
Parameters
----------
filename : str
Spectrum filename.
encoding, cache, show_progress
See :func:`~astropy.utils.data.get_readable_fileobj`.
kwargs : dict
Keywords acceptable by :func:`read_fits_spec` (if FITS) or
:func:`read_ascii_spec` (if ASCII).
Returns
-------
header : dict
Metadata.
wavelengths, fluxes : `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
"""
with get_readable_fileobj(filename, encoding=encoding, cache=cache,
show_progress=show_progress) as fd:
header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs)
return header, wavelengths, fluxes
def read_spec(filename, fname='', **kwargs):
"""Read FITS or ASCII spectrum.
Parameters
----------
filename : str or file pointer
Spectrum file name or pointer.
fname : str
Filename. This is *only* used if ``filename`` is a pointer.
kwargs : dict
Keywords acceptable by :func:`read_fits_spec` (if FITS) or
:func:`read_ascii_spec` (if ASCII).
Returns
-------
header : dict
Metadata.
wavelengths, fluxes : `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
Raises
------
synphot.exceptions.SynphotError
Read failed.
"""
if isinstance(filename, str):
fname = filename
elif not fname: # pragma: no cover
raise exceptions.SynphotError('Cannot determine filename.')
if fname.endswith('fits') or fname.endswith('fit'):
read_func = read_fits_spec
else:
read_func = read_ascii_spec
return read_func(filename, **kwargs)
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs):
"""Read ASCII spectrum.
ASCII table must have following columns:
#. Wavelength data
#. Flux data
It can have more than 2 columns but the rest is ignored.
Comments are discarded.
Parameters
----------
filename : str or file pointer
Spectrum file name or pointer.
wave_unit, flux_unit : str or `~astropy.units.core.Unit`
Wavelength and flux units, which default to Angstrom and FLAM,
respectively.
kwargs : dict
Keywords accepted by :func:`astropy.io.ascii.ui.read`.
Returns
-------
header : dict
This is just an empty dictionary, so returned values
are the same as :func:`read_fits_spec`.
wavelengths, fluxes : `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
They are set to 'float64' precision.
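Examples
--------
A minimal usage sketch (hypothetical file name):

>>> hdr, wave, flux = read_ascii_spec('my_spectrum.txt')  # doctest: +SKIP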
"""
header = {}
dat = ascii.read(filename, **kwargs)
wave_unit = units.validate_unit(wave_unit)
flux_unit = units.validate_unit(flux_unit)
wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit
fluxes = dat.columns[1].data.astype(np.float64) * flux_unit
return header, wavelengths, fluxes
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX',
wave_unit=u.AA, flux_unit=units.FLAM):
"""Read FITS spectrum.
Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2``
keywords, respectively, from data table (not primary) header.
If these keywords are not present, units are taken from
``wave_unit`` and ``flux_unit`` instead.
Parameters
----------
filename : str or file pointer
Spectrum file name or pointer.
ext: int
FITS extension with table data. Default is 1.
wave_col, flux_col : str
Wavelength and flux column names (case-insensitive).
wave_unit, flux_unit : str or `~astropy.units.core.Unit`
Wavelength and flux units, which default to Angstrom and FLAM,
respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2``
keywords are not present in table (not primary) header.
Returns
-------
header : dict
Primary header only. Extension header is discarded.
wavelengths, fluxes : `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
"""
fs = fits.open(filename)
header = dict(fs[str('PRIMARY')].header)
wave_dat = fs[ext].data.field(wave_col).copy()
flux_dat = fs[ext].data.field(flux_col).copy()
fits_wave_unit = fs[ext].header.get('TUNIT1')
fits_flux_unit = fs[ext].header.get('TUNIT2')
if fits_wave_unit is not None:
try:
wave_unit = units.validate_unit(fits_wave_unit)
except (exceptions.SynphotError, ValueError) as e: # pragma: no cover
warnings.warn(
'{0} from FITS header is not valid wavelength unit, using '
'{1}: {2}'.format(fits_wave_unit, wave_unit, e),
AstropyUserWarning)
if fits_flux_unit is not None:
try:
flux_unit = units.validate_unit(fits_flux_unit)
except (exceptions.SynphotError, ValueError) as e: # pragma: no cover
warnings.warn(
'{0} from FITS header is not valid flux unit, using '
'{1}: {2}'.format(fits_flux_unit, flux_unit, e),
AstropyUserWarning)
wave_unit = units.validate_unit(wave_unit)
flux_unit = units.validate_unit(flux_unit)
wavelengths = wave_dat * wave_unit
fluxes = flux_dat * flux_unit
if isinstance(filename, str):
fs.close()
return header, wavelengths, fluxes
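# Illustrative sketch (not part of the original module): reading a FITS table with
# non-default column names and a fallback flux unit. The file name and column names
# are assumptions standing in for a real spectrum file.
def _example_read_fits_spec():
    hdr, wave, flux = read_fits_spec(
        'my_spec.fits', ext=1, wave_col='LAMBDA', flux_col='SPECIFIC_FLUX',
        flux_unit=units.PHOTLAM)  # flux_unit is used only if TUNIT2 is missing
    return hdr, wave, flux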
def write_fits_spec(filename, wavelengths, fluxes, pri_header={},
ext_header={}, overwrite=False, trim_zero=True,
pad_zero_ends=True, precision=None, epsilon=0.00032,
wave_col='WAVELENGTH', flux_col='FLUX',
wave_unit=u.AA, flux_unit=units.FLAM):
"""Write FITS spectrum.
.. warning::
If data is being written out as single-precision but wavelengths
are in double-precision, some rows may be omitted.
Parameters
----------
filename : str
Output spectrum filename.
wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity`
Wavelength and flux of the spectrum.
pri_header, ext_header : dict
Metadata to be added to primary and given extension FITS header,
respectively. Do *not* use this to define column names and units.
overwrite : bool
Overwrite existing file. Defaults to `False`.
trim_zero : bool
Remove rows with zero-flux. Default is `True`.
pad_zero_ends : bool
Pad each end of the spectrum with a row of zero flux
like :func:`synphot.spectrum.BaseSpectrum.taper`.
This is unnecessary if input is already tapered.
precision : {`None`, 'single', 'double'}
Precision of values in output file.
Use native flux precision by default.
epsilon : float
Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ.
This is the minimum separation in wavelengths necessary for SYNPHOT
to read the entries as distinct single-precision numbers.
This is *only* used if ``precision='single'`` but data are in
double-precision. Default from the FAQ is 0.00032.
wave_col, flux_col : str
Wavelength and flux column names (case-insensitive).
wave_unit, flux_unit : str or `~astropy.units.core.Unit`
Wavelength and flux units, which default to Angstrom and FLAM,
respectively. These are *only* used if wavelengths and fluxes
are not in astropy quantities.
Raises
------
synphot.exceptions.SynphotError
Wavelengths and fluxes have different shapes or value precision
is not supported.
"""
if isinstance(wavelengths, u.Quantity):
wave_unit = wavelengths.unit
wave_value = wavelengths.value
else:
wave_value = wavelengths
if isinstance(fluxes, u.Quantity):
flux_unit = fluxes.unit
flux_value = fluxes.value
else:
flux_value = fluxes
wave_unit = units.validate_unit(wave_unit).to_string().upper()
flux_unit = units.validate_unit(flux_unit).to_string().upper()
if wave_value.shape != flux_value.shape:
raise exceptions.SynphotError(
'Wavelengths have shape {0} but fluxes have shape {1}'.format(
wave_value.shape, flux_value.shape))
# Remove rows with zero flux. Putting this before precision logic to avoid
# keeping duplicate wavelengths with zero flux.
if trim_zero:
idx = np.where(flux_value != 0)
n_thrown = flux_value.size - len(idx[0])
wave_value = wave_value[idx]
flux_value = flux_value[idx]
if n_thrown != 0:
log.info('{0} zero-flux rows are thrown out'.format(n_thrown))
# Only these Numpy types are supported
# 'f' np.float32
# 'd' np.float64
pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion
# Use native flux precision
if precision is None:
precision = flux_value.dtype.char
if precision not in pcodes:
raise exceptions.SynphotError('flux is not float32 or float64')
# Use user specified precision
else:
precision = precision.lower()
if precision == 'single':
precision = 'f'
elif precision == 'double':
precision = 'd'
else:
raise exceptions.SynphotError(
'precision must be single or double')
# Now check wavelength precision
wave_precision = wave_value.dtype.char
if wave_precision not in pcodes:
raise exceptions.SynphotError(
'wavelength is not float32 or float64')
# If wavelength is double-precision but data is written out as
# single-precision, wavelength values have to be recalculated
# so that they will still be sorted with no duplicates.
if wave_precision == 'd' and precision == 'f':
orig_size = wave_value.size
idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon)
wave_value = np.append(wave_value[idx], wave_value[-1])
flux_value = np.append(flux_value[idx], flux_value[-1])
from __future__ import print_function, division, absolute_import
import glob
import random
import time
# Python 2/3 support
try:
import queue
except ImportError:
import Queue as queue
import cv2
import numpy as np
import torch as th
from joblib import Parallel, delayed
from torch.multiprocessing import Queue, Process
from .preprocess import IMAGE_WIDTH, IMAGE_HEIGHT
from .utils import preprocessInput
def sample_coordinates(coord_1, max_distance, percentage):
"""
Given a first coordinate A, sample a second coordinate B within a maximum distance of [max_distance * percentage].
:param coord_1: (int) first coordinate
:param max_distance: (int) max value of coordinate in the axis
:param percentage: (float) maximum occlusion as a percentage
:return: (tuple of int)
"""
min_coord_2 = max(0, coord_1 - max_distance * percentage)
max_coord_2 = min(coord_1 + max_distance * percentage, max_distance)
coord_2 = np.random.randint(low=min_coord_2, high=max_coord_2)
return min(coord_1, coord_2), max(coord_1, coord_2)
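# Illustrative sketch (assumed values): sampling an occlusion window along one axis.
# With coord_1=60, max_distance=224 and percentage=0.5, the second coordinate is drawn
# in [max(0, 60 - 112), min(60 + 112, 224)] = [0, 172], and the ordered pair is returned,
# e.g. (37, 60) or (60, 131).
def _example_sample_coordinates():
    h_start, h_end = sample_coordinates(60, 224, percentage=0.5)
    return h_start, h_end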
def preprocessImage(image, convert_to_rgb=True, apply_occlusion=False, occlusion_percentage=0.5):
"""
:param image: (np.ndarray) image (BGR or RGB)
:param convert_to_rgb: (bool) whether the conversion to rgb is needed or not
:param apply_occlusion: (bool) whether to occlude part of the image or not
(used for training denoising autoencoder)
:param occlusion_percentage: (float) max percentage of occlusion (in width and height)
:return: (np.ndarray)
"""
# Resize
im = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
# Convert BGR to RGB
if convert_to_rgb:
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# Normalize
im = preprocessInput(im.astype(np.float32), mode="image_net")
if apply_occlusion:
h_1 = np.random.randint(IMAGE_HEIGHT)
h_1, h_2 = sample_coordinates(h_1, IMAGE_HEIGHT, percentage=occlusion_percentage)
w_1 = np.random.randint(IMAGE_WIDTH)
import numpy as np
import pylab as plt
from scipy.special import erf
from scipy.integrate import simps
from scipy.linalg import cho_solve
#from ChoSolver import choSolve, choBackSubstitution
def styblinsky(x):
return (x[0]**4 - 16*x[0]**2 + 5*x[0] + x[1]**4 - 16*x[1]**2 + 5*x[1])/2.
def rosenbrock(x):
a = 1
b = 100
return (a-x[0])**2 + b*(x[1] - x[0]**2)**2
def complexInjunction(x):
Nm = len(x)
a = np.arange(Nm)
A = np.outer(np.cos(np.arange(Nm)),np.sin(1j*np.arange(Nm))-Nm)
y = np.exp(1j*A.dot(x))
return -np.abs((np.min(y)/np.max(y)).real)
def mean(x):
#return styblinsky(x)
return np.log10(1+rosenbrock(x))# + rosenbrock((x-1))
return np.sqrt((x[0]-0.5)**2 + (x[1])**2)
def M52(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
r2 = np.zeros([N,N],dtype=np.double)
K = np.zeros([N,N],dtype=np.double)
i = 0
while i < len(lengthScales):
r2 += (XX[:,i,:,i]/lengthScales[i])**2
i += 1
K += r2*(5./3.)
np.sqrt(5*r2,out=r2)
K += 1+r2
np.exp(-r2,out=r2)
K *= r2
K *= theta0
return K
def expK(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
K = np.zeros([N,N],dtype=np.double)
i = 0
while i < len(lengthScales):
K -= (XX[:,i,:,i]/lengthScales[i])**2
i += 1
K /= 2.
np.exp(K,out=K)
K *= theta0
#K += nu**2*np.eye(N)
return K
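# Illustrative sketch (assumption about the data layout): expK appears to expect a
# pairwise-difference tensor XX of shape (N, d, N, d) with XX[a, i, b, i] = x_a[i] - x_b[i],
# and theta = [amplitude, noise, lengthscale_1, ..., lengthscale_d]. Under that assumption,
# the squared-exponential covariance of three random 2-D points could be built like this:
def _example_expK():
    X = np.random.uniform(size=(3, 2))                  # 3 points in 2-D
    XX = X[:, :, None, None] - X[None, None, :, :]      # pairwise differences, shape (3, 2, 3, 2)
    theta = [1.0, 1e-3, 0.5, 0.5]                       # theta0, nu, lengthscales
    K = expK(XX, theta)                                 # (3, 3) covariance matrix
    return K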
def expK_derivative(XX,theta):
theta0 = theta[0]
nu = theta[1]
lengthScales = theta[2:]
N = XX.shape[0]
Kdiff = np.zeros([N,N,len(theta)],dtype=np.double)
K = np.zeros([N,N],dtype=np.double)
#0 -> exp(-r^2)
#1 -> 2*eye(N)*nu
#2: ->-2r*eye(-r^2)*-2*(x1[i]-x2[i])^2/(lengthScale[i])^3
i = 0
while i < len(lengthScales):
Kdiff[:,:,0] -= (XX[:,i,:,i]/lengthScales[i])**2
Kdiff[:,:,2+i] += 4*XX[:,i,:,i]**2/lengthScales[i]**3
i += 1
#*r
#np.rollaxis(K[:,:,2:],2,0) *= np.sqrt(-Kdiff[:,:,0])
K /= 2.
np.exp(K,out=K)
K *= theta0
K += nu**2*np.eye(N)
return K
class Prior(object):
def __init__(self, **kwargs):
for key in kwargs.keys():
setattr(self,key,kwargs[key])
def domain(self):
'''Get domain of prior'''
return None
def sample(self,N=1):
'''get a sample from the distribution'''
return None
def pdf(self,x):
'''get the pdf at x'''
return None
class UniformPrior(Prior):
def __init__(self,xmin,xmax):
d = {"xmin":float(min(xmin,xmax)),"xmax":float(max(xmin,xmax)),"width":float(max(xmin,xmax) - min(xmin,xmax))}
super(UniformPrior,self).__init__(**d)
def sample(self,N=1):
return np.random.uniform(low=self.xmin,high=self.xmax,size=N)
def pdf(self,x):
out = np.ones_like(x)
out /= self.width
out[x>self.xmax] *= 0.
out[x<self.xmin] *= 0.
return out
class NormalPrior(Prior):
def __init__(self,mean,std):
d = {"mean":float(mean),"std":float(std)}
super(NormalPrior,self).__init__(**d)
def sample(self,N=1):
return self.mean + self.std*np.random.normal(size=N)
def pdf(self,x):
return np.exp(-(x - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std
class LogNormalPrior(Prior):
def __init__(self,mean,std):
d = {"mean":float(mean),"std":float(std)}
super(LogNormalPrior,self).__init__(**d)
def sample(self,N=1):
return np.random.lognormal(mean=self.mean, sigma=self.std, size=N)
def pdf(self,x):
return np.exp(-(np.log(x) - self.mean)**2/self.std**2/2.)/np.sqrt(2*np.pi)/self.std/x
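# Illustrative sketch (not part of the original module): the shared Prior interface --
# sample() draws values, pdf() evaluates the density at those values.
def _example_priors():
    u_prior = UniformPrior(0., 1.)
    n_prior = NormalPrior(0., 1.)
    xs = u_prior.sample(5)           # 5 uniform draws in [0, 1)
    densities = n_prior.pdf(xs)      # standard-normal density at those points
    return xs, densities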
class ClassPrior(Prior):
def __init__(self,numClasses,weights=None):
if weights is None:
weights = np.ones(numClasses,dtype=np.double)/numClasses
d = {"numClasses":float(numClasses),"weights":float(weights)}
super(ClassPrior,self).__init__(**d)
def sample(self,N=1):
samples = np.zeros(N,dtype=np.int64)
i = 0
while i < N:
c = -1
while c == -1:
c_ = np.random.randint(self.numClasses)
import numpy as np
from pathlib import Path
from math import inf, isnan
from time import time
from RegLib.load_save_data import save_checkpoint
from RegLib.HelperFunctions import progressBar
from nnreg.model import Model
from nnreg.dataloader import DataLoader
# Needed for types:
from typing import Callable
from yacs.config import CfgNode as CN
class Trainer():
def train_and_save(self, cfg:CN, data_loader:DataLoader, checkpoints_path:Path = None):
self.model = Model(cfg, data_loader.X_train.shape[1], data_loader.y_train.shape[1])
if(checkpoints_path == None):
checkpoints_path = Path.cwd()
self.train(cfg, self.model, data_loader.X_train, data_loader.X_test, data_loader.X_val, data_loader.y_train, data_loader.y_test, data_loader.y_val, checkpoints_path)
return self
def train(self, cfg:CN, model, X_train:np.ndarray, X_test:np.ndarray, X_val:np.ndarray, y_train:np.ndarray, y_test:np.ndarray, y_val:np.ndarray,
checkpoints_path:Path):
batch_size = cfg.OPTIM.BATCH_SIZE
num_batches_per_epoch = X_train.shape[0] // batch_size
learning_rate = cfg.OPTIM.LR
decay = cfg.OPTIM.LR_DECAY
use_shuffle = cfg.SHUFFLE
eval_step = cfg.EVAL_STEP if cfg.EVAL_STEP > 1 else 1
log_step = cfg.LOG_STEP * eval_step
use_momentum = cfg.OPTIM.USE_MOMENTUM
if use_momentum:
velocity = [0 for i in range(len(model.ws))]
momentum_gamma = cfg.OPTIM.MOMENTUM
train_eval = {}
val_eval = {}
learning_rate_all = {}
train_r2 = {}
use_accuracy = cfg.MODEL.EVAL_FUNC == "acc"
best_eval = inf * (-1 if use_accuracy else 1) # 1 is best for accuracy, 0 for MSE
best_eval_step = -1
global_step = 0
total_steps = cfg.OPTIM.NUM_EPOCHS * num_batches_per_epoch
self.start_time = time()
for epoch in range(cfg.OPTIM.NUM_EPOCHS):
_lr = Trainer.learning_schedule(learning_rate, decay, epoch)
if use_shuffle:
s = np.arange(X_train.shape[0])
np.random.shuffle(s)
X_train = X_train[s]
y_train = y_train[s]
for step in range(num_batches_per_epoch):
# Select the mini-batch
start = step * batch_size
end = start + batch_size
X_batch, y_batch = X_train[start:end], y_train[start:end]
# Compute gradient:
y_pred = model.forward(X_batch)
model.backward(y_pred, y_batch)
# Update the weights
_lr_step = np.multiply(model.grads, _lr)
# Designed by <NAME>
# Sanitized by <NAME>
#==================================================================
#***************** Loading the libraries ************************
#==================================================================
import sys
sys.path.insert(0,'/GlobalLibrary')
import os
home_address = os.getcwd()
from tensorflow_circuit import TF_DEFAULT, make_var, np_elu, np_sigmoid, np_sigmoid_inv
from Netlist_Database import VCOSpice,INBUF2Spice
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import math
import tensorflow as tf
from scipy.io import savemat
from pickle import dump
#==================================================================
#********************** User Intent *****************************
#==================================================================
bw=0.04e9 # Bandwidth
nbit=10 # Number of Bits
#==================================================================
#******************* Initialization *****************************
#==================================================================
KT=4.14e-21 # Boltzmann constant * 300 K
epsilon=1e-5 # Epsilon in GD
tedad=50 # number of parameter candidates
fing_clk=4 # number of fingers for the initial driver
maxiter=10000
minosr=8
maxosr=1000
minamp=0.05
maxamp=0.3
weights = np.array([1.,1.,1.,1.,0.,0.,1000.,1000.,1000.,1.])
#==================================================================
#**************** Loading the Regressors ************************
#==================================================================
# MR. INBUF2
class INBUF2(TF_DEFAULT):
def __init__(self,tech=65):
self.tech=tech
self.default_loading()
def default_loading(self):
if self.tech==65:
drive = home_address+'/reg_files/INBUF2'
sx_f = drive + '/scX_inbuf265.pkl'
sy_f = drive + '/scY_inbuf265.pkl'
w_f = drive + '/w8_inbuf265.p'
self.w_json = drive + '/model_inbuf265.json'
self.w_h5 = drive + '/reg_inbuf265.h5'
self.minx = np.array([[1 ,1 , 60e-9 ,1 ,0.55 ,0.2e-6 , 2 ]])
self.maxx = np.array([[20 ,50 ,400e-9 ,50 ,0.9 ,1.2e-6 , 20 ]])
self.step = np.array([[1 ,1 , 10e-9 ,1 ,0.01 ,10.0e-9, 1 ]])
self.loading(sx_f,sy_f,w_f)
# MR. VCO
class VCO(TF_DEFAULT):
def __init__(self,tech=65):
self.tech=tech
self.default_loading()
def default_loading(self):
if self.tech==65:
drive = home_address+'/reg_files/VCO'
sx_f = drive + '/scX_vco65.pkl'
sy_f = drive + '/scY_vco65.pkl'
w_f = drive + '/w8_vco65.p'
self.w_json = drive + '/model_vco65.json'
self.w_h5 = drive + '/reg_vco65.h5'
self.minx = np.array([[0.2e-6 ,2 ,0.2e-6,2 ]])
self.maxx = np.array([[1.2e-6 ,20 ,1.2e-6,20 ]])
import tensorflow as tf
import unittest
from megnet.data.graph import GaussianDistance, GraphBatchGenerator, GraphBatchDistanceConvert,\
EmbeddingMap
import numpy as np
class TestGraph(unittest.TestCase):
def test_gaussian_expansion(self):
x = np.random.normal(size=(10, ))
centers = np.linspace(0, 5, 20)
width = 0.5
gd = GaussianDistance(centers, width)
out = gd.convert(x)
self.assertListEqual(list(out.shape), [10, 20])
def test_graph_generator(self):
feature = [np.random.normal(size=(3, 4)), np.random.normal(size=(2, 4))]
bond = [np.random.normal(size=(2, 5)), np.random.normal(size=(1, 5))]
glob_features = [np.random.normal(size=(1, 2)), np.random.normal(size=(1, 2))]
index1 = [np.array([0, 1]), np.array([0])]
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import argparse, sys, time, math, json
from scipy.fftpack import fft, fftshift, ifft
class Waterfall():
"""Waterfall Tool Main Class"""
def __init__(self, fs, fc, f_chan, BW):
self.fs = fs
self.BW = BW
self.fc = fc
self.f_chan = f_chan
def calFFT(self, sig):
norm_fft = (1/self.fs)*fftshift(fft(sig))
abs_fft = np.abs(norm_fft)
return abs_fft
def calFFTPower(self, afft, fs):
transform = 10 * np.log10(afft/127)
return transform
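# Worked example of the dBFS scaling above (illustrative, assumed values): for 8-bit IQ
# samples a full-scale FFT magnitude of 127 maps to 10*log10(127/127) = 0 dBFS, while a
# magnitude of 12.7 maps to 10*log10(0.1) = -10 dBFS.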
def run(self,filename, save_flag=False):
"""Loads the data and performs fft in chunks.
Params:
filename: str
name of the iq file in .wav
save_flag: bool
Enable to save the FFT of the IQ data in .npy,
which will autoload next time.
"""
self.overlap = 0.5
offset = 44
T = 1/fs
iterate = 0
adc_offset = -127.5
window = self.fs
self.filename = filename
self.save_flag = save_flag
data = np.memmap(filename, offset=offset)
self.total_duration = T*(len(data)/2)
self.num_chunks = int(len(data)/(window*2))
file_name = 'Spec_'+self.filename.split('.wav')[0]+'.npy'
# self.filename = self.filename.split('/')[-1].split('.wav')[0]
#ErrorHandling:
if len(self.BW) > 1:
if not len(self.BW) == len(self.f_chan):
print('Error: Number of BW values must equal the number of f_chan values given')
sys.exit()
elif len(self.f_chan) > 1:
self.BW = self.BW * len(self.f_chan)
for j in range(len(self.f_chan)):
if np.abs(self.f_chan[j] - self.fc) > self.fs/2:
print('Error: Frequency offset is out of range')
sys.exit()
#Load file if flag set to true
skip = False
if (self.save_flag == True):
try:
print('Loading Data')
self.specx = np.load(file_name)
skip=True
except:
pass
if(skip == False):
data_slice = []
self.specx = np.zeros([self.num_chunks, window])
time_a = time.time()
for slice in range(0, int(len(data) // (window * 2)) * window * 2, window*2):
data_slice = adc_offset + (data[slice: slice + window * 2: 2]) + 1j * (adc_offset + data[slice+1: slice + window * 2: 2])
fft_iq = self.calFFT(data_slice)
transform = self.calFFTPower(fft_iq, self.fs)
self.specx[self.num_chunks-iterate-1] = transform
iterate +=1
progress(iterate, self.num_chunks)
del data_slice,transform,fft_iq
del data
self.specx = np.flip(self.specx, 0)
time_b = time.time()
if self.save_flag==True:
print('\nSaving data to disk')
np.save(file_name, self.specx)
print('iq_spec saved.', end=' ')
print('Time:',round(time_b - time_a, 2))
if self.num_chunks>100:
self.jump = 5
else:
self.jump = 2
self.time_bins = np.linspace(0, self.total_duration, self.specx.shape[0])
self.leftlim = (0, self.fs)
self.rightlim = [0, self.total_duration]
def plot_default(self):
"""Plots the full waterfall only.
Params:
show_signal: bool
Enable to show the signal centre track in the plot
default: False
"""
plt.figure(figsize=(12,8))
plt.imshow(self.specx[::2], extent=self.leftlim + self.rightlim, origin='lower', aspect='auto')
plt.xlabel('Frequency Bins \n'+'\nFile:'+ self.filename + '\nRuntime:' + str(self.t_fs))
plt.ylabel('Time (s)')
plt.title('Waterfall')
plt.colorbar()
plt.savefig('waterfal_just_plot.png', dpi=400, transparent=False)
plt.show()
def plot(self):
"""Plots the full waterfall and the signal track.
Params:
show_signal: bool
Enable to show the signal centre track in the plot
default: False
"""
timebin = np.linspace(0, self.total_duration, self.specx.shape[0])
freq_vector = [self.fc - (self.fs/2), (self.fs/2) + self.fc]
plt.figure(figsize=(12,8))
plt.imshow(self.specx[::self.jump], extent=freq_vector + self.rightlim, origin='lower', aspect='auto')
plt.plot(self.track_center[0], self.time_bins[0], color = 'k')
plt.plot(self.raw_center[:, 0], timebin, color = 'white', marker='.', alpha=0.5)
plt.xlabel('Frequency (Hz) \n'+'\nFile:'+ self.filename + '\nRuntime:' + str(self.t_fs))
plt.ylabel('Time (s)')
plt.title('Waterfall')
plt.xlim([self.f_chan[0] - self.BW[0]/2, self.f_chan[0] + self.BW[0]/2])
plt.colorbar()
plt.savefig('waterfal_plot.png', dpi=400, transparent=False)
plt.show()
def multi_plot(self):
"""Plots multiple channels in one figure
Params:
show_signal: bool
Enable to show the signal centre track in the plot
default: False
"""
n_plots = len(self.track_center)
freq_vector = [self.fc - (self.fs/2), (self.fs/2) + self.fc]
fig, ax = plt.subplots(nrows=1,ncols=n_plots)
fig.suptitle('Waterfall Multi Channel View\n'+'File:'+self.filename, fontsize=10)
fig.set_size_inches(15,8)
fig.tight_layout(pad=3)
self.jump = 5
for n in range(0, n_plots):
ax[n].imshow(self.specx[::self.jump], extent=freq_vector + self.rightlim, origin='lower', aspect='auto')
ax[n].plot(self.track_center[n], self.time_bins[n], color = 'k', marker=".")
ax[n].set_xlabel('Frequency (Hz) \n F_chan: {}Hz F_c: {}Hz'.format(self.f_chan[n], self.fc))
ax[n].set_ylabel('Time (s)')
chan_start = self.f_chan[n] - self.BW[n]/2
chan_end = self.f_chan[n] + self.BW[n]/2
ax[n].set_xlim([chan_start, chan_end])
ax[n].set_title('Channel: {} BW: {}'.format(n, self.BW[n]))
# fig.colorbar(im)
plt.savefig('waterfall_multi_plot.png', dpi=200, transparent=False)
plt.show()
def find_signal(self, draw=False):
"""Finds the signal by taking decision of neighbouring frequency bins when above a calculated threshold.
Plots the spectra and fits the final track.
Params:
draw: bool
Enable to view: Four Frames of Spectra in one figure.
"""
self.draw = draw
self.fc_track = []
self.fc_middle = []
self.sig_present = False
self.track_center = []
pc = 0
time_a = time.time()
#mean
sum_fft = np.zeros(int(self.fs))
for i in range(self.specx.shape[0]):
sum_fft += self.specx[i]
fft = sum_fft/self.num_chunks
channel_start, channel_end = find_channel(self.fs, self.fc, self.f_chan, self.BW)
frame = np.linspace(0, self.num_chunks-1, 4).astype(int)
sig_center = np.zeros([self.specx.shape[0], len(self.f_chan)])
sig_freqs = np.zeros([self.specx.shape[0], len(self.f_chan)])
# print(self.BW, self.f_chan, channel_start, channel_end)
for step in range(self.specx.shape[0]):
# progress(step, self.num_chunks)
#Spectral Average
fft_vals = self.specx[step]
fft_vals = self.specx[step] - fft
#Threshold
mean = np.mean(fft_vals[fft_vals > 0])
sd = np.std(fft_vals[fft_vals > 0])
safety = 0.5
threshold = mean + sd + safety
#Decision Type 2:
c = 0
fft_threshold_idx = []
full_spectrum = False
for f_c in self.f_chan:
if full_spectrum == False:
for i in range(int(channel_start[c]), int(channel_end[c]), 1):
if(fft_vals[i] > threshold and fft_vals[i-1] > threshold and fft_vals[i-2] > threshold):
fft_threshold_idx.append(i)
else:
for i in range(self.specx.shape[0]):
if(fft_vals[i] > threshold and fft_vals[i-1] > threshold and fft_vals[i-2] > threshold):
fft_threshold_idx.append(i)
centroid = find_center(fft_vals[fft_threshold_idx], fft_threshold_idx)
if len(fft_threshold_idx) > 200:
sig_center[step, c] = centroid
sig_freqs[step, c] = self.f_chan[c] + centroid - (channel_start[c] + self.BW[c]/2)
else:
sig_center[step, c] = np.nan
sig_freqs[step, c] = np.nan
print(step, c, centroid, len(fft_threshold_idx))
c +=1
if self.draw==True:
"""Plots a multiple frames of channel spectra and one full spectra with channel markings"""
if step == 8:
if step == 8:
fig, ax = plt.subplots(2, 1)
fig.tight_layout()
fig.set_size_inches(15,10)
ax[0].plot(fft_vals)
ax[0].axvline(sig_center[step, 0], color='r', label="centroid")
ax[0].plot(fft_threshold_idx, fft_vals[fft_threshold_idx], color='k', label="fft_threshold")
ax[0].set_title('Chan Spectra ts=' + str(step))
ax[0].set_xlabel('Frequency Bins M:{} SD:{} T:{}'.format(round(mean), round(sd), round(threshold)))
ax[0].set_ylabel('Magnitude (dBFS)')
ax[0].axhline(mean, color='k', label="mean(+)")
ax[0].axhline(sd, color='orange', label="sd(+)")
ax[0].axhline(threshold, color='yellow', label="threhshold")
ax[0].set_xlim([channel_start[0], channel_end[0]])
ax[0].grid()
ax[0].legend(loc="best")
ax[1].plot(fft_vals)
ax[1].plot(fft_threshold_idx, fft_vals[fft_threshold_idx], color='k', label="fft_threshold")
ax[1].set_title('Chan Spectra ts=' + str(step))
ax[1].set_xlabel('Frequency Bins M:{} SD:{} T:{}'.format(round(mean), round(sd), round(threshold)))
ax[1].set_ylabel('Magnitude (dBFS)')
ax[1].axhline(mean, color='k', label="mean(+)")
ax[1].axhline(sd, color='orange', label="sd(+)")
ax[1].axhline(threshold, color='yellow', label="threhshold")
ax[1].axvspan(channel_start[0], channel_end[0], facecolor='green', alpha=0.4, label="channel")
ax[1].set_xlim([0, self.fs])
ax[1].grid()
ax[1].legend(loc="best")
plt.savefig('spectra-plt.png', dpi=200, transparent=False)
plt.show()
if step == frame[-1]:
for i in range(len(self.f_chan)):
ax[pc].axvline(sig_center[step, i], color='r', label="centroid")
ax[pc].axvspan(channel_start[i], channel_end[i], facecolor='green', alpha=0.4, label="chan "+str(i))
ax[pc].set_title('Full Spectra ts=' + str(step))
ax[pc].set_xlim([0, self.fs])
plt.savefig('spectra-plt.png', dpi=200, transparent=False)
plt.show()
pc+=1
time_b = time.time()
self.t_fs = round(time_b - time_a, 2)
print('\nTime(find_signal):', self.t_fs)
#Polyfit
time_bin = []
new_freqs = []
self.raw_center = []
win = int(self.num_chunks*0.3)
win = win if win%2>0 else win+1
for i in range(0, sig_freqs.shape[1]):
freqs = sig_freqs[:,i]
valid = ~np.isnan(freqs)
freqs = freqs[valid]
self.raw_center.append(freqs.tolist())
time_bin.append(self.time_bins[valid].tolist())
if len(time_bin[i]) > 0:
# freqs = signal.medfilt(freqs, win)
p = np.poly1d(np.polyfit(time_bin[i], freqs, 10))
result = p(time_bin[i])
new_freqs.append(result.tolist())
self.sig_present = True
else:
new_freqs = []
time_bin = []
print('No signal found')
#dump to json
data = {'filename': self.filename,
'sampling rate': self.fs,
'centre frequency':self.fc,
'channel frequency': self.f_chan,
'bandwidth': self.BW,
"frequency": new_freqs,
"raw-frequency": self.raw_center,
'time': time_bin}
with open("data.json", "w") as outfile:
json.dump(data, outfile, indent=2)
# self.plot_default()
self.track_center = new_freqs
self.time_bins = np.array(time_bin)
"""
Module pphiloc plots color histogram of the probability of local density as
function of the local density and either the persistence time at fixed
self-propelling velocity or the self-propelling velocity at fixed persistence
time.
Simulation directories must follow the active_particles.naming.AHB2D naming
standard and input files in simulation directories must follow the
active_particles.naming.VarN naming standard.
Environment modes
-----------------
VARIABLE : string
Plot x-coordinate variable.
______________________________________________________
| Mode | Variable | x-coordinate |
|_________|_____________________________|______________|
| 'dr' | Rotation diffusion constant | \\tau = 1/dr |
|_________|_____________________________|______________|
| 'vzero' | self-propelling velocity | vzero |
|_________|_____________________________|______________|
DEFAULT: dr
PECLET : bool
Plot as function of the Péclet number Pe = vzero/dr.
DEFAULT: True
PHILOCMAX : bool
Plot most probable packing fraction instead of global system packing
fraction.
DEFAULT: False
TITLE : bool
Display title on figure.
DEFAULT: True
Environment parameters
----------------------
DATA_DIRECTORY : string
Data directory.
DEFAULT: active_particles.naming.sim_directory
EXCLUDE : string
Simulation directories in DATA_DIRECTORY to exclude from the plot.
DEFAULT:
PARAMETERS_FILE : string
Simulation parameters file name.
DEFAULT: active_particles.naming.parameters_file
DENSITY : float
Packing fraction of particles.
DEFAULT: active_particles.plot.pphiloc._density
N : int
Number of particles.
DEFAULT: active_particles.plot.pphiloc._N
VZERO ['dr' mode] : float
Self-propulsion velocity.
DEFAULT: active_particles.plot.pphiloc._vzero
DR_MIN ['dr' mode] : float
Minimum rotation diffusion constant.
DEFAULT: active_particles.plot.pphiloc._dr_min
DR_MAX ['dr' mode] : float
Maximum rotation diffusion constant.
DEFAULT: active_particles.plot.pphiloc._dr_max
DR ['vzero' mode] : float
Rotation diffusion constant.
DEFAULT: active_particles.plot.pphiloc._dr
VZERO_MIN ['vzero' mode] : float
Minimum self-propulsion velocity.
DEFAULT: active_particles.plot.pphiloc._vzero_min
VZERO_MAX ['vzero' mode] : float
Maximum self-propulsion velocity.
DEFAULT: active_particles.plot.pphiloc._vzero_max
INITIAL_FRAME : int
Frame to consider as initial.
DEFAULT: active_particles.plot.pphiloc._init_frame
INTERVAL_MAXIMUM : int
Maximum number of frames on which densities are calculated.
DEFAULT: active_particles.analysis.varn._int_max
BOX_SIZE : float
Length of the square boxes in which particles are counted.
DEFAULT: active_particles.analysis.varn._box_size
N_CASES : int
Number of boxes in each direction in which local density is computed.
DEFAULT: active_particles.plot.pphiloc._Ncases
N_BINS : int
Number of bins for the histogram.
DEFAULT: active_particles.analysis.varn._Nbins
PHIMAX : float
Maximum local density for histogram.
DEFAULT: active_particles.plot.pphiloc._phimax
PPHILOC_MIN : float
Minimum local density probability.
DEFAULT: active_particles.plot.pphiloc._pphilocmin
PPHILOC_MAX : float
Maximum local density probability.
DEFAULT: active_particles.plot.pphiloc._pphilocmax
CONTOURS : int
Number of contour lines.
DEFAULT: active_particles.plot.pphiloc._contours
FONT_SIZE : int
Font size for the plot.
DEFAULT: active_particles.plot.pphiloc._font_size
COLORMAP : string
Plot colormap.
DEFAULT: active_particles.plot.pphiloc._colormap
PAD : float
Separation between label and colormap.
DEFAULT: active_particles.plot.pphiloc._colormap_label_pad
"""
import active_particles.naming as naming
from active_particles.init import get_env, dir_list
from os import environ as envvar
if __name__ == '__main__': envvar['SHOW'] = 'True'
from os.path import join as joinpath
from active_particles.analysis.varn import _int_max, _box_size, _Nbins,\
_phimax, histogram as get_histogram
import numpy as np
np.seterr(divide='ignore')
import pickle
from collections import OrderedDict
import matplotlib.colors as colors
import matplotlib.cm as cmx
import matplotlib as mp
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
# DEFAULT VARIABLES
_dr = 3e-4 # default rotation diffusion constant
_dr_min = 1e-5 # default minimum diffusion rotation constant
_dr_max = 1e-2 # default maximum diffusion rotation constant
_vzero = 1e-2 # default self-propulsion velocity
_vzero_min = 1e-2 # default minimum self-propulsion velocity
_vzero_max = 1e-1 # default maximum self-propulsion velocity
_density = 0.8 # default packing fraction of particles
_N = int(1e5) # default number of particles
_init_frame = 0 # default frame to consider as initial
_Ncases = 500 # default number of boxes in each direction to compute the local density
_pphilocmin = 1e-4 # default minimum local density probability
_pphilocmax = 1e-1 # default maximum local density probability
_contours = 20 # default contour level value
_font_size = 15 # default font size for the plot
_colormap = 'inferno' # default plot colormap
_colormap_label_pad = 20 # separation between label and colormap
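# Illustrative usage (assumed invocation; the actual entry point may differ): the
# defaults above are overridden through environment variables before launching the
# script, e.g.
#     VARIABLE=dr PECLET=True VZERO=1e-2 DR_MIN=1e-5 DR_MAX=1e-2 python pphiloc.py
# Unset variables fall back to the module defaults defined above.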
# FUNCTIONS AND CLASSES
class Philoc:
"""
Search and read local densities file, compute local densities histogram.
"""
def __init__(self, data_dir, dir_standard, dir_attributes, parameters_file,
var, var_min, var_max, excluded_dir=''):
"""
Create list of directories to consider and compute plot variable values
associated to them.
Parameters
----------
data_dir : string
Data directory.
dir_standard : active_particles.naming._File standard
Simulation directory naming object.
dir_attributes : hash table
Attributes to be displayed in directory names.
parameters_file : string
Simulations parameters file name.
var : string
Plot variable name.
var_min : float
Minimum plot variable value.
var_max : float
Maximum plot variable value.
excluded_dir : string
Names of directories to be ignored. (default: '')
"""
self.data_dir = data_dir
self.dir_standard = dir_standard
self.dir_attributes = dir_attributes
self.excluded_dir = excluded_dir
self.parameters_file = parameters_file
self.var = var
self.var_min = var_min
self.var_max = var_max
self.dirs, self.var_hash, self.var_list, _, _ = dir_list(
self.data_dir, self.dir_standard, self.dir_attributes,
self.var, self.var_min, self.var_max,
self.parameters_file, excluded_dir=self.excluded_dir,
include_out=False)
def calculate(self, varN_standard, varN_attributes, Nbins, phimax):
"""
Calculate local densities histogram.
Parameters
----------
varN_standard : active_particles.naming._File standard
Local densities files naming object.
varN_attributes : hash table
Attributes to be displayed in local densities file names.
Nbins : int
Number of bins for the histogram.
phimax : float
Maximum local density for histogram.
"""
self.varN_standard = varN_standard
self.varN_attributes = varN_attributes
self.Nbins = Nbins
self.phimax = phimax
self.time_step = {} # hash table of directories' simulation time step
self.histogram3D = [] # local densities histogram
self.philocmax = {} # hash table of most probable local density with directory name as keys
for dir in sorted(self.dirs):
try:
varN_filename, = self.varN_standard.get_files(
directory=joinpath(self.data_dir, dir),
**self.varN_attributes)
except ValueError: continue
with open(
joinpath(self.data_dir, dir, self.parameters_file), 'rb')\
as param_file:
self.time_step[dir] = pickle.load(param_file)['time_step'] # simulation parameters hash table
with open(joinpath(self.data_dir, dir, varN_filename),
'rb') as varN_file:
var_value = np.full(self.Nbins,
fill_value=self.var_hash[dir]) # plot variable value
densities = pickle.load(varN_file) # list of local densities
bins, histogram = get_histogram(densities,
self.Nbins, self.phimax) # histogram of local densities with corresponding bins
histogram = np.log10(histogram)
histogram3D_dir = np.transpose(
[var_value, bins, histogram]).tolist()
self.histogram3D += histogram3D_dir
_, self.philocmax[dir], _ = max(histogram3D_dir,
key=lambda el: el[2])
self.histogram3D = np.transpose(self.histogram3D)
import csv
from logging import Logger
import os
import sys
from typing import List
import numpy as np
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from torch.optim import Adam, SGD
import wandb
from .evaluate import evaluate, evaluate_predictions
from .predict import predict
from .train import train
from chemprop.args import TrainArgs
from chemprop.data import StandardScaler, MoleculeDataLoader
from chemprop.data.utils import get_class_sizes, get_data, get_task_names, split_data
from chemprop.models import MoleculeModel
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint, save_smiles_splits
from chemprop.bayes_utils import neg_log_like, scheduler_const
from .bayes_tr.swag_tr import train_swag
from .bayes_tr.sgld_tr import train_sgld
from .bayes_tr.gp_tr import train_gp
from .bayes_tr.bbp_tr import train_bbp
from .bayes_tr.dun_tr import train_dun
from chemprop.bayes import predict_std_gp, predict_MCdepth
def run_training(args: TrainArgs, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
debug, info = (logger.debug, logger.info) if logger is not None else (print, print)
# Print command line and args
debug('Command line')
debug(f'python {" ".join(sys.argv)}')
debug('Args')
debug(args)
# Save args
args.save(os.path.join(args.save_dir, 'args.json'))
# Get data
debug('Loading data')
args.task_names = args.target_columns or get_task_names(args.data_path)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.features_scaling:
features_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(features_scaler)
test_data.normalize_features(features_scaler)
else:
features_scaler = None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = neg_log_like
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Automatically determine whether to cache
if len(data) <= args.cache_cutoff:
cache = True
num_workers = 0
else:
cache = False
num_workers = args.num_workers
# Create data loaders
train_data_loader = MoleculeDataLoader(
dataset=train_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache,
class_balance=args.class_balance,
shuffle=True,
seed=args.seed
)
val_data_loader = MoleculeDataLoader(
dataset=val_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
test_data_loader = MoleculeDataLoader(
dataset=test_data,
batch_size=args.batch_size,
num_workers=num_workers,
cache=cache
)
###########################################
########## Outer loop over ensemble members
###########################################
for model_idx in range(args.ensemble_start_idx, args.ensemble_start_idx + args.ensemble_size):
# Set pytorch seed for random initial weights
torch.manual_seed(args.pytorch_seeds[model_idx])
######## set up all logging ########
# make save_dir
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
# make results_dir
results_dir = os.path.join(args.results_dir, f'model_{model_idx}')
makedirs(results_dir)
# initialise wandb
os.environ['WANDB_MODE'] = 'dryrun'
wandb.init(
name=args.wandb_name+'_'+str(model_idx),
project=args.wandb_proj,
reinit=True)
print('WANDB directory is:')
print(wandb.run.dir)
####################################
# Load/build model
if args.checkpoint_path is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_path}')
model = load_checkpoint(args.checkpoint_path + f'/model_{model_idx}/model.pt', device=args.device, logger=logger)
else:
debug(f'Building model {model_idx}')
model = MoleculeModel(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.to(args.device)
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
# Optimizer
optimizer = Adam([
{'params': model.encoder.parameters()},
{'params': model.ffn.parameters()},
{'params': model.log_noise, 'weight_decay': 0}
], lr=args.init_lr, weight_decay=args.weight_decay)
# Learning rate scheduler
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in range(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data_loader=train_data_loader,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger
)
val_scores = evaluate(
model=model,
data_loader=val_data_loader,
args=args,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
wandb.log({"Validation MAE": avg_val_score})
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, features_scaler, args)
if epoch == args.noam_epochs - 1:
optimizer = Adam([
{'params': model.encoder.parameters()},
{'params': model.ffn.parameters()},
{'params': model.log_noise, 'weight_decay': 0}
], lr=args.final_lr, weight_decay=args.weight_decay)
scheduler = scheduler_const([args.final_lr])
# load model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), device=args.device, logger=logger)
# SWAG training loop, returns swag_model
if args.swag:
model = train_swag(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# SGLD loop, which saves nets
if args.sgld:
model = train_sgld(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# GP loop
if args.gp:
model, likelihood = train_gp(
model,
train_data,
val_data,
num_workers,
cache,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# BBP
if args.bbp:
model = train_bbp(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
# DUN
if args.dun:
model = train_dun(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir)
##################################
########## Inner loop over samples
##################################
for sample_idx in range(args.samples):
# draw model from SWAG posterior
if args.swag:
model.sample(scale=1.0, cov=args.cov_mat, block=args.block)
# draw model from collected SGLD models
if args.sgld:
model = load_checkpoint(os.path.join(save_dir, f'model_{sample_idx}.pt'), device=args.device, logger=logger)
# make predictions
test_preds = predict(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
test_data=True,
bbp_sample=True)
#######################################################################
#######################################################################
##### SAVING STUFF DOWN
if args.gp:
# get test_preds_std (scaled back to original data)
test_preds_std = predict_std_gp(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
likelihood = likelihood)
# 1 - MEANS
np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))
# 2 - STD, combined aleatoric and epistemic (we save down the stds, always)
np.savez(os.path.join(results_dir, f'predsSTDEV_{sample_idx}'), np.array(test_preds_std))
else:
# save test_preds and aleatoric uncertainties
if args.dun:
log_cat = model.log_cat.detach().cpu().numpy()
cat = np.exp(log_cat) / np.sum(np.exp(log_cat))
np.savez(os.path.join(results_dir, f'cat_{sample_idx}'), cat)
# samples from categorical dist and saves a depth MC sample
depth_sample = np.random.multinomial(1, cat).nonzero()[0][0]
test_preds_MCdepth = predict_MCdepth(
model=model,
data_loader=test_data_loader,
args=args,
scaler=scaler,
d=depth_sample
)
np.savez(os.path.join(results_dir, f'predsMCDEPTH_{sample_idx}'), np.array(test_preds_MCdepth))
if args.swag:
log_noise = model.base.log_noise
else:
log_noise = model.log_noise
noise = np.exp(log_noise.detach().cpu().numpy()) * np.array(scaler.stds)
np.savez(os.path.join(results_dir, f'preds_{sample_idx}'), np.array(test_preds))
np.savez(os.path.join(results_dir, f'noise_{sample_idx}'), noise)
#######################################################################
#######################################################################
# add predictions to sum_test_preds
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# evaluate predictions using metric function
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# compute average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx}, sample {sample_idx} test {args.metric} = {avg_test_score:.6f}')
#################################
########## Bayesian Model Average
#################################
# note: this is an average over Bayesian samples AND components in an ensemble
# compute number of prediction iterations
pred_iterations = args.ensemble_size * args.samples
# average predictions across iterations
avg_test_preds = (sum_test_preds / pred_iterations).tolist()
# evaluate
BMA_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# average scores across tasks
avg_BMA_test_score = np.nanmean(BMA_scores)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ~~aliGater~~
# (semi)automated gating software
#
# /^^\
# /^^\_______/0 \_
# ( `~+++,,_________,,++~^^^^^^^
#..V^V^V^V^V^V^\.................................
#
#
# Parsing .fcs files with fcsparser from Eugene Yurtsevs FlowCytometryTools (very slightly modified)
# Check out his toolkit for flow cytometry analysis:
# http://eyurtsev.github.io/FlowCytometryTools/
#
# <NAME> & <NAME> 2016~
# https://www.med.lu.se/labmed/hematologi_och_transfusionsmedicin/forskning/bjoern_nilsson
# Distributed under the MIT License
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.patches import Ellipse, Arrow
from matplotlib.ticker import Locator, Formatter
from matplotlib import transforms as mtransforms
from matplotlib import rcParams
import math
import six
from scipy.ndimage.filters import gaussian_filter1d
#For computing bin width similarly to scipys histogram_bin_edges
from scipy.stats import iqr
from sklearn.decomposition import PCA
import sys
#AliGater imports
import aligater.AGConfig as agconf
from aligater.AGFileSystem import getGatedVector, AliGaterError
from aligater.AGCythonUtils import __vectorlogicleTransform, __vectorInverselogicleTransform, __vectorBilogTransform, __vectorInverseBilogTransform
sentinel = object()
def plotHeatmap(fcsDF, x, y, vI=sentinel, bins=300, scale='linear', xscale='linear', yscale='linear', thresh=1000, aspect='auto', **kwargs):
"""
Core plotting function of AliGater. Mainly intended to be called internally, but may be called directly.
Only plots. No gating functionalities.
**Parameters**
fcsDF : pandas.DataFrame
Flow data loaded in a pandas DataFrame.
x, y : str
Marker labels.
vI : list-like, optional
list-like index of events in the fcsDF that correspond to the parent population.
Defaults to plotting all events in fcsDF.
bins : int, optional, default: 300
Resolution of the plotted heatmap.
scale : str, optional, default: 'linear'
Which scale to be used on both axes.
xscale : str, optional, default: 'linear'
Which scale to be used on the x-axis.
yscale : str, optional, default: 'linear'
Which scale to be used on the y-axis.
thresh : int, optional, default: 1000
Threshold for the linear-to-log transition of the bilog and logicle scales.
aspect : str
Aspect of plotted heatmap. Passed on to matplotlib.pyplot.imshow()
**Keyword arguments**
cmap : matplotlib.colors.Colormap or str, default: 'jet'
Color map to use.
Either string name of existing matplotlib colormap, or a colormap object.
rcParams : matplotlib.rcParams
Overrides rcParams with the passed rcParams object.
mask_where : float,int, default : 0
Heatmap bins with values at or below this scalar are masked and rendered white.
**Returns**
fig, matplotlib.pyplot.Figure
matplotlib Figure object
ax. matplotlib.pyplot.Axes
matplotlib axes object
**Examples**
None currently.
"""
if vI is sentinel:
vI=fcsDF.index
elif len(vI)<2:
sys.stderr.write("Passed index contains no events\n")
return None, None
if not isinstance(bins,str) and len(vI)<bins:
bins=len(vI)
if scale.lower()=='logicle':
xscale='logicle'
yscale='logicle'
if scale.lower()=='bilog':
xscale='bilog'
yscale='bilog'
#Default x and y lims
bYlim=False
bXlim=False
if 'xlim' in kwargs:
if not isinstance(kwargs['xlim'],list):
raise TypeError("if xlim is passed, it must be a list of float/int")
elif not all(isinstance(i,(float,int)) for i in kwargs['xlim']):
raise TypeError("Non float/int element encountered in xlim")
else:
xscale_limits=kwargs['xlim']
if xscale.lower()=='logicle':
xscale_limits=logicleTransform(xscale_limits,thresh)
bXlim=True
if 'ylim' in kwargs:
if not isinstance(kwargs['ylim'],list):
raise TypeError("if ylim is passed, it must be a list of float/int")
elif not all(isinstance(i,(float,int)) for i in kwargs['ylim']):
raise TypeError("Non float/int element encountered in ylim")
else:
yscale_limits=kwargs['ylim']
if yscale.lower()=='logicle':
yscale_limits=logicleTransform(yscale_limits,thresh)
bYlim=True
if 'cmap' in kwargs:
cmap = kwargs['cmap']
if not isinstance(cmap, str):
collect_default=False
else:
collect_default=True
else:
collect_default=True
cmap='jet'
if 'rcParams' in kwargs:
if not isinstance(kwargs['rcParams'],dict):
raise TypeError("if rcParams is passed, it must be a dict")
else:
rcParams=kwargs['rcParams']
custom_rcParams=True
else:
custom_rcParams=False
if 'mask_where' in kwargs:
mask_value = kwargs['mask_where']
assert isinstance(mask_value,(float,int))
else:
mask_value=0
vX=getGatedVector(fcsDF, x, vI, return_type="nparray")
vY=getGatedVector(fcsDF, y, vI, return_type="nparray")
plt.clf()
if custom_rcParams:
plt.rcParams=rcParams
else:
plt.rcParams['figure.figsize']=10,10
plt.rcParams['image.cmap']=cmap
#extra
plt.rcParams['font.size'] = 22
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
#plt.rcParams['label.size': 22]
heatmap, xedges, yedges = getHeatmap(vX, vY, bins, scale, xscale, yscale, thresh)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
heatmap=np.ma.masked_where(heatmap <= mask_value, heatmap)
plt.clf()
fig, ax = plt.subplots()
plt.imshow(heatmap.T, extent=extent, origin='lower',aspect=aspect, cmap=cmap)
#CLOSES ALL OPEN FIGURES ON CALL - PERHAPS BAD ?
plt.close('all')
fig = plt.figure()
ax = plt.gca()
#matplotlib 3.2.x changed behaviour of interpolation
#see https://github.com/matplotlib/matplotlib/issues/17722
#and https://matplotlib.org/3.2.1/api/api_changes.html#default-image-interpolation
plt.imshow(heatmap.T, extent=extent, origin='lower',aspect=aspect, interpolation='none')
plt.xlabel(x)
plt.ylabel(y)
if collect_default:
cmap=plt.get_cmap()
cmap.set_bad(color='white') #Zeroes should be white, not blue
if xscale.lower()=='logicle':
ax=plt.gca()
ax.xaxis.set_major_locator(logicleLocator(linCutOff=thresh))
ax.xaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
if yscale.lower()=='logicle':
ax=plt.gca()
ax.yaxis.set_major_locator(logicleLocator(linCutOff=thresh))
ax.yaxis.set_major_formatter(logicleFormatter(linCutOff=thresh))
if xscale.lower()=='bilog':
ax=plt.gca()
ax.xaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
ax.xaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
if yscale.lower()=='bilog':
ax=plt.gca()
ax.yaxis.set_major_locator(BiLogLocator(linCutOff=thresh))
ax.yaxis.set_major_formatter(BiLogFormatter(linCutOff=thresh))
if bXlim:
ax.set_xlim(left=xscale_limits[0], right=xscale_limits[1])
if bYlim:
ax.set_ylim(bottom=yscale_limits[0], top=yscale_limits[1])
return fig,ax
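# Illustrative sketch (not part of the original module): plotting a two-marker heatmap
# on a bilog scale for a gated subset. fcs_frame, the marker names and parent_index are
# assumptions standing in for real loaded flow data.
def _example_plotHeatmap(fcs_frame, parent_index):
    fig, ax = plotHeatmap(fcs_frame, 'CD3', 'CD19', vI=parent_index,
                          bins=300, scale='bilog', thresh=1000)
    return fig, ax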
def getHeatmap(vX, vY, bins='auto', scale='linear', xscale='linear', yscale='linear', T=1000, normalize=False, xlim=None, ylim=None, range=None):
if not any(isinstance(i,str) for i in [scale,xscale,yscale]):
raise TypeError("scale, xscale, yscale must be specified as string, such as: 'linear', 'logicle'")
if not all(i.lower() in ['linear', 'logicle', 'bilog'] for i in [scale,xscale,yscale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle'")
if not isinstance(bins,(int,str)):
raise TypeError("bins can only be either of int or str")
if range is not None:
if isinstance(range,list):
if len(range)==2:
if not all(isinstance(i,(list)) for i in range):
AliGaterError("in getHeatmap, invalid dtype encountered in range, expected two list-likes")
else:
if not all(isinstance(i,(float,int)) for i in range[0]) or not all(isinstance(i,(float,int)) for i in range[1]):
AliGaterError("in getHeatmap,invalid dtype encountered in range")
else:
defaultRange=range
xRange=range[0]
yRange=range[1]
else:
AliGaterError("in getHeatmap, range must be list, found "+str(type(range)))
else:
AliGaterError("in getHeatmap, custom range passed but is not list, found type: "+str(type(range)))
else:
defaultRange=None
xRange=None
yRange=None
if not len(vX) == len(vY):
raise AliGaterError("in getHeatmap: ","Coordinate vectors are of unequal length")
if len(vX)==0:
raise AliGaterError("in getHeatmap: ","Coordinate vectors are empty")
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in getHeatmap: ", "Couldn't coerce x-value vectors into numpy array format")
if not isinstance(vY,np.ndarray):
try:
vY=np.asarray(vY)
except:
raise AliGaterError("in getHeatmap: ", "Couldn't coerce x-value vectors into numpy array format")
index_mask=[]
for i in np.arange(len(vX)-1,-1,-1):
if xlim is not None:
if vX[i] < xlim[0] or vX[i] > xlim[1]:
index_mask.append(i)
continue
if vY[i] < ylim[0] or vY[i] > ylim[1]:
index_mask.append(i)
if len(index_mask) > 0:
vX = np.delete(vX, index_mask)
vY = np.delete(vY, index_mask)
assert len(vX) == len(vY)
if isinstance(bins, str):
xbin_edges=np.histogram_bin_edges(vX,bins=bins)
ybin_edges=np.histogram_bin_edges(vY,bins=bins)
else:
xbin_edges=bins
ybin_edges=bins
if scale.lower()=='linear' and xscale.lower()=='linear' and yscale.lower() == 'linear':
return np.histogram2d(vX, vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
#if not linear probably just transform and calc edges after
#attempt at fix, still some redundancy...
t_xbin_edges = t_ybin_edges = None
if scale.lower()!='linear' or (xscale.lower()!='linear' and yscale.lower()!='linear'):
t_vX = transformWrapper(vX, scale=xscale, T=T)
t_xbin_edges=np.histogram_bin_edges(t_vX,bins=bins)
xbin_edges = inverseTransformWrapper(t_xbin_edges, scale=xscale, T=T)
t_vY = transformWrapper(vY, scale=yscale, T=T)
t_ybin_edges=np.histogram_bin_edges(t_vY,bins=bins)
ybin_edges = inverseTransformWrapper(t_ybin_edges, scale=yscale, T=T)
return np.histogram2d(vX,vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
if xscale.lower()!='linear':
t_vX = transformWrapper(vX, scale=xscale, T=T)
t_xbin_edges=np.histogram_bin_edges(t_vX,bins=bins)
xbin_edges = inverseTransformWrapper(t_xbin_edges, scale=xscale, T=T)
ybin_edges = np.histogram_bin_edges(vY, bins=bins)
if yscale.lower()!='linear':
t_vY = transformWrapper(vY, scale=yscale, T=T)
t_ybin_edges=np.histogram_bin_edges(t_vY,bins=bins)
ybin_edges = inverseTransformWrapper(t_ybin_edges, scale=yscale, T=T)
xbin_edges = np.histogram_bin_edges(vX, bins=bins)
return np.histogram2d(vX,vY, [xbin_edges, ybin_edges], normed=normalize, range=defaultRange)
#-------------------------DEPRECATED below---------------------------
if scale=='logicle' or (xscale == 'logicle' and yscale == 'logicle'):
xBinEdges=logicleBin(vX,bins,T, xRange)
yBinEdges=logicleBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [xBinEdges,yBinEdges], normed=normalize)
if xscale=='logicle':
xBinEdges=logicleBin(vX,bins,T, xRange)
return np.histogram2d(vX, vY, [xBinEdges,bins], normed=normalize)
if yscale=='logicle':
yBinEdges=logicleBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [bins,yBinEdges], normed=normalize)
if scale=='bilog' or (xscale == 'bilog' and yscale == 'bilog'):
xBinEdges=bilogBin(vX,bins,T, xRange)
yBinEdges=bilogBin(vY,bins,T, yRange)
#print("xBinEdges: ")
#print(xBinEdges)
#print("\n\n")
#print("yBinEdges: ")
#print(yBinEdges)
return np.histogram2d(vX, vY, [xBinEdges,yBinEdges], normed=normalize)
if xscale=='bilog':
xBinEdges=bilogBin(vX,bins,T, xRange)
return np.histogram2d(vX, vY, [xBinEdges,bins], normed=normalize)
if yscale=='bilog':
yBinEdges=bilogBin(vY,bins,T, yRange)
return np.histogram2d(vX, vY, [bins,yBinEdges], normed=normalize)
def plot_flattened_heatmap(heatmap_array, nOfBins, mask=True):
reshaped_array = heatmap_array.reshape(nOfBins, nOfBins)
fig, ax = plt.subplots()
if mask:
heatmap=np.ma.masked_where(reshaped_array == 0, reshaped_array)
cmap=plt.get_cmap()
cmap.set_bad(color='white')
else:
heatmap=reshaped_array
plt.imshow(heatmap.T[::-1])
plt.show()
plt.clf()
return None
def transformWrapper(vX, T, scale):
"""
General function for converting values or arrays of values to AliGater scales; bilog and logicle.
See inverseTransformWrapper to convert the other way around.
**Parameters**
vX, list-like or float/int
value or values to convert.
T, int/float
Threshold for linear-log transition for bilog and logicle scales
scale, str
Scale to convert to; 'bilog' or 'logicle'
**Returns**
If a scalar is passed, scalar
If list like is passed, list
**Examples**
None currently.
"""
result=None
single_val=False
#ToDo raise if more than 1 dim?
if not isinstance(vX, (list, np.ndarray, tuple)):
if isinstance(vX, (float, int)):
vInput=np.asarray(vX).reshape(1,)
single_val=True
else:
raise AliGaterError("in transformWrapper","invalid dType of passed vX, must be either a single float/int value or list/np.ndarray/tuple of float/int values")
else:
vInput=vX
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in transformWrapper: ", "Couldn't coerce input vector to numpy array format")
if scale.lower() == 'logicle':
result = logicleTransform(vInput, T)
elif scale.lower() == 'bilog':
result=bilogTransform(vInput, T)
elif scale.lower() == 'linear':
result=vX
    if result is None:
        raise AliGaterError("in transformWrapper: ", "unsupported scale '"+str(scale)+"', expected 'linear', 'logicle' or 'bilog'")
if single_val:
result=result[0]
return result
def inverseTransformWrapper(vX, T, scale):
"""
General function for converting values or arrays of values from AliGater scales; bilog and logicle back to linear values.
See transformWrapper to convert into AliGater scales.
**Parameters**
vX, list-like or float/int
value or values to convert.
T, int/float
Threshold for linear-log transition for bilog and logicle scales
scale, str
Scale to convert from; 'bilog' or 'logicle'
**Returns**
If a scalar is passed, scalar
If list like is passed, list
**Examples**
None currently.
"""
result=None
single_val=False
if not isinstance(vX, (list, np.ndarray, tuple)):
if isinstance(vX, (float, int)):
vInput=np.asarray(vX).reshape(1,)
single_val=True
else:
raise AliGaterError("in inverseTransformWrapper","invalid dType of passed vX, must be either a single float/int value or list/np.ndarray/tuple of float/int values")
else:
vInput=vX
if not isinstance(vX,np.ndarray):
try:
vX=np.asarray(vX)
except:
raise AliGaterError("in inverseTransformWrapper: ", "Couldn't coerce input vector to numpy array format")
if scale.lower() == 'logicle':
result = inverselogicleTransform(vInput, T)
elif scale.lower() == 'bilog':
result=inverseBilogTransform(vInput, T)
elif scale.lower() == 'linear':
result=vX
    if result is None:
        raise AliGaterError("in inverseTransformWrapper: ", "unsupported scale '"+str(scale)+"', expected 'linear', 'logicle' or 'bilog'")
if single_val:
result=result[0]
return result
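# Minimal round-trip sketch for the two wrappers above (illustrative only: the
# values and the threshold T=1000 are assumptions, not AliGater defaults).
def _transform_wrapper_roundtrip_example():
    T = 1000
    linear_values = [-5000.0, 0.0, 500.0, 50000.0]
    transformed = transformWrapper(linear_values, T=T, scale='bilog')
    recovered = inverseTransformWrapper(transformed, T=T, scale='bilog')
    # Transforming and inverse-transforming should reproduce the inputs
    # up to floating point error.
    assert np.allclose(recovered, linear_values)
    return transformed, recovered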
def bilogBin(vX, bins, T, customRange=None):
if customRange is not None:
defaultRange=customRange
else:
defaultRange=[min(vX),max(vX)]
transformedRange=bilogTransform(defaultRange,T)
transformedBinEdges=np.linspace(transformedRange[0],transformedRange[1],bins+1)
return inverseBilogTransform(transformedBinEdges, T)
def bilogTransform(a, T):
vA = np.asarray(a, dtype = np.float64, order='C')
tA=__vectorBilogTransform(vA, np.float64(T))
return tA
# old python implementation, moved to AGCythonUtils
# tA = np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= T:
# tA[a_idx] = np.log(10 * a[a_idx] / T)/np.log(10)
# elif a[a_idx] < T and a[a_idx] > -T:
# tA[a_idx] = (a[a_idx]/T + np.log(10) - 1) / np.log(10)
# else:
# tA[a_idx] = -np.log(10 * abs(a[a_idx]) / T) / np.log(10)+1.13141103619349642 #This shift ensures that the transformed coordinates are continous, important for bins and plotting
# a_idx+=1
# return tA
def inverseBilogTransform(a, T):
vA = np.asarray(a, dtype = np.float64, order='C')
invA = __vectorInverseBilogTransform(vA, np.float64(T))
return invA
# old python implementation, moved to AGCythonUtils
# invA=np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= 1.0: #transformed linCutOff, always 1.0 at T; np.log(10 * linCutOff / linCutOff)/np.log(10) -> np.log(10)/np.log(10) = 1
# invA[a_idx] = T*np.exp(np.log(10)*a[a_idx])/10
# elif a[a_idx] <= 0.13141103619349642: #This is (np.log(10)-2)/np.log(10) I.e. the linear scale value at X=-T
# tmpX=a[a_idx]-1.13141103619349642 #This shift ensures that the transformed coordinates are continous, important for bins and plotting
# invA[a_idx] = -T*np.exp(np.log(10)*-tmpX)/10
# else:
# invA[a_idx] = T * (np.log(10)*a[a_idx] - np.log(10) + 1)
# a_idx+=1
# return invA
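# Worked reference values for the bilog transform above (assuming T=1000, purely
# illustrative): at a = T the log branch gives log10(10*T/T) = 1.0; at a = 0 the
# middle branch gives (ln(10) - 1)/ln(10) ~= 0.566; and at a = -T the middle branch
# and the shifted negative-log branch both give (ln(10) - 2)/ln(10) ~= 0.131, which
# is exactly what the 1.13141103619349642 shift is there to guarantee.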
def logicleBin(vX, bins, T, customRange=None):
if customRange is not None:
defaultRange=customRange
else:
defaultRange=[min(vX),max(vX)]
transformedRange=logicleTransform(defaultRange,T)
transformedBinEdges=np.linspace(transformedRange[0],transformedRange[1],bins+1)
return inverselogicleTransform(transformedBinEdges, T)
def logicleTransform(a, linCutOff):
vA = np.asarray(a, dtype = np.float64, order='C')
tA=__vectorlogicleTransform(vA, np.float64(linCutOff))
return tA
# old python implementation, moved to AGCythonUtils
# tA = np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= linCutOff:
# tA[a_idx] = np.log(10 * a[a_idx] / linCutOff)/np.log(10)
# else:
# tA[a_idx] = (a[a_idx]/linCutOff + np.log(10.0) - 1)/np.log(10)
# a_idx+=1
#return tA
def inverselogicleTransform(a, linCutOff):
vA = np.asarray(a, dtype = np.float64, order='C')
invA = __vectorInverselogicleTransform(vA, np.float64(linCutOff))
return invA
# old python implementation, moved to AGCythonUtils
# invA=np.empty_like(a).astype(float)
# a_idx=0
# while a_idx < len(a):
# if a[a_idx] >= 1.0: #transformed linCutOff, always 1.0 at T; np.log(10 * linCutOff / linCutOff)/np.log(10) -> np.log(10)/np.log(10) = 1
# invA[a_idx] = linCutOff*np.exp(np.log(10)*a[a_idx])/10
# #invA[a_idx]= (np.exp(a[a_idx])+10)*linCutOff/10
# else:
# invA[a_idx] = linCutOff*(np.log(10.0)*a[a_idx] - np.log(10.0) + 1)
# a_idx+=1
# return invA
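# The logicle pair above follows the same pattern as the bilog pair, but without a
# mirrored negative-log region: everything below linCutOff stays on the quasi-linear
# branch (a/linCutOff + ln(10) - 1)/ln(10), and linCutOff itself again maps to 1.0.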
def addAxLine(fig, ax, pos, orientation, size=2, scale='linear', xscale='linear', yscale='linear', T=1000):
if not all(i in ['linear', 'logicle', 'bilog'] for i in [scale, xscale, yscale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if orientation.lower()=='vertical':
if scale.lower() != 'linear' or xscale.lower() != 'linear':
lims=ax.get_xlim()
vmin = lims[0]
vmax = lims[1]
if scale.lower() == 'logicle' or xscale.lower() == 'logicle':
pos = convertTologiclePlotCoordinate(pos,vmin,vmax,T)
if scale.lower() == 'bilog' or xscale.lower() == 'bilog':
pos = convertToBiLogPlotCoordinate(pos,vmin,vmax,T)
ax.axvline(pos, c='r')
else:
if scale.lower() !='linear' or yscale.lower() != 'linear':
lims=ax.get_ylim()
vmin = lims[0]
vmax = lims[1]
            if scale.lower() == 'logicle' or yscale.lower() == 'logicle':
pos = convertTologiclePlotCoordinate(pos,vmin,vmax,T)
if scale.lower() == 'bilog' or yscale.lower() == 'bilog':
pos = convertToBiLogPlotCoordinate(pos,vmin,vmax,T)
ax.axhline(pos, c='r')
return fig
def addLine(fig, ax, lStartCoordinate, lEndCoordinate, size=2, scale='linear', T=1000):
if not scale.lower() in ['linear', 'logicle', 'bilog']:
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if scale.lower()=='logicle':
view=ax.xaxis.get_view_interval()
xCoordinates=convertTologiclePlotCoordinates([lStartCoordinate[0],lEndCoordinate[0]], vmin=view[0], vmax=view[1], T=T)
view=ax.yaxis.get_view_interval()
yCoordinates=convertTologiclePlotCoordinates([lStartCoordinate[1],lEndCoordinate[1]], vmin=view[0], vmax=view[1], T=T)
lStartCoordinate=[xCoordinates[0],yCoordinates[0]]
lEndCoordinate=[xCoordinates[1],yCoordinates[1]]
if scale.lower()=='bilog':
view=ax.xaxis.get_view_interval()
xCoordinates=convertToBiLogPlotCoordinates([lStartCoordinate[0],lEndCoordinate[0]], vmin=view[0], vmax=view[1], T=T)
view=ax.yaxis.get_view_interval()
yCoordinates=convertToBiLogPlotCoordinates([lStartCoordinate[1],lEndCoordinate[1]], vmin=view[0], vmax=view[1], T=T)
lStartCoordinate=[xCoordinates[0],yCoordinates[0]]
lEndCoordinate=[xCoordinates[1],yCoordinates[1]]
plt.plot([lStartCoordinate[0], lEndCoordinate[0]], [lStartCoordinate[1], lEndCoordinate[1]], color='r', linestyle='-', linewidth=size,figure=fig)
return fig, ax
def addArrow(fig, ax, lStartCoordinate, lEndCoordinate, size=5000):
arrow=Arrow(lStartCoordinate[0],lStartCoordinate[1],lEndCoordinate[0]-lStartCoordinate[0],lEndCoordinate[1]-lStartCoordinate[1],width=size, transform=ax.transAxes,head_width=size, head_length=size, fc='r', ec='r')
#ax.arrow(lStartCoordinate[0], lStartCoordinate[1], lEndCoordinate[0]-lStartCoordinate[0], lEndCoordinate[1]-lStartCoordinate[1], head_width=size, head_length=size, fc='r', ec='r')
ax.add_patch(arrow)
return fig
def draw_ellipse(position, covariance, sigma=2, ax=None, **kwargs):
if agconf.execMode in ["jupyter","ipython"]:
plot=True
else:
plot=False
if plot:
ax = ax or plt.gca();
# Convert covariance to principal axes
if covariance.shape == (2, 2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
width,height = np.sqrt(s)*sigma
else:
angle = 0
width, height = np.sqrt(covariance)*sigma
#Note width, height here is the full width and height and not the semiaxis length
# Draw the Ellipse
if plot:
ax.add_patch(Ellipse(position, width, height,
angle, **kwargs));
return width, height, angle
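# Informal note on the math above: for a 2x2 covariance the SVD yields the principal
# axes (columns of U) and the variances along them (singular values s), so np.sqrt(s)
# are the standard deviations and the rotation angle is that of the first principal
# axis. Ellipse expects full axis lengths, so with width/height = sqrt(s)*sigma the
# semi-axes of the drawn contour correspond to sigma/2 standard deviations.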
def plot_gmm(fcsDF, xCol, yCol, vI, gmm, sigma, ax):
if agconf.execMode in ["jupyter","ipython"]:
plot=True
else:
plot=False
ax = ax or plt.gca()
vEllipses=[]
for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):
width, height, angle = draw_ellipse(pos, covar, sigma, fill=False,edgecolor='#FF0000', linestyle='dashed');
vEllipses.append([pos,width,height,angle])
if plot:
plt.show();
return vEllipses
def plot_densityFunc(fcsDF, xCol,vI=sentinel, sigma=3, bins=300, scale='linear', T=1000, *args, **kwargs):
"""
    Plots a smoothed density function (gaussian-filtered histogram) of the events in one channel.
    **Parameters**
    fcsDF, pandas.DataFrame
        Flow data loaded in a pandas DataFrame.
    xCol, str
        Name of the channel/column to plot.
    vI, list-like, optional
        Index of events to include, defaults to all events in the passed dataframe.
    sigma, float/int, optional, default: 3
        Smoothing factor for the gaussian filter applied to the histogram.
    bins, int or str, optional, default: 300
        Number of bins, or 'auto' to estimate a suitable bin count from the data.
    scale, str, optional, default: 'linear'
        Scale to plot on; 'linear', 'logicle' or 'bilog'.
    T, int/float, optional, default: 1000
        Threshold for linear-log transition for bilog and logicle scales.
    **Returns**
    matplotlib.pyplot.Figure, matplotlib.pyplot.Axes
    **Examples**
    None currently.
"""
if xCol not in fcsDF.columns:
raise TypeError("Specified gate not in dataframe, check spelling or control your dataframe.columns labels")
if vI is sentinel:
vI=fcsDF.index
elif len(vI)==0:
sys.stderr.write("Passed index contains no events\n")
return None
if not all(i in ['linear', 'logicle', 'bilog'] for i in [scale]):
raise TypeError("scale, xscale, yscale can only be either of: 'linear', 'logicle', 'bilog'")
if not isinstance(sigma,(float,int)):
raise AliGaterError("Sigma must be float or int, found: "+str(type(sigma)),"in plot_densityFunc")
if 'sigma' in kwargs:
if not isinstance(kwargs['sigma'],(float,int)):
raise AliGaterError("Sigma must be float or int, found: "+str(type(sigma)),"in plot_densityFunc")
else:
sigma=kwargs['sigma']
data=getGatedVector(fcsDF, xCol, vI, return_type="nparray")
if isinstance(bins,int):
if len(vI)<bins:
sys.stderr.write("Fewer events than bins, readjusting number of bins\n")
bins=len(vI)
elif bins=='auto':
if scale.lower()!='linear':
t_data = transformWrapper(data, T=T, scale=scale)
else:
t_data=data
bins=__autoBinCount(t_data)
else:
raise AliGaterError("bins must be integer or string 'auto'","in plot_densityFunc")
if scale == 'logicle':
BinEdges=logicleBin(data,bins,T)
histo = np.histogram(data, BinEdges)
elif scale == 'bilog':
BinEdges=bilogBin(data,bins,T)
histo = np.histogram(data, BinEdges)
else:
histo=np.histogram(data, bins)
vHisto=np.linspace(min(histo[1]),max(histo[1]),bins)
smoothedHisto=gaussian_filter1d(histo[0].astype(float),sigma)
plt.clf()
fig,ax = plt.subplots()
ax.plot(vHisto,smoothedHisto, label="pdf for "+str(xCol)+"\nsigma: "+str(sigma))
plt.legend(loc='upper right', shadow=True, fontsize='medium')
if scale.lower()!='linear':
ax=plt.gca()
ax.set_xlim(left=min(data),right=max(data))
if scale.lower()=='logicle':
ax.xaxis.set_major_locator(logicleLocator(linCutOff=T))
ax.xaxis.set_major_formatter(logicleFormatter(linCutOff=T))
if scale.lower()=='bilog':
ax.xaxis.set_major_locator(BiLogLocator(linCutOff=T))
ax.xaxis.set_major_formatter(BiLogFormatter(linCutOff=T))
#plt.show()
return fig,ax
def __autoBinCount(data):
    #Internal function that mimics numpy's histogram_bin_edges functionality to guess an appropriate number of bins
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram_bin_edges.html
data_IQR = iqr(data)
n=len(data)
fd_h = 2*(data_IQR/(np.power(n,(1/3)))) #Freedman Diaconis Estimator
fd_bins = np.round(np.ceil((max(data)-min(data)) / fd_h)) #np.round(np.ceil(range / h))
s_bins = np.log2(n)+1 #Sturges estimator
bins=int(max([fd_bins,s_bins]))
return bins
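# Worked example for the estimators above (illustrative numbers): with n = 1000
# samples, an IQR of 2.0 and a data range of 10.0, Freedman-Diaconis gives
# h = 2*2.0/1000**(1/3) = 0.4 and ceil(10.0/0.4) = 25 bins, while Sturges gives
# log2(1000) + 1 ~= 11; the function keeps the larger of the two counts, 25.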
def imagePCA_cluster(imlist, samplelist, nOfComponents=2):
immatrix = np.array([im.flatten() for im in imlist],'f')
#Check for nan elements in matrix
if np.isnan(immatrix).any():
array_has_nan = np.array([np.isnan(arr).any() for arr in immatrix])
removed_images = samplelist[array_has_nan]
imlist = imlist[~array_has_nan]
samplelist=samplelist[~array_has_nan]
n_of_nan=array_has_nan.sum()
        reportStr=str(n_of_nan)+" samples had invalid images and were removed:\n"+"\n".join(removed_images)+"\n"
sys.stderr.write(reportStr)
immatrix = np.array([im.flatten() for im in imlist],'f')
if immatrix.shape[0] == 0:
reportStr="No data in passed image matrix\n"
sys.stderr.write(reportStr)
return None
if immatrix.shape[0] < nOfComponents:
reportStr="WARNING: fewer samples than requested components for PC analysis, adjusting\n"
sys.stderr.write(reportStr)
nOfComponents=immatrix.shape[0]
pca_obj = PCA(n_components=nOfComponents)
pca_obj.fit(immatrix)
projection_d = pca_obj.transform(immatrix)
projection_d_df = pd.DataFrame(projection_d)
projection_d_df.index = samplelist
columnNames=[]
for i in np.arange(1,nOfComponents+1,1):
columnStr="PC"+str(i)
columnNames.append(columnStr)
projection_d_df.columns = columnNames
reportStr="PCs explained variance: \n"+str(pca_obj.explained_variance_ratio_)+"\nTotal visual variation explained: "+str(sum(pca_obj.explained_variance_ratio_))+"\n"
sys.stderr.write(reportStr)
#center the coordinate system on the mean of each PC
projection_d_df = projection_d_df - projection_d_df.mean()
projection_d_df['length']=np.sqrt(np.square(projection_d_df).sum(axis=1))
projection_d_df.sort_values(by='length', inplace=True)
return projection_d_df
def imagePCA(imlist):
"""
Perform Principal Component Analysis of downsampled heatmap images, plots results.
Takes a list-like of heatmap images, flattens them and calls image_pca.
**Parameters**
    imlist, list-like
        List-like of heatmap images (2-D arrays); each image is flattened before the PCA.
**Returns**
None
**Examples**
None currently.
"""
m=16
n=16
# create matrix to store all flattened images
immatrix = np.array([im.flatten() for im in imlist],'f')
# perform PCA
V, S, immean = image_pca(immatrix)
    #show some images (the mean and the first 20 modes)
plt.figure(figsize=(20,30))
plt.subplot(2,4,1)
plt.imshow(immean.reshape(m,n))
for i in range(20):
plt.subplot(4,5,i+1)
plt.imshow(V[i].reshape(m,n).T[::-1], cmap="bwr")
plt.show()
def image_pca(X):
#Based on Stack Overflow discussion and code here
#https://math.stackexchange.com/questions/409239/compute-pca-with-this-useful-trick
"""
Principal Component Analysis of flattened heatmap images, main purpose is to be called internally by imagePCA
**Parameters**
X, list-like
List-like matrix with image data stored as flattened arrays in rows.
**Returns**
List-like
Projection matrix (with important dimensions first)
Float
Variance
Float
Mean
**Examples**
None currently.
"""
# get dimensions
try:
num_data,dim = X.shape
except ValueError:
sys.stderr.write("WARNING in image_pca: input matrix invalid\n")
return None,None,None
# center data
mean_X = X.mean(axis=0)
X = X - mean_X
if dim>num_data:
# PCA - compact trick used
M = np.dot(X,X.T) # covariance matrix
e,EV = np.linalg.eigh(M) # eigenvalues and eigenvectors
tmp = np.dot(X.T,EV).T # this is the compact trick
V = tmp[::-1] # reverse since last eigenvectors are the ones we want
S = np.sqrt(e)[::-1] # reverse since eigenvalues are in increasing order
for i in range(V.shape[1]):
V[:,i] /= S
else:
# PCA - SVD used
U,S,V = np.linalg.svd(X)
V = V[:num_data] # only makes sense to return the first num_data
# return the projection matrix, the variance and the mean
return V,S,mean_X
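# Sketch of why the "compact trick" above works (informal): when dim > num_data the
# dim x dim covariance X^T X is too large to decompose directly, but the small
# num_data x num_data Gram matrix M = X X^T shares its non-zero eigenvalues. If
# M v = e v, then (X^T X)(X^T v) = X^T (X X^T) v = e (X^T v), so X^T v is an
# eigenvector of the covariance; the code forms these via np.dot(X.T, EV) and
# rescales by the square roots of the eigenvalues.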
#From Ticker.py
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
#From Ticker.py
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
#From Ticker.py
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_int(x)) < 1e-10
#From Ticker.py
def nearest_int(x):
if x == 0:
return int(0)
elif x > 0:
return int(x + 0.5)
else:
return int(x - 0.5)
def convertToBiLogPlotCoordinates(Ticlocs, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = bilogTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
tTiclocs=bilogTransform(Ticlocs, T)
plotTics=[]
for tTic in tTiclocs:
plotTic=(tTic-tMinMax[0])/transformedRange*actualRange+vmin
plotTics.append(plotTic)
assert len(tTiclocs)==len(Ticlocs)
return plotTics
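# The conversion above is a plain linear rescaling in transformed space: a tick at
# data value x is drawn at
#   (bilog(x) - bilog(vmin)) / (bilog(vmax) - bilog(vmin)) * (vmax - vmin) + vmin.
# A minimal round-trip sketch (illustrative; vmin, vmax and T are assumed values):
def _bilog_plot_coordinate_roundtrip_example():
    vmin, vmax, T = -1000.0, 100000.0, 1000.0
    tics = [0.0, 1000.0, 10000.0]
    plot_tics = convertToBiLogPlotCoordinates(tics, vmin, vmax, T)
    recovered = invertBiLogPlotcoordinates(plot_tics, vmin, vmax, T)
    # Mapping tick values into plot coordinates and back recovers the originals.
    assert np.allclose(recovered, tics)
    return plot_tics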
def convertToBiLogPlotCoordinate(Ticloc, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = bilogTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
tTicloc=bilogTransform([Ticloc], T)[0]
plotTic=(tTicloc-tMinMax[0])/transformedRange*actualRange+vmin
return plotTic
def invertBiLogPlotcoordinates(plotTics, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = bilogTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
invPlotTics=[]
for tTic in plotTics:
invPlotTic=(tTic-vmin)/actualRange*transformedRange+tMinMax[0]
invPlotTics.append(invPlotTic)
result=inverseBilogTransform(invPlotTics, T)
return result
def invertBiLogPlotcoordinate(plotTic, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = bilogTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
invPlotTic=(plotTic-vmin)/actualRange*transformedRange+tMinMax[0]
result=inverseBilogTransform([invPlotTic], T)[0]
return result
def convertTologiclePlotCoordinates(Ticlocs, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = logicleTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
tTiclocs=logicleTransform(Ticlocs, T)
plotTics=[]
for tTic in tTiclocs:
plotTic=(tTic-tMinMax[0])/transformedRange*actualRange+vmin
plotTics.append(plotTic)
assert len(tTiclocs)==len(Ticlocs)
return plotTics
def convertTologiclePlotCoordinate(Ticloc, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = logicleTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
tTicloc=logicleTransform([Ticloc], T)[0]
plotTic=(tTicloc-tMinMax[0])/transformedRange*actualRange+vmin
return plotTic
def invertlogiclePlotcoordinates(plotTics, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = logicleTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
invPlotTics=[]
for tTic in plotTics:
invPlotTic=(tTic-vmin)/actualRange*transformedRange+tMinMax[0]
invPlotTics.append(invPlotTic)
result=inverselogicleTransform(invPlotTics, T)
return result
def invertlogiclePlotcoordinate(plotTic, vmin, vmax, T):
actualRange=vmax-vmin
tMinMax = logicleTransform([vmin, vmax], T)
transformedRange = tMinMax[1]-tMinMax[0]
invPlotTic=(plotTic-vmin)/actualRange*transformedRange+tMinMax[0]
result=inverselogicleTransform([invPlotTic], T)[0]
return result
class logicleLocator(Locator):
#Modified from matplotlibs LogLocator
#https://matplotlib.org/3.1.1/_modules/matplotlib/ticker.html#LogLocator
"""
Determine the tick locations for logicle axes based on LogLocator. Only locates and formats tics for the plot view.
Transform of underlying data and heatmap is handled outside matplotlib.
    Hacked version of LogLocator that covers normal use cases of the logicle scale
Only defined with ticlocations for data in range -1 000 000 < x
"""
def __init__(self, linCutOff=1000, subs=(1.0,), numdecs=4, numticks=None):
"""
Place ticks on the locations : subs[j] * base**i
Parameters
----------
subs : None, string, or sequence of float, optional, default (1.0,)
Gives the multiples of integer powers of the base at which
to place ticks. The default places ticks only at
integer powers of the base.
The permitted string values are ``'auto'`` and ``'all'``,
both of which use an algorithm based on the axis view
limits to determine whether and how to put ticks between
integer powers of the base. With ``'auto'``, ticks are
placed only between integer powers; with ``'all'``, the
integer powers are included. A value of None is
equivalent to ``'auto'``.
"""
if numticks is None:
if rcParams['_internal.classic_mode']:
numticks = 15
else:
numticks = 'auto'
self._base=10.0 #np.exp(1)
self.subs(subs)
self.numdecs = numdecs
self.numticks = numticks
if linCutOff > 10000:
raise AliGaterError("in logicleLocator: ","linear-log scale threshold can max be 10000")
if linCutOff <=0:
raise AliGaterError("in logicleLocator: ","linear-log scale threshold must be > 0")
self.T = linCutOff
def set_params(self, subs=None, numdecs=4, numticks=None):
"""Set parameters within this locator."""
if subs is not None:
self.subs(subs)
if numdecs is not None:
self.numdecs = numdecs
if numticks is not None:
self.numticks = numticks
# FIXME: these base and subs functions are contrary to our
# usual and desired API.
def subs(self, subs):
"""
set the minor ticks for the log scaling every base**i*subs[j]
"""
if subs is None: # consistency with previous bad API
self._subs = 'auto'
elif isinstance(subs, six.string_types):
if subs not in ('all', 'auto'):
raise ValueError("A subs string must be 'all' or 'auto'; "
"found '%s'." % subs)
self._subs = subs
else:
self._subs = np.asarray(subs, dtype=float)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.view_limits()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if self.numticks == 'auto':
if self.axis is not None:
numticks = np.clip(self.axis.get_tick_space(), 2, 9)
else:
numticks = 9
else:
numticks = self.numticks
if vmax < vmin:
vmin, vmax = vmax, vmin
#If vmax-vmin flipped, correct it
#How many decs in the log part?
log_vmin = math.log(self.T) / math.log(self._base)
try:
log_vmax = math.log(vmax) / math.log(self._base) #If empty input in log-span this can lead to math domain error. Return small default span in that case
except ValueError:
log_vmax = log_vmin + 1.0
numdec = math.floor(log_vmax) - math.ceil(log_vmin)
ticklocs = self._base ** numdec #Base ** decades
if numdec > 10:
subs = np.array([1.0])
else:
subs = np.arange(1.0, self._base) #(1.0, Base)
stride = 1
if rcParams['_internal.classic_mode']:
# Leave the bug left over from the PY2-PY3 transition.
while numdec / stride + 1 > numticks:
stride += 1
else:
while numdec // stride + 1 > numticks:
stride += 1
# Does subs include anything other than 1?
        have_subs = len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0)
decades = np.arange(math.ceil(log_vmin) - stride, math.ceil(log_vmax) + 2 * stride, stride)
if have_subs:
ticklocs = []
if stride == 1:
for decadeStart in self._base ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = self._base ** decades #Base ** decades
#Now we have number of tics and decs in the log part
tmpTicLoc=[]
if vmin < -100000:
tmpTicLoc.extend(np.arange(-1000000,-90000,100000))
if vmin < -10000:
tmpTicLoc.extend(np.arange(-100000,-9000,10000))
if vmin < -1000:
tmpTicLoc.extend(np.arange(-10000,-900,1000))
if vmin < 0:
tmpTicLoc.extend(np.arange(-1000,1,200))
Ticlocs = list(set(np.clip(tmpTicLoc, vmin, self.T)))
Ticlocs = list(np.sort(Ticlocs))
if vmax >= 0:
tmpTicLoc.extend(np.arange(0, self.T, 1000))
Ticlocs.extend(tmpTicLoc)
#ticklocs.extend(Ticlocs)
Ticlocs.extend(ticklocs)
clip_Ticlocs=np.sort(list(set(np.clip(Ticlocs,vmin, vmax))))
Ticlocs=convertTologiclePlotCoordinates(np.sort(clip_Ticlocs),vmin, vmax, self.T)
#ADD HOC POSSIBLY
Ticlocs=Ticlocs[1:-1]
#Ticlocs=convertTologiclePlotCoordinates(Ticlocs, vmin, vmax, self.T)
return self.raise_if_exceeds(np.asarray(Ticlocs))
def view_limits(self, vmin=None, vmax=None):
'Try to choose the view limits intelligently'
vmin, vmax = self.axis.get_view_interval()
return vmin, vmax
class logicleFormatter(Formatter):
#Modified from matplotlibs LogFormatter
#https://matplotlib.org/3.1.1/_modules/matplotlib/ticker.html#LogFormatter
"""
Base class for formatting ticks on a logicle scale. Only locates and formats tics for the plot view.
Transform of underlying data and heatmap is handled outside matplotlib.
    Modified version of LogFormatter that covers normal use cases of the logicle scale
Only defined with formatting ticlabels for data in range -1 000 000 < x
The passed parameters only affect plotting of the log-part of the scale
Parameters
----------
labelOnlyBase : bool, optional, default: False
If True, label ticks only at integer powers of base.
This is normally True for major ticks and False for
minor ticks.
minor_thresholds : (subset, all), optional, default: (1, 0.4)
If labelOnlyBase is False, these two numbers control
the labeling of ticks that are not at integer powers of
base; normally these are the minor ticks. The controlling
parameter is the log of the axis data range. In the typical
case where base is 10 it is the number of decades spanned
by the axis, so we can call it 'numdec'. If ``numdec <= all``,
all minor ticks will be labeled. If ``all < numdec <= subset``,
then only a subset of minor ticks will be labeled, so as to
avoid crowding. If ``numdec > subset`` then no minor ticks will
be labeled.
linthresh : float, optional, default: 1000
The threshold for the logicle scale change from linear-like to log-like scaling
Notes
-----
The `set_locs` method must be called to enable the subsetting
logic controlled by the ``minor_thresholds`` parameter.
In some cases such as the colorbar, there is no distinction between
major and minor ticks; the tick locations might be set manually,
or by a locator that puts ticks at integer powers of base and
at intermediate locations. For this situation, disable the
minor_thresholds logic by using ``minor_thresholds=(np.inf, np.inf)``,
so that all ticks will be labeled.
To disable labeling of minor ticks when 'labelOnlyBase' is False,
use ``minor_thresholds=(0, 0)``. This is the default for the
"classic" style.
Examples
--------
To label a subset of minor ticks when the view limits span up
to 2 decades, and all of the ticks when zoomed in to 0.5 decades
or less, use ``minor_thresholds=(2, 0.5)``.
To label all minor ticks when the view limits span up to 1.5
decades, use ``minor_thresholds=(1.5, 1.5)``.
"""
def __init__(self, labelOnlyBase=True,
minor_thresholds=None,
linCutOff=1000):
self.labelOnlyBase = labelOnlyBase
if minor_thresholds is None:
if rcParams['_internal.classic_mode']:
minor_thresholds = (0, 0)
else:
minor_thresholds = (1, 0.4)
self.minor_thresholds = minor_thresholds
self._sublabels = None
self._linthresh = linCutOff
self._base = np.exp(1)
def _num_to_string(self, x, vmin, vmax):
x = round(x,0)
s = self.pprint_val(x, vmax - vmin)
return s
def __call__(self, x, pos=None):
"""
Return the format for tick val `x`.
"""
if x == 0.0: # Symlog
return '0'
vmin, vmax = self.axis.get_view_interval()
tx=invertlogiclePlotcoordinate(x,vmin,vmax,self._linthresh)
if tx > self._linthresh+1:
fx = math.log(tx) / math.log(10.0)
is_x_decade = is_close_to_int(fx)
exponent = np.round(fx) if is_x_decade else np.floor(fx)
coeff = np.round(x / 10.0 ** exponent)
if self.labelOnlyBase and not is_x_decade:
return ''
if self._sublabels is not None and coeff not in self._sublabels:
return ''
else:
#Manually define acceptable negative values
accepted_range=list(np.arange(-1000,1001,500))
accepted_range.extend(np.arange(-10000,-1000,5000))
accepted_range.extend(np.arange(-100000,-9000,10000))
accepted_range.extend(np.arange(-1000000,-90000,100000))
if not np.round(tx) in accepted_range:
return ''
s = self._num_to_string(tx, vmin, vmax)
return self.fix_minus(s)
def pprint_val(self, x, d):
        #If the number is at or below the set lin-cutoff (_linthresh)
#Print it as an int
if x <= self._linthresh+1:
return '%d' % x
fmt = '%1.3e'
s = fmt % x
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
exponent = int(tup[1])
if exponent:
if float(mantissa) > 1:
s = '$%s*10^{%d}$' % (mantissa, exponent)
else:
s = '$%s0^{%d}$' % (mantissa, exponent)
else:
s = mantissa
else:
s = s.rstrip('0').rstrip('.')
return s
class BiLogLocator(Locator):
"""
Modified version of SymmetricalLogLocator. Only locates and formats tics for the plot view.
Transform of underlying data and heatmap is handled outside matplotlib classes.
Determine the tick locations for symmetric log axes
"""
def __init__(self, subs=(1.0,), linCutOff=100):
"""
place ticks on the location= base**i*subs[j]
"""
self._base = 10 #np.exp(1)
if isinstance(linCutOff, (float,int)):
self.T = linCutOff
else:
raise AliGaterError("in BiLogLocator: ","linthresh must be float/int. Found: "+str(type(linCutOff)))
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""Set parameters within this locator."""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
#if view limits are to be chosen intelligently it must be done prior to heatmap creation,
#thus at the level of plotheatmap. Before any ticformatting is made.
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
t = self.T
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(total_ticks // (self.numticks - 1), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
subs = np.arange(2.0, b)
ticklocs = []
for decade in decades:
if decade == 0:
ticklocs.append(decade)
else:
ticklocs.append(decade)
if len(subs) > 1:
ticklocs.extend(subs * decade)
clip_Ticlocs=np.sort(list(set(np.clip(ticklocs,vmin, vmax))))
Ticlocs=convertToBiLogPlotCoordinates(np.sort(clip_Ticlocs),vmin, vmax, self.T)
#dont want extra tic at min and max val
Ticlocs=Ticlocs[1:-1]
        return self.raise_if_exceeds(np.array(Ticlocs))
import numpy as np
from functools import reduce
from collections import OrderedDict
class TensorExpression:
def __init__(self,expr_type,*tensors):
self.tensors = tensors
self.expr_type = expr_type
def __getitem__(self,names):
if self.expr_type == "mul":
try:
expr = ",".join([tensor.names for tensor in self.tensors])
except:
# Attempt naive multiplication if a tensor is missing names
return Tensor(reduce(lambda x, y: x * y, [tensor._data for tensor in self.tensors]))
expr += "->"
expr += names
return Tensor(np.einsum(expr,*[tensor._data for tensor in self.tensors]))
elif self.expr_type == "add":
try:
all_names = "".join([tensor.names for tensor in self.tensors])
except:
# Attempt naive addition if a tensor is missing names
return Tensor(reduce(lambda x, y: x + y, [tensor._data for tensor in self.tensors]))
order,K,used_names = self.names_to_order([tensor.names for tensor in self.tensors])
tensors = self.permute_to_order(self.tensors,order,K)
out = Tensor(reduce(lambda x, y: x + y, [tensor._data for tensor in tensors]),used_names)[names]
return out
elif self.expr_type == "eq":
try:
all_names = "".join([tensor.names for tensor in self.tensors])
except:
# Attempt naive equality if a tensor is missing names
return Tensor(reduce(lambda x, y: x == y, [tensor._data for tensor in self.tensors]))
order,K,used_names = self.names_to_order([tensor.names for tensor in self.tensors])
tensors = self.permute_to_order(self.tensors,order,K)
out = Tensor(reduce(lambda x, y: x == y, [tensor._data for tensor in tensors]),used_names)[names]
return out
def names_to_order(self,names):
k=0
used = OrderedDict()
order = []
for tensor_names in names:
tensor_order = []
for c in tensor_names:
if c in used:
tensor_order.append(used[c])
else:
used[c] = k
tensor_order.append(k)
k += 1
order.append(tensor_order)
        order = np.array(order)
        return order, k, "".join(used.keys())
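        # Illustrative usage sketch for this module (hypothetical: it assumes a
        # companion Tensor class exposing `.names` for its index labels and `._data`
        # for the underlying ndarray, which is not shown in this fragment):
        #   A = Tensor(np.random.rand(3, 4), "ij")
        #   B = Tensor(np.random.rand(4, 5), "jk")
        #   C = TensorExpression("mul", A, B)["ik"]   # np.einsum("ij,jk->ik", ...)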
# -*- coding: utf-8 -*-
"""Tools to plot tensors."""
import numpy as np
def sample_sphere(N=100):
"""Define all spherical angles."""
phi = np.linspace(0, 2 * np.pi, N)
theta = np.linspace(0, np.pi, N)
# Cartesian coordinates that correspond to the spherical angles:
r = np.array(
[
np.outer(np.cos(phi), np.sin(theta)),
np.outer(np.sin(phi), np.sin(theta)),
np.outer(np.ones_like(phi), np.cos(theta)),
]
)
return r
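# The three outer products above tile the usual spherical parameterisation
# r(phi, theta) = (cos(phi) sin(theta), sin(phi) sin(theta), cos(theta))
# over an N x N angle grid, so sample_sphere returns an array of shape (3, N, N).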
def sample_circle(plane="xy", N=100):
"""Define all angles in a certain plane."""
phi = np.linspace(0, 2 * np.pi, N)
if plane == "xy":
return np.array([np.cos(phi), np.sin(phi), np.ones_like(phi)])
elif plane == "xz":
        return np.array([np.cos(phi), np.ones_like(phi), np.sin(phi)])
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import base64
import random
import requests
import numpy as np
from PIL import Image, ImageSequence, ImageDraw, ImageFont
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
from settings import *
def process_gif(image_gif):
frame_list = [frame.copy() for frame in ImageSequence.Iterator(image_gif)]
frame_list = [frame_list[i] for i in range(len(frame_list)) if i % GIF_FRAME_INTERVAL == 0]
if len(frame_list) > GIF_MAX_FRAME:
return random.sample(frame_list, GIF_MAX_FRAME)
else:
return frame_list
def download_image(image_url):
response = requests.get(image_url)
response = response.content
bytes_obj = io.BytesIO(response)
image = Image.open(bytes_obj)
image = image.convert("RGB")
return image
def base64_to_image(image_base64):
data = base64.b64decode(image_base64)
image = io.BytesIO(data)
image = Image.open(image)
image = image.convert("RGB")
return image
def image_to_base64(image):
    if isinstance(image, np.ndarray):
image = Image.fromarray(image)
image_pil = io.BytesIO()
image.save(image_pil, format='PNG')
return base64.b64encode(image_pil.getvalue()).decode('utf-8')
def draw_boxes(image_url, text_list, boxes):
image = download_image(image_url)
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(FONT_FILE_PATH, FONT_SIZE)
for box, text in zip(boxes, text_list):
x1, y1, x2, y2 = box
p1, p2, p3, p4 = [x1, y1], [x2, y1], [x2, y2], [x1, y2]
draw.text(p4, text, font=font, fill=FONT_COLOR)
draw.line(tuple(p1 + p2), fill=LINE_COLOR)
draw.line(tuple(p2 + p3), fill=LINE_COLOR)
draw.line(tuple(p3 + p4), fill=LINE_COLOR)
draw.line(tuple(p4 + p1), fill=LINE_COLOR)
path = SAVE_TEMP_IMAGE_PATH + random_str(TEMP_IMAGE_RANDOM_NAME_LENGTH) + ".png"
image.save(path)
return "/" + path
def random_str(length):
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
salt = ''
for i in range(length):
salt += random.choice(chars)
return salt
def calculate_vector_cosine_similarity(embedding_a, embedding_b):
    cos = np.dot(embedding_a, embedding_b) / (np.linalg.norm(embedding_a) * np.linalg.norm(embedding_b))
    return cos
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from timeit import default_timer as timer
import cuml.tsa.arima as arima
from scipy.optimize.optimize import _approx_fprime_helper
from cuml.utils.input_utils import input_to_host_array
# test data time
t = np.array([1, 2, 3, 4, 5, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21,
24, 25, 26, 28, 39, 40, 41, 42, 43, 45, 46, 48, 50, 51, 52, 53,
55, 56, 58, 59, 60, 63, 71, 72, 74])
# test dataset 0
data0 = np.array([16454, 12708, 14084, 20929, 11888, 13378, 20503, 17422,
16574, 16567, 14222, 14471, 11988, 17122, 15448, 14290,
13679, 10690, 17240, 17900, 16673, 1070, 16165, 15832, 18495,
15160, 15638, 21688, 18284, 2306, 10159, 8224, 7517, 14363,
11185, 15804, 2816, 12217, 7739, 5459, 6241, 171, 11118])
# test dataset 1
data1 = np.array([16492, 12671, 13644, 18120, 11420, 10904, 20723, 17011,
15109, 15791, 13014, 14622, 12029, 15932, 14731, 13573,
13229, 11371, 16400, 16939, 16672, 2520, 14627, 14035, 14724,
15094, 12812, 20126, 16411, 2687, 9582, 8291, 7352, 14313,
10552, 14143, 2790, 12960, 7362, 4606, 6155, 158, 11435])
# The ARIMA model of dataset 0. ("smoothed dataset 0")
data_smooth = np.array([16236.380267964598, 14198.707110817017,
13994.129600585984, 15705.975404284243,
14455.226246272636, 14114.076675764649,
15033.216755054425, 15021.10438408751,
14954.822759706418, 14904.042532492134,
14557.421649530697, 14347.41471896904,
13877.476483976807, 14059.990544916833,
13888.386639087348, 13665.988312305493,
13436.674608089721, 12979.25813798955,
13199.416272194492, 13243.849692596767,
13157.053784142185, 11904.470827085499,
12356.442250181439, 12279.590418507576,
12401.153685335092, 12190.66504090282,
12122.442825730872, 12444.119210649873,
12326.524612239178, 11276.55939500802,
11278.522346300862, 10911.26233776968,
10575.493222628831, 10692.727355175008,
10395.405550019213, 10480.90443078538,
9652.114779061498, 9806.45087894164, 9401.00281392505,
9019.688213508754, 8766.056499652503,
8158.794074075997, 8294.86605488629])
def create_yp_ref():
""" creates reference prediction """
(_, y) = get_data()
model = arima.ARIMAModel((1, 1, 1), np.array([-217.7230173548441,
-206.81064091237104]),
[np.array([0.0309380078339684]),
np.array([-0.0371740508810001])],
[np.array([-0.9995474311219695]),
np.array([-0.9995645146854383])], y)
y_p_ref = model.predict_in_sample()
print("yp_ref=", y_p_ref)
# reference prediction for (1,1,1). recreate with `create_yp_ref()`
yp_ref = [[16236.276982645155, 14199.83635121614, 13993.612504802639,
15701.001917841138, 14457.318782427961, 14114.436684625534,
15028.287621746756, 15018.826402730409, 14953.184455915669,
14902.339385888643, 14557.310116753155, 14346.872075971714,
13878.49920540047, 14056.977409351373, 13886.543137497267,
13664.846049477095, 13435.78531068983, 12980.316970030086,
13195.421878944875, 13239.340147071023, 13153.408671153384,
11912.022478836143, 12352.451020219527, 12275.76344198953,
12395.309983436986, 12187.223001418526, 12118.535320809358,
12435.729542924131, 12320.661934977046, 11282.214722260982,
11278.023270572445, 10911.916572651637, 10576.37138790725,
10688.356982664653, 10393.213992661886, 10475.175832966357,
9655.699137880823, 9802.85623495, 9400.49054615417,
9020.574181472959, 8766.084012642543, 8162.945169968312,
8291.973806637427],
[16285.189359087628, 14338.460909054174,
13850.63823251114, 14556.56635360983, 13954.695497411303,
13244.780548562172, 13744.642846463914, 14080.121846941318,
14051.566389907626, 13986.915237521414, 13786.62136453952,
13582.380651361393, 13344.11985120289, 13170.332862411682,
13105.025676475907, 12962.955049014487, 12776.960524427446,
12553.108594193804, 12354.900642927994, 12350.399980965518,
12352.327486277976, 12215.518342586416, 11656.131573206087,
11563.059813979233, 11449.754138979828, 11362.05755263616,
11286.236966021392, 11116.378254211602, 11223.015986560224,
11233.463302287848, 10709.250034043267, 10466.998468513524,
10192.800693817426, 9840.980314287335, 9786.651333552647,
9559.92129655608, 9584.118472336395, 9080.57441537021,
9030.024898020312, 8807.168013053131, 8470.279842824808,
8280.44295003853, 7648.106311322318]]
def get_data():
"""Convenience function to get reference data"""
d = np.zeros((len(t), 2))
d[:, 0] = data0
d[:, 1] = data1
return (t, d)
def test_transform():
"""Test the parameter transformation code."""
x0 = np.array([-36.24493319, -0.76159416, -0.76159516, -167.65533746,
-0.76159416, -0.76159616])
# Without corrections to the MA parameters, this inverse transform will
# return NaN
Tx0 = arima._batch_invtrans(0, 1, 2, 2, x0)
assert(not np.isnan(Tx0).any())
Tx0 = arima._batch_invtrans(2, 1, 0, 2, x0)
assert(not np.isnan(Tx0).any())
Tx0 = arima._batch_invtrans(1, 1, 1, 2, np.array([-1.27047619e+02,
1.90024682e-02,
-5.88867176e-01,
-1.20404762e+02,
5.12333137e-05,
-6.14485076e-01]))
np.testing.assert_allclose(Tx0, np.array([-1.27047619e+02,
3.80095119e-02,
-1.35186024e+00,
-1.20404762e+02,
1.02466627e-04,
-1.43219144e+00]))
def test_log_likelihood():
"""
Test loglikelihood against reference results using reference parameters
"""
x0 = [[-220.35376518754148,
-0.2617000627224417,
-2.1893003751753457],
[-2.3921544864718811e+02, -1.3525124433776395e-01,
-7.5978156540072991e-02,
-2.4055488944465053e+00]]
ref_ll = [-415.7117855771454, -415.32341960785186]
_, y = get_data()
for p in range(1, 3):
order = (p, 1, 1)
y0 = np.zeros((len(t), 1), order='F')
y0[:, 0] = y[:, 0]
ll = arima.ll_f(1, len(t), order, y0, np.copy(x0[p-1]), trans=True)
np.testing.assert_almost_equal(ll, ref_ll[p-1])
x = [-1.2704761899e+02, 3.8009501900e-02, -1.3518602400e+00,
-1.2040476199e+02, 1.0245662700e-04, -1.4321914400e+00]
ll = arima.ll_f(2, len(t), (1, 1, 1), y, np.array(x))
np.set_printoptions(precision=14)
ll_ref = np.array([-418.2732740315433, -413.7692130741877])
np.testing.assert_allclose(ll, ll_ref)
def test_gradient_ref():
"""Tests the gradient based on a reference output"""
x = np.array([-1.2704761899e+02, 3.8009511900e-02, -1.3518602400e+00,
-1.2040476199e+02, 1.0246662700e-04, -1.4321914400e+00])
_, y = get_data()
np.set_printoptions(precision=14)
g = arima.ll_gf(2, len(t), 3, (1, 1, 1), y, x)
g_ref = np.array([-7.16227077646181e-04, -4.09565927839139e+00,
-4.10715017551411e+00, -1.02602371043758e-03,
-4.46265460141149e+00,
-4.18378931499319e+00])
np.testing.assert_allclose(g, g_ref, rtol=1e-6)
def test_gradient():
"""test gradient implementation using FD"""
num_samples = 100
xs = np.linspace(0, 1, num_samples)
np.random.seed(12)
noise = np.random.normal(scale=0.1, size=num_samples)
ys = noise + 0.5*xs
for num_batches in range(1, 5):
ys_df = np.reshape(np.tile(np.reshape(ys,
(num_samples, 1)),
num_batches),
(num_batches, num_samples), order="C").T
order = (1, 1, 1)
mu = 0.0
arparams = np.array([-0.01])
maparams = np.array([-1.0])
x = np.r_[mu, arparams, maparams]
x = np.tile(x, num_batches)
num_samples = ys_df.shape[0]
num_batches = ys_df.shape[1]
p, d, q = order
num_parameters = d + p + q
g = arima.ll_gf(num_batches, num_samples,
num_parameters, order, ys_df, x)
grad_fd = np.zeros(len(x))
h = 1e-8
for i in range(len(x)):
def fx(xp):
return arima.ll_f(num_batches, num_samples, order,
ys_df, xp).sum()
xph = np.copy(x)
xmh = np.copy(x)
xph[i] += h
xmh[i] -= h
f_ph = fx(xph)
f_mh = fx(xmh)
grad_fd[i] = (f_ph-f_mh)/(2*h)
np.testing.assert_allclose(g, grad_fd, rtol=1e-4)
def f(xk):
return arima.ll_f(num_batches, num_samples, order,
ys_df, xk).sum()
# from scipy
g_sp = _approx_fprime_helper(x, f, h)
np.testing.assert_allclose(g, g_sp, rtol=1e-4)
def test_bic():
"""Test "Bayesian Information Criterion" metric. BIC penalizes the
log-likelihood with the number of parameters.
"""
np.set_printoptions(precision=16)
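    # For reference (standard definition, not taken from the cuml source):
    # BIC = k*ln(n) - 2*ln(L_hat), with k fitted parameters, n observations and
    # L_hat the maximised likelihood, so a larger model is only preferred when its
    # likelihood gain outweighs the k*ln(n) penalty.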
bic_reference = [[851.0904458614862, 842.6620993460326],
[854.747970752074, 846.2220267762417]]
_, y = get_data()
for p in range(1, 3):
order = (p, 1, 1)
mu0, ar0, ma0 = arima.estimate_x0(order, y)
batched_model = arima.fit(y, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
np.testing.assert_allclose(batched_model.bic,
bic_reference[p-1], rtol=1e-4)
def test_fit():
"""Test the `fit()` function against reference parameters."""
_, y = get_data()
mu_ref = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar_ref = [
np.array([[0.0309380078339684, -0.0371740508810001]], order='F'),
np.array([[0.0309027562133337, -0.0386322768036704],
[-0.0191533926207157, -0.0330133336831984]], order='F')]
ma_ref = [
np.array([[-0.9995474311219695, -0.9995645146854383]], order='F'),
np.array([[-0.999629811305126, -0.9997747315789454]], order='F')]
ll_ref = [[-414.7628631782474, -410.049081775547],
[-414.7559799310751, -410.0285309839064]]
for p in range(1, 3):
order = (p, 1, 1)
mu0, ar0, ma0 = arima.estimate_x0(order, y)
batched_model = arima.fit(y, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
print("num iterations: ", batched_model.niter)
x = arima.pack(p, 1, 1, 2, batched_model.mu,
batched_model.ar_params, batched_model.ma_params)
llx = arima.ll_f(2, len(t), (p, 1, 1), y, x, trans=False)
rtol = 1e-2
# parameter differences are more difficult to test precisely due to the
# nonlinear-optimization.
np.testing.assert_allclose(batched_model.mu, mu_ref[p-1], rtol=rtol)
np.testing.assert_allclose(batched_model.ar_params, ar_ref[p-1],
rtol=rtol)
np.testing.assert_allclose(batched_model.ma_params, ma_ref[p-1],
rtol=rtol)
        # more important is that the loglikelihood matches the reference,
        # so it is checked with a tighter tolerance.
np.testing.assert_allclose(llx, ll_ref[p-1], rtol=1e-6)
def test_predict(plot=False):
"""Test the `predict_in_sample()` function using provided parameters"""
_, y = get_data()
mu = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar = [[np.array([0.0309380078339684]), np.array([-0.0371740508810001])],
[np.array([0.0309027562133337, -0.0191533926207157]),
np.array([-0.0386322768036704, -0.0330133336831984])]]
ma = [[np.array([-0.9995474311219695]), np.array([-0.9995645146854383])],
[np.array([-0.999629811305126]), np.array([-0.9997747315789454])]]
l2err_ref = [[7.611525998416604e+08, 7.008862739645946e+08],
[7.663156224285843e+08, 6.993847054122686e+08]]
for p in range(1, 3):
order = (p, 1, 1)
model = arima.ARIMAModel(order, mu[p-1], ar[p-1], ma[p-1], y)
d_y_b_p = model.predict_in_sample()
y_b_p = input_to_host_array(d_y_b_p).array
if plot:
import matplotlib.pyplot as plt
nb_plot = 2
fig, axes = plt.subplots(nb_plot, 1)
axes[0].plot(t, y[:, 0], t, y_b_p[:, 0], "r-")
axes[1].plot(t, y[:, 1], t, y_b_p[:, 1], "r-")
if p == 1:
axes[0].plot(t, yp_ref[p-1][0], "g--")
axes[1].plot(t, yp_ref[p-1][1], "g--")
plt.show()
l2_error_predict = np.sum((y_b_p - y)**2, axis=0)
np.testing.assert_allclose(l2err_ref[p-1], l2_error_predict)
if p == 1:
np.testing.assert_allclose(y_b_p[:, 0], yp_ref[0])
np.testing.assert_allclose(y_b_p[:, 1], yp_ref[1])
def test_forecast():
"""Test forecast using provided parameters"""
_, y = get_data()
mu = [np.array([-217.7230173548441, -206.81064091237104]),
np.array([-217.72325384510506, -206.77224439903458])]
ar = [[np.array([0.0309380078339684]), np.array([-0.0371740508810001])],
[np.array([0.0309027562133337, -0.0191533926207157]),
np.array([-0.0386322768036704, -0.0330133336831984])]]
ma = [[np.array([-0.9995474311219695]), np.array([-0.9995645146854383])],
[np.array([-0.999629811305126]), np.array([-0.9997747315789454])]]
y_fc_ref = [np.array([[8291.97380664, 7993.55508519, 7773.33550351],
[7648.10631132, 7574.38185979, 7362.6238661]]),
np.array([[7609.91057747, 7800.22971962, 7473.00968599],
[8016.79544837, 7472.39902223, 7400.83781943]])]
for p in range(1, 3):
order = (p, 1, 1)
model = arima.ARIMAModel(order, mu[p-1], ar[p-1], ma[p-1], y)
d_y_b_fc = model.forecast(3)
y_b_fc = input_to_host_array(d_y_b_fc).array
np.testing.assert_allclose(y_fc_ref[p-1], y_b_fc.T)
def test_fit_predict_forecast(plot=False):
"""Full integration test: Tests fit followed by in-sample prediction and
out-of-sample forecast
"""
np.set_printoptions(precision=16)
_, y = get_data()
ns_train = 35
ns_test = len(t) - ns_train
y_b_p = []
y_f_p = []
for p in range(1, 3):
order = (p, 1, 1)
nb = 2
y_train = np.zeros((ns_train, nb))
for i in range(nb):
y_train[:, i] = y[:ns_train, i]
p, _, _ = order
mu0, ar0, ma0 = arima.estimate_x0(order, y_train)
batched_model = arima.fit(y_train, order,
mu0,
ar0,
ma0,
opt_disp=-1, h=1e-9)
d_y_b = batched_model.predict_in_sample()
y_b = input_to_host_array(d_y_b).array
d_y_fc = batched_model.forecast(ns_test)
y_fc = input_to_host_array(d_y_fc).array
y_b_p.append(y_b)
y_f_p.append(y_fc)
if plot:
import matplotlib.pyplot as plt
nb_plot = 2
_, axes = plt.subplots(nb_plot, 1)
axes[0].plot(t, y[:, 0], t[:ns_train], y_b_p[0][:, 0], "r-",
t[ns_train-1:-1], y_f_p[0][:, 0], "--")
axes[0].plot(t[:ns_train], y_b_p[1][:, 0], "g-",
t[ns_train-1:-1], y_f_p[1][:, 0], "y--")
axes[0].plot(t, yp_ref[0], "b--")
axes[1].plot(t, y[:, 1], t[:ns_train], y_b_p[0][:, 1], "r-",
t[ns_train-1:-1], y_f_p[0][:, 1], "--")
axes[1].plot(t[:ns_train], y_b_p[1][:, 1], "g-",
t[ns_train-1:-1], y_f_p[1][:, 1], "y--")
axes[1].plot(t, yp_ref[1], "b--")
plt.show()
l2_error_predict0 = np.sum((y_b_p[0][:, :] - y[:ns_train, :])**2, axis=0)
l2_error_predict1 = np.sum((y_b_p[1][:, :] - y[:ns_train, :])**2, axis=0)
l2_error_ref0 = [5.1819845778009456e+08, 4.4313075823450834e+08]
l2_error_ref1 = [5.4015810529295897e+08, 4.6489505018349826e+08]
l2_error_forecast0 = np.sum((y_f_p[0][:, :] - y[ns_train-1:-1, :])**2,
axis=0)
l2_error_forecast1 = np.sum((y_f_p[1][:, :] - y[ns_train-1:-1, :])**2,
axis=0)
l2_error_fc_ref0 = [2.7841860168252653e+08, 2.4003239604745972e+08]
l2_error_fc_ref1 = [3.728470033076098e+08, 3.039953059636233e+08]
rtol = 5e-5
np.testing.assert_allclose(l2_error_predict0, l2_error_ref0, rtol=rtol)
np.testing.assert_allclose(l2_error_predict1, l2_error_ref1, rtol=rtol)
rtol = 1e-3
np.testing.assert_allclose(l2_error_forecast0, l2_error_fc_ref0, rtol=rtol)
np.testing.assert_allclose(l2_error_forecast1, l2_error_fc_ref1, rtol=rtol)
def test_grid_search(num_batches=2):
"""Tests grid search using random data over the default range of p,q
parameters"""
ns = len(t)
y_b = np.zeros((ns, num_batches))
for i in range(num_batches):
y_b[:, i] = np.random.normal(size=ns, scale=2000) + data_smooth
best_order, best_mu, best_ar, best_ma, best_ic = arima.grid_search(y_b,
d=1)
if num_batches == 2:
np.testing.assert_array_equal(best_order, [(0, 1, 1), (0, 1, 1)])
def demo():
"""Demo example from the documentation"""
import matplotlib.pyplot as plt
num_samples = 200
xs = np.linspace(0, 1, num_samples)
np.random.seed(12)
noise = np.random.normal(scale=0.05, size=num_samples)
noise2 = np.random.normal(scale=0.05, size=num_samples)
ys1 = noise + 0.5*xs + 0.1*np.sin(xs/np.pi)
ys2 = noise2 + 0.25*xs + 0.15*np.sin(0.8*xs/np.pi)
ys = np.zeros((num_samples, 2))
ys[:, 0] = ys1
ys[:, 1] = ys2
plt.plot(xs, ys1, xs, ys2)
mu0, ar0, ma0 = arima.estimate_x0((1, 1, 1), ys)
model = arima.fit(ys, (1, 1, 1), mu0, ar0, ma0)
d_yp = model.predict_in_sample()
yp = input_to_host_array(d_yp).array
d_yfc = model.forecast(50)
yfc = input_to_host_array(d_yfc).array
dx = xs[1] - xs[0]
xfc = np.linspace(1, 1+50*dx, 50)
plt.plot(xs, yp)
plt.plot(xfc, yfc)
def bench_arima(num_batches=240, plot=False):
"""A parameterized benchmark allowing an arbitrarily large number of similar,
but not identical batches"""
ns = len(t)
y_b = np.zeros((ns, num_batches))
for i in range(num_batches):
        y_b[:, i] = np.random.normal(size=ns, scale=2000) + data_smooth
import numpy as np
# from tsnecuda import TSNE
# from sklearn.manifold import TSNE
from data.IncrementalTSNE import IncrementalTSNE
import fastlapjv
from matplotlib import pyplot as plt
from scipy.spatial.distance import cdist
from fastlapjv import fastlapjv
import math
from time import time
class GridLayout(object):
def __init__(self):
super().__init__()
self.tsner = IncrementalTSNE(n_components=2, init='pca', method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
def fit(self, X: np.ndarray, labels: np.ndarray = None, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None):
"""main fit function
Args:
X (np.ndarray): n * d, n is the number of samples, d is the dimension of a sample
labels (np.ndarray): label of each sample in X
"""
X_embedded = self.tsne(X, constraintX = constraintX, constraintY = constraintY, labels = labels, constraintLabels = constraintLabels, init = init)
grid_ass, grid_size = self.grid(X_embedded)
return X_embedded, grid_ass, grid_size
def tsne(self, X: np.ndarray, labels: np.ndarray = None, perplexity: int = 15, learning_rate: int = 3, constraintX: np.ndarray = None, constraintY: np.ndarray = None, constraintLabels: np.ndarray = None, init = None) -> np.ndarray:
# remove empty labels
labelcnt = 0
removeEmptyTransform = np.zeros((np.max(labels)+1), dtype=int)-1
for label in labels:
if removeEmptyTransform[label]==-1:
removeEmptyTransform[label]=labelcnt
labelcnt += 1
labels = removeEmptyTransform[labels]
constraintLabels = removeEmptyTransform[constraintLabels]
self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=30, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
if constraintX is None:
X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, prev_n = 0 if constraintX is None else len(constraintX),
alpha = 0.5, labels=labels, label_alpha=0.9)
else:
self.tsner = IncrementalTSNE(n_components=2, init='pca' if init is None else init, method='barnes_hut', perplexity=5, angle=0.3, n_jobs=8, n_iter=1000, random_state = 100)
X_embedded = self.tsner.fit_transform(X, constraint_X = constraintX, constraint_Y = constraintY, constraint_labels = constraintLabels, prev_n = 0 if constraintX is None else len(constraintX),
alpha = 0.3, labels = labels, label_alpha=0.2)
return X_embedded
def grid(self, X_embedded: np.ndarray):
X_embedded -= X_embedded.min(axis=0)
X_embedded /= X_embedded.max(axis=0)
num = X_embedded.shape[0]
        square_len = math.ceil(np.sqrt(num))
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Affine Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class AffineBijectorTest(test_case.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
mu = -1.
# scale corresponds to 1.
bijector = tfb.Affine(shift=mu)
self.assertStartsWith(bijector.name, "affine")
def testNoBatchMultivariateIdentity(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[1., 0], [0, 1.]]
bijector = tfb.Affine(shift=mu)
x = [1., 1]
      # matmul(sigma, x) + shift
      # = [1, 1] + [1, -1]
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1], [-1., -1]]
self.assertAllClose([[2., 0], [0., -2]], run(bijector.forward, x))
self.assertAllClose([[0., 2], [-2., 0]], run(bijector.inverse, x))
self.assertAllClose(
0., run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateDiag(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[2., 0], [0, 1.]]
bijector = tfb.Affine(shift=mu, scale_diag=[2., 1])
x = [1., 1]
      # matmul(sigma, x) + shift
      # = [2, 1] + [1, -1]
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
# Reset bijector.
bijector = tfb.Affine(shift=mu, scale_diag=[2., 1])
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1],
[-1., -1]]
self.assertAllClose([[3., 0],
[-1., -2]],
run(bijector.forward, x))
self.assertAllClose([[0., 2],
[-1., 0]],
run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateFullDynamic(self):
x_value = np.array([[1., 1]], dtype=np.float32)
mu_value = np.array([1., -1], dtype=np.float32)
scale_diag_value = np.array([2., 2], dtype=np.float32)
x = tf1.placeholder_with_default(x_value, shape=None)
mu = tf1.placeholder_with_default(mu_value, shape=None)
scale_diag = tf1.placeholder_with_default(
scale_diag_value, shape=None)
bijector = tfb.Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[3., 1]], self.evaluate(bijector.forward(x)))
self.assertAllClose([[0., 1]], self.evaluate(bijector.inverse(x)))
self.assertAllClose(
-np.log(4),
self.evaluate(bijector.inverse_log_det_jacobian(x, event_ndims=1)))
def testBatchMultivariateIdentity(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale = 2.
bijector = tfb.Affine(shift=mu, scale_identity_multiplier=scale)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateDiag(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale_diag = [[2., 2]]
bijector = tfb.Affine(shift=mu, scale_diag=scale_diag)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(4)],
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateFullDynamic(self):
x_value = np.array([[[1., 1]]], dtype=np.float32)
mu_value = np.array([[1., -1]], dtype=np.float32)
scale_diag_value = np.array([[2., 2]], dtype=np.float32)
x = tf1.placeholder_with_default(x_value, shape=None)
mu = tf1.placeholder_with_default(mu_value, shape=None)
scale_diag = tf1.placeholder_with_default(
scale_diag_value, shape=None)
bijector = tfb.Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[[3., 1]]], self.evaluate(bijector.forward(x)))
self.assertAllClose([[[0., 1]]], self.evaluate(bijector.inverse(x)))
self.assertAllClose(
[-np.log(4)],
self.evaluate(bijector.inverse_log_det_jacobian(
x, event_ndims=1)))
def testIdentityWithDiagUpdate(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = tfb.Affine(
shift=mu, scale_identity_multiplier=1., scale_diag=[1., 1., 1.])
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.**3),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithTriL(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 2]]
bijector = tfb.Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 5]], run(bijector.forward, x))
self.assertAllClose([[1., 0.5]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testDiagWithTriL(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 3]]
bijector = tfb.Affine(
shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 7]], run(bijector.forward, x))
self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(6.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityAndDiagWithTriL(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[3., 0], [2, 4]]
bijector = tfb.Affine(
shift=mu,
scale_identity_multiplier=1.0,
scale_diag=[1., 2.],
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[2., 9]], run(bijector.forward, x))
self.assertAllClose([[2 / 3., 5 / 12.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(12.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithVDVTUpdate(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
bijector = tfb.Affine(
shift=mu,
scale_identity_multiplier=2.,
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = tfb.Affine(shift=mu, scale_diag=[10., 2, 3])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 3, 8], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1.5, 4 / 3.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(60.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testDiagWithVDVTUpdate(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
bijector = tfb.Affine(
shift=mu,
scale_diag=[2., 3, 4],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = tfb.Affine(shift=mu, scale_diag=[10., 3, 5])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 5, 14], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdate(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = tfb.Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = tfb.Affine(
shift=mu, scale_tril=[[10., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 14 / 15., 4 / 25.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdateNoDiagonal(self):
def static_run(fun, x, **kwargs):
return self.evaluate(fun(x, **kwargs))
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value, dtype=np.float32)
placeholder = tf1.placeholder_with_default(x_value, shape=None)
return self.evaluate(fun(placeholder, **kwargs))
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = tfb.Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=None,
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = tfb.Affine(
shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([5., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(90.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateRaisesWhenSingular(self):
mu = [1., -1]
with self.assertRaisesOpError("diagonal part must be non-zero"):
bijector = tfb.Affine(
shift=mu,
# Has zero on the diagonal.
scale_diag=[0., 1],
validate_args=True)
self.evaluate(bijector.forward([1., 1.]))
def _makeScale(self,
x,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None):
"""Create a scale matrix. Return None if it can not be created."""
c = scale_identity_multiplier
d1 = scale_diag
tril = scale_tril
v = scale_perturb_factor
d2 = scale_perturb_diag
# Ambiguous low rank update.
if v is None and d2 is not None:
return None
if c is None and d1 is None and tril is None:
# Special case when no scale args are passed in. This means use an
# identity matrix.
c = 1.
matrix = np.float32(0.)
if c is not None:
# Infer the dimension from x.
matrix += c * self._matrix_diag(np.ones_like(x))
if d1 is not None:
matrix += self._matrix_diag(np.array(d1, dtype=np.float32))
if tril is not None:
matrix += np.array(tril, dtype=np.float32)
if v is not None:
v = np.array(v, dtype=np.float32)
if v.ndim < 2:
vt = v.T
else:
vt = np.swapaxes(v, axis1=v.ndim - 2, axis2=v.ndim - 1)
if d2 is not None:
d2 = self._matrix_diag(np.array(d2, dtype=np.float32))
right = np.matmul(d2, vt)
else:
right = vt
matrix += np.matmul(v, right)
return matrix
def _matrix_diag(self, d):
"""Batch version of np.diag."""
orig_shape = d.shape
    d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
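# Illustrative check (not part of the original test file): the scale matrix composed by
# tfb.Affine and rebuilt by _makeScale above is scale = c*I + diag(d1) + tril + V @ diag(d2) @ V^T.
# The numbers below reproduce the scale commented in testTriLWithVDVTUpdate.
def _example_scale_composition():
  tril = np.array([[2., 0, 0], [1, 3, 0], [2, 3, 4]])
  v = np.array([[2., 0], [0., 0], [0, 1]])
  d2 = np.diag([2., 1])
  scale = tril + np.matmul(v, np.matmul(d2, v.T))
  np.testing.assert_allclose(scale, [[10., 0, 0], [1, 3, 0], [2, 3, 5]])
  return scale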
import argparse
import numpy as np
import os
import torch
import random
from collections import defaultdict
from utils.data_util import *
from utils.corruption import *
from utils.lib import *
from utils.pgd_attack import LinfPGDAttack
from utils.cw_attack import CW
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
from torchvision import transforms
import torch.optim as optim
from model import ResNet18
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
def train_model(model, train_dataloader, nepoch):
model.train()
loss_class = torch.nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(nepoch):
for i, (s_img, s_label) in enumerate(train_dataloader):
s_img = s_img.cuda()
s_label = s_label.cuda()
class_output = model(s_img)
loss = loss_class(class_output, s_label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
i += 1
def test(model, dataloader):
model.eval()
n_correct, n_total = 0, 0
for img, label in iter(dataloader):
batch_size = len(label)
img, label = img.cuda(), label.cuda()
with torch.no_grad():
class_output = model(img)
pred = class_output.data.max(1, keepdim=True)[1]
n_correct += pred.eq(label.data.view_as(pred)).cpu().sum()
n_total += batch_size
acc = n_correct.double() / n_total
return acc
def predict(model, X):
model.eval()
test_dataset = MyDatasetNoLabel(X)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
preds = []
for batch_x in iter(test_dataloader):
batch_x = batch_x.cuda()
with torch.no_grad():
batch_output = model(batch_x)
batch_preds = torch.argmax(batch_output, axis=1)
preds.extend(batch_preds.cpu().numpy())
return np.array(preds)
def predict_proba(model, X):
model.eval()
test_dataset = MyDatasetNoLabel(X)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
probas = []
for batch_x in iter(test_dataloader):
batch_x = batch_x.cuda()
with torch.no_grad():
batch_probas = torch.softmax(model(batch_x), axis=1)
probas.extend(batch_probas.cpu().numpy())
return np.array(probas)
def build_discriminator(PX_train, QX_train, batch_size, num_epochs=10):
dist = ResNet18(num_classes=2).cuda()
print(f"Training distinguisher on {len(PX_train):,} P vs {len(QX_train):,} Q")
dist_train_data = np.vstack([PX_train] + [QX_train])
dist_train_label = np.array([0 for _ in PX_train]+[1 for _ in QX_train])
dist_train_dataset = MyDataset(dist_train_data, dist_train_label)
dist_train_dataloader = DataLoader(dist_train_dataset, batch_size=batch_size, shuffle=True)
train_model(dist, dist_train_dataloader, num_epochs)
return dist
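# Illustrative usage sketch (not part of the original script); PX_train, QX_train and
# QX_test are assumed to be image arrays prepared by the surrounding pipeline, and
# batch_size the global configured via argparse:
#
#   dist = build_discriminator(PX_train, QX_train, batch_size, num_epochs=5)
#   q_scores = predict_proba(dist, QX_test)[:, 1]   # estimated probability of coming from Q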
def evaluation(clf, dist, PX_train, PX_test, QX_test, Qy_test, plot_max_p_rej=1.1):
errors = (predict(clf, QX_test)!=np.array(Qy_test))
Pdist_hat = predict_proba(dist, PX_test)[:, 1]
Qdist_hat = predict_proba(dist, QX_test)[:, 1]
Q_len = len(Qdist_hat)//2
    thresholds = np.linspace(0.0, 1, 100)
from seamless.silk import Silk
from seamless.silk.meta import meta, validator
class StructureState(metaclass=meta):
version = "OK"
# DTypes of atomic data and atomic state
atomic_dtype = [
("model", 'uint16'),
("hetero", "S1"),
("name", "S4"),
("altloc","S1"),
("resname", "S3"),
("chain","S1"),
("index", 'uint32'),
("icode", "S1"),
("resid", 'uint16'),
("x", 'float32'),
("y", 'float32'),
("z", 'float32'),
("occupancy", 'float32'),
("bfactor", 'float32'),
("segid", "S4"),
("element", "S2")
]
# up to 255 objects
atomic_state_dtype = \
[("obj", 'uint8')] + \
atomic_dtype + \
[
("sele", 'uint64'), # up to 64 selections
("repr", 'uint16'), # up to 16 representations
("color", 'uint8') # up to 256 colors
]
# Names of colors and color schemes:
PRIVATE_colors = [
"atomindex",
"bfactor",
"chainid",
"electrostatic",
"element",
"hydrophobicity",
"modelindex",
"occupancy",
"residueindex",
"resname",
"sstruc"
] + \
list({
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgreen": "#006400",
"darkgrey": "#a9a9a9",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"grey": "#808080",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgray": "#d3d3d3",
"lightgreen": "#90ee90",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32"
}.keys())
def __init__(self):
import numpy as np
atomic_state_dtype = [tuple(x) for x in self.atomic_state_dtype]
atomic_state_dtype = np.dtype(atomic_state_dtype, align=True)
# Array of the atomic state
self.atomstate = np.zeros(0, dtype=atomic_state_dtype)
# Names of molecular objects
self.PRIVATE_objects = []
# Names of representations
self.PRIVATE_repr = [
"cartoon", "ribbon", "rope",
"base", "surface", "trace", "tube",
"spacefill", "point", "line",
"licorice", "ball+sticks", "hyperball",
"label", "validation"
]
# Names of selections
self.PRIVATE_sele = ["sele"]
self.PRIVATE_active_selection = "sele"
def parse_pdb(self, pdb, obj=None):
import warnings
import Bio.PDB
from Bio.PDB.StructureBuilder import PDBConstructionWarning
warnings.simplefilter('ignore', PDBConstructionWarning)
from io import StringIO
import numpy as np
# Parse from file object, or from string
if callable(getattr(pdb, "read", None)):
pdb_obj = pdb
else:
pdb_obj = StringIO(pdb)
if obj is None:
for count in range(256):
obj = "obj%0.2d" % (count+1)
if obj not in self.PRIVATE_objects:
break
if obj in self.PRIVATE_objects:
raise ValueError("object %s already exists" % obj)
self.PRIVATE_objects.append(obj)
p = Bio.PDB.PDBParser()
struc = p.get_structure(obj, pdb_obj)
natoms = len(list(struc.get_atoms()))
new_statelen = len(self.atomstate) + natoms
atomstate = np.zeros(new_statelen,dtype=self.atomstate.dtype)
assert atomstate.dtype == self.atomstate.data.dtype
atomstate[:len(self.atomstate)] = self.atomstate.data
a = atomstate[len(self.atomstate):]
a["repr"] = 1 # cartoon
a["color"] = self.PRIVATE_colors.index("chainid")
a["obj"] = len(self.PRIVATE_objects)
count = 0
for modelnr, model in enumerate(struc.get_models()):
atomlist = list(model.get_atoms())
atomlist.sort(key=lambda atom: atom.serial_number)
for atom in atomlist:
residue = atom.get_parent()
hetero, resid, icode = residue.get_id()
segid = residue.segid
resname = residue.resname
chainid = residue.get_parent().id
aa = a[count]
aa["model"] = modelnr + 1
aa["hetero"] = hetero
aa["name"] = atom.name
aa["altloc"] = atom.altloc
aa["resname"] = resname
aa["chain"] = chainid
aa["index"] = atom.serial_number
aa["icode"] = icode
aa["resid"] = resid
aa["x"] = atom.coord[0]
aa["y"] = atom.coord[1]
aa["z"] = atom.coord[2]
occ = atom.occupancy
if occ is None or occ < 0:
occ = 0
aa["occupancy"] = occ
aa["segid"] = segid
aa["element"] = atom.element
count += 1
self.atomstate = atomstate
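    # Illustrative usage (file name and call sequence are assumptions, not from this file):
    #   state = StructureState()
    #   state.parse_pdb(open("1crn.pdb"), obj="crambin")
    #   state.select("backbone")      # select backbone atoms
    #   state.show("licorice")        # add a representation to the current selection
    #   state.color("red")            # and color it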
def select(self, query, sele=None):
import seamless
import numpy as np
#Bizarre bug: the following code gives an error, but not if it is launched directly from the command line.
# Must be in pandas/numexpr C code.
# Hunches of possible causes: 1. something related to multiprocessing, 2. Something related to the bool datatype,
# 3. Something related to globals() (local_dict solves the issue), 4. Something related to strides/memory alignment
# For now, use local_dict and cast all bools to ints
#
# selearray = seamless.pandeval.eval("sele", global_dict={"sele": np.array([0,1,1,0,0,1],bool)})
if sele is None:
sele = self.PRIVATE_active_selection
self.PRIVATE_active_selection = sele
old_selearray = None
sele_state = self.atomstate.data["sele"]
try:
selenr = self.PRIVATE_sele.index(sele) + 1
selebit = np.uint64(1 << selenr)
old_selearray = (sele_state & selebit)
except ValueError:
self.PRIVATE_sele.append(sele)
selenr = len(self.PRIVATE_sele)
maxsel = 8 * sele_state.itemsize
if selenr >= maxsel:
raise ValueError("Maximum number of selections %d reached" % maxsel)
selebit = np.uint64(1 << selenr)
dic = {key: self.atomstate.data[key] for key in self.atomstate.dtype.fields.keys() \
if key not in ("obj", "repr", "sele")}
objects = np.array([""] + self.PRIVATE_objects.data).astype("S")
dic["all"] = np.ones(len(self.atomstate))
dic["obj"] = objects[self.atomstate.data["obj"]]
dic["backbone"] = (self.atomstate.data["name"] == b"CA") | \
(self.atomstate.data["name"] == b"C") | \
(self.atomstate.data["name"] == b"O") | \
(self.atomstate.data["name"] == b"N")
for xselenr0, xsele in enumerate(self.PRIVATE_sele):
xselenr = xselenr0 + 1
if xsele in dic or (xselenr == selenr and old_selearray is None):
continue
xselebit = np.uint64(1 << xselenr)
xselearray = (sele_state & xselebit).astype(bool).astype(np.uint8)
dic[xsele] = xselearray
try:
selearray = seamless.pandeval.eval(query, local_dict=dic, align_result=False, str_as_bytes=True)
selearray = np.ascontiguousarray(selearray).astype(bool).copy() # another Heisenbug; something is wrong with memory!
print("%d atoms selected" % (selearray>0).sum())
self.PRIVATE_unselect_noshift(sele)
sele_state[selearray] |= selebit
except:
import traceback; traceback.print_exc()
if old_selearray is not None:
sele_state[old_selearray] |= selebit
def PRIVATE_unselect_noshift(self, sele):
try:
selenr = self.PRIVATE_sele.index(sele) + 1
except ValueError:
raise ValueError("Unknown selection '%s'" % sele)
atomstate_sele = self.atomstate.data["sele"]
selebit = 1 << selenr
dt = atomstate_sele.dtype.type
cmpl_sele = ~dt(selebit)
atomstate_sele &= cmpl_sele
def unselect(self, sele=None):
if sele is None:
sele = self.PRIVATE_active_selection
if sele == "sele":
return self.PRIVATE_unselect_noshift("sele")
try:
selenr = self.PRIVATE_sele.index(sele) + 1
except ValueError:
raise ValueError("Unknown selection '%s'" % sele)
atomstate_sele = self.atomstate.data["sele"]
dt = atomstate_sele.dtype.type
bits_to_keep = dt(2**selenr-1)
bits_to_shift = ~bits_to_keep
atomstate_sele[:] = ((atomstate_sele >> 1) & bits_to_shift) | (atomstate_sele & bits_to_keep)
self.PRIVATE_sele.remove(sele)
if self.PRIVATE_active_selection == sele:
self.PRIVATE_active_selection = "sele"
def PRIVATE_get_sele(self, sele):
try:
selenr = self.PRIVATE_sele.index(sele) + 1
except ValueError:
raise ValueError("Unknown selection '%s'" % sele)
atomstate = self.atomstate.data
selebit = 1 << selenr
selearray = (atomstate["sele"] & selebit).astype(bool)
return selearray
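    # How selections are stored (illustrative): each named selection owns one bit of the
    # uint64 "sele" field of the atom state, so membership updates and tests are bit ops:
    #   selebit = np.uint64(1 << selenr)                    # bit of selection number selenr
    #   atomstate["sele"] |= selebit                        # add atoms to the selection
    #   mask = (atomstate["sele"] & selebit).astype(bool)   # test membership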
def get_selection(self, sele=None, format="mask"):
assert format in ("mask", "pandas")
import numpy as np
import pandas as pd
from collections import OrderedDict
if sele is None:
sele = self.PRIVATE_active_selection
mask = self.PRIVATE_get_sele(sele)
if format == "mask":
return mask
atomstate = self.atomstate.data
arr_atomstate = OrderedDict()
for key in self.atomstate.dtype.fields.keys():
if key in ("sele", "repr", "color"):
continue
v = atomstate[key]
if key == "obj":
names = [""] + self.PRIVATE_objects.data
v = np.array(names)[v]
elif v.dtype.kind == "S":
v = v.astype("U")
arr_atomstate[key] = v
return pd.DataFrame(arr_atomstate).iloc[mask]
def PRIVATE_change_repr(self, representation, sele, op):
repr_mapping = {
"lines": "line",
"sticks": "licorice",
"ball+sticks": "ball+stick",
"dots": "point"
}
if representation in repr_mapping:
representation = repr_mapping[representation]
sele_array = self.PRIVATE_get_sele(sele)
rep = self.atomstate.data["repr"]
if representation == "all":
assert op == "hide"
rep[:] = 0
return
reprnr = self.PRIVATE_repr.index(representation)
repr_bit = 1 << reprnr
if op == "show":
rep[sele_array] |= repr_bit
elif op == "hide":
rep[sele_array] ^= repr_bit
elif op == "show-as":
rep[sele_array] = repr_bit
def show(self, representation, sele=None):
if sele is None:
sele = self.PRIVATE_active_selection
return self.PRIVATE_change_repr(representation, sele, "show")
def hide(self, representation="all", sele=None):
if sele is None:
sele = self.PRIVATE_active_selection
return self.PRIVATE_change_repr(representation, sele, "hide")
def show_as(self, representation, sele=None):
if sele is None:
sele = self.PRIVATE_active_selection
return self.PRIVATE_change_repr(representation, sele, "show-as")
def color(self, color, sele=None):
if sele is None:
sele = self.PRIVATE_active_selection
if color not in self.PRIVATE_colors:
raise ValueError("Unknown color '%s'" % color)
color_index = self.PRIVATE_colors.index(color)
sele_array = self.PRIVATE_get_sele(sele)
self.atomstate.data["color"][sele_array] = color_index
def ngl_representations(self):
import numpy as np
results = {}
for objnr, obj in enumerate(self.PRIVATE_objects):
result = []
repr_state = self.atomstate.data["repr"]
color_state = self.atomstate.data["color"]
obj_mask = (self.atomstate.data["obj"] == objnr + 1)
repr_state = repr_state[obj_mask]
color_state = color_state[obj_mask]
colors = np.unique(color_state)
for n, rep in enumerate(self.PRIVATE_repr):
reprbit = 1 << n
repr_mask = (repr_state & reprbit).astype(bool)
if not repr_mask.sum():
continue
for color in colors:
color_mask = (color_state == color)
                    indices = np.nonzero(color_mask & repr_mask)
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, <NAME>
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
import numpy as np
import lattpy as lp
from numpy.testing import assert_array_equal, assert_array_almost_equal
from lattpy import Lattice
PI = np.pi
TWOPI = 2 * np.pi
chain = Lattice.chain(a=1.0)
rchain = Lattice(TWOPI)
square = Lattice.square(a=1.0)
rsquare = Lattice(TWOPI * np.eye(2))
rect = Lattice.rectangular(a1=2.0, a2=1.0)
rrect = Lattice(PI * np.array([[1, 0], [0, 2]]))
hexagonal = Lattice.hexagonal(a=1)
rhexagonal = Lattice(np.array([[+2.0943951, +3.62759873],
[+2.0943951, -3.62759873]]))
sc = Lattice.sc(a=1.0)
rsc = Lattice(TWOPI * np.eye(3))
fcc = Lattice.fcc(a=1.0)
rfcc = Lattice(TWOPI * np.array([[+1, +1, -1],
[+1, -1, +1],
[-1, +1, +1]]))
bcc = Lattice.bcc(a=1.0)
rbcc = Lattice(TWOPI * np.array([[+1, +1, 0],
[0, -1, +1],
[-1, 0, +1]]))
def test_is_reciprocal():
# Chain
rvecs = rchain.vectors
assert chain.is_reciprocal(rvecs)
assert not chain.is_reciprocal(-1 * rvecs)
assert not chain.is_reciprocal(+2 * rvecs)
assert not chain.is_reciprocal(0.5 * rvecs)
assert not chain.is_reciprocal(0.0 * rvecs)
# Square
rvecs = rsquare.vectors
assert square.is_reciprocal(rvecs)
assert not square.is_reciprocal(-1 * rvecs)
assert not square.is_reciprocal(+2 * rvecs)
assert not square.is_reciprocal(0.5 * rvecs)
assert not square.is_reciprocal(0.0 * rvecs)
# Rectangular
rvecs = rrect.vectors
assert rect.is_reciprocal(rvecs)
assert not rect.is_reciprocal(-1 * rvecs)
assert not rect.is_reciprocal(+2 * rvecs)
assert not rect.is_reciprocal(0.5 * rvecs)
assert not rect.is_reciprocal(0.0 * rvecs)
# Hexagonal
rvecs = rhexagonal.vectors
assert hexagonal.is_reciprocal(rvecs)
assert not hexagonal.is_reciprocal(-1 * rvecs)
assert not hexagonal.is_reciprocal(+2 * rvecs)
assert not hexagonal.is_reciprocal(0.5 * rvecs)
assert not hexagonal.is_reciprocal(0.0 * rvecs)
# Cubic
rvecs = rsc.vectors
assert sc.is_reciprocal(rvecs)
assert not sc.is_reciprocal(-1 * rvecs)
assert not sc.is_reciprocal(+2 * rvecs)
assert not sc.is_reciprocal(0.5 * rvecs)
assert not sc.is_reciprocal(0.0 * rvecs)
    # Face-centered cubic (fcc)
rvecs = rfcc.vectors
assert fcc.is_reciprocal(rvecs)
assert not fcc.is_reciprocal(-1 * rvecs)
assert not fcc.is_reciprocal(+2 * rvecs)
assert not fcc.is_reciprocal(0.5 * rvecs)
assert not fcc.is_reciprocal(0.0 * rvecs)
    # Body-centered cubic (bcc)
rvecs = rbcc.vectors
assert bcc.is_reciprocal(rvecs)
assert not bcc.is_reciprocal(-1 * rvecs)
assert not bcc.is_reciprocal(+2 * rvecs)
assert not bcc.is_reciprocal(0.5 * rvecs)
assert not bcc.is_reciprocal(0.0 * rvecs)
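# Illustrative check (not one of the original tests): a reciprocal basis satisfies
# a_i . b_j = 2*pi*delta_ij, i.e. B = 2*pi * inv(A)^T when the basis vectors are the
# rows of A; the square lattice is the simplest case.
def _example_reciprocal_relation():
    a = np.eye(2)                      # square lattice basis vectors (rows)
    b = TWOPI * np.linalg.inv(a).T     # its reciprocal basis
    assert_array_almost_equal(a @ b.T, TWOPI * np.eye(2))
    return b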
def test_reciprocal_vectors():
# Chain
expected = rchain.vectors
actual = chain.reciprocal_vectors()
    assert_array_equal(expected, actual)
## coding: utf8
## Kaggle Challenge
## Utilisation de Python 3.5
## Chargement des bibliothèques
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import collections
import sklearn
from sklearn import linear_model
from sklearn import svm
from sklearn import learning_curve
from sklearn import ensemble
#%matplotlib inline
## Load the dataset
df= pd.read_csv('data.csv')
## Convert the date from a string
## format to a date format
df['date']=pd.to_datetime(df['datetime'])
## Extract the month, the hour and
## the day of the week
df.set_index('date', inplace=True)
df['month']=df.index.month
df['hours']=df.index.hour
df['dayOfWeek']=df.index.weekday
###################
################### Influence of the time of day
###################
## Class to plot the average number of
## bike rentals hour by hour for working
## and non-working days
class mean_30():
def __init__(self, df):
self.df=df
def mean_hours_min(self,h):
a = self.df["hours"] == h
return self.df[a]["count"].mean()
def transf(self, t):
return self.mean_hours_min(t)
def transfc(self, t):
return self.err_hours_min(t)
def vector_day(self):
k = []
for i in range(0,24):
k.append(i)
hour_day = pd.DataFrame()
hour_day["A"] = k
return hour_day["A"]
def view(self):
plt.plot(self.vector_day().apply(self.transf))
## Plot the graphs
fig=plt.figure()
fig.suptitle('Location de velo selon l\'heure, jours ouvrables ou non', fontsize=13)
plt.ylabel('nombre de locations de vélos')
plt.xlabel('heure de la journée')
moy0=mean_30(df[df['workingday']==0])
moy0.view()
moy1=mean_30(df[df['workingday']==1])
moy1.view()
plt.legend(['0','1'])
plt.show()
## Class to plot the standard deviation
## of the number of bike rentals hour by
## hour for a given day
## Change this value between 0 and 6 to
## get another day of the week
j=0
class std_30():
def __init__(self, df):
self.df=df
def mean_hours_std(self,j,h):
y = self.df[self.df["dayOfWeek"]==j]["hours"] == h
return self.df[self.df["dayOfWeek"]==j][y]["count"].mean()
def err_hours(self,j,h):
y = self.df[self.df["dayOfWeek"]==j]["hours"] == h
return self.df[self.df["dayOfWeek"]==j][y]["count"].std()
def transf_err(self,t):
return self.mean_hours_std(j,t)
def transf_err2(self,t):
return self.err_hours(j,t)
def vector_day(self):
k = []
for i in range(0,24):
k.append(i)
hour_std = pd.DataFrame()
hour_std["A"] = k
return hour_std["A"]
def view(self):
errors=self.vector_day().apply(self.transf_err2)
fig, ax = plt.subplots()
self.vector_day().apply(self.transf_err).plot(yerr=errors, ax=ax,label=str(j))
plt.legend('0',loc=2,prop={'size':9})
fig.suptitle('Deviation standard des locations de velo selon l\'heure', fontsize=13)
std0=std_30(df)
std0.view()
plt.ylabel('nombre de locations de vélos')
plt.xlabel('heure de la journée')
plt.show()
###################
################### Influence of the month
###################
## Class to plot the average number
## of bike rentals per month
class month_30():
def __init__(self, df):
self.df=df
def mean_hours_min(self,m):
a = self.df["month"] == m
return self.df[a]["count"].mean()
def transf(self, t):
return self.mean_hours_min(t)
def transfc(self, t):
return self.err_hours_min(t)
def vector_day(self):
k = []
for i in range(0,13):
k.append(i)
hour_day = pd.DataFrame()
hour_day["A"] = k
return hour_day["A"]
def view(self):
plt.plot(self.vector_day().apply(self.transf))
## Plot the graphs
fig=plt.figure()
fig.suptitle('Location de velo selon le mois', fontsize=13)
moy0=month_30(df)
moy0.view()
plt.ylabel('nombre de locations de vélos')
plt.xlabel('mois de l\' année')
plt.show()
###################
################### Influence of the weather
###################
plt.figure()
## Average bike demand for the
## weather conditions, using a
## Python dictionary
a={u'Degage/nuageux':df[df['weather']==1]['count'].mean(),
u'Brouillard': df[df['weather']==2]['count'].mean(),
u'Legere pluie':df[df['weather']==3]['count'].mean()
}
width = 1/1.6
plt.bar(range(len(a)), a.values(),width,color="blue",align='center')
plt.xticks(range(len(a)), a.keys())
plt.ylabel('nombre de locations de vélos')
plt.title('Moyenne des locations de velos pour differentes conditions meteorologiques')
plt.show()
###################
################### Influence of wind, temperature and humidity
###################
## Average bike demand for selected
## parameter thresholds, using a
## Python dictionary
D = {u'V>13k/h':df[df['windspeed']>13]['count'].mean(),
u'V<13k/h': df[df['windspeed']<13]['count'].mean(),
u'T<24°C':df[df['atemp']<24]['count'].mean(),
u'T>24°C':df[df['atemp']>24]['count'].mean(),
u'H>62%': df[df['humidity']>62]['count'].mean(),
u'H<62%':df[df['humidity']<62]['count'].mean()
}
od = collections.OrderedDict(sorted(D.items()))
width = 1/1.6
plt.figure()
plt.bar(range(len(od)), od.values(),width,color="blue",align='center')
plt.xticks(range(len(od)), od.keys())
plt.title('Variation de la demande en fonction de 3 variables')
plt.show()
###################
################### Conclusion and choice of the influential parameters
###################
## Compute the correlation matrix to
## remove correlated variables
df.corr()
plt.matshow(df.corr())
plt.yticks(range(len(df.corr().columns)), df.corr().columns);
plt.colorbar()
plt.show()
df1=df.drop(['workingday','datetime','season','atemp','holiday','registered','casual'],axis=1)
target=df1['count'].values #set of outputs to predict (yi)
train=df1.drop('count',axis=1) #set of input data (xi)
## Random split between training data
## and test data
X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(
train, target, test_size=0.33, random_state=42)
## Function to plot the
## learning curves
def plot_learning_curve(estimator, title, X, y, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = sklearn.learning_curve.learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
import random
import os
import numpy as np
import cv2
import torch
from torchvision.transforms import functional as F
from utils import (
generate_shiftscalerotate_matrix,
)
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, target):
for t in self.transforms:
img, target = t(img, target)
return img, target
def __repr__(self):
format_str = self.__class__.__name__ + '('
for t in self.transforms:
format_str += '\n'
format_str += f' {t}'
format_str += '\n)'
return format_str
class Resize:
def __init__(self, dst_width, dst_height, dst_K):
self.dst_width = dst_width
self.dst_height = dst_height
self.dst_K = dst_K
def __call__(self, img, target):
M = np.matmul(self.dst_K, np.linalg.inv(target.K))
#
img = cv2.warpAffine(img, M[:2], (self.dst_width, self.dst_height), flags=cv2.INTER_LINEAR, borderValue=(128, 128, 128))
target = target.transform(M, self.dst_K, self.dst_width, self.dst_height)
return img, target
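# Illustrative sketch (not part of the original module; the intrinsics below are made up):
# Resize treats the resize as a change of camera intrinsics, so a source pixel maps to
# x_dst ~ dst_K @ inv(K_src) @ x_src, which is exactly the affine matrix M warped with above.
def _example_resize_intrinsics():
    K_src = np.array([[572.4, 0.0, 325.3],
                      [0.0, 573.6, 242.0],
                      [0.0, 0.0, 1.0]])
    K_dst = np.array([[320.0, 0.0, 128.0],
                      [0.0, 320.0, 128.0],
                      [0.0, 0.0, 1.0]])
    M = K_dst @ np.linalg.inv(K_src)
    x_src = np.array([325.3, 242.0, 1.0])   # principal point of the source camera
    return M @ x_src                        # maps to (128, 128, 1), the new principal point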
class RandomShiftScaleRotate:
def __init__(self, shift_limit, scale_limit, rotate_limit, dst_width, dst_height, dst_K):
self.shift_limit = shift_limit
self.scale_limit = scale_limit
self.rotate_limit = rotate_limit
#
self.dst_width = dst_width
self.dst_height = dst_height
self.dst_K = dst_K
def __call__(self, img, target):
M = generate_shiftscalerotate_matrix(
self.shift_limit, self.scale_limit, self.rotate_limit,
self.dst_width, self.dst_height
)
img = cv2.warpAffine(img, M[:2], (self.dst_width, self.dst_height), flags=cv2.INTER_LINEAR, borderValue=(128, 128, 128))
target = target.transform(M, self.dst_K, self.dst_width, self.dst_height)
return img, target
class RandomHSV:
def __init__(self, h_ratio, s_ratio, v_ratio):
self.h_ratio = h_ratio
self.s_ratio = s_ratio
self.v_ratio = v_ratio
def __call__(self, img, target):
img = distort_hsv(img, self.h_ratio, self.s_ratio, self.v_ratio)
return img, target
class RandomNoise:
def __init__(self, noise_ratio):
self.noise_ratio = noise_ratio
def __call__(self, img, target):
img = distort_noise(img, self.noise_ratio)
return img, target
class RandomSmooth:
def __init__(self, smooth_ratio):
self.smooth_ratio = smooth_ratio
def __call__(self, img, target):
img = distort_smooth(img, self.smooth_ratio)
return img, target
class ToTensor:
def __call__(self, img, target):
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
target = target.to_tensor()
return img, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, img, target):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255
        img = img - np.array(self.mean)
# run_example = false
import numpy as np
import pygfx as gfx
from PySide6 import QtWidgets, QtCore, QtGui
from wgpu.gui.qt import WgpuCanvas
# %% Qt logic
class CanvasWithOverlay(WgpuCanvas):
"""A Qt canvas with support for 2D overlay."""
overlay = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._text_labels = []
self.overlay = Overlay(self)
self.overlay.setGeometry(self.geometry())
self.overlay.show()
self.overlay.raise_()
def resizeEvent(self, event): # noqa: N802
super().resizeEvent(event)
if self.overlay:
self.overlay.setGeometry(self.geometry())
def moveEvent(self, event): # noqa: N802
super().moveEvent(event)
if self.overlay:
self.overlay.setGeometry(self.geometry())
def set_text_labels(self, wobjects):
"""Set text labels to overlay. Must be a list of TextOverlay objects."""
self._text_labels = wobjects
class Overlay(QtWidgets.QWidget):
"""Overlay that draws 2D featues using the canvas API.
We cannot draw in the wgpu widget directly, because that widget has
no paint engine (we have to remove it to prevent Qt from overwriting
the screen that we draw with wgpu). We can also not use a normal
widget overlaid over it, because Qt would then do the compositing,
but the wgpu widget draws directly to screen, so the overlay widget
would get a black background. Therefore, the overlay widget is a
toplevel widget (a window) that we keep exactly on top of the actual
widget. Not pretty, but it seems to work. I am not sure how well
this holds up on other platforms.
"""
def __init__(self, *args, **kwargs):
super().__init__(
*args,
**kwargs,
)
        # We want a translucent background, and no window frame.
# Setting the Tool flag make it always on top of the parent widget.
self.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.Tool)
# No background, just in case.
self.setAutoFillBackground(False)
def paintEvent(self, event): # noqa: N802
painter = QtGui.QPainter()
if not painter.begin(self):
return
# Draw text labels
text_labels = self.parent()._text_labels
features = None
for label in text_labels:
if features != (label.size, label.color):
features = label.size, label.color
painter.setFont(QtGui.QFont("Arial", label.size))
painter.setPen(QtGui.QColor(label.color))
painter.drawText(QtCore.QPointF(*label.ppos), label.text)
painter.end()
# %% wgpu world object and renderer
from pygfx.renderers import Renderer # noqa: E402
class TextOverlay(gfx.WorldObject):
"""A text label that can get overlaid on the visualization using e.q. Qt."""
    def __init__(self, text, size=12, color="#000000", position=(0, 0, 0)):
super().__init__()
self.text = str(text)
self.size = int(size)
self.color = color
self.position = gfx.linalg.Vector3(*position)
# Other options: font, bold, background, pixel-offset, alignment
class QtOverlayRenderer(Renderer):
"""A special renderer that can draw certain 2D overlays over a Qt canvas.
Currently only text (TextOverlay objects).
"""
def __init__(self, canvas):
self._canvas = canvas
# todo: allow passing a normal qt canvas?
# assert isinstance(canvas, wgpu.gui.qt.QtWgpuCanvas)
assert isinstance(canvas, CanvasWithOverlay)
def render(self, scene: gfx.WorldObject, camera: gfx.Camera):
"""Main render method, called from the canvas."""
logical_size = self._canvas.get_logical_size()
# We assume that this call is preceded by a call to the wgpu renderer,
# so we don't need to apply any updates.
# scene.update_matrix_world()
# camera.set_viewport_size(*logical_size)
# camera.update_matrix_world()
# camera.update_projection_matrix()
# Get the list of objects to render
def visit(wobject):
if wobject.visible and isinstance(wobject, TextOverlay):
q.append(wobject)
q = []
scene.traverse(visit)
# Set the pixel position of each text overlay object
for wobject in q:
pos = wobject.position.clone()
pos = pos.apply_matrix4(wobject.matrix_world).project(camera)
# I don't understand this 0.25 value. I would expect it to be 0.5
# but for some reason it needs to be 0.25.
pos_pix = (
(+pos.x * 0.25 + 0.5) * logical_size[0],
(-pos.y * 0.25 + 0.5) * logical_size[1],
)
wobject.ppos = pos_pix
# Store the labels for the overlay, and schedule a draw.
self._canvas.set_text_labels(q)
self._canvas.overlay.update()
app = QtWidgets.QApplication([])
canvas = CanvasWithOverlay()
renderer = gfx.WgpuRenderer(canvas)
overlay_renderer = QtOverlayRenderer(canvas)
scene = gfx.Scene()
positions = np.random.normal(0, 1, (20, 3))
from UQpy.Distributions import *
import numpy as np
from scipy.spatial.distance import pdist
import scipy.stats as stats
import copy
########################################################################################################################
########################################################################################################################
# Latin hypercube sampling (LHS)
########################################################################################################################
class LHS:
"""
    Perform Latin hypercube sampling (LHS) of random variables.
**Input:**
* **dist_object** ((list of) ``Distribution`` object(s)):
List of ``Distribution`` objects corresponding to each random variable.
All distributions in ``LHS`` must be independent. ``LHS`` does not generate correlated random variables.
Therefore, for multi-variate designs the `dist_object` must be a list of ``DistributionContinuous1D`` objects
or an object of the ``JointInd`` class.
* **nsamples** (`int`):
Number of samples to be drawn from each distribution.
* **criterion** (`str` or `callable`):
The criterion for pairing the generating sample points
Options:
1. 'random' - completely random. \n
2. 'centered' - points only at the centre. \n
3. 'maximin' - maximizing the minimum distance between points. \n
4. 'correlate' - minimizing the correlation between the points. \n
5. `callable` - User-defined method.
* **random_state** (None or `int` or ``numpy.random.RandomState`` object):
Random seed used to initialize the pseudo-random number generator. Default is None.
If an integer is provided, this sets the seed for an object of ``numpy.random.RandomState``. Otherwise, the
object itself can be passed directly.
* **verbose** (`Boolean`):
A boolean declaring whether to write text to the terminal.
* ****kwargs**
Additional arguments to be passed to the method specified by `criterion`
**Attributes:**
* **samples** (`ndarray`):
The generated LHS samples.
* **samples_U01** (`ndarray`):
The generated LHS samples on the unit hypercube.
**Methods**
"""
def __init__(self, dist_object, nsamples, criterion=None, random_state=None, verbose=False,
**kwargs):
# Check if a Distribution object is provided.
from UQpy.Distributions import DistributionContinuous1D, JointInd
if isinstance(dist_object, list):
for i in range(len(dist_object)):
if not isinstance(dist_object[i], DistributionContinuous1D):
raise TypeError('UQpy: A DistributionContinuous1D object must be provided.')
else:
if not isinstance(dist_object, (DistributionContinuous1D, JointInd)):
raise TypeError('UQpy: A DistributionContinuous1D or JointInd object must be provided.')
self.dist_object = dist_object
self.kwargs = kwargs
self.random_state = random_state
if isinstance(self.random_state, int):
self.random_state = np.random.RandomState(self.random_state)
elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
if isinstance(criterion, str):
if criterion not in ['random', 'centered', 'maximin', 'correlate']:
raise NotImplementedError("Exit code: Supported lhs criteria: 'random', 'centered', 'maximin', "
"'correlate'.")
else:
self.criterion = criterion
else:
self.criterion = criterion
if isinstance(nsamples, int):
self.nsamples = nsamples
else:
raise ValueError('UQpy: number of samples must be specified.')
# Set printing options
self.verbose = verbose
if isinstance(self.dist_object, list):
self.samples = np.zeros([self.nsamples, len(self.dist_object)])
elif isinstance(self.dist_object, DistributionContinuous1D):
self.samples = np.zeros([self.nsamples, 1])
elif isinstance(self.dist_object, JointInd):
self.samples = np.zeros([self.nsamples, len(self.dist_object.marginals)])
self.samplesU01 = np.zeros_like(self.samples)
if self.nsamples is not None:
self.run(self.nsamples)
def run(self, nsamples):
"""
Execute the random sampling in the ``LHS`` class.
The ``run`` method is the function that performs random sampling in the ``LHS`` class. If `nsamples` is
provided, the ``run`` method is automatically called when the ``LHS`` object is defined. The user may also call
the ``run`` method directly to generate samples. The ``run`` method of the ``LHS`` class cannot be invoked
multiple times for sample size extension.
**Input:**
* **nsamples** (`int`):
Number of samples to be drawn from each distribution.
If the ``run`` method is invoked multiple times, the newly generated samples will overwrite the existing
samples.
**Output/Returns:**
        The ``run`` method has no returns, although it creates and/or appends the `samples` and `samplesU01` attributes
of the ``LHS`` object.
"""
if self.nsamples is None:
self.nsamples = nsamples
if self.verbose:
print('UQpy: Running Latin Hypercube sampling...')
        cut = np.linspace(0, 1, self.nsamples + 1)
from __future__ import division
import numpy
import scipy
import networkx
import FARZ
from models import *
import matplotlib.pyplot as pyplot
def generate_workplace_contact_network(num_cohorts=1, num_nodes_per_cohort=100, num_teams_per_cohort=10,
mean_intracohort_degree=6, pct_contacts_intercohort=0.2,
farz_params={'alpha':5.0, 'gamma':5.0, 'beta':0.5, 'r':1, 'q':0.0, 'phi':10,
'b':0, 'epsilon':1e-6, 'directed': False, 'weighted': False},
distancing_scales=[]):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Generate FARZ networks of intra-cohort contacts:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
cohortNetworks = []
teams_indices = {}
for i in range(num_cohorts):
numNodes = num_nodes_per_cohort[i] if isinstance(num_nodes_per_cohort, list) else num_nodes_per_cohort
numTeams = num_teams_per_cohort[i] if isinstance(num_teams_per_cohort, list) else num_teams_per_cohort
cohortMeanDegree = mean_intracohort_degree[i] if isinstance(mean_intracohort_degree, list) else mean_intracohort_degree
farz_params.update({'n':numNodes, 'k':numTeams, 'm':cohortMeanDegree})
cohortNetwork, cohortTeamLabels = FARZ.generate(farz_params=farz_params)
cohortNetworks.append(cohortNetwork)
for node, teams in cohortTeamLabels.items():
for team in teams:
try:
teams_indices['c'+str(i)+'-t'+str(team)].append(node)
except KeyError:
teams_indices['c'+str(i)+'-t'+str(team)] = [node]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Establish inter-cohort contacts:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
cohortsAdjMatrices = [networkx.adj_matrix(cohortNetwork) for cohortNetwork in cohortNetworks]
workplaceAdjMatrix = scipy.sparse.block_diag(cohortsAdjMatrices)
workplaceNetwork = networkx.from_scipy_sparse_matrix(workplaceAdjMatrix)
N = workplaceNetwork.number_of_nodes()
cohorts_indices = {}
cohortStartIdx = -1
cohortFinalIdx = -1
for c, cohortNetwork in enumerate(cohortNetworks):
cohortStartIdx = cohortFinalIdx + 1
cohortFinalIdx = cohortStartIdx + cohortNetwork.number_of_nodes() - 1
cohorts_indices['c'+str(c)] = list(range(cohortStartIdx, cohortFinalIdx))
for team, indices in teams_indices.items():
if('c'+str(c) in team):
teams_indices[team] = [idx+cohortStartIdx for idx in indices]
for i in list(range(cohortNetwork.number_of_nodes())):
i_intraCohortDegree = cohortNetwork.degree[i]
i_interCohortDegree = int( ((1/(1-pct_contacts_intercohort))*i_intraCohortDegree)-i_intraCohortDegree )
# Add intercohort edges:
if(len(cohortNetworks) > 1):
for d in list(range(i_interCohortDegree)):
j = numpy.random.choice(list(range(0, cohortStartIdx))+list(range(cohortFinalIdx+1, N)))
workplaceNetwork.add_edge(i, j)
return workplaceNetwork, cohorts_indices, teams_indices
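# Worked example of the inter-cohort degree computation above (illustrative numbers):
# with an intra-cohort degree of 8 and pct_contacts_intercohort = 0.2, a node gets
# int((1 / (1 - 0.2)) * 8 - 8) = 2 additional inter-cohort edges, i.e. 2 of its 10
# total contacts (20%) fall outside its own cohort.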
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def generate_demographic_contact_network(N, demographic_data, layer_generator='FARZ', layer_info=None, distancing_scales=[], isolation_groups=[], verbose=False):
graphs = {}
age_distn = demographic_data['age_distn']
household_size_distn = demographic_data['household_size_distn']
household_stats = demographic_data['household_stats']
#########################################
# Preprocess Demographic Statistics:
#########################################
meanHouseholdSize = numpy.average(list(household_size_distn.keys()), weights=list(household_size_distn.values()))
# print("mean household size: " + str(meanHouseholdSize))
# Calculate the distribution of household sizes given that the household has more than 1 member:
household_size_distn_givenGT1 = {key: value/(1-household_size_distn[1]) for key, value in household_size_distn.items()}
household_size_distn_givenGT1[1] = 0
# Percent of households with at least one member under 20:
pctHouseholdsWithMember_U20 = household_stats['pct_with_under20']
# Percent of households with at least one member over 60:
pctHouseholdsWithMember_O60 = household_stats['pct_with_over60']
# Percent of households with at least one member under 20 AND at least one over 60:
pctHouseholdsWithMember_U20andO60 = household_stats['pct_with_under20_over60']
# Percent of SINGLE OCCUPANT households where the occupant is over 60:
pctHouseholdsWithMember_O60_givenEq1 = household_stats['pct_with_over60_givenSingleOccupant']
# Average number of members Under 20 in households with at least one member Under 20:
meanNumU20PerHousehold_givenU20 = household_stats['mean_num_under20_givenAtLeastOneUnder20']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Define major age groups (under 20, between 20-60, over 60),
# and calculate age distributions conditional on belonging (or not) to one of these groups:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ageBrackets_U20 = ['0-9', '10-19']
    totalPctU20 = numpy.sum([age_distn[bracket] for bracket in ageBrackets_U20])
import os, copy, json, sys
from easydict import EasyDict
from tqdm import tqdm
import scipy
import numpy as np
import trimesh
import tensorflow as tf
import rnn_model
import dataset
import dataset_prepare
import utils
def fill_edges(model):
  # To compare accuracies to MeshCNN, this function builds the edges & edge lengths the same way they do
edge2key = dict()
edges_length = []
edges = []
edges_count = 0
for face_id, face in enumerate(model['faces']):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
faces_edges[idx] = edge
if edge not in edge2key:
edge2key[edge] = edges_count
edges.append(list(edge))
e_l = np.linalg.norm(model['vertices'][edge[0]] - model['vertices'][edge[1]])
edges_length.append(e_l)
edges_count += 1
model['edges_meshcnn'] = np.array(edges)
model['edges_length'] = edges_length
def get_model_by_name(name):
fn = name[name.find(':')+1:]
mesh_data = np.load(fn, encoding='latin1', allow_pickle=True)
model = {'vertices': mesh_data['vertices'], 'faces': mesh_data['faces'], 'labels': mesh_data['labels'],
'edges': mesh_data['edges']}
if 'face_labels' in mesh_data.keys():
model['face_labels'] = mesh_data['face_labels']
if 'labels_fuzzy' in mesh_data.keys():
model['labels_fuzzy'] = mesh_data['labels_fuzzy']
fill_edges(model)
model['seseg'] = np.zeros((model['edges_meshcnn'].shape[0], model['labels_fuzzy'].shape[1]))
for e in range(model['edges_meshcnn'].shape[0]):
v0, v1 = model['edges_meshcnn'][e]
l0 = model['labels_fuzzy'][v0]
l1 = model['labels_fuzzy'][v1]
model['seseg'][e] = (l0 + l1) / 2
return model
def calc_final_accuracy(models, print_details=False):
  # Calculate 4 types of accuracy:
  # 2 element types are used (vertex / edge), and for each element type both a vanilla and a normalized accuracy are computed.
  # Notes:
  # 1. For the edge calculation only, the accuracy allows fuzzy labeling:
  #    as in MeshCNN's paper, if an edge lies between two different segments, a prediction of either segment is considered correct.
  # 2. The normalized accuracy is weighted by the edge length or the vertex "area" (the mean area of the faces around each vertex).
vertices_accuracy = []; vertices_norm_acc = []
edges_accuracy = []; edges_norm_acc = []
for model_name, model in models.items():
if model['labels'].size == 0:
continue
best_pred = np.argmax(model['pred'], axis=-1)
model['v_pred'] = best_pred
pred_score = scipy.special.softmax(model['pred'], axis=1)
# Calc edges accuracy
if 'edges_meshcnn' in model.keys(): # pred per edge
g = 0
gn = 0
for ei, edge in enumerate(model['edges_meshcnn']):
v0_pred = best_pred[edge[0]]
v0_score = pred_score[edge[0], v0_pred]
v1_pred = best_pred[edge[1]]
v1_score = pred_score[edge[1], v1_pred]
if v0_score > v1_score:
best = v0_pred - 1
else:
best = v1_pred - 1
if best < model['seseg'].shape[1]:
g += (model['seseg'][ei, best] != 0)
gn += (model['seseg'][ei, best] != 0) * model['edges_length'][ei]
this_accuracy = g / model['edges_meshcnn'].shape[0]
norm_accuracy = gn / np.sum(model['edges_length'])
edges_accuracy.append(this_accuracy)
edges_norm_acc.append(norm_accuracy)
# Calc vertices accuracy
if 'area_vertices' not in model.keys():
dataset_prepare.calc_mesh_area(model)
this_accuracy = (best_pred == model['labels']).sum() / model['labels'].shape[0]
norm_accuracy = np.sum((best_pred == model['labels']) * model['area_vertices']) / model['area_vertices'].sum()
vertices_accuracy.append(this_accuracy)
vertices_norm_acc.append(norm_accuracy)
if len(edges_accuracy) == 0:
edges_accuracy = [0]
return np.mean(edges_accuracy), np.mean(vertices_accuracy), np.nan
def postprocess_vertex_predictions(models):
  # Average each vertex's prediction with its neighbors' predictions to get a better prediction (Eq. 5 in the paper)
for model_name, model in models.items():
pred_orig = model['pred'].copy()
av_pred = np.zeros_like(pred_orig)
for v in range(model['vertices'].shape[0]):
this_pred = pred_orig[v]
nbrs_ids = model['edges'][v]
nbrs_ids = np.array([n for n in nbrs_ids if n != -1])
if nbrs_ids.size:
first_ring_pred = (pred_orig[nbrs_ids].T / model['pred_count'][nbrs_ids]).T
nbrs_pred = np.mean(first_ring_pred, axis=0) * 0.5
av_pred[v] = this_pred + nbrs_pred
else:
av_pred[v] = this_pred
model['pred'] = av_pred
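# Typical evaluation flow (sketch): after per-walk predictions have been
# accumulated into each model's 'pred' / 'pred_count' fields, smooth and
# score them with:
#   postprocess_vertex_predictions(models)
#   e_acc, v_acc, _ = calc_final_accuracy(models)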
def calc_accuracy_test(logdir=None, dataset_expansion=None, dnn_model=None, params=None,
n_iters=32, model_fn=None, n_walks_per_model=32, data_augmentation={}):
# Prepare parameters for the evaluation
if params is None:
with open(logdir + '/params.txt') as fp:
params = EasyDict(json.load(fp))
params.model_fn = logdir + '/learned_model.keras'
params.new_run = 0
else:
params = copy.deepcopy(params)
if logdir is not None:
params.logdir = logdir
params.mix_models_in_minibatch = False
params.batch_size = 1
params.net_input.append('vertex_indices')
params.n_walks_per_model = n_walks_per_model
# Prepare the dataset
test_dataset, n_items = dataset.tf_mesh_dataset(params, dataset_expansion, mode=params.network_task,
shuffle_size=0, size_limit=np.inf, permute_file_names=False,
must_run_on_all=True, data_augmentation=data_augmentation)
# If dnn_model is not provided, load it
if dnn_model is None:
dnn_model = rnn_model.RnnWalkNet(params, params.n_classes, params.net_input_dim - 1, model_fn, model_must_be_load=True,
dump_model_visualization=False)
  # Skip the 1st half of each walk so that the vertex predictions used are the more reliable ones
skip = int(params.seq_len * 0.5)
models = {}
# Go through the dataset n_iters times
for _ in tqdm(range(n_iters)):
for name_, model_ftrs_, labels_ in test_dataset:
name = name_.numpy()[0].decode()
assert name_.shape[0] == 1
model_ftrs = model_ftrs_[:, :, :, :-1]
all_seq = model_ftrs_[:, :, :, -1].numpy()
if name not in models.keys():
models[name] = get_model_by_name(name)
        models[name]['pred'] = np.zeros((models[name]['vertices'].shape[0], params.n_classes))
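        # Assumed continuation (the source is truncated here): also track how many
        # walk predictions hit each vertex, as needed by postprocess_vertex_predictions().
        # The exact initialization value is an assumption; a small epsilon avoids
        # division by zero for vertices no walk has visited.
        models[name]['pred_count'] = 1e-6 * np.ones((models[name]['vertices'].shape[0],))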
import os
import numpy as np
import h5py as h5
import glob
import shutil
from .data_reader import DataReader_pred
from .predict_fn import pred_fn
import pkg_resources
model_dir = pkg_resources.resource_filename('phasenet', os.path.join('model', '190703-214543'))
script_path = os.path.dirname(os.path.realpath(__file__))
def format_data_hdf5(data, root_PN_inputs='.', filename='data.h5'):
"""Format data for PhasetNet (hdf5).
Save the data array in an hdf5 file such that PhaseNet can process it.
Parameters
-------------
data: (n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismic data
on which we want to pick the P- and S-wave arrivals.
root_PN_inputs: string, default to '.'
Path to the root folder where formatted data will be stored.
filename: string, default to 'data.h5'
Name of the file listing the filenames of all 3-component
time series to process.
"""
import h5py as h5
with h5.File(os.path.join(root_PN_inputs, filename), 'w') as f:
f.create_group('data')
for i in range(data.shape[0]):
# place the component axis at the end
three_comp_data = np.swapaxes(data[i, ...], 0, 1)
f['data'].create_dataset(f'sample{i}', data=three_comp_data)
def format_data_ram(data):
"""Format data for PhasetNet.
Build the data dictionary for PhaseNet.
Parameters
-------------
data: (n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismic data
on which we want to pick the P- and S-wave arrivals.
"""
data_pn = {}
for i in range(data.shape[0]):
data_pn[f'sample{i}'] = np.swapaxes(data[i, ...], 0, 1)
return data_pn
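# Example (sketch): formatting an array of 3 stations x 3 components x 3001
# samples for PhaseNet. The array and paths below are illustrative only.
#   data = np.random.randn(3, 3, 3001)
#   format_data_hdf5(data, root_PN_inputs='/tmp/pn_inputs') # writes /tmp/pn_inputs/data.h5
#   data_pn = format_data_ram(data) # {'sample0': ..., 'sample1': ..., 'sample2': ...}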
def run_pred(input_length,
model_path=model_dir,
data=None,
data_path='./dataset/waveform_pred/',
log_dir='./dataset/log/',
data_file='./dataset/data.h5',
format='hdf5',
amplitude=False,
batch_size=1,
threshold_P=0.6,
threshold_S=0.6,
**kwargs):
"""Run PhaseNet and fetch its raw output: the P and S probabilities.
    The P/S probabilities and picks are returned to the caller. Extra
    kwargs are passed to `phasenet.predict_fn.pred_fn`.
Parameters
------------
input_length: int
Duration, in samples, of the 3-component seismograms.
    model_path: string, default to the '190703-214543' model bundled with the package
        Path to the trained PhaseNet model. Change it to use a different
        set of trained weights.
    data_path: string, default to './dataset/waveform_pred/'
        Path to the folder with the 3-component seismograms in npz files.
    log_dir: string, default to './dataset/log/'
        Path to the folder where PhaseNet writes its logs.
    data_file: string, default to './dataset/data.h5'
        Path to the hdf5 file with the formatted input data
        (used when `format='hdf5'`).
    batch_size: int, default to 1
        Number of 3-component seismograms processed by PhaseNet
        at once. Choose it according to the machine's available RAM.
threshold_P: float, default to 0.6
P-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_P`, a detection is triggered.
threshold_S: float, default to 0.6
S-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_S`, a detection is triggered.
"""
if format == 'hdf5':
data_reader = DataReader_pred(
format='hdf5',
data_list='', # not used with hdf5 format
hdf5_file=data_file,
hdf5_group='data',
amplitude=amplitude)
elif format == 'ram':
data_reader = DataReader_pred(
format='ram',
data=data,
amplitude=amplitude)
PhaseNet_proba, PhaseNet_picks = pred_fn(
data_reader, model_dir=model_path, log_dir=log_dir,
batch_size=batch_size, input_length=input_length,
min_p_prob=threshold_P, min_s_prob=threshold_S,
**kwargs)
if format == 'hdf5':
# PhaseNet does not take care of closing the hdf5 file
data_reader.h5.close()
return PhaseNet_proba, PhaseNet_picks
def automatic_picking(data,
station_names,
PN_base=None,
PN_dataset_name=None,
format='ram',
mini_batch_size=126,
threshold_P=0.6,
threshold_S=0.6,
**kwargs):
"""Wrapper function to call PhaseNet from a python script.
Extra kwargs are passed to `phasenet.predict_fn.pred_fn`.
Parameters
-----------
data: (n_events, n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismograms of
`n_events` earthquakes recorded at a network of `n_stations`
stations.
station_names: list or array of strings
Name of the `n_stations` stations of the array, in the same
order as given in `data`.
    PN_base: string, default to None
        Path to the root folder where PhaseNet formatted data will
        be stored. Required if `format='hdf5'`.
    PN_dataset_name: string, default to None
        Name of the folder, inside `PN_base`, where the formatted data
        of a given experiment will be stored. Required if `format='hdf5'`.
    mini_batch_size: int, default to 126
        Number of 3-component seismograms processed by PhaseNet
        at once. Choose it according to the machine's available RAM.
threshold_P: float, default to 0.6
P-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_P`, a detection is triggered.
threshold_S: float, default to 0.6
S-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_S`, a detection is triggered.
Returns
---------
PhaseNet_probas: (n_events, n_stations, n_samples, 2) numpy.narray, float
Probabilities of P- and S-wave arrival on the continuous time axis.
PhaseNet_probas[..., 0] is the P-wave probability.
PhaseNet_probas[..., 1] is the S-wave probability.
PhaseNet_picks: dictionary
Dictionary with four fields: 'P_proba', 'P_picks',
'S_proba', 'S_picks'. Each of these fields contains
another dictionary with one entry per station. Finally,
the content of each PhaseNet_picks[field][station] is an
(n_events, numpy.ndarrays) array of arrays with all picks and
associated probabilities for each event.
"""
if format == 'hdf5':
if not os.path.isdir(PN_base):
print(f'Creating the formatted data root folder at {PN_base}')
os.mkdir(PN_base)
# clean up input/output directories if necessary
root_PN_inputs = os.path.join(PN_base, PN_dataset_name)
if not os.path.isdir(root_PN_inputs):
print(f'Creating the experiment root folder at {root_PN_inputs}')
os.mkdir(root_PN_inputs)
else:
PN_base = ''
root_PN_inputs = ''
# assume the data were provided in the shape
# (n_events x n_stations x 3-comp x time_duration)
n_events = data.shape[0]
n_stations = data.shape[1]
input_length = data.shape[3]
# for efficiency, we merge the event and the station axes
batch_size = n_events*n_stations
print('n events: {:d}, n stations: {:d}, batch size (n events x n stations): {:d}'.
format(n_events, n_stations, batch_size))
data = data.reshape(batch_size, 3, input_length)
# make sure the minibatch size is not larger than the
# total number of traces
minibatch_size = min(mini_batch_size, batch_size)
# generate the input files necessary for PhaseNet
if format == 'hdf5':
format_data_hdf5(data, root_PN_inputs=root_PN_inputs)
data_pn = None
elif format == 'ram':
data_pn = format_data_ram(data)
# call PhaseNet
PhaseNet_proba, PhaseNet_picks = run_pred(
input_length,
data_file=os.path.join(root_PN_inputs, 'data.h5'),
log_dir=os.path.join(root_PN_inputs, 'log'),
            batch_size=minibatch_size,
threshold_P=threshold_P,
threshold_S=threshold_S,
format=format,
data=data_pn,
**kwargs)
# the new PhaseNet_proba is an array of time series with [..., 0] = proba of P arrival
# and [..., 1] = proba of S arrival (the original [..., 0] was simply 1 - Pp - Ps)
PhaseNet_proba = PhaseNet_proba.reshape((n_events, n_stations, input_length, 3))[..., 1:]
PhaseNet_picks = PhaseNet_picks.reshape((n_events, n_stations, 2, 2))
# return picks in a comprehensive python dictionary
picks = {}
picks['P_picks'] = {}
picks['P_proba'] = {}
picks['S_picks'] = {}
picks['S_proba'] = {}
for s in range(n_stations):
# (n_events, arrays): array of arrays with all detected P-arrival picks
picks['P_picks'][station_names[s]] = PhaseNet_picks[:, s, 0, 0]
# (n_events, arrays): array of arrays with probabilities of all detected P-arrival picks
picks['P_proba'][station_names[s]] = PhaseNet_picks[:, s, 0, 1]
# (n_events, arrays): array of arrays with all detected S-arrival picks
picks['S_picks'][station_names[s]] = PhaseNet_picks[:, s, 1, 0]
# (n_events, arrays): array of arrays with probabilities of all detected S-arrival picks
picks['S_proba'][station_names[s]] = PhaseNet_picks[:, s, 1, 1]
if format == 'hdf5':
# clean up when done
shutil.rmtree(root_PN_inputs)
return PhaseNet_proba, picks
# --------------------------------------------------------------------------------
# The following functions were tailored for template matching applications
# --------------------------------------------------------------------------------
def get_best_picks(picks, buffer_length=50):
"""Filter picks to keep the best one on each 3-comp seismogram.
"""
for st in picks['P_picks'].keys():
for n in range(len(picks['P_picks'][st])):
pp = picks['P_picks'][st][n]
ps = picks['S_picks'][st][n]
# ----------------
            # remove picks from the buffer length
valid_P_picks = picks['P_picks'][st][n] > int(buffer_length)
valid_S_picks = picks['S_picks'][st][n] > int(buffer_length)
picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks]
picks['S_picks'][st][n] = picks['S_picks'][st][n][valid_S_picks]
picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks]
picks['S_proba'][st][n] = picks['S_proba'][st][n][valid_S_picks]
# take only the highest probability trigger
if len(picks['S_picks'][st][n]) > 0:
best_S_trigger = picks['S_proba'][st][n].argmax()
picks['S_picks'][st][n] = picks['S_picks'][st][n][best_S_trigger]
picks['S_proba'][st][n] = picks['S_proba'][st][n][best_S_trigger]
# update P picks: keep only those that are before the best S pick
valid_P_picks = picks['P_picks'][st][n] < picks['S_picks'][st][n]
picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks]
picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks]
else:
# if no valid S pick: fill in with nan
picks['S_picks'][st][n] = np.nan
picks['S_proba'][st][n] = np.nan
if len(picks['P_picks'][st][n]) > 0:
best_P_trigger = picks['P_proba'][st][n].argmax()
picks['P_picks'][st][n] = picks['P_picks'][st][n][best_P_trigger]
picks['P_proba'][st][n] = picks['P_proba'][st][n][best_P_trigger]
else:
# if no valid P pick: fill in with nan
picks['P_picks'][st][n] = np.nan
picks['P_proba'][st][n] = np.nan
# convert picks to float to allow NaNs
picks['P_picks'][st] = np.float32(picks['P_picks'][st])
picks['S_picks'][st] = np.float32(picks['S_picks'][st])
picks['P_proba'][st] = np.float32(picks['P_proba'][st])
picks['S_proba'][st] = np.float32(picks['S_proba'][st])
return picks
def get_all_picks(picks, buffer_length=50):
"""Combine all picks from multiple events (1 station) in one array.
This function makes sense when the (n_events, n_stations, n_components,
n_samples) `data` array given to `automatic_picking` is an array of
`n_events` similar earthquakes (i.e. similar locations, and therefore
similar expected picks).
Then, each station has potentially many P-wave and S-wave
picks with which we can define a mean value and an error (see
`fit_probability_density`).
Parameters
---------------
picks: dictionary
Picks returned by `automatic_picking`.
buffer_length: int, default to 50
Due to some edge effects, PhaseNet tends to trigger false detections
at the beginning of a 3-comp seismogram. `buffer_length` is the time,
in samples, to ignore at the beginning.
Returns
-----------
    picks: dictionary
        A dictionary with 4 fields: 'P_picks', 'S_picks', 'P_proba',
        'S_proba'; each of these fields is itself a dictionary with one
        entry per station.
Example: picks['P_picks']['station1'] = [124, 123, 126, 250] means that
4 P-wave picks were identified on station1, with possibly one outlier at
sample 250.
"""
for st in picks['P_picks'].keys():
P_picks = []
P_proba = []
S_picks = []
S_proba = []
for n in range(len(picks['P_picks'][st])):
pp = picks['P_picks'][st][n]
ps = picks['S_picks'][st][n]
# ----------------
# remove picks from the buffer length
valid_P_picks = picks['P_picks'][st][n] > int(buffer_length)
valid_S_picks = picks['S_picks'][st][n] > int(buffer_length)
picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks]
picks['S_picks'][st][n] = picks['S_picks'][st][n][valid_S_picks]
picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks]
picks['S_proba'][st][n] = picks['S_proba'][st][n][valid_S_picks]
# take all picks
P_picks.extend(picks['P_picks'][st][n].tolist())
P_proba.extend(picks['P_proba'][st][n].tolist())
S_picks.extend(picks['S_picks'][st][n].tolist())
S_proba.extend(picks['S_proba'][st][n].tolist())
picks['P_picks'][st] = np.int32(P_picks)
picks['S_picks'][st] = np.int32(S_picks)
picks['P_proba'][st] = np.float32(P_proba)
picks['S_proba'][st] = np.float32(S_proba)
return picks
def fit_probability_density(picks, overwrite=False):
"""Estimate pdf of pick distribution.
When multiple picks of the same (or similar) arrival time
are available, their empirical distribution can be used to
quantify uncertainties on the estimate of this arrival time.
The pdf is estimated with the kernel density method from scikit-learn.
Parameters
-----------
picks: dictionary
Picks returned by `automatic_detection` and processed by
`get_all_picks`.
overwrite: boolean, default to False
If True, substitute PhaseNet probas in picks['P/S_proba']['stationXX']
by the pdf values.
Returns
---------
picks: dictionary
Input dictionary updated with the new field 'P/S_kde', which is the
kernel estimate of the pdf. If `overwrite` is True, 'P/S_proba' is
also equal to 'P/S_kde'.
"""
from sklearn.neighbors import KernelDensity
# estimate probability density with gaussian kernels
# of bandwidth 5 (in samples)
kde = KernelDensity(kernel='gaussian', bandwidth=5)
# update dictionary
picks['P_kde'] = {}
picks['S_kde'] = {}
for st in picks['P_picks'].keys():
if len(picks['P_picks'][st]) > 0:
kde.fit(picks['P_picks'][st].reshape(-1, 1), sample_weight=picks['P_proba'][st])
log_proba_samples = kde.score_samples(picks['P_picks'][st].reshape(-1, 1))
picks['P_kde'][st] = np.exp(log_proba_samples).squeeze()
if overwrite:
picks['P_proba'][st] = np.array(picks['P_kde'][st], ndmin=1)
# ---------------------------
if len(picks['S_picks'][st]) > 0:
kde.fit(picks['S_picks'][st].reshape(-1, 1), sample_weight=picks['S_proba'][st])
log_proba_samples = kde.score_samples(picks['S_picks'][st].reshape(-1, 1))
picks['S_kde'][st] = np.exp(log_proba_samples).squeeze()
if overwrite:
picks['S_proba'][st] = np.array(picks['S_kde'][st], ndmin=1)
return picks
def select_picks_family(picks, n_threshold, err_threshold, central='mode'):
"""Filter picks based on their quality.
After processing by `fit_probability_density`, the quality of a
given P/S composite pick can be evaluated by the number of individual
picks that went into estimating its pdf, and the level of error indicated
by the pdf.
Parameters
------------
picks: dictionary
Picks returned by `automatic_detection`, processed by
`get_all_picks` and `fit_probability_density`.
n_threshold: scalar, int
Keep composite picks whose pdf was estimated on
N >= `n_threshold` individual picks.
err_threshold: scalar, int or float
Keep composite picks whose pdf indicates an error (see `central`)
lower than `err_threshold`.
central: string, default to 'mode'
The central tendency used in the computation of the error. It should be
either 'mode' or 'mean'. The error is taken as the RMS deviation about
the central tendency.
Returns
----------
selected_picks: dictionary
        The picks filtered according to the quality criteria `n_threshold` and
`err_threshold`.
"""
n_threshold = max(1, n_threshold)
picks_p = {}
err_p = {}
picks_s = {}
err_s = {}
for st in picks['P_picks'].keys():
pp = picks['P_picks'][st]
ppb = picks['P_proba'][st]
# remove the invalid picks
valid = ~np.isnan(pp)
pp = pp[valid]
ppb = ppb[valid]
if len(pp) < n_threshold:
continue
if central == 'mode':
# take the most likely value as the estimate of the pick
central_tendency = pp[ppb.argmax()]
elif central == 'mean':
central_tendency = np.sum(np.float32(pp)*ppb)/np.sum(ppb)
else:
print('central should be either mean or mode!')
return
# estimate the dispersion around this value
err = np.sqrt(np.sum(ppb*(pp - central_tendency)**2/ppb.sum()))
if err > err_threshold:
continue
picks_p[st] = central_tendency
err_p[st] = err
for st in picks['S_picks'].keys():
sp = picks['S_picks'][st]
spb = picks['S_proba'][st]
# remove the invalid picks
valid = ~np.isnan(sp)
sp = sp[valid]
spb = spb[valid]
if len(sp) < n_threshold:
continue
if central == 'mode':
# take the most likely value as the estimate of the pick
central_tendency = sp[spb.argmax()]
elif central == 'mean':
central_tendency = np.sum(np.float32(sp)*spb)/np.sum(spb)
else:
print('central should be either mean or mode!')
return
# estimate the dispersion around this value
err = np.sqrt(np.sum(spb*(sp - central_tendency)**2/spb.sum()))
if err > err_threshold:
continue
picks_s[st] = central_tendency
err_s[st] = err
selected_picks = {'P_picks': picks_p, 'P_err': err_p,
'S_picks': picks_s, 'S_err': err_s}
# picks are expressed in samples!
return selected_picks
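# Example workflow (sketch): picking a family of similar events and turning the
# individual picks into composite picks with uncertainties. Shapes and threshold
# values below are illustrative only.
#   proba, picks = automatic_picking(data, station_names, format='ram')
#   picks = get_all_picks(picks, buffer_length=50)
#   picks = fit_probability_density(picks, overwrite=True)
#   composite_picks = select_picks_family(picks, n_threshold=5, err_threshold=20)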
def plot_picks(picks, data_stream, figname=None, show=False, figsize=(20, 10)):
"""Plot the picks returned by `automatic_picking`.
Parameters
-------------
picks: dictionary
Picks returned by `automatic_detection`, processed by `get_all_picks`,
`fit_probability_density` and `select_picks_family`.
data_stream: `obspy.Stream`
Multi-station 3-component seismograms stored in an `obspy.Stream` object.
figname: string, default to None
Name of the `matplotlib.pyplot.Figure` instance.
show: boolean, default to False
If True, call `matplotlib.pyplot.show()`.
figsize: 2-tuple of ints, default to (20, 10)
Size of the `matplotlib.pyplot.Figure` instance in inches
(width, height).
Returns
----------
fig: `matplotlib.pyplot.Figure`
The `matplotlib.pyplot.Figure` instance created in this function.
"""
import matplotlib.pyplot as plt
old_params = plt.rcParams.copy()
plt.rcParams.update({'ytick.labelsize' : 10})
plt.rcParams.update({'legend.fontsize': 7})
# --------------------------
stations = list(set(list(picks['P_picks'].keys()) \
+ list(picks['S_picks'].keys())))
sr = data_stream[0].stats.sampling_rate
components = ['N', 'E', 'Z']
n_components = len(components)
# --------------------------
time = np.linspace(
0., data_stream[0].stats.npts/sr, data_stream[0].stats.npts)
fig, axes = plt.subplots(
num=figname, figsize=figsize, nrows=len(stations), ncols=n_components)
for s in range(len(stations)):
for c in range(n_components):
ax = axes[s, c]
try:
ax.plot(time, data_stream.select(
station=stations[s], component=components[c])[0].data,
color='k', lw=0.75, label=f'{stations[s]}.{components[c]}')
except IndexError:
# no data
continue
ax.legend(loc='upper right', fancybox=True, handlelength=0.2, borderpad=0.1)
if stations[s] in picks['P_picks'].keys():
ax.axvline(
picks['P_picks'][stations[s]][0], color='C0', lw=1.0)
xmin = picks['P_picks'][stations[s]][0]\
- picks['P_picks'][stations[s]][1]
xmax = picks['P_picks'][stations[s]][0]\
+ picks['P_picks'][stations[s]][1]
ymin, ymax = ax.get_ylim()
ax.fill([xmin, xmin, xmax, xmax],
[ymin, ymax, ymax, ymin],
color='C0', alpha=0.5, zorder=-1)
if stations[s] in picks['S_picks'].keys():
ax.axvline(
picks['S_picks'][stations[s]][0], color='C3', lw=1.0)
xmin = picks['S_picks'][stations[s]][0]\
- picks['S_picks'][stations[s]][1]
xmax = picks['S_picks'][stations[s]][0]\
+ picks['S_picks'][stations[s]][1]
ymin, ymax = ax.get_ylim()
ax.fill([xmin, xmin, xmax, xmax],
[ymin, ymax, ymax, ymin],
color='C3', alpha=0.5, zorder=-1)
ax.set_xlim(time.min(), time.max())
ax.set_yticks([])
if s < len(stations)-1:
plt.setp(ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel('Time (s)')
plt.subplots_adjust(
top=0.98, bottom=0.06, left=0.02,
right=0.98, hspace=0.)
if show:
plt.show()
plt.rcParams = old_params
return fig
def select_picks_single_event(picks, event_id, uncertainty=5):
picks_p = {}
picks_s = {}
for st in picks['P_picks'].keys():
pp = picks['P_picks'][st][event_id]
if np.isnan(pp):
continue
picks_p[st] = np.int32([pp, uncertainty])
for st in picks['S_picks'].keys():
sp = picks['S_picks'][st][event_id]
        if np.isnan(sp):
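            continue
        # Assumed continuation (the source is truncated here): mirror the P-pick branch above.
        picks_s[st] = np.int32([sp, uncertainty])
    return picks_p, picks_s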
import act
import numpy as np
import xarray as xr
import datetime
from .DMTGlobals import DMTGlobals
def calc_diams_masses(input_ds, debug=True, factor=1.0,
Globals=None):
"""
Calculates the scattering and incadescence diameters/BC masses for each particle.
Parameters
----------
input_ds: ACT Dataset
The ACT Dataset containing the processed SP2 data.
debug: boolean
If true, print out particle rejection statistics
factor: float
        Multiply soot masses by this factor for AquaDag calibration. Use
1.3 for NSA.
Globals: DMTGlobals structure or None
DMTGlobals structure containing calibration coefficients. Set to
None to use default values for MOSAiC.
Returns
-------
output_ds: ACT Dataset
The ACT Dataset containing the scattering/incadescence diameters.
"""
rejectMinScatTotal = 0
rejectWidthTotal = 0
rejectFatPeakTotal = 0
rejectFtPosTotal = 0
if Globals is None:
Globals = DMTGlobals()
PkHt_ch0 = np.nanmax(np.stack([input_ds['PkHt_ch0'].values, input_ds['FtAmp_ch0'].values]), axis=0)
PkHt_ch4 = np.nanmax(np.stack([input_ds['PkHt_ch4'].values, input_ds['FtAmp_ch4'].values]), axis=0)
#accepted = np.logical_and.reduce((PkHt_ch0 > Globals.ScatMinPeakHt1,
# input_ds['PkFWHM_ch0'].values > Globals.ScatMinWidth,
# input_ds['PkFWHM_ch0'].values < Globals.ScatMaxWidth,
# input_ds['FtPos_ch0'].values < Globals.ScatMaxPeakPos,
# input_ds['FtPos_ch0'].values >= Globals.ScatMinPeakPos,
# np.greater(input_ds['FtAmp_ch0'].values, input_ds['PkFWHM_ch0'].values)))
accepted = np.logical_and.reduce((
input_ds['PkFWHM_ch0'].values > Globals.ScatMinWidth,
input_ds['PkFWHM_ch0'].values < Globals.ScatMaxWidth,
input_ds['PkHt_ch0'].values > Globals.ScatMinPeakHt1,
input_ds['FtPos_ch0'].values < Globals.ScatMaxPeakPos,
input_ds['FtPos_ch0'].values > Globals.ScatMinPeakPos))
numScatFlag = np.sum(accepted)
rejectMinScatTotal += np.sum(PkHt_ch0 < Globals.ScatMinPeakHt1)
rejectWidthTotal += np.sum(np.logical_or(
input_ds['PkFWHM_ch0'].values < Globals.ScatMinWidth, input_ds['PkFWHM_ch0'].values > Globals.ScatMaxWidth))
rejectFatPeakTotal += np.sum(np.less_equal(input_ds['FtAmp_ch0'].values, input_ds['PkFWHM_ch0'].values))
rejectFtPosTotal += np.sum(np.logical_or(
input_ds['FtPos_ch0'].values > Globals.ScatMaxPeakPos, input_ds['FtPos_ch0'].values < Globals.ScatMinPeakPos))
if debug:
print("Number of scattering particles accepted = %d" % numScatFlag)
print("Number of scattering particles rejected for min. peak height = %d" % rejectMinScatTotal)
print("Number of scattering particles rejected for peak width = %d" % rejectWidthTotal)
print("Number of scattering particles rejected for fat peak = %d" % rejectFatPeakTotal)
print("Number of scattering particles rejected for peak pos. = %d" % rejectFtPosTotal)
PkHt_ch1 = input_ds['PkHt_ch1'].values
PkHt_ch5 = input_ds['PkHt_ch5'].values
width = input_ds['PkEnd_ch1'].values - input_ds['PkStart_ch1'].values
accepted_incand = width >= Globals.IncanMinWidth
#accepted_incand = width > -9999
accepted_incand = np.logical_and(accepted_incand,
input_ds['PkHt_ch2'].values >= Globals.IncanMinPeakHt1)
accepted_incand = np.logical_and(accepted_incand,
input_ds['PkHt_ch1'].values >= Globals.IncanMinPeakHt1)
sat_incand = np.logical_and(
PkHt_ch1 >= Globals.IncanMaxPeakHt1, accepted_incand)
unsat_incand = np.logical_and(accepted_incand,
PkHt_ch1 < Globals.IncanMaxPeakHt1)
unsat_incand = np.logical_and.reduce((unsat_incand,
input_ds['IncanRatioch1ch2'].values > Globals.IncanMinPeakRatio,
input_ds['IncanRatioch1ch2'].values < Globals.IncanMaxPeakRatio,
np.abs(input_ds['IncanPkOffsetch1ch2'].values) < Globals.IncanMaxPeakOffset,
input_ds['PkPos_ch1'].values > Globals.IncanMinPeakPos,
input_ds['PkPos_ch1'].values < Globals.IncanMaxPeakPos
))
sat_incand = np.logical_and.reduce((sat_incand, PkHt_ch5 <= Globals.IncanMaxPeakHt1,
input_ds['IncanRatioch5ch6'].values > Globals.IncanMinPeakRatio,
input_ds['IncanRatioch5ch6'].values < Globals.IncanMaxPeakRatio,
np.abs(input_ds['IncanPkOffsetch5ch6'].values) < Globals.IncanMaxPeakOffset,
input_ds['PkPos_ch5'].values > Globals.IncanMinPeakPos,
input_ds['PkPos_ch5'].values < Globals.IncanMaxPeakPos
))
accepted_incand = np.logical_or(unsat_incand, sat_incand)
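    # A particle's incandescence signal is accepted if it satisfies either the
    # unsaturated (ch1/ch2) or the saturated (ch5/ch6) acceptance criteria above.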
Scat_not_sat = 1e-18*(Globals.c0Scat1 + Globals.c1Scat1*PkHt_ch0 + Globals.c2Scat1*PkHt_ch0**2)
Scat_sat = 1e-18*(Globals.c0Scat2 + Globals.c1Scat2*PkHt_ch4 + Globals.c2Scat2*PkHt_ch4**2)
Scatter = np.where(PkHt_ch0 < Globals.ScatMaxPeakHt1,
Scat_not_sat, Scat_sat)
Scatter = np.where(accepted, Scatter, np.nan)
output_ds = input_ds.copy()
output_ds['Scatter'] = (('index'), Scatter)
output_ds['logScatter'] = (('index'), np.log10(Scatter))
output_ds['ScatDiaSO4'] = (('index'), 1000*(-0.015256 + 16.835*Scatter**0.15502))
output_ds['ScatMassSO4'] = (('index'), 0.5236e-9*Globals.densitySO4*output_ds['ScatDiaSO4']**3)
output_ds['ScatDiaBC50'] = (('index'), 1000*(0.013416 + 25.066*(Scatter**0.18057)))
sootMass_not_sat = factor * 1e-3 * (
Globals.c0Mass1 + Globals.c1Mass1*PkHt_ch1 + Globals.c2Mass1*PkHt_ch1**2)
sootDiam_not_sat = (sootMass_not_sat/(0.5236e-9*Globals.densityBC))**(1./3.)
sootMass_sat = factor * 1e-3 * (
Globals.c0Mass2 + Globals.c1Mass2*PkHt_ch5 + Globals.c2Mass2*PkHt_ch5**2)
sootDiam_sat = (sootMass_sat/(0.5236e-9*Globals.densityBC))**(1./3.)
    sootMass_not_sat = np.where(accepted_incand, sootMass_not_sat, np.nan)
#!/usr/bin/env python3
import unittest as ut
import numpy as np
import yaml
from laserscan import LaserScan, SemLaserScan, MultiSemLaserScan
def project(points, fov_up, fov_down):
# laser parameters
fov_up = fov_up / 180.0 * np.pi # field of view up in radians
fov_down = fov_down / 180.0 * np.pi # field of view down in radians
fov = abs(fov_down) + abs(fov_up) # get field of view total in radians
depth = np.linalg.norm(points, 2, axis=1)
# get scan components
scan_x = points[:, 0]
scan_y = points[:, 1]
scan_z = points[:, 2]
# get angles of all points
yaw = -np.arctan2(scan_y, scan_x)
pitch = np.arcsin(scan_z / depth) # arcsin!!
proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0]
proj_y = 1.0 - (pitch + abs(fov_down)) / fov # in [0.0, 1.0]
return np.array([proj_x, proj_y]).T
def unproject(points2d, depth, fov_up, fov_down):
# laser parameters
fov_up = fov_up / 180.0 * np.pi # field of view up in radians
fov_down = fov_down / 180.0 * np.pi # field of view down in radians
fov = abs(fov_down) + abs(fov_up) # get field of view total in radians
# proj_x = np.zeros((2,1))
proj_x = points2d[:,0]
proj_y = points2d[:,1]
yaw = (proj_x * 2 - 1.0) * np.pi # theta
pitch = 1.0 * fov - proj_y * fov - abs(fov_down) # phi
    # Assumed reconstruction (the source line is truncated here): invert the
    # spherical mapping used in project() above, where yaw = -arctan2(y, x)
    # and pitch = arcsin(z / depth).
    point_x = depth * np.sin(np.pi/2 - pitch) * np.cos(-yaw)
    point_y = depth * np.sin(np.pi/2 - pitch) * np.sin(-yaw)
    point_z = depth * np.cos(np.pi/2 - pitch)
    return np.array([point_x, point_y, point_z]).T
#
# Copyright (c) 2009-2021, <NAME>
#
# Started from "model_tests.py" on 3/14/13 to create
# a BMI-compliant version for a hands-on example.
#
# Show how to inherit BMI functions from BMI_base.py.
# Previous version info.
# July 5-7, 2008
# Modified: July 23, 2008 (<NAME>)
# v = sqrt(2 * g * d))
# Modified: July 18, 2016 (<NAME>)
# v = sqrt(g * d / 2) * (A_top / sqrt(A_top^2 - A_out^2))
# v ~ sqrt(g * d / 2), if A_top >> A_out
# This equation is computed for draining case from dh/dt
# (in the companion paper), after simplification.
#
# Modified: 2021-02-12 (<NAME>)
# Minor changes to run in Python 3.*
#
#-----------------------------------------------------------------
# 7/18/16. Key equations
#
# Q_in = R * A_top
# Q_out = v * A_out
# d(t) = d0 * [1 - (t/td)]^2 # solution for draining; no rain)
# Note that d(td) = 0.
#
# d'(t) = (-2 d0 / td) * [1 - (t/td)]
# V'(t) = A_top * d'(t) = Q_out
# t_drain = sqrt(2 * d0 / g) * sqrt( (A_top/A_out)^2 - 1 )
# t_fill = (d_final / R) # (time to fill from d=0)
#
# If t_fill = p * t_drain (and d_f = d0), then
# (r_top/r_out)^2 = A_top/A_out = sqrt( df*g/(2 R^2 p^2) + 1 )
# If p = 1/2, then filling will overpower draining.
#
# Try these settings in CFG file:
# dt = 4000.0 [s]
# n_steps = 60
# init_depth = 1.0 [m]
# top_radius = 20.0 [m]
# out_radius = 0.05 [m]
#
# Can also try these settings from the paper:
# init_depth = 86.40 [cm] = 0.864 [m]
# top_radius = 0.5 * (29.21) [cm]
# out_radius = 0.5 * (0.533, 0.668, 0.945, 1.087) [cm]
# td = 1223, 767, 403, 288 [s]
#
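#  Worked check (approximate, using the t_drain formula above):
#     CFG settings (d0 = 1.0 m, r_top = 20 m, r_out = 0.05 m):
#        A_top/A_out = (r_top/r_out)^2 = (20/0.05)^2 = 1.6e5
#        t_drain ~ sqrt(2*1.0/9.81) * 1.6e5 ~ 7.2e4 s ~ 20 hours
#     Paper settings (d0 = 0.864 m, r_top = 14.6 cm, r_out = 0.2665 cm):
#        A_top/A_out ~ (14.6/0.2665)^2 ~ 3.0e3
#        t_drain ~ sqrt(2*0.864/9.81) * 3.0e3 ~ 1.26e3 s, close to the listed td = 1223 s
#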
#-----------------------------------------------------------------
# 9/26/13.
# The steady-state solution for this problem is:
# Q_in = Q_out
# R * A_top = v * A_out
# d_f = (2/g) * ( R * A_top / A_out)^2
#
# For draining: d'(t) = -(2 d0 / td) * (1 - t/td)
# For filling: d'(t) = R
#
# This steady-state solution approached whether the initial
# water depth, d0, is greater than or less than d_f.
#
#-----------------------------------------------------------------
# The "water_tank" class defines a model of a cylindrical water
# tank that is open at the top (so rainfall can get in), with a
# small, circular outlet.
#
# The syntax to create an instance of the model is:
# >>> tank_model = water_tank()
#
# Once instantiated, we can call any of its methods or
# "member functions", most of which are BMI functions. e.g.
# >>> tank_model.initialize(),
# >>> tank_model.update()
#
#----------------------------------------------------------------
#
# test_tank_model()
#
# class water_tank()
#
# get_attribute()
# get_input_var_names()
# get_output_var_names()
# ------------------------
# get_var_name()
# get_var_units()
# get_var_type()
# get_var_rank() # (CHECK)
# -------------------
# get_start_time()
# get_end_time()
# get_current_time()
# get_time_step()
# get_time_units() # (can also use get_attribute() now)
# -------------------
# initialize()
# update()
# finalize()
# run_model() # (not BMI)
# -------------
# get_value()
# set_value()
#
# -------------------
# Non-BMI functions
# -------------------
# read_cfg_file()
# update_rain()
# print_tank_data()
# read_rain_data()
#
#----------------------------------------------------------------
# import BMI_base
import numpy as np # (for: float64, int16, pi, sqrt, zeros)
import model_input_2021 as model_input
import time
#---------------------------------------------
# Create an instance of the water tank model
# and then call its "run_model()" method.
#---------------------------------------------
def test_tank_model():
tank_model = water_tank()
tank_model.run_model()
#-------------------------------
# Define the water_tank class.
#-------------------------------
# class water_tank( BMI_base.BMI_component ): # (option to inherit)
class water_tank:
#-------------------------------------------
# Required, static attributes of the model
#-------------------------------------------
_att_map = {
'model_name': 'Water_Tank_Model',
'version': '1.0',
'author_name': '<NAME>',
'grid_type': 'none',
'time_step_type': 'fixed',
'step_method': 'explicit',
'time_units': 'seconds' }
#----------------------------------------------
# Input variable names (CSDMS Standard Names)
#----------------------------------------------
_input_var_names = [
'atmosphere_water__liquid_equivalent_precipitation_rate',
'atmosphere_water__precipitation_duration' ]
#-----------------------------------------------
# Output variable names (CSDMS Standard Names)
#-----------------------------------------------
_output_var_names = [
'model__time_step',
'tank_x-section__area',
'tank_x-section__radius',
'tank_outlet__area',
'tank_outlet__radius',
'tank_outlet_water__flow_speed',
        'tank_water__depth',
'tank_water__initial_depth',
'tank_water__volume' ]
#------------------------------------------------------
# Create a Python dictionary that maps CSDMS Standard
# Names to the model's internal variable names.
#------------------------------------------------------
_var_name_map = {
'atmosphere_water__liquid_equivalent_precipitation_rate': 'rain_rate',
'atmosphere_water__precipitation_duration': 'rain_duration',
#------------------------------------------------------------------------------
'model__time_step': 'dt',
'tank_x-section__area': 'top_area',
'tank_x-section__radius': 'radius',
'tank_outlet__area': 'out_area',
'tank_outlet__radius': 'out_radius',
'tank_outlet_water__flow_speed': 'out_speed',
'tank_water__depth': 'depth',
'tank_water__initial_depth': 'init_depth',
'tank_water__volume': 'volume'}
#------------------------------------------------------
# Create a Python dictionary that maps CSDMS Standard
# Names to the units of each model variable.
#------------------------------------------------------
_var_units_map = {
'atmosphere_water__liquid_equivalent_precipitation_rate': 'm s-1',
'atmosphere_water__precipitation_duration': 's',
#--------------------------------------------------------------------
'model__time_step': 's',
'tank_x-section__area': 'm2',
'tank_x-section__radius': 'm',
'tank_outlet__area': 'm2',
'tank_outlet__radius': 'm',
'tank_outlet_water__flow_speed': 'm s-1',
'tank_water__depth': 'm',
'tank_water__initial_depth': 'm',
'tank_water__volume': 'm3'}
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
# BMI: Model Information Functions
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
except:
print('###################################################')
print(' ERROR: Could not find attribute: ' + att_name)
print('###################################################')
print()
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
# BMI: Variable Information Functions
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
def get_var_type(self, long_var_name):
#---------------------------------------
# So far, all vars have type "double".
#---------------------------------------
return 'float64'
#--------------------------------------------------
# A more general approach, with less maintenance.
#--------------------------------------------------
## return str( self.get_value( long_var_name ).dtype )
#-----------------------------------
# Another approach (not ready yet)
#-----------------------------------
## dtype = getattr(self, var_name)
## return str(dtype)
#-------------------
# Another approach
#-------------------
## var_name = self.get_var_name( long_var_name )
##
## try:
## exec( "dtype = self." + var_name + ".dtype" )
## except:
## dtype = 'unknown'
## return str(dtype) # (need str() here)
# get_var_type()
#-------------------------------------------------------------------
def get_var_rank(self, long_var_name):
return np.int16(0)
# get_var_rank()
#------------------------------------------------------------
def get_start_time( self ):
return 0.0
# get_start_time()
#------------------------------------------------------------
def get_end_time( self ):
return (self.n_steps * self.dt)
# get_end_time()
#------------------------------------------------------------
def get_current_time( self ):
return self.time
# get_current_time()
#------------------------------------------------------------
def get_time_step( self ):
return self.dt
# get_time_step()
#------------------------------------------------------------
def get_time_units( self ):
return 'seconds'
#--------------
# Another way
#--------------
# units = self.get_attribute( 'time_units' )
# return units
# get_time_units()
#------------------------------------------------------------
# BMI: Model Control Functions
#------------------------------------------------------------
def initialize( self, cfg_file=None ):
self.SERIALIZABLE = True
self.timer_start = time.time()
#-------------------------------------------
# Used in read_cfg_file(), so needed here.
#-------------------------------------------
self.g = 9.81 # [m/s^2]
#--------------------------------------
# Read tank settings from "tank_file"
#--------------------------------------
if (cfg_file == None):
cfg_file = 'tank_info.cfg'
self.cfg_file = cfg_file
self.read_cfg_file()
#-----------------------
# Initialize variables
#-----------------------
self.depth = self.init_depth.copy()
self.out_speed = np.sqrt(self.g * self.depth / 2.0)
self.volume = self.depth * self.top_area #[m^3]
self.out_area = np.pi * self.out_radius**2.0
self.print_tank_data()
#----------------------------
# Initialize time variables
#----------------------------
self.time = np.float64(0)
self.time_index = 0
#-------------------------------------------------
# Use "input_file" class to create rain_file
# object, then open "rain_file" to read data.
# This will be used by the update_rain() method.
#-------------------------------------------------
if ('steady:' in self.rain_data_filename):
#-------------------------------------------
# New option to specify a steady rain rate
# in mmph.
#-------------------------------------------
self.steady_rain = True
words = self.rain_data_filename.split(':')
R = np.float64( words[1] )
self.rain_rate = R
#-----------------------------------------
# Print the predicted steady-state depth
#-----------------------------------------
R_mps = R / (3600.0 * 1000.0)
fac = (1 / (2.0 * self.g))
A_ratio = (self.top_area / self.out_area)
d_steady = fac * (R_mps * A_ratio)**2
# print 'Steady-state rainrate =', R, ' [mmph]'
# print 'Predicted steady-state depth =', d_steady
else:
self.steady_rain = False
if (self.SERIALIZABLE):
# Can use dill to serialize.
self.read_rain_data( self.rain_data_filename )
else:
self.rain_file = model_input.input_file(self.rain_data_filename)
self.rain_file.open()
# initialize()
#------------------------------------------------------------
def update( self, dt=-1, REPORT=True ):
#------------------------------------------------
# Read the next rainfall file data entry.
# "rain_duration" is read, but ignored for now.
#------------------------------------------------
if not(self.steady_rain):
self.update_rain()
rain_rate_mps = self.rain_rate / (3600.0 * 1000.0)
#--------------------------------------------------
# Compute volume inflow rate from rain, Q_in
# and volume outflow rate, Q_out, in [m^3 / sec].
#--------------------------------------------------
Q_in = rain_rate_mps * self.top_area
if (self.depth > 0):
Q_out = self.out_speed * self.out_area
else:
Q_out = 0.0
dVol = (Q_in - Q_out) * self.dt
#----------------------------
# Store the state variables
#----------------------------
self.Q_out = Q_out
self.volume = max(self.volume + dVol, 0.0)
self.depth = (self.volume / self.top_area)
self.out_speed = np.sqrt(2.0 * self.g * self.depth)
### self.out_speed = np.sqrt(self.g * self.depth / 2.0)
#-------------------------
# Optional status report
#-------------------------
if (REPORT):
print('--------------------------------------')
print('rain_rate =', self.rain_rate, ' [mmph]')
print('depth =', self.depth, '[meters]')
#--------------------------------------
# Write new depth to an output file ?
#--------------------------------------
#------------------------
# Update the model time
#------------------------
if (dt == -1):
dt = self.dt
self.time += dt
self.time_index += 1
# update()
#------------------------------------------------------------
## def update_until( self, time ):
##
## #----------------------------------------------------
## # Call update() method as many times as necessary
## # in order to get to the requested time. Note that
## # we do not override the value of n_steps from
## # the tank model's cfg_file.
## #----------------------------------------------------
## n_steps = np.int16(time / self.dt)
## for k in xrange(1,n_steps+1):
## self.update()
##
## # update_until()
#------------------------------------------------------------
def finalize( self ):
timer_stop = time.time()
run_time = (timer_stop - self.timer_start)
#-----------------------------------------
# Report simulation time with good units
#-----------------------------------------
sim_time = self.time # [secs]
sim_units = ' [secs]'
#----------------------------------
if (sim_time > 3600 * 24):
sim_time = sim_time / (3600.0 * 24)
sim_units = ' [days]'
elif (sim_time > 3600):
sim_time = sim_time / 3600.0
sim_units = ' [hrs]'
elif (sim_time > 60):
sim_time = sim_time / 60.0
sim_units = ' [mins]'
print()
print('Finished with water tank simulation.')
print('Model run time =', run_time, ' [secs]')
print('Simulated time =', sim_time, sim_units)
print('Final depth =', self.depth, ' [m]')
print()
#-------------------
# Close input file
#-------------------
if not(self.SERIALIZABLE):
if not(self.steady_rain):
self.rain_file.close()
#-----------------------
# Close output files ?
#-----------------------
# finalize()
#------------------------------------------------------------
def run_model( self, cfg_file=None):
#-------------------------------------------------------
# Note: This is not a required BMI function, but gives
# an easy way to run the stand-alone model.
#-------------------------------------------------------
self.initialize( cfg_file=cfg_file )
for k in range(1, self.n_steps+1):
# print('k =', k)
self.update()
self.finalize()
# run_model()
#------------------------------------------------------------
# BMI: Variable Getters and Setters
#------------------------------------------------------------
def get_value(self, long_var_name):
var_name = self.get_var_name( long_var_name )
try:
return getattr(self, var_name)
#----------------------------
# This breaks the reference.
#----------------------------
## return np.float64(result)
except:
print('ERROR in get_value() function')
print(' for var_name =', var_name)
print(' Returning 0.')
return np.array(0, dtype='float64')
# get_value()
#-------------------------------------------------------------------
def set_value(self, long_var_name, value):
#---------------------------------------------------------------
# Notes: The "var_name" string cannot contain a ".". (5/17/12)
#---------------------------------------------------------------
# (2/7/13) We are now using 0D numpy arrays as a way to
# produce "mutable scalars" that allow a component with a
# reference to the scalar to see changes to its value.
# But we can't apply np.float64() to the value as we did
# before or it destroys the reference.
# See BMI_base.initialize_scalar() for more information.
#---------------------------------------------------------------
var_name = self.get_var_name( long_var_name )
setattr( self, var_name, value )
# set_value()
#------------------------------------------------------------
# Non-BMI functions that are only used internally.
#------------------------------------------------------------
def read_cfg_file( self, cfg_file=None ):
#-----------------------------------
# What if cfg_file doesn't exist ?
#-----------------------------------
if (cfg_file == None):
cfg_file = self.cfg_file
tank_file = model_input.input_file( cfg_file )
tank_file.open()
#------------------------------------------------
# Read values from cfg_file and store in "self"
#------------------------------------------------
self.dt = tank_file.read_value()
self.n_steps = tank_file.read_value( dtype='integer' )
self.init_depth = tank_file.read_value()
self.top_radius = tank_file.read_value()
self.top_area = np.pi * (self.top_radius)**2.0 # (7/18/16)
# self.top_area = tank_file.read_value()
self.out_radius = tank_file.read_value()
self.rain_data_filename = tank_file.read_value( dtype='string' )
tank_file.close()
# read_cfg_file
#------------------------------------------------------------
def update_rain( self ):
## if (self.time_index < self.rain_file.n_lines):
if (self.time_index < self.n_rain_lines):
if (self.SERIALIZABLE):
self.rain_rate = self.rates[ self.time_index ]
self.rain_duration = self.durations[ self.time_index ]
else:
record = self.rain_file.read_record()
self.rain_rate = record[0] ## (in mmph)
self.rain_duration = record[1] ## (in seconds)
#---------------------------------------------------------
## print 'rain_rate =', self.rain_rate
## print 'duration =', self.rain_duration
## print ' '
else:
self.rain_rate = 0.0
self.rain_duration = self.dt
# update_rain()
#------------------------------------------------------------
def print_tank_data( self ):
print(' dt =', self.dt, '[sec]')
print(' n_steps =', self.n_steps)
print(' init_depth =', self.init_depth, '[m]')
print(' top_radius =', self.top_radius, '[m]')
print(' top_area =', self.top_area, '[m2]')
print(' out_radius =', self.out_radius, '[m]')
print(' out_speed =', self.out_speed, '[m/s]')
print(' depth =', self.depth, '[m]')
print(' volume =', self.volume, '[m3]')
print(' out_area =', self.out_area, '[m2]')
print(' rain_file =', self.rain_data_filename)
print()
# print_tank_data()
#------------------------------------------------------------
def read_rain_data(self, filename='rain_data.txt'):
#------------------------------------------
# Parts of this probably belong in methods
# for a "text_file" class, e.g. n_lines
#------------------------------------------
try: f = open(filename, 'r')
except IOError as err:
errno, strerror = err.args
print('I/O error(%s): %s' % (errno, strerror))
return
n_lines = 0
for line in f:
#-------------------------------------
# Note: len(line) == 1 for null lines
#-------------------------------------
if (len(line.strip()) > 0): n_lines = (n_lines + 1)
self.n_rain_lines = n_lines
self.rates = np.zeros([n_lines], dtype='d')
        self.durations = np.zeros([n_lines], dtype='d')
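        #--------------------------------------------------
        # Assumed continuation (the source is truncated
        # here): re-read the file to fill the rate and
        # duration arrays, then close it.
        #--------------------------------------------------
        f.seek(0)
        k = 0
        for line in f:
            words = line.split()
            if (len(words) >= 2):
                self.rates[k]     = np.float64( words[0] )
                self.durations[k] = np.float64( words[1] )
                k += 1
        f.close()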
from numpy_nn import Neuron
from numpy_nn.losses import MAE
from numpy_nn.optimizers import SGD
import numpy as np
# we will try to implment x + 1
elems = [x for x in range(0,100)]
x = np.array(elems).reshape((-1,1))
y = np.array([x + 1 for x in elems]).reshape((-1,1))
neuron = Neuron(1, 1)
mae = MAE()
sgd = SGD(lr=0.0002)
neuron.set_optimizer(sgd)
epochs = 10000
print("\nTRAINING\n")
for epoch in range(epochs):
# batch
    for batch in range(0, x.shape[0], 10):  # step through all samples in mini-batches of 10
x_sub = x[batch:batch+10]
y_sub = y[batch:batch+10]
# forward pass
out = neuron(x_sub)
# find loss
loss = mae(y_sub, out)
# find gradient and optimize
dL_dy = mae.gradient()
neuron.backward(dL_dy)
neuron.optimize()
if epoch % 1000 == 0:
print("Epoch {} Loss : {} ".format(epoch, loss))
x = np.array([-1,4,1])
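# Assumed continuation (the source is truncated here): evaluate the trained
# neuron on a few unseen inputs; the outputs should be close to x + 1.
print("\nTESTING\n")
print(neuron(x.reshape((-1, 1))))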
from __future__ import annotations
import numpy as np
import pytest
from manim.utils.space_ops import *
from manim.utils.space_ops import shoelace, shoelace_direction
def test_rotate_vector():
    vec = np.array([0, 1, 0])
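    # Assumed continuation (the source is truncated here): rotating the unit
    # y-vector by pi/2 about the default OUT (z) axis should give -x.
    rotated_vec = rotate_vector(vec, np.pi / 2)
    assert np.allclose(rotated_vec, np.array([-1, 0, 0]))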
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to test any model on any dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# <NAME> - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import os
import torch
os.environ.update(OMP_NUM_THREADS='1',
OPENBLAS_NUM_THREADS='1',
NUMEXPR_NUM_THREADS='1',
MKL_NUM_THREADS='1',)
import numpy as np
import matplotlib.pyplot as plt
from os.path import isfile, join, exists
from os import listdir, remove, getcwd, makedirs
from sklearn.metrics import confusion_matrix
import time
import pickle
from torch.utils.data import DataLoader
from matplotlib.animation import FuncAnimation
import matplotlib.patches as patches
from matplotlib.widgets import Slider, Button
import imageio
# My libs
from utils.config import Config
from utils.metrics import IoU_from_confusions, smooth_metrics, fast_confusion, fast_threshold_stats
from utils.ply import read_ply
from models.architectures import FakeColliderLoss, KPCollider
from utils.tester import ModelTester
from utils.mayavi_visu import fast_save_future_anim, save_zoom_img, colorize_collisions, zoom_collisions, superpose_gt, \
show_local_maxima, show_risk_diffusion, superpose_gt_contour, superpose_and_merge
# Datasets
from datasets.MyhalCollision import MyhalCollisionDataset, MyhalCollisionSampler, MyhalCollisionCollate, MyhalCollisionSamplerTest
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def running_mean(signal, n, axis=0, stride=1):
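    # Centered moving average of width (2*n + 1), implemented with a fixed
    # (non-trainable) 1D convolution. `axis` selects the smoothing axis for 2D
    # inputs and `stride` subsamples the smoothed output.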
# Create the smoothing convolution
torch_conv = torch.nn.Conv1d(1, 1, kernel_size=2 * n + 1, stride=stride, bias=False)
torch_conv.weight.requires_grad_(False)
torch_conv.weight *= 0
torch_conv.weight += 1 / (2 * n + 1)
signal = np.array(signal)
if signal.ndim == 1:
# Reshape signal to torch Tensor
signal = np.expand_dims(np.expand_dims(signal, 0), 1).astype(np.float32)
torch_signal = torch.from_numpy(signal)
# Get result
smoothed = torch_conv(torch_signal).squeeze().numpy()
return smoothed
elif signal.ndim == 2:
# transpose if we want axis 0
if axis == 0:
signal = signal.T
# Reshape signal to torch Tensor
signal = np.expand_dims(signal, 1).astype(np.float32)
torch_signal = torch.from_numpy(signal)
# Get result
smoothed = torch_conv(torch_signal).squeeze().numpy()
# transpose if we want axis 0
if axis == 0:
smoothed = smoothed.T
return smoothed
else:
print('wrong dimensions')
return None
def IoU_multi_metrics(all_IoUs, smooth_n):
# Get mean IoU for consecutive epochs to directly get a mean
all_mIoUs = [np.hstack([np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs]) for epoch_IoUs in all_IoUs]
smoothed_mIoUs = []
for epoch in range(len(all_mIoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_mIoUs))
smoothed_mIoUs += [np.mean(np.hstack(all_mIoUs[i0:i1]))]
# Get mean for each class
all_objs_mIoUs = [[np.mean(obj_IoUs, axis=1) for obj_IoUs in epoch_IoUs] for epoch_IoUs in all_IoUs]
smoothed_obj_mIoUs = []
for epoch in range(len(all_objs_mIoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_objs_mIoUs))
epoch_obj_mIoUs = []
for obj in range(len(all_objs_mIoUs[0])):
epoch_obj_mIoUs += [np.mean(np.hstack([objs_mIoUs[obj] for objs_mIoUs in all_objs_mIoUs[i0:i1]]))]
smoothed_obj_mIoUs += [epoch_obj_mIoUs]
return np.array(smoothed_mIoUs), np.array(smoothed_obj_mIoUs)
def IoU_class_metrics(all_IoUs, smooth_n):
# Get mean IoU per class for consecutive epochs to directly get a mean without further smoothing
smoothed_IoUs = []
for epoch in range(len(all_IoUs)):
i0 = max(epoch - smooth_n, 0)
i1 = min(epoch + smooth_n + 1, len(all_IoUs))
smoothed_IoUs += [np.mean(np.vstack(all_IoUs[i0:i1]), axis=0)]
smoothed_IoUs = np.vstack(smoothed_IoUs)
smoothed_mIoUs = np.mean(smoothed_IoUs, axis=1)
return smoothed_IoUs, smoothed_mIoUs
def load_confusions(filename, n_class):
with open(filename, 'r') as f:
lines = f.readlines()
confs = np.zeros((len(lines), n_class, n_class))
for i, line in enumerate(lines):
C = np.array([int(value) for value in line.split()])
confs[i, :, :] = C.reshape((n_class, n_class))
return confs
def load_training_results(path):
filename = join(path, 'training.txt')
with open(filename, 'r') as f:
lines = f.readlines()
epochs = []
steps = []
L_out = []
L_p = []
acc = []
t = []
L_2D_init = []
L_2D_prop = []
for line in lines[1:]:
line_info = line.split()
        if len(line_info) > 0:
epochs += [int(line_info[0])]
steps += [int(line_info[1])]
L_out += [float(line_info[2])]
L_p += [float(line_info[3])]
acc += [float(line_info[4])]
t += [float(line_info[5])]
if len(line_info) > 6:
L_2D_init += [float(line_info[6])]
L_2D_prop += [float(line_info[7])]
else:
break
ret_list = [epochs, steps, L_out, L_p, acc, t]
if L_2D_init:
ret_list.append(L_2D_init)
if L_2D_prop:
ret_list.append(L_2D_prop)
return ret_list
def load_single_IoU(filename, n_parts):
with open(filename, 'r') as f:
lines = f.readlines()
# Load all IoUs
all_IoUs = []
for i, line in enumerate(lines):
all_IoUs += [np.reshape([float(IoU) for IoU in line.split()], [-1, n_parts])]
return all_IoUs
def load_snap_clouds(path, dataset, only_last=False):
cloud_folders = np.array([join(path, str(f, 'utf-8')) for f in listdir(path)
if str(f, 'utf-8').startswith('val_preds')])
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
epoch_order = np.argsort(cloud_epochs)
cloud_epochs = cloud_epochs[epoch_order]
cloud_folders = cloud_folders[epoch_order]
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32)
for c_i, cloud_folder in enumerate(cloud_folders):
if only_last and c_i < len(cloud_epochs) - 1:
continue
# Load confusion if previously saved
conf_file = join(cloud_folder, 'conf.txt')
if isfile(conf_file):
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
else:
for f in listdir(cloud_folder):
f = str(f, 'utf-8')
if f.endswith('.ply') and not f.endswith('sub.ply'):
data = read_ply(join(cloud_folder, f))
labels = data['class']
preds = data['preds']
Confs[c_i] += fast_confusion(labels, preds, dataset.label_values).astype(np.int32)
np.savetxt(conf_file, Confs[c_i], '%12d')
# Erase ply to save disk memory
if c_i < len(cloud_folders) - 1:
for f in listdir(cloud_folder):
f = str(f, 'utf-8')
if f.endswith('.ply'):
remove(join(cloud_folder, f))
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
Confs = np.delete(Confs, l_ind, axis=1)
Confs = np.delete(Confs, l_ind, axis=2)
return cloud_epochs, IoU_from_confusions(Confs)
def load_multi_snap_clouds(path, dataset, file_i, only_last=False):
cloud_folders = np.array([join(path, f) for f in listdir(path) if f.startswith('val_preds')])
cloud_epochs = np.array([int(f.split('_')[-1]) for f in cloud_folders])
epoch_order = np.argsort(cloud_epochs)
cloud_epochs = cloud_epochs[epoch_order]
cloud_folders = cloud_folders[epoch_order]
if len(cloud_folders) > 0:
dataset_folders = [f for f in listdir(cloud_folders[0]) if dataset.name in f]
cloud_folders = [join(f, dataset_folders[file_i]) for f in cloud_folders]
Confs = np.zeros((len(cloud_epochs), dataset.num_classes, dataset.num_classes), dtype=np.int32)
for c_i, cloud_folder in enumerate(cloud_folders):
if only_last and c_i < len(cloud_epochs) - 1:
continue
# Load confusion if previously saved
conf_file = join(cloud_folder, 'conf_{:s}.txt'.format(dataset.name))
if isfile(conf_file):
Confs[c_i] += np.loadtxt(conf_file, dtype=np.int32)
else:
for f in listdir(cloud_folder):
if f.endswith('.ply') and not f.endswith('sub.ply'):
if np.any([cloud_path.endswith(f) for cloud_path in dataset.files]):
data = read_ply(join(cloud_folder, f))
labels = data['class']
preds = data['preds']
Confs[c_i] += confusion_matrix(labels, preds, dataset.label_values).astype(np.int32)
np.savetxt(conf_file, Confs[c_i], '%12d')
# Erase ply to save disk memory
if c_i < len(cloud_folders) - 1:
for f in listdir(cloud_folder):
if f.endswith('.ply'):
remove(join(cloud_folder, f))
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(dataset.label_values))):
if label_value in dataset.ignored_labels:
Confs = np.delete(Confs, l_ind, axis=1)
Confs = np.delete(Confs, l_ind, axis=2)
return cloud_epochs, IoU_from_confusions(Confs)
def load_multi_IoU(filename, n_parts):
with open(filename, 'r') as f:
lines = f.readlines()
# Load all IoUs
all_IoUs = []
for i, line in enumerate(lines):
obj_IoUs = [[float(IoU) for IoU in s.split()] for s in line.split('/')]
obj_IoUs = [np.reshape(IoUs, [-1, n_parts[obj]]) for obj, IoUs in enumerate(obj_IoUs)]
all_IoUs += [obj_IoUs]
return all_IoUs
# ----------------------------------------------------------------------------------------------------------------------
#
# Plot functions
# \********************/
#
def compare_trainings(list_of_paths, list_of_labels=None, smooth_epochs=3.0):
# Parameters
# **********
plot_lr = False
stride = 2
if list_of_labels is None:
list_of_labels = [str(i) for i in range(len(list_of_paths))]
# Read Training Logs
# ******************
all_epochs = []
all_loss = []
all_loss1 = []
all_loss2 = []
all_loss3 = []
all_lr = []
all_times = []
all_RAMs = []
for path in list_of_paths:
# Check if log contains stuff
check = 'val_IoUs.txt' in [str(f, 'utf-8') for f in listdir(path)]
check = check or ('val_confs.txt' in [str(f, 'utf-8') for f in listdir(path)])
check = check or ('val_RMSEs.txt' in [str(f, 'utf-8') for f in listdir(path)])
if check:
config = Config()
config.load(path)
else:
continue
# Load results
training_res_list = load_training_results(path)
if len(training_res_list) > 6:
epochs, steps, L_out, L_p, acc, t, L_2D_init, L_2D_prop = training_res_list
else:
epochs, steps, L_out, L_p, acc, t = training_res_list
L_2D_init = []
L_2D_prop = []
epochs = np.array(epochs, dtype=np.int32)
epochs_d = np.array(epochs, dtype=np.float32)
steps = np.array(steps, dtype=np.float32)
# Compute number of steps per epoch
max_e = np.max(epochs)
first_e = np.min(epochs)
epoch_n = []
for i in range(first_e, max_e):
bool0 = epochs == i
e_n = np.sum(bool0)
epoch_n.append(e_n)
epochs_d[bool0] += steps[bool0] / e_n
smooth_n = int(np.mean(epoch_n) * smooth_epochs)
smooth_loss = running_mean(L_out, smooth_n, stride=stride)
all_loss += [smooth_loss]
if L_2D_init:
all_loss2 += [running_mean(L_2D_init, smooth_n, stride=stride)]
all_loss3 += [running_mean(L_2D_prop, smooth_n, stride=stride)]
all_loss1 += [all_loss[-1] - all_loss2[-1] - all_loss3[-1]]
all_epochs += [epochs_d[smooth_n:-smooth_n:stride]]
all_times += [t[smooth_n:-smooth_n:stride]]
# Learning rate
if plot_lr:
lr_decay_v = np.array([lr_d for ep, lr_d in config.lr_decays.items()])
lr_decay_e = np.array([ep for ep, lr_d in config.lr_decays.items()])
max_e = max(np.max(all_epochs[-1]) + 1, np.max(lr_decay_e) + 1)
lr_decays = np.ones(int(np.ceil(max_e)), dtype=np.float32)
lr_decays[0] = float(config.learning_rate)
lr_decays[lr_decay_e] = lr_decay_v
lr = np.cumprod(lr_decays)
all_lr += [lr[np.floor(all_epochs[-1]).astype(np.int32)]]
# Rescale losses
rescale_losses = True
if L_2D_init and rescale_losses:
all_loss2[-1] *= 1 / config.power_2D_init_loss
all_loss3[-1] *= 1 / config.power_2D_prop_loss
# Plots learning rate
# *******************
if plot_lr:
# Figure
fig = plt.figure('lr')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_lr[i], linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('lr')
plt.yscale('log')
# Display legends and title
plt.legend(loc=1)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plots loss
# **********
if all_loss2:
fig, axes = plt.subplots(1, 3, sharey=False, figsize=(12, 5))
for i, label in enumerate(list_of_labels):
axes[0].plot(all_epochs[i], all_loss1[i], linewidth=1, label=label)
axes[1].plot(all_epochs[i], all_loss2[i], linewidth=1, label=label)
axes[2].plot(all_epochs[i], all_loss3[i], linewidth=1, label=label)
# Set names for axes
for ax in axes:
ax.set_xlabel('epochs')
axes[0].set_ylabel('loss')
axes[0].set_yscale('log')
# Display legends and title
axes[2].legend(loc=1)
axes[0].set_title('3D_net loss')
axes[1].set_title('2D_init loss')
axes[2].set_title('2D_prop loss')
# Customize the graph
for ax in axes:
ax.grid(linestyle='-.', which='both')
else:
# Figure
fig = plt.figure('loss')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], all_loss[i], linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('loss')
plt.yscale('log')
# Display legends and title
plt.legend(loc=1)
plt.title('Losses compare')
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Plot Times
# **********
# Figure
fig = plt.figure('time')
for i, label in enumerate(list_of_labels):
plt.plot(all_epochs[i], np.array(all_times[i]) / 3600, linewidth=1, label=label)
# Set names for axes
plt.xlabel('epochs')
plt.ylabel('time')
# plt.yscale('log')
# Display legends and title
plt.legend(loc=0)
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
# ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all
plt.show()
def compare_convergences_collision2D(list_of_paths, list_of_names=None, smooth_n=20):
# Parameters
# **********
if list_of_names is None:
list_of_names = [str(i) for i in range(len(list_of_paths))]
# Read Logs
# *********
all_pred_epochs = []
all_fe = []
all_bce = []
all_fp = []
all_fp_bce = []
all_fn = []
all_fn_bce = []
# Load parameters
config = Config()
config.load(list_of_paths[0])
for path in list_of_paths:
# Load config and saved results
metric_list = []
file_list = ['subpart_IoUs.txt',
'val_IoUs.txt',
'reconstruction_error.txt',
'future_error.txt',
'future_error_bce.txt',
'future_FP.txt',
'future_FN.txt',
'future_FP_bce.txt',
'future_FN_bce.txt']
max_epoch = 0
for filename in file_list:
try:
metric = np.loadtxt(join(path, filename))
max_epoch = max(max_epoch, metric.shape[0])
smoothed = running_mean(metric, smooth_n)
except OSError as e:
smoothed = np.zeros((0, 0), dtype=np.float64)
metric_list.append(smoothed)
(IoUs,
val_IoUs,
mean_recons_e,
mean_future_e,
mean_future_bce,
mean_future_FP,
mean_future_FN,
mean_future_FP_bce,
mean_future_FN_bce) = metric_list
# Epoch count
epochs_d = np.array([i for i in range(max_epoch)])
# Aggregate results
all_pred_epochs += [epochs_d[smooth_n:-smooth_n]]
all_fe += [mean_future_e]
all_bce += [mean_future_bce]
all_fp += [mean_future_FP]
all_fp_bce += [mean_future_FP_bce]
all_fn += [mean_future_FN]
all_fn_bce += [mean_future_FN_bce]
# Plots
# *****
# create plots
for reduc in ['mean']:
for error, error_name in zip([all_fe, all_bce, all_fp, all_fp_bce, all_fn, all_fn_bce],
['all_fe', 'all_bce', 'all_fp', 'all_fp_bce', 'all_fn', 'all_fn_bce']):
if 'bce' in error_name:
continue
fig = plt.figure(reduc + ' ' + error_name[4:])
for i, name in enumerate(list_of_names):
if error[i].shape[0] > 0:
if reduc == 'last':
plotted_e = error[i][:, -1]
else:
plotted_e = np.mean(error[i], axis=1)
else:
plotted_e = all_pred_epochs[i] * 0
p = plt.plot(all_pred_epochs[i], plotted_e, linewidth=1, label=name)
plt.xlabel('epochs')
plt.ylabel(reduc + ' ' + error_name[4:])
# Set limits for y axis
#plt.ylim(0.55, 0.95)
# Display legends and title
plt.legend()
# Customize the graph
ax = fig.gca()
ax.grid(linestyle='-.', which='both')
#ax.set_yticks(np.arange(0.8, 1.02, 0.02))
# Show all -------------------------------------------------------------------
plt.show()
return
def evolution_gifs(chosen_log):
############
# Parameters
############
# Load parameters
config = Config()
config.load(chosen_log)
# Find all checkpoints in the chosen training folder
chkp_path = join(chosen_log, 'checkpoints')
chkps = np.sort([join(chkp_path, f) for f in listdir(chkp_path) if f[:4] == 'chkp'])
# Get training and validation days
val_path = join(chosen_log, 'val_preds')
val_days = np.unique([f.split('_')[0] for f in listdir(val_path) if f.endswith('pots.ply')])
# Util ops
softmax = torch.nn.Softmax(1)
sigmoid_2D = torch.nn.Sigmoid()
fake_loss = FakeColliderLoss(config)
# Result folder
visu_path = join(config.saving_path, 'test_visu')
if not exists(visu_path):
makedirs(visu_path)
##################################
# Change model parameters for test
##################################
# Change parameters for the test here. For example, you can stop augmenting the input data.
config.augment_noise = 0
config.augment_scale_min = 1.0
config.augment_scale_max = 1.0
config.augment_symmetries = [False, False, False]
config.augment_rotation = 'none'
config.validation_size = 100
##########################################
# Choice of the image we want to visualize
##########################################
# Dataset
test_dataset = MyhalCollisionDataset(config, val_days, chosen_set='validation', balance_classes=False)
wanted_inds = [700, 100, 150, 800]
wanted_s_inds = [test_dataset.all_inds[ind][0] for ind in wanted_inds]
wanted_f_inds = [test_dataset.all_inds[ind][1] for ind in wanted_inds]
sf_to_i = {tuple(test_dataset.all_inds[ind]): i for i, ind in enumerate(wanted_inds)}
####################################
# Preload to avoid long computations
####################################
# List all precomputed preds:
saved_preds = np.sort([f for f in listdir(visu_path) if f.endswith('.pkl')])
saved_pred_inds = [int(f[:-4].split('_')[-1]) for f in saved_preds]
# Load if available
if np.all([ind in saved_pred_inds for ind in wanted_inds]):
print('\nFound previous predictions, loading them')
all_preds = []
all_gts = []
for ind in wanted_inds:
wanted_ind_file = join(visu_path, 'preds_{:08d}.pkl'.format(ind))
with open(wanted_ind_file, 'rb') as wfile:
ind_preds, ind_gts = pickle.load(wfile)
all_preds.append(ind_preds)
all_gts.append(ind_gts)
all_preds = np.stack(all_preds, axis=1)
all_gts = np.stack(all_gts, axis=0)
########
# Or ...
########
else:
############
# Choose GPU
############
# Set which gpu is going to be used (auto for automatic choice)
GPU_ID = 'auto'
# Automatic choice (need pynvml to be installed)
if GPU_ID == 'auto':
print('\nSearching a free GPU:')
for i in range(torch.cuda.device_count()):
a = torch.cuda.list_gpu_processes(i)
print(torch.cuda.list_gpu_processes(i))
a = a.split()
if a[1] == 'no':
GPU_ID = a[0][-1:]
# Safe check no free GPU
if GPU_ID == 'auto':
print('\nNo free GPU found!\n')
a = 1 / 0
else:
print('\nUsing GPU:', GPU_ID, '\n')
# Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
chosen_gpu = int(GPU_ID)
###########################
# Initialize model and data
###########################
# Specific sampler with pred inds
test_sampler = MyhalCollisionSamplerTest(test_dataset, wanted_inds)
test_loader = DataLoader(test_dataset,
batch_size=1,
sampler=test_sampler,
collate_fn=MyhalCollisionCollate,
num_workers=config.input_threads,
pin_memory=True)
# Calibrate samplers
if config.max_val_points < 0:
config.max_val_points = 1e9
test_loader.dataset.max_in_p = 1e9
test_sampler.calib_max_in(config, test_loader, untouched_ratio=0.95, verbose=True)
test_sampler.calibration(test_loader, verbose=True)
# Init model
net = KPCollider(config, test_dataset.label_values, test_dataset.ignored_labels)
# Choose to train on CPU or GPU
if torch.cuda.is_available():
device = torch.device("cuda:{:d}".format(chosen_gpu))
net.to(device)
else:
device = torch.device("cpu")
######################################
# Start predictions with ckpts weights
######################################
all_preds = []
all_gts = [None for _ in wanted_inds]
for chkp_i, chkp in enumerate(chkps):
# Load new checkpoint weights
if torch.cuda.is_available():
checkpoint = torch.load(chkp, map_location=device)
else:
checkpoint = torch.load(chkp, map_location=torch.device('cpu'))
net.load_state_dict(checkpoint['model_state_dict'])
epoch_i = checkpoint['epoch'] + 1
net.eval()
print("\nModel and training state restored from " + chkp)
chkp_preds = [None for _ in wanted_inds]
# Predict wanted inds with this chkp
for i, batch in enumerate(test_loader):
if 'cuda' in device.type:
batch.to(device)
# Forward pass
outputs, preds_init_2D, preds_2D = net(batch, config)
# Get probs and labels
f_inds = batch.frame_inds.cpu().numpy()
lengths = batch.lengths[0].cpu().numpy()
stck_init_preds = sigmoid_2D(preds_init_2D).cpu().detach().numpy()
stck_future_logits = preds_2D.cpu().detach().numpy()
stck_future_preds = sigmoid_2D(preds_2D).cpu().detach().numpy()
stck_future_gts = batch.future_2D.cpu().detach().numpy()
torch.cuda.synchronize(device)
# Loop on batch
i0 = 0
for b_i, length in enumerate(lengths):
# Get the 2D predictions and gt (init_2D)
img0 = stck_init_preds[b_i, 0, :, :, :]
gt_im0 = np.copy(stck_future_gts[b_i, config.n_frames - 1, :, :, :])
gt_im1 = stck_future_gts[b_i, config.n_frames - 1, :, :, :]
gt_im1[:, :, 2] = np.max(stck_future_gts[b_i, :, :, :, 2], axis=0)
img1 = stck_init_preds[b_i, 1, :, :, :]
s_ind = f_inds[b_i, 0]
f_ind = f_inds[b_i, 1]
# Get the 2D predictions and gt (prop_2D)
img = stck_future_preds[b_i, :, :, :, :]
gt_im = stck_future_gts[b_i, config.n_frames:, :, :, :]
# # Future errors defined the same as the loss
if sf_to_i[(s_ind, f_ind)] == 0:
future_errors_bce = fake_loss.apply(gt_im, stck_future_logits[b_i, :, :, :, :], error='bce')
a = 1/0
# future_errors = fake_loss.apply(gt_im, stck_future_logits[b_i, :, :, :, :], error='linear')
# future_errors = np.concatenate((future_errors_bce, future_errors), axis=0)
# # Save prediction too in gif format
# s_ind = f_inds[b_i, 0]
# f_ind = f_inds[b_i, 1]
# filename = '{:s}_{:07d}_e{:04d}.npy'.format(test_dataset.sequences[s_ind], f_ind, epoch_i)
# gifpath = join(config.saving_path, 'test_visu', filename)
# fast_save_future_anim(gifpath[:-4] + '_f_gt.gif', gt_im, zoom=5, correction=True)
# fast_save_future_anim(gifpath[:-4] + '_f_pre.gif', img, zoom=5, correction=True)
# Store all predictions
chkp_preds[sf_to_i[(s_ind, f_ind)]] = img
if chkp_i == 0:
all_gts[sf_to_i[(s_ind, f_ind)]] = gt_im
if np.all([chkp_pred is not None for chkp_pred in chkp_preds]):
break
if np.all([chkp_pred is not None for chkp_pred in chkp_preds]):
break
# Store all predictions
chkp_preds = np.stack(chkp_preds, axis=0)
all_preds.append(chkp_preds)
# All predictions shape: [chkp_n, frames_n, T, H, W, 3]
all_preds = np.stack(all_preds, axis=0)
# All gts shape: [frames_n, T, H, W, 3]
all_gts = np.stack(all_gts, axis=0)
# Save each preds
for ind_i, ind in enumerate(wanted_inds):
wanted_ind_file = join(visu_path, 'preds_{:08d}.pkl'.format(ind))
with open(wanted_ind_file, 'wb') as wfile:
pickle.dump((all_preds[:, ind_i], all_gts[ind_i]), wfile)
################
# Visualizations
################
# First idea: future for different chkp
idea1 = True
if idea1:
for frame_i, _ in enumerate(wanted_inds):
# Colorize and zoom both preds and gts
showed_preds = colorize_collisions(all_preds[:, frame_i])
showed_preds = zoom_collisions(showed_preds, 5)
showed_gts = colorize_collisions(all_gts[frame_i])
showed_gts = zoom_collisions(showed_gts, 5)
# Repeat gt for all checkpoints and merge with preds
showed_gts = np.expand_dims(showed_gts, 0)
showed_gts = np.tile(showed_gts, (showed_preds.shape[0], 1, 1, 1, 1))
merged_imgs = superpose_gt(showed_preds, showed_gts)
c_showed = [0, 5, 10, -1]
n_showed = len(c_showed)
fig, axes = plt.subplots(1, n_showed)
images = []
for ax_i, chkp_i in enumerate(c_showed):
images.append(axes[ax_i].imshow(merged_imgs[chkp_i, 0]))
def animate(i):
for ax_i, chkp_i in enumerate(c_showed):
images[ax_i].set_array(merged_imgs[chkp_i, i])
return images
anim = FuncAnimation(fig, animate,
frames=np.arange(merged_imgs.shape[1]),
interval=50,
blit=True)
plt.show()
    # SAME BUT COMPARE MULTIPLE LOGS AT THE END OF THEIR CONVERGENCE
# Second idea: evolution of prediction for different timestamps
idea2 = False
if idea2:
for frame_i, _ in enumerate(wanted_inds):
# Colorize and zoom both preds and gts
showed_preds = colorize_collisions(all_preds[:, frame_i])
showed_preds = zoom_collisions(showed_preds, 5)
showed_gts = colorize_collisions(all_gts[frame_i])
showed_gts = zoom_collisions(showed_gts, 5)
# Repeat gt for all checkpoints and merge with preds
showed_gts = np.expand_dims(showed_gts, 0)
showed_gts = np.tile(showed_gts, (showed_preds.shape[0], 1, 1, 1, 1))
merged_imgs = superpose_gt(showed_preds, showed_gts)
t_showed = [2, 10, 18, 26]
n_showed = len(t_showed)
fig, axes = plt.subplots(1, n_showed)
images = []
for t, ax in zip(t_showed, axes):
images.append(ax.imshow(merged_imgs[0, t]))
# Add progress rectangles
xy = (0.2 * merged_imgs.shape[-3], 0.015 * merged_imgs.shape[-2])
dx = 0.6 * merged_imgs.shape[-3]
dy = 0.025 * merged_imgs.shape[-2]
rect1 = patches.Rectangle(xy, dx, dy, linewidth=1, edgecolor='white', facecolor='white')
rect2 = patches.Rectangle(xy, dx * 0.01, dy, linewidth=1, edgecolor='white', facecolor='green')
axes[0].add_patch(rect1)
axes[0].add_patch(rect2)
images.append(rect1)
images.append(rect2)
def animate(i):
for t_i, t in enumerate(t_showed):
images[t_i].set_array(merged_imgs[i, t])
progress = float(i + 1) / merged_imgs.shape[0]
images[-1].set_width(dx * progress)
return images
n_gif = merged_imgs.shape[0]
animation_frames = np.arange(n_gif)
animation_frames = np.pad(animation_frames, 10, mode='edge')
anim = FuncAnimation(fig, animate,
frames=animation_frames,
interval=100,
blit=True)
plt.show()
# # Create superposition of gt and preds
# r = preds[:, :, :, 0]
# g = preds[:, :, :, 1]
# b = preds[:, :, :, 2]
# r[gt_mask] += 0
# g[gt_mask] += 0
# b[gt_mask] += 255
# # Compute precision recall curves
# figPR = show_PR(p, gt)
# #fig, axes = plt.subplots(1, 2, figsize=(14, 6))
# #anim0 = anim_multi_PR(p, gt, axis=axes[0])
# #anim = show_future_anim(preds, axis=axes[1])
# fig, double_anim = anim_PR_gif(preds, p, gt)
# plt.show()
a = 1 / 0
return
def comparison_gifs(list_of_paths, wanted_inds=[]):
############
# Parameters
############
# Set which gpu is going to be used (auto for automatic choice)
GPU_ID = 'auto'
if not wanted_inds:
# For flowfollowers 1200 1400
wanted_inds = [1200, 1400, 1500, 700, 800, 900] # Bouncers
# wanted_inds = [1300, 700, 1400, 1500, 100, 150, 800, 900] # Wanderers
#wanted_inds = [1200, 1400, 1500, 100, 150, 700, 800, 900] # FlowFollowers
#wanted_inds = [i for i in range(2850, 2900, 10)] # FlowFollowersbis
comparison_gts = []
comparison_ingts = []
comparison_preds = []
for chosen_log in list_of_paths:
############
# Parameters
############
# Load parameters
config = Config()
config.load(chosen_log)
# Find all checkpoints in the chosen training folder
chkp_path = join(chosen_log, 'checkpoints')
chkps = np.sort([join(chkp_path, f) for f in listdir(chkp_path) if f[:4] == 'chkp'])
# Get training and validation days
val_path = join(chosen_log, 'val_preds')
val_days = np.unique([f.split('_')[0] for f in listdir(val_path) if f.endswith('pots.ply')])
# Util ops
softmax = torch.nn.Softmax(1)
sigmoid_2D = torch.nn.Sigmoid()
fake_loss = FakeColliderLoss(config)
# Result folder
visu_path = join(config.saving_path, 'test_visu')
if not exists(visu_path):
makedirs(visu_path)
####################################
# Preload to avoid long computations
####################################
# List all precomputed preds:
saved_preds = np.sort([f for f in listdir(visu_path) if f.endswith('.pkl')])
saved_pred_inds = [int(f[:-4].split('_')[-1]) for f in saved_preds]
# Load if available
if np.all([ind in saved_pred_inds for ind in wanted_inds]):
print('\nFound previous predictions, loading them')
all_preds = []
all_gts = []
all_ingts = []
for ind in wanted_inds:
wanted_ind_file = join(visu_path, 'preds_{:08d}.pkl'.format(ind))
with open(wanted_ind_file, 'rb') as wfile:
ind_preds, ind_gts, ind_ingts = pickle.load(wfile)
                all_preds.append(np.copy(ind_preds))
from __future__ import division, print_function
import numpy as np
from numpy import dot, newaxis
from numpy.linalg import norm, solve, pinv
import scipy.optimize
import scipy.linalg.lapack
from scipy.linalg import solve_triangular
import os
import sys
import lib
from training import print_dict, training_data
def col_square_norm(A):
return np.einsum('ij, ij->j', A, A)
def row_square_norm(A):
return np.einsum('ij, ij->i', A, A)
# Optimize B in-place, using Lagrange dual method of:
# Lee et al., Efficient Sparse Coding Algorithms.
# with c=1.
@lib.timeit
def optimize_dictionary(X_T, S_T, B_T, Lam_0=None):
SST = dot(S_T.T, S_T)
XST = dot(X_T.T, S_T)
XST_T = XST.T.copy()
XTX = dot(X_T, X_T.T)
XSTTXST = dot(XST_T, XST)
def B(Lam_vec):
Lam = np.diag(Lam_vec)
return solve(SST + Lam, XST_T)
def D(Lam_vec):
Lam = np.diag(Lam_vec)
return np.trace(XTX) - np.trace(Lam) \
- np.trace(XST.dot(solve(SST + Lam, XST_T)))
def grad(Lam_vec):
Lam = np.diag(Lam_vec)
return row_square_norm(solve(SST + Lam, XST_T)) - 1
def hessian(Lam, inv_SST_Lam):
return -2 * inv_SST_Lam \
* (inv_SST_Lam.dot(XSTTXST).dot(inv_SST_Lam))
# last_B_T = None
Lam_vec = np.ones(S_T.shape[1]) if Lam_0 is None else Lam_0.copy()
print('current D:', D(Lam_vec))
Lam_vec, _, _ = scipy.optimize.fmin_l_bfgs_b(
func=lambda x: -D(x),
bounds=[(0, np.inf) for l in Lam_vec],
fprime=lambda x: -grad(x),
x0=Lam_vec
)
print('final D:', D(Lam_vec))
B_T[...] = B(Lam_vec)
print(B_T)
return Lam_vec
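# Illustrative sketch (hypothetical shapes, not part of the original training
# pipeline): build a random sparse-coding problem X ~= S B and run one in-place
# dictionary update; the L-BFGS-B call above maximizes the Lagrange dual D(Lam)
# of the constrained least-squares problem with unit-norm atoms (rows of B_T).
def _demo_optimize_dictionary():
    rng = np.random.RandomState(0)
    n_samples, n_atoms, n_features = 100, 10, 20
    B_T = rng.randn(n_atoms, n_features)
    S_T = rng.randn(n_samples, n_atoms) * (rng.rand(n_samples, n_atoms) < 0.3)
    X_T = dot(S_T, B_T) + 0.01 * rng.randn(n_samples, n_features)
    optimize_dictionary(X_T, S_T, B_T)  # B_T is overwritten with the new atoms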
def solve_cholesky(L, b):
# solve L L* x = b
y = solve_triangular(L, b, lower=True)
return solve_triangular(L.T, y)
@lib.timeit
# @profile
def feature_sign_search_vec(Y_T, X_T, A_T, gamma):
Y = Y_T.T.copy()
A = A_T.T.copy()
X = X_T.T.copy()
ATA = dot(A_T, A)
X_T[abs(X_T) < 1e-7] = 0
active_set = X != 0
theta = np.sign(X)
A_T_Y = dot(A_T, Y)
first_step_2 = True
last_Is = None
# shape same as X
L2_partials = 2 * (dot(ATA, X) - A_T_Y)
L2_partials_abs = np.abs(L2_partials)
while True:
print()
print('==== STEP 2 ====')
L2_partials_abs[np.abs(X) >= 1e-7] = 0 # rule out zero elements of X
Is = L2_partials_abs.argmax(axis=0) # max for each column
activate_rows, = np.nonzero(L2_partials_abs.max(axis=0) > gamma)
index = (Is[activate_rows], activate_rows)
active_set[index] = True
theta[index] = -np.sign(L2_partials[index])
print('mean active:', active_set.sum(axis=0).mean())
print('activating rows:', activate_rows.shape[0])
if activate_rows.shape[0] == 0:
print('WARNING: activating nothing')
assert last_Is is None or \
not np.all(last_Is == Is[activate_rows])
last_Is = Is[activate_rows]
working_rows = np.arange(X.shape[1]) if first_step_2 else activate_rows
first_step_2 = False
while True:
print('---- STEP 3 ----')
print('working rows:', working_rows.shape[0])
Q = A_T_Y[:, working_rows] - gamma / 2 * theta[:, working_rows]
X_working = X[:, working_rows]
X_new = X_working.copy()
Y_working = Y[:, working_rows]
active_set_working = active_set[:, working_rows]
for idx, active in enumerate(active_set_working.T):
active_idxs, = active.nonzero()
q_hat = Q[active_idxs, idx]
ATA_hat = ATA[np.ix_(active_idxs, active_idxs)]
_, x_new_hat, info = scipy.linalg.lapack.dposv(ATA_hat, q_hat)
if info != 0:
x_new_hat = dot(pinv(ATA_hat), q_hat)
if np.abs(dot(ATA_hat, x_new_hat) - q_hat).mean() > 0.1:
# no good. try null-space zero crossing.
active = active_set[:, idx]
x_hat = X[active_idxs, idx]
theta_hat = theta[active_idxs, idx]
u, s, v = np.linalg.svd(ATA_hat)
assert s[s.shape[0] - 1] < 1e-7
z = v[v.shape[0] - 1]
assert np.abs(dot(ATA_hat, z)).sum() < 1e-7
# [x_hat + t_i * z]_i = 0
# want to reduce theta dot (x + tz) => t * theta dot z
# so t should have opposite sign of theta dot z
direction = -np.sign(dot(theta_hat, z))
null_ts = -x_hat / z
null_ts[np.sign(null_ts) != direction] = np.inf
null_ts[np.abs(null_ts) < 1e-7] = np.inf
first_change = np.abs(null_ts).argmin()
x_new_hat = x_hat + null_ts[first_change] * z
X_new[active_idxs, idx] = x_new_hat
# sign_changes = np.logical_xor(x_new_hat > 0, x_hat > 0)
sign_changes = np.logical_and.reduce([
np.logical_xor(X_new > 0, X_working > 0),
np.abs(X_working) >= 1e-7,
# np.abs(X_new) >= 1e-7,
# np.abs((X_new - X_working) / X_working) >= 1e-9,
])
# (1 - t) * x + t * x_new
count_sign_changes = sign_changes.sum(axis=0)
max_sign_changes = count_sign_changes.max()
has_sign_changes, = np.nonzero(count_sign_changes > 0)
print('max sign changes:', max_sign_changes)
print('rows with sign changes:', has_sign_changes.shape[0])
if max_sign_changes > 0:
sign_changes = sign_changes[:, has_sign_changes]
count_sign_changes = count_sign_changes[has_sign_changes]
Y_sign = Y_working[:, has_sign_changes]
X_new_sign = X_new[:, has_sign_changes]
X_sign = X_working[:, has_sign_changes]
                compressed_ts = np.zeros((max_sign_changes, has_sign_changes.shape[0]))
import cv2
import numpy as np
import sys
from pathlib import Path
import torch
from glob import glob
sys.path.append('..')
from torchlib.segneuralnet import SegmentationNeuralNet
from torchlib.post_processing_func import MAP_post
softmax = torch.nn.Softmax(1)
MAP = MAP_post()
def get_full_path(selected_model):
if selected_model == 'Cross Entropy':
model_path = r'models/selected/Segments_Seg1009_1.3.3_unetpad_cwce__adam_map_none_1_60_0/models/model_best.pth.tar'
elif selected_model == 'Dice':
model_path = r'models/selected/Segments_Seg1009_1.3.3_unetpad_cwdice__adam_map_none_1_60_0/models/model_best.pth.tar'
elif selected_model == "J-REG":
model_path = r'models/selected/Segments_Seg1009_1.3.3_unetpad_jreg__adam_map_none_1_60_0/models/model_best.pth.tar'
else:
assert False, f"MODEL {selected_model} NOT FOUD, OPT: 'Cross Entropy' 'Dice' 'J-REG'"
return model_path
def load_model(full_url, use_cuda=True, gpu_id=0, verbose=False):
full_url = Path(full_url)
full_path = str(full_url)
nameproject = full_url.parent.parent.name
patchproject = full_url.parent.parent.parent
ckpt_path = '/'.join((full_url.parts)[-2:])
file_name = full_url.name
exp_type = full_url.parent.parent.parent.name
net = SegmentationNeuralNet(
patchproject=patchproject,
nameproject=nameproject,
no_cuda=not use_cuda, parallel=False, seed=2021,
print_freq=False, gpu=gpu_id
)
check = net.load( full_path, verbose)
if use_cuda:
net.net.cuda(gpu_id)
net.net.eval()
return net
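# Illustrative usage sketch (hypothetical, nothing here is executed on import):
# resolve the checkpoint for one of the three selectable losses and restore it,
# keeping the network on CPU unless CUDA is requested.
def _demo_load_selected_model(selected_model='Dice', use_cuda=False):
    model_path = get_full_path(selected_model)
    return load_model(model_path, use_cuda=use_cuda, verbose=True)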
def get_results(srcs, net):
def from2to3(src):
        zero = np.zeros(src.shape[:2])
#!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import re
import numpy as np
from shapely.geometry import Polygon, LineString
from shapely.ops import polygonize, unary_union
def list_intersect(a, b):
""" return the intersection of two lists """
return list(set(a) & set(b))
def list_union(a, b):
""" return the union of two lists """
return list(set(a) | set(b))
def ply_parser(fp):
'''
:param fp: PLY file path
:return: Surface coordinates and surface index
'''
tf = open(fp)
lines = tf.readlines()
flag = 0
for l in lines:
if re.search("\s*element\s*vertex\s*\d*", l) is not None:
vertex_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("\s*element\s*face\s*\d*", l) is not None:
face_num = int(re.findall("\d+\.?\d*", l)[0])
if re.search("end_header", l) is not None:
begin_num = flag + 1
flag += 1
    x = [float(re.findall(r"-*\d+\.?\d*", l)[0]) for l in lines[begin_num:begin_num + vertex_num]]
    y = [float(re.findall(r"-*\d+\.?\d*", l)[1]) for l in lines[begin_num:begin_num + vertex_num]]
    z = [float(re.findall(r"-*\d+\.?\d*", l)[2]) for l in lines[begin_num:begin_num + vertex_num]]
    cor = [[x[i], y[i], z[i]] for i in range(0, len(x))]
    cor = np.asarray(cor)
    f = [re.findall(r"\d+\.?\d*", l)
for l in lines[begin_num + vertex_num:begin_num + vertex_num + face_num]]
return cor, f
def check_relation(plane1, plane2):
'''
Checking spatial relationship between planes.
:param plane1:
:param plane2:
    :return: spatial relationship tag: 1 if plane1 contains plane2; 2 or 3 if
             they overlap and plane1 (2) or plane2 (3) has the larger area;
             4 if they do not intersect (or the test fails)
'''
p1 = Polygon(plane1)
p2 = Polygon(plane2)
try:
if p1.intersects(p2):
if p1.contains(p2):
flag = 1
else:
if p1.area >= p2.area:
flag = 2
else:
flag = 3
else:
flag = 4
return flag
except: # noqa: E722
return 4
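# Illustrative sketch (made-up coordinates): a large square containing a small
# one yields tag 1, while two disjoint squares yield tag 4.
def _demo_check_relation():
    big = np.array([[0, 0], [4, 0], [4, 4], [0, 4]])
    small = np.array([[1, 1], [2, 1], [2, 2], [1, 2]])
    far = np.array([[10, 10], [11, 10], [11, 11], [10, 11]])
    print(check_relation(big, small))  # 1: big contains small
    print(check_relation(big, far))    # 4: no intersection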
def get_height_from_dem(cor, dem_parameter):
'''
Get Z coordinate from DEM based on given XY coordinate.
r1-r4 represent the image boundaries for coordinates outside.
:param cor: XY coordinate
    :param dem_parameter: tuple (xOrigin, yOrigin, pixelWidth, pixelHeight, data, r)
:return: Z coordinate
'''
xOrigin = dem_parameter[0]
yOrigin = dem_parameter[1]
pixelWidth = dem_parameter[2]
pixelHeight = dem_parameter[3]
data = dem_parameter[4]
r = dem_parameter[5]
base_height = []
for i in range(cor.shape[0]):
x = cor[i, 0]
y = cor[i, 1]
xOffset = int((x - xOrigin) / pixelWidth)
yOffset = int((y - yOrigin) / pixelHeight)
try:
value = data[yOffset][xOffset]
base_height.append(value)
except: # noqa: E722
dist_2 = np.sum((r - np.array([yOffset, xOffset])) ** 2, axis=1)
index = np.argmin(dist_2)
value = data[r[index, 0]][r[index, 1]]
base_height.append(value)
return np.array(base_height)
def get_height_from_lower_surface(plane1, plane2):
'''
:param plane1: Higher surface
:param plane2: Lower surface
:return: Z coordinate on lower surface
'''
[a, b, c, d] = fit_plane(plane1)
def z(x):
return -(a * x[0] + b * x[1] + d) / c
return z([plane2[:, 0], plane2[:, 1]])
def get_difference_plane(plane1, plane2):
'''
Get difference and intersection part for two planes
:param plane1:
:param plane2:
:return:
'''
try:
p1 = Polygon(plane1)
p2 = Polygon(plane2)
pd = p2.difference(p1)
pi = p2.intersection(p1)
flag = True
p3 = np.array(pd.exterior.coords[:])
p4 = np.array(pi.exterior.coords[:])
return [flag, p3, p4]
except: # noqa: E722
flag = False
p3 = None
p4 = None
return [flag, p3, p4]
def fit_plane(point):
'''
Using normal vector and distance to origin to represent a plane.
:param point: Plane coordinates
:return: Plane parameters
'''
xyz_mean = np.array([point[:, 0].mean(), point[:, 1].mean(), point[:, 2].mean()])
xyz_m = np.array(
[point[:, 0] - xyz_mean[0], point[:, 1] - xyz_mean[1], point[:, 2] - xyz_mean[2]])
[U, S, V] = np.linalg.svd(xyz_m)
v = np.array([U[0, 2], U[1, 2], U[2, 2]])
a = v[0]
b = v[1]
c = v[2]
d = - np.dot(v, xyz_mean.transpose())
# normal vector of plane
return [a, b, c, d]
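# Illustrative sanity check (synthetic data): sample points from the plane
# z = 2x + 3y + 1 and verify that the fitted (a, b, c, d) satisfies
# a*x + b*y + c*z + d ~= 0 for every sample.
def _demo_fit_plane():
    rng = np.random.RandomState(0)
    xy = rng.rand(50, 2)
    pts = np.column_stack([xy, 2 * xy[:, 0] + 3 * xy[:, 1] + 1])
    a, b, c, d = fit_plane(pts)
    residual = a * pts[:, 0] + b * pts[:, 1] + c * pts[:, 2] + d
    print(np.max(np.abs(residual)))  # ~0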
def rotate_plane(plane):
'''
Rotate a 3D plane into 2D plane.
:param plane:
:return: [2D plane coordinates, rotate tag(whether or not), rotation matrix, plane center]
'''
temp_cor = plane
p_n = fit_plane(temp_cor)
p_n = np.array(p_n[0:3])
s_n = np.array([0, 0, 1])
[rx, ry, rz] = np.cross(p_n, s_n)
ra = np.arccos(np.dot(p_n, s_n) / (np.linalg.norm(p_n) * np.linalg.norm(s_n)))
rotate_flag = False
rm = None
center = None
if abs(ra) > 0.001:
norm = np.linalg.norm(np.cross(p_n, s_n))
[rx, ry, rz] = [rx / norm, ry / norm, rz / norm]
r1 = [np.cos(ra) + rx ** 2 * (1 - np.cos(ra)), rx * ry * (1 - np.cos(ra)) - rz * np.sin(ra),
ry * np.sin(ra) + rx * rz * (1 - np.cos(ra))]
r2 = [rz * np.sin(ra) + rx * ry * (1 - np.cos(ra)), np.cos(ra) + ry ** 2 * (1 - np.cos(ra)),
-rx * np.sin(ra) + ry * rz * (1 - np.cos(ra))]
r3 = [-ry * np.sin(ra) + rx * rz * (1 - np.cos(ra)),
rx * np.sin(ra) + ry * rz * (1 - np.cos(ra)),
np.cos(ra) + rz ** 2 * (1 - np.cos(ra))]
rm = np.array([r1, r2, r3])
center = [np.mean(temp_cor[:, 0]), np.mean(temp_cor[:, 1]), np.mean(temp_cor[:, 2])]
cor_2d = np.dot(rm, (temp_cor - center).transpose()).transpose()
rotate_flag = True
else:
cor_2d = temp_cor
return [cor_2d, rotate_flag, rm, center]
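# Illustrative sanity check (synthetic data): after rotation a tilted planar
# patch should be flat, i.e. the z-spread of the returned 2D coordinates is
# (numerically) zero and the rotate flag is True.
def _demo_rotate_plane():
    rng = np.random.RandomState(1)
    xy = rng.rand(30, 2)
    pts = np.column_stack([xy, 0.5 * xy[:, 0] - 0.2 * xy[:, 1] + 3.0])
    cor_2d, rotated, rm, center = rotate_plane(pts)
    print(rotated, np.ptp(cor_2d[:, 2]))  # True, ~0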
def remove_close_point(plane, T=1e-6):
'''
Remove close points in a surface
:param plane:
:param T: Threshold
:return: New plane coordinates
'''
origin_plane = plane
test_plane = plane[:, 0:2]
del_list = []
for i in range(0, test_plane.shape[0]):
for j in range(i+1, test_plane.shape[0]):
dist = np.linalg.norm(test_plane[i] - test_plane[j])
if dist <= T:
del_list.append(i)
plane = np.delete(plane, del_list, axis=0)
if plane.shape[0] < 3:
return origin_plane
else:
return plane
def fix_intersection(plane):
'''
Solve self-intersection issue
:param plane: plane coordinates
:return: None self-intersection plane coordinates
'''
if plane.shape[0] <= 4:
return plane, False
temp_cor = plane
p_n = fit_plane(temp_cor)
p_n = np.array(p_n[0:3])
s_n = np.array([0, 0, 1])
[rx, ry, rz] = np.cross(p_n, s_n)
ra = np.arccos(np.dot(p_n, s_n) / (np.linalg.norm(p_n) * np.linalg.norm(s_n)))
rotate_flag = False
if abs(ra) > 0.001:
norm = np.linalg.norm(np.cross(p_n, s_n))
[rx, ry, rz] = [rx / norm, ry / norm, rz / norm]
r1 = [np.cos(ra) + rx ** 2 * (1 - np.cos(ra)), rx * ry * (1 - np.cos(ra)) - rz * np.sin(ra),
ry * np.sin(ra) + rx * rz * (1 - np.cos(ra))]
        r2 = [rz * np.sin(ra) + rx * ry * (1 - np.cos(ra)), np.cos(ra) + ry ** 2 * (1 - np.cos(ra)),
              -rx * np.sin(ra) + ry * rz * (1 - np.cos(ra))]
import numpy as np
from scipy.linalg import qr, qr_delete
from UncertainSCI.utils.version import version_lessthan
def mgs_pivot_restart(A, p=None, pstart=None):
"""
Computes pivots from a QR decomposition with starting pivots specified. If
A is an M x N matrix, computes pivots associated to a permutation matrix P
in a partial QR decomposition of A.T with column pivoting:
A P = T R,
where the first p columns of T are orthonormal, and R is an upper
triangular matrix where the first p rows contain residual entries as in a
standard QR decomposition. The last N-p rows of R are a slice of the
identity matrix.
An ordered list of pivots is returned that are associated to the
permutation matrix P.
Args:
A (numpy array): M x N array
p (int): The number of pivots to compute. Defaults to None, in which
case p is set to max(min(M,N), len(pstart)).
pstart (list/array of ints): Ordered list of user-chosen pivots.
Returns:
numpy.ndarray: A vector of ints containing p pivots.
"""
M, N = A.shape
if pstart is None:
pstart = np.zeros(0, dtype=int)
else:
assert all(0 <= pval < N for pval in pstart)
pstart = np.array(pstart, dtype=int)
p = max(min(M, N), len(pstart))
# Since we must take pivots in pstart, permute so that these indices are at
# the top.
Npst = len(pstart)
if Npst == 0:
# More work than necessary, but is probably more efficient
_, _, inds = qr(A, pivoting=True)
return inds[:p]
cpstart = np.setdiff1d(range(N), pstart)
A = np.hstack((A[:, pstart], A[:, cpstart]))
inds = np.hstack((pstart, cpstart))
# Now perform MGS, partly using scipy/lapack routines
Q, R = qr(A[:, :Npst], mode='economic')
# MGS Orthogonalization:
for qq in range(Npst):
A[:, Npst:] -= np.outer(Q[:, qq], Q[:, qq].T @ A[:, Npst:])
# Now we just MGS our way to the end.
for q in range(Npst, p):
pnext = q + np.argmax(np.sum(A[:, q:]**2, axis=0))
# Pivot
inds[[q, pnext]] = inds[[pnext, q]]
A[:, [q, pnext]] = A[:, [pnext, q]]
# Orthogonalize
qvec = A[:, q]
if np.linalg.norm(qvec) < 1e-13:
assert False # Matrix is low-rank so stop pivoting
qvec /= np.linalg.norm(qvec)
temp = (qvec.T @ A[:, q+1:])
A[:, q+1:] -= np.outer(qvec, temp)
return inds[:p]
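# Illustrative sketch (random data): ask for the pivots of a 5 x 8 matrix while
# forcing columns 2 and 6 to be taken (in that order) before any free column.
def _demo_mgs_pivot_restart():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 8)
    pivots = mgs_pivot_restart(A, pstart=[2, 6])
    print(pivots)  # length min(5, 8) = 5, starting with [2, 6]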
def greedy_d_optimal(A, p, pstart=None):
r"""
Chooses p rows in A via a greedy D-optimal design. Performs the iterative
optimization,
if |R| < A.shape[1]:
max_r det( A[S, :] * A[S, :].T ), S = r \cup R,
else:
max_r det( A[S, :]^T * A[S, :] ), S = r \cup R,
where R = \emptyset is the starting point, and at each step R \gets R \cup
r^\ast, where r^\ast is the maximizing row.
    If an iterable pstart is given, forces the indices pstart to lie in the set
    R. If len(pstart) < N, with N the number of columns of A, the first N pivots
    are completed with mgs_pivot_restart before the greedy determinant updates.
Returns an ordered pivot vector P indicating the ordered selection of rows
of A.
"""
assert len(A.shape) == 2
if p > A.shape[0]:
p = A.shape[0]
M, N = A.shape
if pstart is None:
R, P = qr(A.T, pivoting=True, mode='r')
numpivots = N
else:
assert all(0 <= pval <= M-1 for pval in pstart)
# User asked for fewer pivots than the starting ones
if len(pstart) >= p:
return pstart[:p]
P = np.hstack([np.array(pstart), np.setdiff1d(range(M), pstart)])
if len(pstart) < N:
P[:N] = mgs_pivot_restart(A.T, p=N, pstart=pstart)
numpivots = len(pstart)
# Otherwise: we have at least N pivots, but fewer than p.
if p > numpivots:
W = A[P[:numpivots], :]
G = np.dot(W.T, W)
Ginvwm = np.linalg.solve(G, A[P[numpivots:], :].T)
for m in range(numpivots, p):
# The remaining choices:
detnorms = np.sum(A[P[m:], :].T * Ginvwm[:, (m-numpivots):], axis=0)
# Det maximization
Pind = np.argmax(detnorms)
# Update inv(G)*wm via sherman-morrison
Ginvwm[:, (m-numpivots):] -= np.outer(Ginvwm[:, m-numpivots+Pind],
np.dot(A[P[m+Pind], :].T,
Ginvwm[:, (m-numpivots):])/(1+detnorms[Pind])
)
# Pivoting
P[[m, Pind+m]] = P[[Pind+m, m]]
Ginvwm[:, [m-numpivots, m-numpivots+Pind]] = Ginvwm[:,
[m-numpivots+Pind,
m-numpivots]]
return P[:p]
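# Illustrative sketch (random data): greedily pick 6 rows of a 100 x 4 design
# matrix so that the selected sub-design maximizes det(W^T W) one row at a time.
def _demo_greedy_d_optimal():
    rng = np.random.RandomState(0)
    A = rng.randn(100, 4)
    rows = greedy_d_optimal(A, 6)
    print(rows)  # ordered indices of the 6 selected rows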
def lstsq_loocv_error(A, b, weights):
"""Computes the leave-one-out cross validation (LOOCV) metric for a
least-squares problem.
Parameters:
A: The M x N design matrix from a least-squares procedure
b: The right-hand side array with M rows from a least-squares procedure
weights: size-M array with positive entries, indicating pointwise
weights in the LOOCV metric.
    Returns:
cv: The sum-of-squares cross-validation metric (scalar, float)
"""
M, N = A.shape
Q, R, P = qr(A, pivoting=True, mode='economic')
bdim = len(b.shape)
if bdim == 1:
cv = 0.
else:
        cv = np.zeros(b.shape[1])
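    # Sketch of the cross-validation computation, assuming the standard
    # leave-one-out identity for linear least squares: with hat matrix
    # H = Q Q^T, the deleted-point residual at row i is r_i / (1 - h_ii),
    # and the metric is the weighted sum of its squares.
    coeffs = np.linalg.lstsq(A, b, rcond=None)[0]
    residuals = b - A.dot(coeffs)
    leverage = np.sum(Q**2, axis=1)  # diagonal of the hat matrix
    if bdim == 1:
        cv = cv + np.sum(weights * (residuals / (1 - leverage))**2)
    else:
        cv = cv + np.sum(weights[:, None] * (residuals / (1 - leverage)[:, None])**2,
                         axis=0)
    return cv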
import numpy as np
from scipy import ndimage
from scipy import stats
from skimage.color import rgb2grey
from skimage.util import img_as_float
from skimage.feature import peak_local_max
def _compute_derivatives(image):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndimage.sobel(image, axis=0, mode='constant', cval=0)
imx = ndimage.sobel(image, axis=1, mode='constant', cval=0)
return imx, imy
def _compute_auto_correlation(image, sigma):
"""Compute auto-correlation matrix using sum of squared differences.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
Axx : ndarray
Element of the auto-correlation matrix for each pixel in input image.
Axy : ndarray
Element of the auto-correlation matrix for each pixel in input image.
Ayy : ndarray
Element of the auto-correlation matrix for each pixel in input image.
"""
if image.ndim == 3:
image = img_as_float(rgb2grey(image))
imx, imy = _compute_derivatives(image)
    # structure tensor
Axx = ndimage.gaussian_filter(imx * imx, sigma, mode='constant', cval=0)
Axy = ndimage.gaussian_filter(imx * imy, sigma, mode='constant', cval=0)
Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode='constant', cval=0)
return Axx, Axy, Ayy
def corner_kitchen_rosenfeld(image):
"""Compute Kitchen and Rosenfeld corner measure response image.
The corner measure is calculated as follows::
(imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
------------------------------------------------------
(imx**2 + imy**2)
Where imx and imy are the first and imxx, imxy, imyy the second derivatives.
Parameters
----------
image : ndarray
Input image.
Returns
-------
response : ndarray
Kitchen and Rosenfeld response image.
"""
imx, imy = _compute_derivatives(image)
imxx, imxy = _compute_derivatives(imx)
imyx, imyy = _compute_derivatives(imy)
numerator = (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
denominator = (imx**2 + imy**2)
response = np.zeros_like(image, dtype=np.double)
mask = denominator != 0
response[mask] = numerator[mask] / denominator[mask]
return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
"""Compute Harris corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are the first derivatives averaged with a gaussian filter.
The corner measure is then defined as::
det(A) - k * trace(A)**2
or::
2 * det(A) / (trace(A) + eps)
Parameters
----------
image : ndarray
Input image.
method : {'k', 'eps'}, optional
Method to compute the response image from the auto-correlation matrix.
k : float, optional
Sensitivity factor to separate corners from edges, typically in range
`[0, 0.2]`. Small values of k result in detection of sharp corners.
eps : float, optional
Normalisation factor (Noble's corner measure).
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Harris response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_harris, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square
array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_harris(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = _compute_auto_correlation(image, sigma)
# determinant
detA = Axx * Ayy - Axy**2
# trace
traceA = Axx + Ayy
if method == 'k':
response = detA - k * traceA**2
else:
response = 2 * detA / (traceA + eps)
return response
def corner_shi_tomasi(image, sigma=1):
"""Compute Shi-Tomasi (Kanade-Tomasi) corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are the first derivatives averaged with a gaussian filter.
The corner measure is then defined as the smaller eigenvalue of A::
((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
response : ndarray
Shi-Tomasi response image.
References
----------
.. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_shi_tomasi, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square
array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = _compute_auto_correlation(image, sigma)
# minimum eigenvalue of A
response = ((Axx + Ayy) - np.sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2
return response
def corner_foerstner(image, sigma=1):
"""Compute Foerstner corner measure response image.
This corner detector uses information from the auto-correlation matrix A::
A = [(imx**2) (imx*imy)] = [Axx Axy]
[(imx*imy) (imy**2)] [Axy Ayy]
Where imx and imy are the first derivatives averaged with a gaussian filter.
The corner measure is then defined as::
w = det(A) / trace(A) (size of error ellipse)
q = 4 * det(A) / trace(A)**2 (roundness of error ellipse)
Parameters
----------
image : ndarray
Input image.
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as
weighting function for the auto-correlation matrix.
Returns
-------
w : ndarray
Error ellipse sizes.
q : ndarray
Roundness of error ellipse.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
Examples
--------
>>> from skimage.feature import corner_foerstner, corner_peaks
>>> square = np.zeros([10, 10])
>>> square[2:8, 2:8] = 1
>>> square
array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> w, q = corner_foerstner(square)
>>> accuracy_thresh = 0.5
>>> roundness_thresh = 0.3
>>> foerstner = (q > roundness_thresh) * (w > accuracy_thresh) * w
>>> corner_peaks(foerstner, min_distance=1)
array([[2, 2],
[2, 7],
[7, 2],
[7, 7]])
"""
Axx, Axy, Ayy = _compute_auto_correlation(image, sigma)
# determinant
detA = Axx * Ayy - Axy**2
# trace
traceA = Axx + Ayy
w = np.zeros_like(image, dtype=np.double)
q = np.zeros_like(image, dtype=np.double)
mask = traceA != 0
w[mask] = detA[mask] / traceA[mask]
q[mask] = 4 * detA[mask] / traceA[mask]**2
return w, q
def corner_subpix(image, corners, window_size=11, alpha=0.99):
"""Determine subpixel position of corners.
Parameters
----------
image : ndarray
Input image.
corners : (N, 2) ndarray
Corner coordinates `(row, col)`.
window_size : int, optional
Search window size for subpixel estimation.
alpha : float, optional
Significance level for point classification.
Returns
-------
positions : (N, 2) ndarray
Subpixel corner positions. NaN for "not classified" corners.
References
----------
.. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
foerstner87.fast.pdf
.. [2] http://en.wikipedia.org/wiki/Corner_detection
"""
# window extent in one direction
wext = (window_size - 1) / 2
# normal equation arrays
N_dot = np.zeros((2, 2), dtype=np.double)
N_edge = np.zeros((2, 2), dtype=np.double)
b_dot = np.zeros((2, ), dtype=np.double)
b_edge = np.zeros((2, ), dtype=np.double)
# critical statistical test values
redundancy = window_size**2 - 2
t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
# coordinates of pixels within window
y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
corners_subpix = np.zeros_like(corners, dtype=np.double)
for i, (y0, x0) in enumerate(corners):
# crop window around corner + border for sobel operator
miny = y0 - wext - 1
maxy = y0 + wext + 2
minx = x0 - wext - 1
maxx = x0 + wext + 2
window = image[miny:maxy, minx:maxx]
winx, winy = _compute_derivatives(window)
        # compute gradient squares and remove border
winx_winx = (winx * winx)[1:-1, 1:-1]
winx_winy = (winx * winy)[1:-1, 1:-1]
winy_winy = (winy * winy)[1:-1, 1:-1]
# sum of squared differences (mean instead of gaussian filter)
Axx = np.sum(winx_winx)
Axy = np.sum(winx_winy)
Ayy = np.sum(winy_winy)
# sum of squared differences weighted with coordinates
# (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
#!/usr/bin/env python
__all__ = ['render', 'show', 'set_scene']
import numpy as np
from ..vec3 import Vec3
from .helper import intersect, show, set_scene
class RayType:
kUnknownRay, kCameraRay, kShadowRay = range(3)
class Ray:
def __init__(self, orig, direction):
self.orig = orig
self.direction = direction
self.tmin = 0
self.tmax = 1.e24
self.type = RayType.kUnknownRay
def __call__(self, t):
return self.orig + self.direction * t
class Atmosphere:
def __init__(self, sd=Vec3(0., 1., 0.), re=6360.e3,
ra=6420.e3, hr=7994., hm=1200.):
self.sundir = sd
self.radiusEarth = re
self.radiusAtmosphere = ra
self.Hr = hr
self.Hm = hm
# For Mars
# self.sundir = sd
# self.radiusEarth = 3389.5e3
# self.radiusAtmosphere = 3396.2e3
# self.Hr = hr
# self.Hm = hm
# Rayleigh scattering coefficients at sea level (for Earth)
# 440 nm, 550 nm, 680 nm
self.betaR = Vec3(3.8e-6, 13.5e-6, 33.1e-6)
# Rayleigh scattering coefficients (for Mars)
# 440 nm, 550 nm, 680 nm
# self.betaR = Vec3(5.75e-3, 13.57e-3, 19.918e-3)
# Mie scattering coefficient at sea level (for Earth)
self.betaM = Vec3(21.e-6)
def compute_incident_light(self, r):
t0 = self.radiusEarth + 1
t1 = 0
t = [t0, t1]
if (not intersect(r, self.radiusAtmosphere, t)) or (t1 < 0.):
return Vec3(0)
t0, t1 = t
if (t0 > r.tmin) and (t0 > 0):
r.tmin = t0
if t1 < r.tmax:
r.tmax = t1
numSamples = 16.
numSamplesLight = 16.
segmentLength = (r.tmax - r.tmin) / numSamples
tCurrent = r.tmin
sumR = Vec3(0.)
sumM = Vec3(0.)
opticalDepthR = 0
opticalDepthM = 0
mu = r.direction.dot(self.sundir)
# Anisotropy of the medium (aerosol)
# if g = 0, function is equal to rayleigh
g = 0.76
phaseR = 3. / (16. * np.pi) * (mu * mu + 1.)
phaseM = 3. / (8. * np.pi) * \
((1. - g * g) * (1. + mu * mu)) / \
((2. + g * g) * np.power(1. + g * g - 2. * g * mu, 1.5))
for i in np.arange(numSamples):
samplePosition = r(tCurrent + segmentLength * 0.5)
height = samplePosition.length() - self.radiusEarth
            hr = segmentLength * np.exp(-height / self.Hr)
import logging
logging.basicConfig(level=logging.INFO)
import argparse
import torch
import numpy as np
import cv2
import time
from scipy import spatial
from pytorch3d import transforms
import json
import open3d as o3d
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--resolution', default=800, type=int, help="View window resolution")
parser.add_argument('-l', '--trajectory-length', default=500, type=int, help="Generated trajectory key-frame count.")
parser.add_argument('-m', '--trajectory-momentum', default=0.95, type=float, help="Generated trajectory movement momentum.")
parser.add_argument('--trajectory-acceleration', default=0.8, type=float,
help="Generated trajectory acceleration standard deviation.")
parser.add_argument('--point-distance', default=10, type=float,
help="Point to camera distance standard deviation.")
parser.add_argument('--points-per-camera', default=15, type=float,
help="Point generated per camera.")
parser.add_argument('--point-visibility-falloff', default=8, type=float,
help="How distant cameras see the point.")
parser.add_argument('--view-noise', default=0.02, type=float,
help="Point location error in radians. (points projected to camera)")
parser.add_argument('--cam-noise', default=100, type=float,
help="Initial camera position noise.")
parser.add_argument('--point-noise', default=100, type=float,
help="Initial point position noise.")
parser.add_argument('--world-shift', default=10, type=float,
help="Initial shift in world position estimate.")
parser.add_argument('--learning-rate', default=5, type=float,
help="ADAM learning rate.")
parser.add_argument('--cam-dist-weight', default=0.25, type=float,
help="Mutual camera distance cost weight.")
parser.add_argument('--cam-dir-weight', default=0.25, type=float,
help="Horizontal camera alinment cost weight.")
parser.add_argument('--json-recording', help='Read viewing directions from json file.')
args = parser.parse_args()
return args
def range_coeff(x, distance, range):
return np.exp(-np.log2(distance / x) ** 2 / range)
def generate_cam_trajectory(length=30, momentum=0.94, acc_sdev=0.6, acc2_sdev=0.0):
positions = np.zeros([length, 3], dtype=np.float32)
positions_2 = np.zeros([length, 3], dtype=np.float32)
last_pos = np.zeros([3], dtype=np.float32)
last_pos_2 = np.zeros([3], dtype=np.float32)
last_velocity = np.asarray([10, 0, 0], dtype=np.float32)
last_velocity_2 = np.asarray([10, 0, 0], dtype=np.float32)
for p, p2 in zip(positions, positions_2):
p[...] = last_pos + last_velocity
p2[...] = last_pos_2 + last_velocity_2
last_pos = p
last_pos_2 = p2
acc = np.random.normal(size=[2]) * acc_sdev
last_velocity[:2] = last_velocity[:2] * momentum + acc
last_velocity_2[:2] = last_velocity_2[:2] * momentum + acc + np.random.normal(size=[2]) * acc2_sdev
return positions, positions_2
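# Illustrative sketch (hypothetical helper): generate a short smooth trajectory
# together with its perturbed twin and report how far the two drift apart;
# with the default acc2_sdev=0.0 both trajectories are identical.
def _demo_generate_cam_trajectory():
    cams, cams_2 = generate_cam_trajectory(length=50, momentum=0.9, acc_sdev=0.5)
    drift = np.linalg.norm(cams - cams_2, axis=1)
    print(cams.shape, drift.max())  # (50, 3) 0.0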
def generate_points(trajectory, trajectory_2, point_camera_distance=5, points_per_camera=10, camera_point_visibility_distance=10):
points = np.zeros([trajectory.shape[0]*points_per_camera, 3])
points_2 = np.zeros([trajectory.shape[0]*points_per_camera, 3])
for i in range(trajectory.shape[0]):
p = np.random.normal(size=[points_per_camera, 3]) * point_camera_distance
p[:, 2] *= 0.1
points[i*points_per_camera:i*points_per_camera+points_per_camera] = \
trajectory[i].reshape(1, 3) + p
points_2[i*points_per_camera:i*points_per_camera+points_per_camera] = \
trajectory_2[i].reshape(1, 3) + p
camera_point_assignment = np.zeros([trajectory.shape[0], points.shape[0]])
for i in range(trajectory.shape[0]):
camera_point_assignment[i, i*points_per_camera:i*points_per_camera+points_per_camera] = 1
distances = spatial.distance_matrix(trajectory, points, p=2)
prob = range_coeff(distances, camera_point_visibility_distance, 1) * 0.7
print(prob.mean())
prob = prob > np.random.uniform(0, 1, size=prob.shape)
print(np.mean(prob))
camera_point_assignment[prob] = 1
return points, points_2, camera_point_assignment
def view(cam_trajectory, points, points2, camera_point_assignment, resolution=1600, center=None, size=None, relations=False, errors=False):
cam_trajectory = cam_trajectory.copy()[:, :2]
points = points.copy()[:, :2]
points2 = points2.copy()[:, :2]
all = cam_trajectory[:, :2]
if center is None:
center = (np.max(all, axis=0, keepdims=True) + np.min(all, axis=0, keepdims=True)) / 2
if size is None:
size = np.max(np.linalg.norm(all - center, axis=1)) * 1.1
img = np.zeros([resolution, resolution, 3], dtype=np.uint8)
cam_trajectory = (cam_trajectory - center) / size / 2 + 0.5
points = (points - center) / size / 2 + 0.5
points2 = (points2 - center) / size / 2 + 0.5
cam_trajectory *= resolution
points *= resolution
points2 *= resolution
if relations:
for start, camera_points in zip(cam_trajectory, camera_point_assignment):
for end in points[camera_points > 0]:
cv2.line(img, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), (128, 128, 128))
if errors:
for p1, p2 in zip(points, points2):
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (128, 128, 128))
cv2.circle(img, (int(p1[0]), int(p1[1])), 1, (0, 0, 255), -1)
cv2.circle(img, (int(p2[0]), int(p2[1])), 1, (255, 0, 0), -1)
else:
for p in points:
cv2.circle(img, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
for p in cam_trajectory:
cv2.circle(img, (int(p[0]), int(p[1])), 2, (0, 255, 0), -1)
return img, center, size
def get_viewing_directions(cam_trajectory, points, noise_sdev=0.1):
directions = cam_trajectory.reshape(cam_trajectory.shape[0], 1, 3) - points.reshape(1, points.shape[0], 3)
directions = directions / np.linalg.norm(directions, axis=2, keepdims=True)
noise = np.random.normal(size=directions.shape)
noise = noise ** 4 * np.sign(noise) * noise_sdev
torch_c_rotation = transforms.euler_angles_to_matrix(torch.tensor(noise), convention='XYZ')
torch_c_rotation = torch_c_rotation.reshape(-1, 3, 3)
directions = torch_c_rotation.bmm(torch.tensor(directions).reshape(-1, 3, 1)).cpu().numpy().reshape(directions.shape[0], directions.shape[1], 3)
return directions
def problem_loss(torch_cameras, torch_c_rotation, torch_camera_i, torch_points, torch_point_i, torch_directions,
camera_distance, camera_up, cam_dist_weight, cam_dir_weight):
cam_flat = torch_cameras[torch_camera_i]
points_flat = torch_points[torch_point_i]
# cameras should be roughly horizontal
torch_c_rotation = transforms.euler_angles_to_matrix(torch_c_rotation * 0.05, convention='XYZ')
real_up = torch_c_rotation.bmm(camera_up.reshape(-1, 3, 1))
up_score = 1.001 - camera_up.reshape(-1, 1, 3).bmm(real_up)
# rotate camera point views
torch_directions = torch_c_rotation[torch_camera_i].bmm(torch_directions.reshape(-1, 3, 1))
# get current point view directions
dir = cam_flat - points_flat
dir = dir / (torch.norm(dir, dim=1, keepdim=True) + 1e-5)
# point view loss
prod = 1.001 - torch.bmm(dir.reshape(-1, 1, 3), torch_directions.reshape(-1, 3, 1))
prod = prod ** 0.1
# consecutive camera pair distance loss
cam_dist = torch.sum((torch.sum((torch_cameras[1:] - torch_cameras[:-1]) ** 2, axis=1) ** 0.5 - camera_distance)**2)
# final combined loss
opt = torch.sum(prod) + cam_dist_weight * cam_dist + cam_dir_weight * torch.sum(up_score)
return opt
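# Hedged sketch (not part of the original script): a minimal Adam loop showing how
# problem_loss() could be minimized. camera_i/point_i index one camera and one point per
# observation (e.g. the nonzero entries of a camera/point assignment matrix), and
# directions_np holds one viewing unit vector per observation. All names, defaults and
# shapes here are illustrative assumptions.
def example_optimize(cameras_np, points_np, camera_i, point_i, directions_np,
                     camera_distance=10.0, steps=200, lr=5.0,
                     cam_dist_weight=0.25, cam_dir_weight=0.25):
    torch_cameras = torch.tensor(cameras_np, dtype=torch.float32, requires_grad=True)
    torch_points = torch.tensor(points_np, dtype=torch.float32, requires_grad=True)
    # one XYZ Euler-angle triple per camera, optimized jointly with the positions
    torch_c_rotation = torch.zeros(cameras_np.shape[0], 3, requires_grad=True)
    torch_camera_i = torch.tensor(camera_i, dtype=torch.long)
    torch_point_i = torch.tensor(point_i, dtype=torch.long)
    torch_directions = torch.tensor(directions_np, dtype=torch.float32)
    camera_up = torch.zeros(cameras_np.shape[0], 3)
    camera_up[:, 2] = 1.0  # assume z is "up" for the horizontality term
    optimizer = torch.optim.Adam([torch_cameras, torch_points, torch_c_rotation], lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        loss = problem_loss(torch_cameras, torch_c_rotation, torch_camera_i, torch_points,
                            torch_point_i, torch_directions, camera_distance, camera_up,
                            cam_dist_weight, cam_dir_weight)
        loss.backward()
        optimizer.step()
    return torch_cameras.detach().numpy(), torch_points.detach().numpy()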
def generate_data(args):
cameras, cameras_2 = generate_cam_trajectory(args.trajectory_length, momentum=args.trajectory_momentum,
acc_sdev=args.trajectory_acceleration)
points, points_2, camera_point_assignment = generate_points(cameras, cameras_2,
point_camera_distance=args.point_distance,
points_per_camera=args.points_per_camera,
camera_point_visibility_distance=args.point_visibility_falloff)
    print('Connections: ', np.sum(camera_point_assignment))
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Synthetic Solar Radiation Module
Copyright (C) 2014-2017 <NAME>
Last edited: January 2018
Main Functions
--------------
- Aguiar_hourly_G0: generate annual sequence of hourly irradiances on a horizontal plane (W/m2)
- Aguiar_hourly_kt: generate sequence of hourly clearness indices for a single solar day
- Aguiar_daily_Kt: generate sequence of daily clearness indices given mean monthly Kt
- trend_sequence: generate annual sequence of hourly trend irradiances (no randomness)
- incident_HDKR: generate annual sequence of hourly irradiances incident on a tilted plane with HDKR model (W/m2)
Note: all hourly sequences are calculated in terms of solar time at the location (not civil time)
Utility Functions
-----------------
- declination: calculate solar declination
- sunrise: calculate sunrise / sunset angle (in radians)
- eccentricity: calculate earth eccentricity correction factor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
def declination(n):
"""
Returns the solar declination (in radians) on the n-th day of the year using the accurate
approximation:
delta = -arcsin (0.39779 cos [0.98565(n+10) + 1.914 sin (0.98565 (n-2))])
(note that the quantities inside the cosine and sine are in degrees)
Inputs: n is the day of the year (where n=1 is midnight on January 1)
"""
delta = -np.arcsin(0.39779 * np.cos(np.radians(0.98565 * (n+10) + 1.914 * np.sin (np.radians(0.98565 * (n-2))))))
return delta
def sunrise(delta, lat):
"""
Returns the sunrise / sunset angle (in radians)
Inputs: delta is the solar declination angle (in radians)
lat is the latitude of the location (in radians)
"""
omega = np.arccos(-np.tan(delta) * np.tan(lat))
return omega
def eccentricity(n):
"""
Returns the earth's eccentricity correction factor according to Spencer's formula in the paper:
<NAME>, “Fourier series representation of the position of the Sun”, Search, Vol. 2, 1972
Inputs: n is the day of the year (where n=1 is midnight on January 1)
"""
day_ang = 2 * np.pi * (n - 1) / 365
epsilon = 1.00011 + 0.034221 * np.cos(day_ang) + 0.00128 * np.sin(day_ang) + 0.000719 * np.cos(2 * day_ang) + 0.000077 * np.sin(2 * day_ang)
return epsilon
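# Hedged example (not in the original module): combine the three utility functions above
# for an illustrative day-of-year and latitude.
def _example_solar_geometry(n=172, lat_deg=48.2):
    delta = declination(n)                        # solar declination [rad]
    omega = sunrise(delta, np.radians(lat_deg))   # sunrise/sunset hour angle [rad]
    epsilon = eccentricity(n)                     # eccentricity correction factor
    day_length_hours = 2 * np.degrees(omega) / 15.0
    return np.degrees(delta), day_length_hours, epsilon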
def Aguiar_daily_Kt(Ktm, Kt0, nd):
"""
Generates a sequence of synthetic daily clearness indices (Kt) using the mean monthly clearness
index as the input. The algorithm is based on the method by Aguiar et al in the paper:
<NAME> and <NAME>, "A simple procedure for the generation of sequences of
daily radiation values using Markov transition matrices", Solar Energy, vol. 40, 269-279, 1988
Inputs: Ktm is the mean clearness index for the month
Kt0 is initial clearness index (on the first day of the month)
nd is the number of daily clearness indices to generate
"""
# Markov Transition Matrices
MTM_lib = {}
MTM_states = [0.30, 0.35, 0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70]
MTM_min = [0.031, 0.058, 0.051, 0.052, 0.028, 0.053, 0.044, 0.085, 0.010, 0.319]
MTM_max = [0.705, 0.694, 0.753, 0.753, 0.807, 0.856, 0.818, 0.846, 0.842, 0.865]
# Kt <= 0.30
MTM_lib[0] = np.matrix([[0.229,0.333,0.208,0.042,0.083,0.042,0.042,0.021,0.000,0.000],
[0.167,0.319,0.194,0.139,0.097,0.028,0.042,0.000,0.014,0.000],
[0.250,0.250,0.091,0.136,0.091,0.046,0.046,0.023,0.068,0.000],
[0.158,0.237,0.158,0.263,0.026,0.053,0.079,0.026,0.000,0.000],
[0.211,0.053,0.211,0.158,0.053,0.053,0.158,0.105,0.000,0.000],
[0.125,0.125,0.250,0.188,0.063,0.125,0.000,0.125,0.000,0.000],
[0.040,0.240,0.080,0.120,0.080,0.080,0.120,0.120,0.080,0.040],
[0.000,0.250,0.000,0.125,0.000,0.125,0.125,0.250,0.063,0.063],
[0.000,0.250,0.000,0.125,0.250,0.000,0.250,0.000,0.000,0.125],
[0.000,0.000,0.000,0.000,0.000,0.000,0.500,0.250,0.000,0.250]])
# 0.30 < Kt <= 0.35
MTM_lib[1] = np.matrix([[0.000,0.000,0.091,0.000,0.364,0.091,0.182,0.000,0.273,0.000],
[0.118,0.118,0.176,0.118,0.059,0.118,0.176,0.059,0.059,0.000],
[0.067,0.267,0.067,0.200,0.067,0.000,0.133,0.133,0.000,0.067],
[0.118,0.235,0.000,0.235,0.059,0.176,0.118,0.000,0.059,0.000],
[0.077,0.154,0.308,0.077,0.154,0.077,0.000,0.077,0.077,0.000],
[0.083,0.000,0.167,0.250,0.083,0.167,0.000,0.083,0.167,0.000],
[0.222,0.222,0.000,0.111,0.111,0.000,0.111,0.222,0.000,0.000],
[0.091,0.182,0.273,0.000,0.091,0.273,0.000,0.091,0.000,0.000],
[0.111,0.111,0.111,0.222,0.000,0.000,0.000,0.222,0.111,0.111],
[0.000,0.000,0.000,0.000,0.000,0.000,0.500,0.000,0.000,0.500]])
# 0.35 < Kt <= 0.40
MTM_lib[2] = np.matrix([[0.206,0.088,0.176,0.176,0.088,0.029,0.176,0.029,0.029,0.000],
[0.120,0.100,0.140,0.160,0.120,0.220,0.100,0.000,0.020,0.020],
[0.077,0.123,0.185,0.123,0.077,0.139,0.092,0.123,0.061,0.000],
[0.048,0.111,0.095,0.206,0.206,0.190,0.095,0.048,0.000,0.000],
[0.059,0.137,0.118,0.137,0.098,0.118,0.118,0.157,0.059,0.000],
[0.014,0.097,0.139,0.153,0.125,0.139,0.208,0.056,0.042,0.028],
[0.073,0.101,0.116,0.145,0.087,0.159,0.203,0.087,0.029,0.000],
[0.019,0.037,0.111,0.056,0.074,0.111,0.185,0.296,0.074,0.037],
[0.035,0.069,0.035,0.000,0.035,0.103,0.172,0.138,0.379,0.035],
[0.000,0.167,0.167,0.000,0.167,0.000,0.000,0.333,0.000,0.167]])
# 0.40 < Kt <= 0.45
MTM_lib[3] = np.matrix([[0.167,0.167,0.167,0.000,0.083,0.125,0.000,0.167,0.125,0.000],
[0.117,0.117,0.150,0.117,0.083,0.117,0.200,0.067,0.017,0.017],
[0.049,0.085,0.134,0.158,0.098,0.110,0.134,0.134,0.061,0.037],
[0.039,0.090,0.141,0.141,0.167,0.141,0.090,0.141,0.039,0.013],
[0.009,0.139,0.074,0.093,0.194,0.139,0.167,0.093,0.074,0.019],
[0.036,0.018,0.117,0.099,0.144,0.180,0.180,0.117,0.072,0.036],
[0.000,0.046,0.061,0.061,0.136,0.159,0.273,0.167,0.098,0.000],
[0.016,0.056,0.080,0.128,0.104,0.080,0.160,0.208,0.136,0.032],
[0.011,0.053,0.021,0.043,0.128,0.096,0.074,0.223,0.277,0.074],
[0.000,0.074,0.037,0.000,0.074,0.074,0.074,0.074,0.333,0.259]])
# 0.45 < Kt <= 0.50
MTM_lib[4] = np.matrix([[0.120,0.200,0.160,0.120,0.120,0.120,0.080,0.000,0.040,0.040],
[0.100,0.080,0.120,0.140,0.140,0.200,0.180,0.040,0.000,0.000],
[0.046,0.114,0.068,0.171,0.125,0.171,0.080,0.159,0.057,0.011],
[0.015,0.061,0.084,0.099,0.191,0.153,0.153,0.115,0.115,0.015],
[0.024,0.030,0.098,0.098,0.165,0.195,0.195,0.140,0.043,0.012],
[0.015,0.026,0.062,0.124,0.144,0.170,0.170,0.222,0.062,0.005],
[0.000,0.013,0.045,0.108,0.112,0.175,0.188,0.224,0.117,0.018],
[0.008,0.023,0.054,0.066,0.093,0.125,0.191,0.253,0.183,0.004],
[0.006,0.022,0.061,0.033,0.067,0.083,0.139,0.222,0.322,0.044],
[0.000,0.046,0.091,0.091,0.046,0.046,0.136,0.091,0.273,0.182]])
# 0.50 < Kt <= 0.55
MTM_lib[5] = np.matrix([[0.250,0.179,0.107,0.107,0.143,0.071,0.107,0.036,0.000,0.000],
[0.133,0.022,0.089,0.111,0.156,0.178,0.111,0.133,0.067,0.000],
[0.064,0.048,0.143,0.048,0.175,0.143,0.206,0.095,0.079,0.000],
[0.000,0.022,0.078,0.111,0.156,0.156,0.244,0.167,0.044,0.022],
[0.016,0.027,0.037,0.069,0.160,0.219,0.230,0.160,0.075,0.005],
[0.013,0.025,0.030,0.093,0.144,0.202,0.215,0.219,0.055,0.004],
[0.006,0.041,0.035,0.064,0.090,0.180,0.337,0.192,0.049,0.006],
[0.012,0.021,0.029,0.035,0.132,0.123,0.184,0.371,0.082,0.012],
[0.008,0.016,0.016,0.024,0.071,0.103,0.159,0.270,0.309,0.024],
[0.000,0.000,0.000,0.000,0.059,0.000,0.059,0.294,0.412,0.176]])
# 0.55 < Kt <= 0.60
MTM_lib[6] = np.matrix([[0.217,0.087,0.000,0.174,0.130,0.087,0.087,0.130,0.087,0.000],
[0.026,0.079,0.132,0.079,0.026,0.158,0.158,0.132,0.158,0.053],
[0.020,0.020,0.020,0.040,0.160,0.180,0.160,0.200,0.100,0.100],
[0.025,0.013,0.038,0.076,0.076,0.139,0.139,0.266,0.215,0.013],
[0.030,0.030,0.050,0.020,0.091,0.131,0.162,0.283,0.131,0.071],
[0.006,0.006,0.013,0.057,0.057,0.121,0.204,0.287,0.185,0.064],
[0.004,0.026,0.037,0.030,0.093,0.107,0.193,0.307,0.167,0.037],
[0.011,0.009,0.014,0.042,0.041,0.071,0.152,0.418,0.203,0.041],
[0.012,0.022,0.022,0.038,0.019,0.050,0.113,0.281,0.360,0.084],
[0.008,0.024,0.039,0.039,0.063,0.039,0.118,0.118,0.284,0.268]])
# 0.60 < Kt <= 0.65
MTM_lib[7] = np.matrix([[0.067,0.133,0.133,0.067,0.067,0.200,0.133,0.133,0.067,0.000],
[0.118,0.059,0.059,0.059,0.059,0.118,0.118,0.235,0.118,0.059],
[0.000,0.024,0.024,0.049,0.146,0.073,0.195,0.244,0.195,0.049],
[0.026,0.000,0.026,0.026,0.053,0.184,0.263,0.184,0.237,0.000],
[0.014,0.000,0.042,0.056,0.069,0.097,0.139,0.306,0.278,0.000],
[0.009,0.009,0.052,0.069,0.052,0.112,0.215,0.285,0.138,0.060],
[0.009,0.009,0.026,0.017,0.094,0.099,0.232,0.283,0.210,0.021],
[0.010,0.014,0.016,0.019,0.027,0.062,0.163,0.467,0.202,0.019],
[0.004,0.007,0.031,0.017,0.033,0.050,0.086,0.252,0.469,0.050],
[0.000,0.000,0.015,0.046,0.031,0.046,0.077,0.123,0.446,0.215]])
# 0.65 < Kt <= 0.70
MTM_lib[8] = np.matrix([[0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,1.000,0.000],
[0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,1.000,0.000],
[0.000,0.000,0.000,0.000,0.000,0.000,0.250,0.250,0.500,0.000],
[0.000,0.000,0.000,0.000,0.250,0.000,0.000,0.375,0.250,0.125],
[0.000,0.000,0.000,0.083,0.000,0.167,0.167,0.250,0.333,0.000],
[0.000,0.000,0.042,0.042,0.042,0.083,0.083,0.292,0.292,0.125],
[0.000,0.000,0.032,0.000,0.000,0.032,0.129,0.387,0.355,0.065],
[0.000,0.000,0.000,0.038,0.038,0.075,0.047,0.340,0.415,0.047],
[0.004,0.004,0.007,0.007,0.011,0.030,0.052,0.141,0.654,0.089],
[0.000,0.000,0.000,0.000,0.061,0.061,0.030,0.030,0.349,0.470]])
# Kt > 0.70
MTM_lib[9] = np.matrix([[0.000,0.000,0.000,0.000,0.000,0.000,0.000,0.000,1.000,0.000],
[0.100,0.100,0.100,0.100,0.100,0.100,0.100,0.100,0.100,0.100],
[0.000,0.000,0.000,0.250,0.000,0.000,0.000,0.500,0.250,0.000],
[0.000,0.000,0.143,0.143,0.000,0.143,0.143,0.429,0.000,0.000],
[0.000,0.000,0.000,0.200,0.000,0.000,0.200,0.400,0.200,0.000],
[0.000,0.000,0.000,0.000,0.000,0.000,0.222,0.444,0.333,0.000],
[0.000,0.000,0.000,0.000,0.080,0.080,0.080,0.480,0.240,0.040],
[0.000,0.000,0.027,0.009,0.027,0.018,0.135,0.523,0.252,0.009],
[0.000,0.000,0.000,0.022,0.000,0.043,0.043,0.326,0.511,0.054],
[0.000,0.000,0.000,0.143,0.000,0.000,0.000,0.143,0.714,0.000]])
# Determine the appropriate MTM based on the mean monthly Kt
MTM_index = np.digitize([Ktm], MTM_states)[0]
MTM = MTM_lib[MTM_index]
# Calculate states and step sizes
min_state = MTM_min[MTM_index]
max_state = MTM_max[MTM_index]
step_size = (max_state - min_state)/10
states = np.arange(min_state, max_state, step_size)
# Generate daily clearness indices for nd days
Kti = Kt0
Kt = [Kti]
for i in range(nd-1):
MTM_row = np.digitize([Kti], states)[0]
MTM_cum = np.ravel(np.cumsum(MTM[MTM_row - 1,:]))
R = np.random.rand()
new_state = np.digitize([R], MTM_cum)[0] + 1
if new_state > 10:
new_state = 10
# Calculate interpolation factor
if new_state == 1:
k_interp = R / MTM_cum[new_state-1]
else:
k_interp = (R - MTM_cum[new_state-2]) / (MTM_cum[new_state-1]-MTM_cum[new_state-2])
Kti = states[new_state-1] + k_interp * step_size
Kt.append(Kti)
return Kt
def Aguiar_hourly_kt(Kt, n, lat, max_iter):
"""
Generates a sequence of synthetic hourly clearness indices (kt) using the mean daily clearness
index (Kt) as the input. The algorithm is based on the method by Aguiar et al in the paper:
<NAME> and <NAME>ereira, "TAG: A time-dependent, autoregressive Gaussian model
for generating synthetic hourly radiation", Solar Energy, vol. 49, 167-174, 1992
Inputs: Kt is the mean clearness index for the day
n is the day of the year (n=1 is midnight on January 1)
lat is the latitude of the location (degrees)
max_iter is the maximum number of iterations for each new kt
"""
# Solar declination in radians
delta = declination(n)
# Sunrise angle in radians
omega = sunrise(delta, np.radians(lat))
# Autocorrelation coefficient
phi = 0.38 + 0.06 * np.cos(7.4*Kt - 2.5)
# Calculate algorithm constants
lmbda = -0.19 + 1.12 * Kt + 0.24 * np.exp(-8 * Kt)
eta = 0.32 - 1.6 * (Kt - 0.5) ** 2
kappa = 0.19 + 2.27 * Kt ** 2 - 2.51 * Kt ** 3
A = 0.14 * np.exp (-20 * (Kt - 0.35) ** 2)
B = 3 * (Kt - 0.45) ** 2 + 16 * Kt ** 5
# Generate kt for each solar hour
kt = []
y = []
for h in range(1,25):
angle_start = (h - 13) * np.pi / 12 # Start of hour
angle_end = (h - 12) * np.pi / 12 # End of hour
if (angle_start > -omega) and (angle_end < omega):
# Clear sky clearness index
kcs = 0.88 * np.cos(np.pi * (h - 12.5) / 30)
# Angle at centre of hour
h_ang = (h - 12.5) * np.pi / 12
# Solar elevation/ altitude angle
hs = np.arcsin (np.cos(h_ang) * np.cos(delta) * np.cos(np.radians(lat)) + np.sin(delta) * np.sin(np.radians(lat)))
# Average clearness index
ktm = lmbda + eta * np.exp(-kappa / np.sin(hs))
# Standard deviation
sigma = A * np.exp (B * (1 - np.sin(hs)))
# Generate new kt only if greater than 0 and less than clear sky kt
kti = -1
iter = 0
while (kti < 0) or (kti > kcs):
z = np.random.rand()
r = sigma * (z ** 0.135 - (1 - z) ** 0.135) / 0.1975
yi = phi * y[h-2] + r
kti = ktm + sigma * yi
# Iteration control
iter = iter + 1
if iter > max_iter:
if kti < 0:
kti = 0
if kti > kcs:
kti = kcs
kt.append(kti)
y.append(yi)
else:
# For non-sunlight hours, set kt to zero
kt.append(0)
y.append(0)
return kt
def trend_sequence(lat):
"""
Generates an annual sequence of clear sky (extraterrestrial) hourly irradiance values
(on a horizontal plane) based on the annual trend for a given latitude. Estimation of the
daily irradiation and hourly irradiance on a horizontal plane is based on the method outlined
in Chapter 20 of:
<NAME>, <NAME>, “Handbook of Photovoltaic Science and Engineering”, Wiley, 2003
Inputs: lat is the latitude of the location (in decimals)
"""
n = np.arange(1,366)
lat_rad = np.radians(lat)
epsilon = eccentricity(n)
delta = declination(n)
omega = sunrise(delta, lat_rad)
# Daily extraterrestrial irradiation (on a horizontal plane) Wh/m2/day
# (Refer to Section 20.4 of Luque and Hegedus)
B0d = 24 / np.pi * 1367 * epsilon * (omega * np.sin(delta) * np.sin(lat_rad) - np.cos(delta) * np.cos(lat_rad) * np.sin(-omega))
# Set up hour angles
h = np.arange(1,25)
h_start = (h - 13) * np.pi / 12 # Start of hour
h_end = (h - 12) * np.pi / 12 # End of hour
h_ang = (h - 12.5) * np.pi / 12 # Centre of hour
# Hourly clear sky irradiance (on a horizontal plane) W/m2
G0c = []
for d in range(1,366):
omega_s = -omega[d-1] # Sunrise angle for the day
a = 0.409 - 0.5016 * np.sin(omega_s + 60 * np.pi/180)
b = 0.6609 + 0.4767 * np.sin(omega_s + 60 * np.pi/180)
# Hourly irradiance on a horizontal plane
# (Refer to Section 20.5.2 of Luque and Hegedus)
G0ci = np.pi / 24 * (np.cos(h_ang) - np.cos(omega_s)) / (omega_s * np.cos(omega_s) - np.sin(omega_s)) * (a + b * np.cos(h_ang)) * B0d[d-1]
h_sunrise = np.digitize([omega_s], h_start)[0]
h_sunset = np.digitize([-omega_s], h_end)[0]
G0ci[0:h_sunrise] = 0
G0ci[h_sunset:24] = 0
G0c.extend(G0ci)
return G0c
def Aguiar_hourly_G0(Ktm, lat):
"""
Generates an annual sequence of synthetic hourly irradiance values G0 (on a horizontal plane)
based on monthly mean clearness indices. The methods proposed by Aguiar et al for the generation
of synthetic daily and hourly irradiance values is used to create the sequence.
Inputs: Ktm is an array of monthly mean clearness indices
lat is the latitude of the location (in decimal degrees)
"""
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# Generate daily clearness indices for each day in the year
Kt = []
Kt0 = Ktm[11]
for i in range(12):
Kti = Aguiar_daily_Kt(Ktm[i], Kt0, days[i])
Kt.extend(Kti)
Kt0 = Ktm[i]
# Generate hourly clearness indices for each hour in the year
kt = []
for d in range(365):
kti = Aguiar_hourly_kt(Kt[d], d+1, lat, 10)
kt.extend(kti)
# Generate trend irradiances for each hour in the year
G0c = trend_sequence(lat)
# Calculate synthetic irradiance for each hour of the year
G0 = G0c * np.array(kt)
return G0, kt
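# Hedged usage sketch (not part of the original module): build a synthetic annual hourly
# irradiance sequence. The monthly clearness indices and latitude below are illustrative
# assumptions, not measured values.
def _example_annual_G0():
    Ktm = [0.48, 0.50, 0.52, 0.55, 0.57, 0.60,
           0.62, 0.60, 0.56, 0.52, 0.49, 0.47]   # assumed mean monthly clearness indices
    lat = -35.0                                  # assumed latitude in decimal degrees
    G0, kt = Aguiar_hourly_G0(Ktm, lat)          # 8760 hourly values (W/m2) and clearness indices
    return G0, kt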
def incident_HDKR(G0, Kt, lat, tilt, azimuth, albedo):
"""
Generates an annual sequence of hourly irradiances incident on a tilted surface (W/m2)
Calculations based on Hay, Davies, Klucher and Reindl (HDKR) model.
Inputs: G0 is an array of hourly global horizontal irradiances
Kt is an array of hourly clearness indices
lat is the latitude of the location (in decimal degrees)
tilt is the tilt angle of the surface (in degrees)
azimuth is the azimuthal angle of the surface (in degrees)
albedo is the ground reflectance (in per unit - 0.0 = 0%, 1.0 = 100%)
"""
phi = np.radians(lat)
beta = np.radians(tilt)
gamma = np.radians(azimuth)
# Generate trend (extraterrestrial) irradiances for each hour in the year
G0c = trend_sequence(lat)
# Set up hour angles for each hour of the year
h = np.array(list(np.arange(1,25)) * 365)
omega = (h - 12.5) * np.pi / 12 # Hour angle at centre of each hour (0 is solar noon)
# Calculate declination for every hour of the year
dec_d = []
for d in range(1,366):
dec_d.append(declination(d))
delta = np.repeat(np.array(dec_d), 24)
# Calculate angle of incidence on tilted surface for every hour of the year
    cos_theta = np.sin(delta) * np.sin(phi) * np.cos(beta) - np.sin(delta) * np.cos(phi) * np.sin(beta) * np.cos(gamma) \
                + np.cos(delta) * np.cos(phi) * np.cos(beta) * np.cos(omega) \
                + np.cos(delta) * np.sin(phi) * np.sin(beta) * np.cos(gamma) * np.cos(omega) \
                + np.cos(delta) * np.sin(beta) * np.sin(gamma) * np.sin(omega)
from polymuse import rnn, dutils, dataset, dataset2 as d2, enc_deco
from polymuse.losses import rmsecat
import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy  # needed by the local rmsecat below
import numpy, random
"""
rnn_player -- capable of playing/generating the music output as an octave/time encoded representation
It also includes two of the most important functions:
* shift:
* add_flatroll:
"""
def rmsecat(depth):
def rmsecat_(y_true, y_pred):
a = []
h_ = None
for i in range(depth * 2):
h__ = categorical_crossentropy(y_true[:, i : i + 16], y_pred[ :, i : i + 16])
if h_ is None: h_ = tf.square(h__)
else: h_ += tf.square(h__)
a = (tf.sqrt(h_) / (2 * depth))
return a
return rmsecat_
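# Hedged usage sketch (not part of the original module): evaluate the local rmsecat loss
# on dummy tensors that are wide enough for its sliced 16-way windows. Shapes and values
# are illustrative only.
def _example_rmsecat():
    depth = 1
    y_true = tf.concat([tf.one_hot([3, 7], depth=16)] * 2, axis=1)    # shape (2, 32)
    y_pred = tf.nn.softmax(tf.random.normal([2, 32]), axis=-1)        # dummy predictions
    loss_fn = rmsecat(depth)                                          # closure usable as a Keras loss
    return loss_fn(y_true, y_pred)                                    # per-sample loss, shape (2,)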
def rsingle_note_stateful_play(model_note, ini_ip, y_expected_note = None, bs = 32, ip_memory = None, predict_instances = 250):
"""stateful player
Arguments:
model_note {keras.Sequential} -- [description]
ini_ip {numpy.ndarray} -- input initiater, shape (note_instances, ip_memory, depth, tuple(enc)), enc = (2, 16)
Keyword Arguments:
y_expected_note {numpy.ndarray} -- [description] (default: {None})
ip_memory {int} -- [description] (default: {None})
predict_instances {int} -- [description] (default: {250})
Returns:
[numpy.ndarray] -- [description]
"""
model_note = rnn.load(model_note) if type(model_note) == str else model_note
ip_memory = ip_memory if ip_memory else ini_ip.shape[1]
depth = ini_ip.shape[2]
enc = ini_ip.shape[3:]
r1 = random.randint(0, ini_ip.shape[0])
    inp = numpy.zeros((bs, ip_memory, depth) + enc)
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 14:14:27 2018
@author: allis
"""
import numpy as np
from sklearn import linear_model
import random
def readFeatureMatrix(filename):
in_file = open(filename,'r')
feature_matrix = []
for line in in_file:
if "," not in line:
continue
entries = line.split(",")
feature_matrix.append([])
for i in range(0,len(entries)-1):
feature_matrix[len(feature_matrix)-1].append(int(entries[i]))
in_file.close()
    return np.array(feature_matrix)
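# Hedged usage sketch (not from the original script): read a feature matrix from a
# comma-separated file (readFeatureMatrix skips the last entry of each line, presumably a
# label column) and fit a simple linear model. The file name and labels are assumptions.
def exampleLinearFit(filename, labels):
    X = readFeatureMatrix(filename)
    y = np.asarray(labels)
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model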
# ---------------------------------------------------------------------------
# Unified Panoptic Segmentation Network
#
# Copyright (c) 2018-2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by <NAME>
# ---------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
import torch
import torch.utils.data
import pickle, gzip
import numpy as np
import cv2
import json
from pycocotools.cocoeval import COCOeval
from collections import defaultdict, Sequence
from upsnet.config.config import config
from upsnet.dataset.json_dataset import JsonDataset, extend_with_flipped_entries, filter_for_training, add_bbox_regression_targets
from upsnet.dataset.base_dataset import BaseDataset
from upsnet.rpn.assign_anchor import add_rpn_blobs
from PIL import Image, ImageDraw
from lib.utils.logging import logger
import pycocotools.mask as mask_util
class coco(BaseDataset):
def __init__(self, image_sets, flip=False, proposal_files=None, phase='train', result_path=''):
super(coco, self).__init__()
image_dirs = {
'train2014': os.path.join(config.dataset.dataset_path, 'coco_train2014'),
'val2014': os.path.join(config.dataset.dataset_path, 'coco_val2014'),
'minival2014': os.path.join(config.dataset.dataset_path, 'coco_val2014'),
'valminusminival2014': os.path.join(config.dataset.dataset_path, 'coco_val2014'),
'test2015': os.path.join(config.dataset.dataset_path, 'coco_test2015'),
'test-dev2015': os.path.join(config.dataset.dataset_path, 'coco_test2015'),
'train2017': os.path.join(config.dataset.dataset_path, 'images', 'train2017'),
'val2017': os.path.join(config.dataset.dataset_path, 'images', 'val2017'),
'test-dev2017': os.path.join(config.dataset.dataset_path, 'images', 'test2017'),
}
anno_files = {
'train2014': 'instances_train2014.json',
'val2014': 'instances_val2014.json',
'minival2014': 'instances_minival2014.json',
'valminusminival2014': 'instances_valminusminival2014.json',
'test2015': 'image_info_test2015.json',
'test-dev2015': 'image_info_test-dev2015.json',
'train2017': 'instances_train2017.json',
'val2017': 'instances_val2017.json',
'test-dev2017': 'image_info_test-dev2017.json',
}
if image_sets[0] == 'test-dev2017':
self.panoptic_json_file = os.path.join(config.dataset.dataset_path, 'annotations', 'image_info_test-dev2017.json')
else:
self.panoptic_json_file = os.path.join(config.dataset.dataset_path, 'annotations', 'panoptic_val2017_stff.json')
self.panoptic_gt_folder = os.path.join(config.dataset.dataset_path, 'annotations', 'panoptic_val2017')
if proposal_files is None:
proposal_files = [None] * len(image_sets)
if phase == 'train' and len(image_sets) > 1:
# combine multiple datasets
roidbs = []
for image_set, proposal_file in zip(image_sets, proposal_files):
dataset = JsonDataset('coco_' + image_set,
image_dir=image_dirs[image_set],
anno_file=os.path.join(config.dataset.dataset_path, 'annotations', anno_files[image_set]))
roidb = dataset.get_roidb(gt=True, proposal_file=proposal_file, crowd_filter_thresh=config.train.crowd_filter_thresh)
if flip:
if logger:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, dataset)
roidbs.append(roidb)
roidb = roidbs[0]
for r in roidbs[1:]:
roidb.extend(r)
roidb = filter_for_training(roidb)
add_bbox_regression_targets(roidb)
else:
assert len(image_sets) == 1
self.dataset = JsonDataset('coco_' + image_sets[0],
image_dir=image_dirs[image_sets[0]],
anno_file=os.path.join(config.dataset.dataset_path, 'annotations',
anno_files[image_sets[0]]))
roidb = self.dataset.get_roidb(gt=True, proposal_file=proposal_files[0],
crowd_filter_thresh=config.train.crowd_filter_thresh if phase != 'test' else 0)
if flip:
if logger:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, self.dataset)
if phase != 'test':
roidb = filter_for_training(roidb)
add_bbox_regression_targets(roidb)
self.roidb = roidb
self.phase = phase
self.flip = flip
self.result_path = result_path
self.num_classes = 81
def __len__(self):
return len(self.roidb)
def __getitem__(self, index):
blob = defaultdict(list)
im_blob, im_scales = self.get_image_blob([self.roidb[index]])
if config.network.has_rpn:
if self.phase != 'test':
add_rpn_blobs(blob, im_scales, [self.roidb[index]])
data = {'data': im_blob,
'im_info': blob['im_info']}
label = {'roidb': blob['roidb'][0]}
for stride in config.network.rpn_feat_stride:
label.update({
'rpn_labels_fpn{}'.format(stride): blob['rpn_labels_int32_wide_fpn{}'.format(stride)].astype(
np.int64),
'rpn_bbox_targets_fpn{}'.format(stride): blob['rpn_bbox_targets_wide_fpn{}'.format(stride)],
'rpn_bbox_inside_weights_fpn{}'.format(stride): blob[
'rpn_bbox_inside_weights_wide_fpn{}'.format(stride)],
'rpn_bbox_outside_weights_fpn{}'.format(stride): blob[
'rpn_bbox_outside_weights_wide_fpn{}'.format(stride)]
})
else:
data = {'data': im_blob,
'im_info': np.array([[im_blob.shape[-2],
im_blob.shape[-1],
im_scales[0]]], np.float32)}
label = None
else:
raise NotImplementedError
if config.network.has_fcn_head:
if self.phase != 'test':
seg_gt = np.array(Image.open(self.roidb[index]['image'].replace('images', 'annotations').replace('train2017', 'panoptic_train2017_semantic_trainid_stff').replace('val2017', 'panoptic_val2017_semantic_trainid_stff').replace('jpg', 'png')))
if self.roidb[index]['flipped']:
                    seg_gt = np.fliplr(seg_gt)
#!/usr/bin/env python
__author__ = '<NAME>'
import os
import argparse
import numpy as np
from RouToolPa.GeneralRoutines import FileRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--sample_directory", action="store", dest="samples_dir", required=True,
type=lambda s: FileRoutines.check_path(os.path.abspath(s)),
help="Directory with samples")
parser.add_argument("-s", "--samples", action="store", dest="samples",
help="Comma-separated list of subdirectories(one per sample) to handle. "
"If not set all subdirectories will be considered as containing samples."
"In sample directory should one(in case SE reads) or two(in case PE reads) files."
"Filenames should should contain '_1.fq' or '_1.fastq' for forward(left) reads, "
" '_2.fq' or '_2.fastq' for reverse(right) reads and '.fq' or '.fastq' for SE reads")
parser.add_argument("-o", "--output_dir", action="store", dest="output_dir",
type=lambda s: FileRoutines.check_path(os.path.abspath(s)),
default="./", help="Directory to write output. Default: current directory")
"""
#parser.add_argument("-t", "--threads", action="store", dest="threads", default=1, type=int,
# help="Number of threads to use in Trimmomatic. Default - 1.")
parser.add_argument("-q", "--average_quality_threshold", action="store", dest="average_quality_threshold", default=15,
type=int,
help="Quality threshold for sliding window. Works only if -q/--average_quality_threshold is set"
"Default - 15.")
parser.add_argument("-u", "--score_type", action="store", dest="score_type", default="phred64",
help="Phred quality score type. Allowed: phred33, phred64. Default: phred64")
parser.add_argument("-n", "--name_type", action="store", dest="name_type", default="short",
help="Type of read name. Required to gather per tile filtering statistics. Default: short")
"""
args = parser.parse_args()
samples = args.samples.split(",") if args.samples else sorted(os.listdir(args.samples_dir))
FileRoutines.safe_mkdir(args.output_dir)
overall_stat_file = "%s/overall_samples.stat" % args.output_dir
overall_stat_fd = open(overall_stat_file, "w")
overall_stat_fd.write("#Sample_id\tTotal_pairs\tRetained_pairs\tRetained_pairs_percent\tMin_pairs_retained_in_tiles\n")
for sample in samples:
print("Handling %s" % sample)
sample_dir = "%s%s/" % (args.samples_dir, sample)
sample_out_dir = "%s%s/" % (args.output_dir, sample)
FileRoutines.safe_mkdir(sample_out_dir)
files_from_sample_dir = sorted(os.listdir(sample_dir))
stat_files_from_sample_dir = []
prefix_list = []
for filename in files_from_sample_dir:
if ".stat" == filename[-5:]:
stat_files_from_sample_dir.append("%s%s" % (sample_dir, filename))
prefix_list.append("%s%s" % (sample_out_dir, filename[:-5]))
number_of_stat_files = len(stat_files_from_sample_dir)
percent_total_sample_stat_file = "%s/%s.sample.percent.stats" % (sample_out_dir, sample)
sample_stat_fd = open(percent_total_sample_stat_file, "w")
sample_stat_fd.write("Read_group\t"
"Paires_retained\tForward_only_retained\tReverse_only_retained\tPairs_discarded\t"
"Tile_min_pairs_retained\tTile_min_forward_only_retained\t"
"Tile_min_reverse_only_retained\tTile_min_pairs_discarded\t"
"Tile_max_pairs_retained\tTile_max_forward_only_retained\t"
"Tile_max_reverse_only_retained\tTile_max_pairs_discarded\t"
"Tile_mean_pairs_retained\tTile_mean_forward_only_retained\t"
"Tile_mean_reverse_only_retained\tTile_mean_pairs_discarded\t"
"Tile_median_pairs_retained\tTile_median_forward_only_retained\t"
"Tile_median_reverse_only_retained\tTile_median_pairs_discarded\n")
total_reads_sample_stats = []
min_percent_retained_pairs_in_tile_list = []
for stat_file_index in range(0, number_of_stat_files):
percent_total_stat_file = "%s.total.percent.stats" % prefix_list[stat_file_index]
percent_tile_stat_file = "%s.tile.percent.stats" % prefix_list[stat_file_index]
tile_description_list = []
total_reads_list = []
tile_stats_list = []
with open(stat_files_from_sample_dir[stat_file_index], "r") as stat_fd:
try:
line = stat_fd.readline()
while line[:13] != "instrument_id":
if line[:15] == "Paires retained" or line[:14] == "Pairs retained":
pairs_retained = float(line.strip().split("\t")[-1])
elif line[:21] == "Forward only retained":
forward_only_retained = float(line.strip().split("\t")[-1])
elif line[:21] == "Reverse only retained":
reverse_only_retained = float(line.strip().split("\t")[-1])
elif line[:15] == "Pairs discarded":
pairs_discarded = float(line.strip().split("\t")[-1])
line = stat_fd.readline()
line = stat_fd.readline()
total_stats_list = np.array([pairs_retained, forward_only_retained, reverse_only_retained, pairs_discarded])
for line in stat_fd:
line_list = line.strip().split("\t")
                    tile_stats = list(map(float, line_list[-4:]))  # materialize: reused by sum() and appended below
# skip absent tile
if sum(tile_stats) == 0:
print("\tTile %s is absent in input data for %s" % (line_list[4], prefix_list[stat_file_index]))
continue
tile_description_list.append(line_list[:-4])
tile_stats_list.append(tile_stats)
total_reads_list.append(sum(tile_stats))
except StopIteration:
print("\tEmpty .stat file for %s" % prefix_list[stat_file_index])
continue
total_reads_sample_stats.append(total_stats_list)
total_reads_list = np.array(total_reads_list)
# tile_stats
tile_stats_list = np.array(tile_stats_list)
percent_stats_list = tile_stats_list / total_reads_list[:, None]
#print(percent_stats_list)
# total_stats
total_percent_stats = total_stats_list / sum(total_stats_list)
samples_mean_percent_stats = np.mean(percent_stats_list, axis=0)
samples_median_percent_stats = np.median(percent_stats_list, axis=0)
samples_max_percent_stats = np.max(percent_stats_list, axis=0)
        samples_min_percent_stats = np.min(percent_stats_list, axis=0)
#data.py
#load and save data for heliocats
#https://github.com/cmoestl/heliocats
import numpy as np
import pandas as pd
import scipy
import copy
import matplotlib.dates as mdates
import datetime
import urllib
import json
import os
import pdb
from sunpy.time import parse_time
import scipy.io
import scipy.signal
import pickle
import time
import sys
import cdflib
import matplotlib.pyplot as plt
import heliosat
from numba import njit
from astropy.time import Time
import heliopy.data.cassini as cassinidata
import heliopy.data.helios as heliosdata
import heliopy.data.spice as spicedata
import heliopy.spice as spice
import astropy
import requests
import math
import h5py
from config import data_path
#data_path='/nas/helio/data/insitu_python/'
heliosat_data_path='/nas/helio/data/heliosat/data/'
data_path_sun='/nas/helio/data/SDO_realtime/'
'''
MIT LICENSE
Copyright 2020, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
####################################### get new data ####################################
def remove_wind_spikes_gaps(data):
#nan intervals
nt1=parse_time('2020-04-20 17:06').datetime
nt2=parse_time('2020-04-20 17:14').datetime
gapind1=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-04-21 01:20').datetime
nt2=parse_time('2020-04-21 01:22').datetime
gapind2=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-11-09T16:04Z').datetime
nt2=parse_time('2020-11-09T17:08Z').datetime
gapind3=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-08-31T16:58Z').datetime
nt2=parse_time('2020-08-31T18:32Z').datetime
gapind4=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2021-02-01T12:32Z').datetime
nt2=parse_time('2021-02-01T14:04Z').datetime
gapind5=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
data.bt[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.bx[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.by[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.bz[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
return data
def save_stereoa_science_data_merge_rtn(data_path,file):
print('STEREO-A science data merging')
filesta="stereoa_2007_2019_rtn.p"
[sta0,hsta0]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_april_rtn.p"
[sta1,hsta1]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_may_july_rtn.p"
[sta2,hsta2]=pickle.load(open(data_path+filesta, "rb" ) )
#beacon data
#filesta='stereoa_2019_now_sceq_beacon.p'
#[sta3,hsta3]=pickle.load(open(data_path+filesta2, "rb" ) )
#sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta0.time)+np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta0.time,sta1.time,sta2.time))
sta.bx=np.hstack((sta0.bx,sta1.bx,sta2.bx))
sta.by=np.hstack((sta0.by,sta1.by,sta2.by))
sta.bz=np.hstack((sta0.bz,sta1.bz,sta2.bz))
sta.bt=np.hstack((sta0.bt,sta1.bt,sta2.bt))
sta.vt=np.hstack((sta0.vt,sta1.vt,sta2.vt))
sta.np=np.hstack((sta0.np,sta1.np,sta2.np))
sta.tp=np.hstack((sta0.tp,sta1.tp,sta2.tp))
sta.x=np.hstack((sta0.x,sta1.x,sta2.x))
sta.y=np.hstack((sta0.y,sta1.y,sta2.y))
sta.z=np.hstack((sta0.z,sta1.z,sta2.z))
sta.r=np.hstack((sta0.r,sta1.r,sta2.r))
sta.lon=np.hstack((sta0.lon,sta1.lon,sta2.lon))
sta.lat=np.hstack((sta0.lat,sta1.lat,sta2.lat))
pickle.dump(sta, open(data_path+file, "wb"))
print('STEREO-A merging done')
return 0
def save_stereoa_science_data_merge_sceq(data_path,file):
print('STEREO-A science data merging')
filesta="stereoa_2007_2019_sceq.p"
[sta0,hsta0]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_april_sceq.p"
[sta1,hsta1]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_may_july_sceq.p"
[sta2,hsta2]=pickle.load(open(data_path+filesta, "rb" ) )
#beacon data
#filesta='stereoa_2019_now_sceq_beacon.p'
#[sta3,hsta3]=pickle.load(open(data_path+filesta2, "rb" ) )
#sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta0.time)+np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta0.time,sta1.time,sta2.time))
sta.bx=np.hstack((sta0.bx,sta1.bx,sta2.bx))
sta.by=np.hstack((sta0.by,sta1.by,sta2.by))
sta.bz=np.hstack((sta0.bz,sta1.bz,sta2.bz))
sta.bt=np.hstack((sta0.bt,sta1.bt,sta2.bt))
sta.vt=np.hstack((sta0.vt,sta1.vt,sta2.vt))
sta.np=np.hstack((sta0.np,sta1.np,sta2.np))
sta.tp=np.hstack((sta0.tp,sta1.tp,sta2.tp))
sta.x=np.hstack((sta0.x,sta1.x,sta2.x))
sta.y=np.hstack((sta0.y,sta1.y,sta2.y))
sta.z=np.hstack((sta0.z,sta1.z,sta2.z))
sta.r=np.hstack((sta0.r,sta1.r,sta2.r))
sta.lon=np.hstack((sta0.lon,sta1.lon,sta2.lon))
sta.lat=np.hstack((sta0.lat,sta1.lat,sta2.lat))
pickle.dump(sta, open(data_path+file, "wb"))
print('STEREO-A merging done')
def save_stereoa_science_data(path,file,t_start, t_end,sceq):
#impact https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/
#download with heliosat
#-------------------
#print('start STA')
#sta_sat = heliosat.STA()
#create an array with 1 minute resolution between t start and end
#time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
#time_mat=mdates.date2num(time)
#tm, mag = sta_sat.get_data_raw(t_start, t_end, "sta_impact_l1")
#print('download complete')
#---------------------------
#2020 PLASTIC download manually
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/
sta_impact_path='/nas/helio/data/heliosat/data/sta_impact_l1/'
sta_plastic_path='/nas/helio/data/heliosat/data/sta_plastic_l2_ascii/'
t_start1=copy.deepcopy(t_start)
time_1=[]
#make 1 min datetimes
while t_start1 < t_end:
time_1.append(t_start1)
t_start1 += datetime.timedelta(minutes=1)
#make array for 1 min data
sta=np.zeros(len(time_1),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=time_1
#make data file names
t_start1=copy.deepcopy(t_start)
days_sta = []
days_str = []
i=0
while t_start < t_end:
days_sta.append(t_start)
days_str.append(str(days_sta[i])[0:4]+str(days_sta[i])[5:7]+str(days_sta[i])[8:10])
i=i+1
t_start +=datetime.timedelta(days=1)
#go through all files
bt=np.zeros(int(1e9))
bx=np.zeros(int(1e9))
by=np.zeros(int(1e9))
bz=np.zeros(int(1e9))
t2=[]
i=0
for days_date in days_str:
cdf_file = 'STA_L1_MAG_RTN_{}_V06.cdf'.format(days_date)
if os.path.exists(sta_impact_path+cdf_file):
print(cdf_file)
f1 = cdflib.CDF(sta_impact_path+cdf_file)
t1=parse_time(f1.varget('Epoch'),format='cdf_epoch').datetime
t2.extend(t1)
bfield=f1.varget('BFIELD')
bt[i:i+len(bfield[:,3])]=bfield[:,3]
bx[i:i+len(bfield[:,0])]=bfield[:,0]
by[i:i+len(bfield[:,1])]=bfield[:,1]
bz[i:i+len(bfield[:,2])]=bfield[:,2]
i=i+len(bfield[:,3])
#cut array
bt=bt[0:i]
bx=bx[0:i]
by=by[0:i]
bz=bz[0:i]
tm2=mdates.date2num(t2)
time_mat=mdates.date2num(time_1)
#linear interpolation to time_mat times
sta.bx = np.interp(time_mat, tm2, bx )
sta.by = np.interp(time_mat, tm2, by )
sta.bz = np.interp(time_mat, tm2, bz )
#sta.bt = np.sqrt(sta.bx**2+sta.by**2+sta.bz**2)
#round first each original time to full minutes original data at 30sec
tround=copy.deepcopy(t2)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(t2)):
tround[k] = datetime.datetime.strptime(datetime.datetime.strftime(t2[k], format_str), format_str)
tm2_round=parse_time(tround).plot_date
#which values are not in original data compared to full time range
isin=np.isin(time_mat,tm2_round)
setnan=np.where(isin==False)
#set to to nan that is not in original data
sta.bx[setnan]=np.nan
sta.by[setnan]=np.nan
sta.bz[setnan]=np.nan
sta.bt = np.sqrt(sta.bx**2+sta.by**2+sta.bz**2)
########### get PLASTIC new prel data
#PLASTIC
#2019 monthly if needed
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2019/
#2020 manually all
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/
#STA_L2_PLA_1DMax_1min_202004_092_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202005_122_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202006_153_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202007_183_PRELIM_v01.txt
########
pvt=np.zeros(int(1e8))
pnp=np.zeros(int(1e8))
ptp=np.zeros(int(1e8))
pt2=[]
pfiles=['STA_L2_PLA_1DMax_1min_202004_092_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202005_122_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202006_153_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202007_183_PRELIM_v01.txt']
j=0
for name in pfiles:
p1=np.genfromtxt(sta_plastic_path+name,skip_header=2)
print(name)
vt1=p1[:,8]
np1=p1[:,9]
tp1=p1[:,10]
#YEAR DOY hour min sec
year1=p1[:,0]
doy1=p1[:,1]
hour1=p1[:,2]
min1=p1[:,3]
sec1=p1[:,4]
p1t=[]
#make datetime array from year and doy
for i in np.arange(len(doy1)):
            p1t.append(parse_time(str(int(year1[i]))+'-01-01 00:00').datetime + datetime.timedelta(days=doy1[i]-1) +
                       datetime.timedelta(hours=hour1[i]) + datetime.timedelta(minutes=min1[i]))
pvt[j:j+len(vt1)]=vt1
pnp[j:j+len(np1)]=np1
ptp[j:j+len(tp1)]=tp1
pt2.extend(p1t)
j=j+len(vt1)
#cut array
pvt=pvt[0:j]
pnp=pnp[0:j]
ptp=ptp[0:j]
pt2=pt2[0:j]
pt2m=mdates.date2num(pt2)
#linear interpolation to time_mat times
sta.vt = np.interp(time_mat, pt2m, pvt )
sta.np = np.interp(time_mat, pt2m, pnp )
sta.tp = np.interp(time_mat, pt2m, ptp )
#which values are not in original data compared to full time range
isin=np.isin(time_mat,pt2m)
setnan=np.where(isin==False)
#set to to nan that is not in original data
sta.vt[setnan]=np.nan
sta.np[setnan]=np.nan
sta.tp[setnan]=np.nan
#add position
print('position start')
frame='HEEQ'
kernels = spicedata.get_kernel('stereo_a')
kernels += spicedata.get_kernel('stereo_a_pred')
spice.furnish(kernels)
statra=spice.Trajectory('-234') #STEREO-A SPICE NAIF code
statra.generate_positions(sta.time,'Sun',frame)
statra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(statra.x,statra.y,statra.z)
sta.x=statra.x
sta.y=statra.y
sta.z=statra.z
sta.r=r
sta.lat=np.degrees(lat)
sta.lon=np.degrees(lon)
print('position end ')
coord='RTN'
#convert magnetic field to SCEQ
if sceq==True:
print('convert RTN to SCEQ ')
coord='SCEQ'
sta=convert_RTN_to_SCEQ(sta,'STEREO-A')
header='STEREO-A magnetic field (IMPACT instrument, science data) and plasma data (PLASTIC, preliminary science data), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/ and '+ \
'https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/ '+ \
'Timerange: '+sta.time[0].strftime("%Y-%b-%d %H:%M")+' to '+sta.time[-1].strftime("%Y-%b-%d %H:%M")+\
', with an average time resolution of '+str(np.mean(np.diff(sta.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by sta.time, sta.bx, sta.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(sta.size)+'. '+\
'Units are btxyz [nT, '+coord+', vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats '+\
'and https://github.com/heliopython/heliopy. '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, <NAME> and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
print('save pickle file')
pickle.dump([sta,header], open(path+file, "wb"))
print('done sta')
print()
return 0
def save_wsa_hux(filein):
#load wsa hux
windraw = np.loadtxt('data/wsa_hux_mars_aug2014_jan2018.txt', dtype=[('time','<U30'),('time2','<U30'),('time_mat', float),('vt', float)] )
windraw = windraw.view(np.recarray)
wind=np.zeros(len(windraw),dtype=[('time',object),('vt', float)])
wind=wind.view(np.recarray)
for i in np.arange(len(windraw)):
wind_time_str=windraw.time[i][8:12]+'-'+windraw.time[i][4:7]+'-'+windraw.time[i][1:3]+' '+windraw.time2[i][0:8]
wind.time[i]=(parse_time(wind_time_str).datetime)
wind.vt=windraw.vt
fileout='wsa_hux_mars_aug2014_jan2018.p'
pickle.dump(wind, open(data_path+fileout, "wb"))
return 0
def load_mars_wsa_hux():
file='wsa_hux_mars_aug2014_jan2018.p'
rad=pickle.load(open(data_path+file, "rb"))
return rad
def load_maven_sir_huang():
#Huang et al. 2019 APJ convert PDF to excel with https://pdftoxls.com
mavensir='sircat/sources/Huang_2019_SIR_MAVEN_table_1.xlsx'
print('load MAVEN Huang SIR catalog from ', mavensir)
ms=pd.read_excel(mavensir)
ms=ms.drop(index=[0,1,2])
ms_num=np.array(ms['No.'])
ms_start=np.array(ms['Start'])
ms_end=np.array(ms['End'])
ms_si=np.array(ms['SI'])
ms=np.zeros(len(ms_num),dtype=[('start',object),('end',object),('si',object)])
ms=ms.view(np.recarray)
#make correct years for start time
ms_num[np.where(ms_num< 7)[0]]=2014
ms_num[np.where(ms_num< 27)[0]]=2015
ms_num[np.where(ms_num< 64)[0]]=2016
ms_num[np.where(ms_num< 83)[0]]=2017
ms_num[np.where(ms_num< 127)[0]]=2018
#make correct years for end and si time
ms_num2=copy.deepcopy(ms_num)
ms_num2[3]=2015
ms_num2[62]=2017
#transform date of start time
for t in np.arange(0,len(ms_start)):
#check for nans in between time strings
if pd.isna(ms_start[t])==False:
####################### start time
#year
year=str(ms_num[t])
#month
datetimestr=ms_start[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
month=str(int(np.floor(monthfloat)))
#day
if int(month) < 10: day=datetimestr[2:4]
if int(month) > 9: day=datetimestr[3:5]
#time
timestr=datetimestr[-5:]
#construct year month day
datetimestrfin=str(ms_num[t])+'-'+month+'-'+day
#remove white spaces at the end and add time
finaldatetime=datetimestrfin.strip()+' '+timestr
#print(ms_start[t])
#print(finaldatetime)
ms.start[t]=parse_time(finaldatetime).datetime
################### end time
#year
year=str(ms_num2[t])
#month
datetimestr=ms_end[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
month=str(int(np.floor(monthfloat)))
#day
if int(month) < 10: day=datetimestr[2:4]
if int(month) > 9: day=datetimestr[3:5]
#time
timestr=datetimestr[-5:]
#construct year month day
datetimestrfin=str(ms_num2[t])+'-'+month+'-'+day
#remove white spaces at the end and add time
finaldatetime=datetimestrfin.strip()+' '+timestr
#print(ms_end[t])
#print(finaldatetime)
ms.end[t]=parse_time(finaldatetime).datetime
############# stream interface time
#year
year=str(ms_num2[t])
#month
datetimestr=ms_si[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
month=str(int(np.floor(monthfloat)))
#day
if int(month) < 10: day=datetimestr[2:4]
if int(month) > 9: day=datetimestr[3:5]
#time
timestr=datetimestr[-5:]
#construct year month day
datetimestrfin=str(ms_num2[t])+'-'+month+'-'+day
#remove white spaces at the end and add time
finaldatetime=datetimestrfin.strip()+' '+timestr
#print(ms_si[t])
#print(finaldatetime)
ms.si[t]=parse_time(finaldatetime).datetime
#print()
#get rid of zeros where the years where stated in the original data
ms2 = ms[np.argwhere(ms)]
return ms2
def save_msl_rad():
#convert file
# year, doy, sol, doseE hourly [uGy/day], doseE sol-filtered [uGy/day]
raw=np.loadtxt('data/doseE_sol_filter_2019.dat')
rad=np.zeros(len(raw),dtype=[('time',object),('sol', float),('dose_hour', float),('dose_sol', float)])
rad = rad.view(np.recarray)
rad.sol=raw[:,2]
rad.dose_hour=raw[:,3]
rad.dose_sol=raw[:,4]
#make datetime array from year and doy
for i in np.arange(len(rad)):
rad[i].time=parse_time(str(int(raw[i,0]))+'-01-01 00:00').datetime+datetime.timedelta(days=raw[i,1]-1)
print(rad[i].time)
file='msl_2012_2019_rad.p'
pickle.dump(rad, open(data_path+file, "wb"))
return 0
def load_msl_rad():
file='msl_2012_2019_rad.p'
rad=pickle.load(open(data_path+file, "rb"))
return rad
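# Hedged usage sketch (not part of the original module): quick look at the MSL/RAD
# surface dose rates returned by load_msl_rad().
def _example_plot_msl_rad():
    rad = load_msl_rad()
    fig, ax = plt.subplots(figsize=(10, 4))
    ax.plot(rad.time, rad.dose_sol, 'k-', lw=0.5)
    ax.set_xlabel('time')
    ax.set_ylabel('dose E, sol-filtered [uGy/day]')
    return fig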
def save_psp_data(path, file, sceq):
print('save PSP data')
t_start = datetime.datetime(2018, 10, 6)
t_end = datetime.datetime(2019, 4, 24) # UNTIL ERROR on Apr 25
psp1=get_psp_data(t_start,t_end)
t_start = datetime.datetime(2019, 4, 26)
#t_end = datetime.datetime(2019, 4, 30)
#t_end = datetime.datetime(2019, 10, 15)
t_end = datetime.datetime(2021, 3, 31)
psp2=get_psp_data(t_start,t_end)
#add both
psp=np.zeros(np.size(psp1.time)+np.size(psp2.time),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('vx', float),('vy', float),('vz', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
psp = psp.view(np.recarray)
psp.time=np.hstack((psp1.time,psp2.time))
psp.bx=np.hstack((psp1.bx,psp2.bx))
psp.by=np.hstack((psp1.by,psp2.by))
psp.bz=np.hstack((psp1.bz,psp2.bz))
psp.bt=np.hstack((psp1.bt,psp2.bt))
psp.vt=np.hstack((psp1.vt,psp2.vt))
psp.vx=np.hstack((psp1.vx,psp2.vx))
psp.vy=np.hstack((psp1.vy,psp2.vy))
psp.vz=np.hstack((psp1.vz,psp2.vz))
psp.np=np.hstack((psp1.np,psp2.np))
psp.tp=np.hstack((psp1.tp,psp2.tp))
psp.x=np.hstack((psp1.x,psp2.x))
psp.y=np.hstack((psp1.y,psp2.y))
psp.z=np.hstack((psp1.z,psp2.z))
psp.r=np.hstack((psp1.r,psp2.r))
psp.lon=np.hstack((psp1.lon,psp2.lon))
psp.lat=np.hstack((psp1.lat,psp2.lat))
print('Merging done')
#convert magnetic field to SCEQ
coord='RTN'
if sceq==True:
coord='SCEQ'
psp=convert_RTN_to_SCEQ(psp,'PSP')
header='PSP magnetic field (FIELDS instrument) and plasma data (SWEAP), ' + \
'obtained from https://spdf.gsfc.nasa.gov/pub/data/psp/ '+ \
'Timerange: '+psp.time[0].strftime("%Y-%b-%d %H:%M")+' to '+psp.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(psp.time)).seconds)+' seconds. '+\
'The data are put in a numpy recarray, fields can be accessed by psp.time, psp.bx, psp.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(psp.size)+'. '+\
'Units are btxyz [nT,'+coord+'], vtxyz [km/s, RTN], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([psp,header], open(path+file, "wb"))
def get_psp_data(t_start,t_end):
print('start PSP')
psp_sat = heliosat.PSP()
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
time_mat=mdates.date2num(time)
tm, mag = psp_sat.get_data_raw(t_start, t_end, "psp_fields_l2")#,return_datetimes=True)
tp, pro = psp_sat.get_data_raw(t_start, t_end, "psp_spc_l3")#,return_datetimes=True)
tm=parse_time(tm,format='unix').datetime
tp=parse_time(tp,format='unix').datetime
print('download complete')
print('start nan or interpolate')
print('field')
#round first each original time to full minutes original data at 30sec
tround=copy.deepcopy(tm)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(tm)):
tround[k] = datetime.datetime.strptime(datetime.datetime.strftime(tm[k], format_str), format_str)
tm_mat=parse_time(tround).plot_date
bx = np.interp(time_mat, tm_mat, mag[:,0] )
by = np.interp(time_mat, tm_mat, mag[:,1] )
bz = np.interp(time_mat, tm_mat, mag[:,2] )
#which values are not in original data compared to full time range
isin=np.isin(time_mat,tm_mat)
setnan=np.where(isin==False)
#set to to nan that is not in original data
bx[setnan]=np.nan
by[setnan]=np.nan
bz[setnan]=np.nan
bt = np.sqrt(bx**2+by**2+bz**2)
print('plasma')
#for plasma round first each original time to full minutes
tround=copy.deepcopy(tp)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(tp)):
tround[k] = datetime.datetime.strptime(datetime.datetime.strftime(tp[k], format_str), format_str)
tp_mat=mdates.date2num(tround)
isin=np.isin(time_mat,tp_mat)
setnan=np.where(isin==False)
den = np.interp(time_mat, tp_mat, pro[:,0])
vx = np.interp(time_mat, tp_mat, pro[:,1])
vy = np.interp(time_mat, tp_mat, pro[:,2])
vz = np.interp(time_mat, tp_mat, pro[:,3])
temp = np.interp(time_mat, tp_mat, pro[:,4])
den[setnan]=np.nan
temp[setnan]=np.nan
vx[setnan]=np.nan
vy[setnan]=np.nan
vz[setnan]=np.nan
vt=np.sqrt(vx**2+vy**2+vz**2)
print('end nan or interpolate')
print('position start')
frame='HEEQ'
spice.furnish(spicedata.get_kernel('psp_pred'))
psptra=spice.Trajectory('SPP')
psptra.generate_positions(time,'Sun',frame)
psptra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(psptra.x,psptra.y,psptra.z)
print('PSP pos')
print('position end')
#make array
psp=np.zeros(np.size(bx),dtype=[('time',object),('bt', float),('bx', float),\
('by', float),('bz', float),('vt', float),('vx', float),('vy', float),\
('vz', float),('np', float),('tp', float),('x', float),('y', float),\
('z', float),('r', float),('lat', float),('lon', float)])
#convert to recarray
psp = psp.view(np.recarray)
#fill with data
psp.time=time
psp.bx=bx
psp.by=by
psp.bz=bz
psp.bt=bt
psp.x=psptra.x
psp.y=psptra.y
psp.z=psptra.z
psp.r=r
psp.lat=np.degrees(lat)
psp.lon=np.degrees(lon)
psp.vt=vt
psp.vx=vx
psp.vy=vy
psp.vz=vz
psp.np=den
#https://en.wikipedia.org/wiki/Thermal_velocity convert from km/s to K
from astropy.constants import m_p,k_B
psp.tp=np.pi*m_p*((temp*1e3)**2)/(8*k_B)
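#note (added for clarity): this inverts the mean thermal speed relation v_mean = sqrt(8*k_B*T/(pi*m_p)),
#i.e. T = pi*m_p*v_mean^2/(8*k_B), assuming the SWEAP variable in pro[:,4] (temp) is the proton thermal speed in km/s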
#remove spikes
psp.tp[np.where(psp.tp > 1e10)]=np.nan
print('done get psp')
print()
return psp
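#example usage of get_psp_data (a sketch; the time range below is hypothetical and should be adapted):
#t_start = datetime.datetime(2019, 4, 1)
#t_end = datetime.datetime(2019, 4, 14)
#psp = get_psp_data(t_start, t_end)
#print(psp.time[0], psp.bt[0], psp.vt[0])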
def save_psp_data_non_merged(path, file):
'''
*** TO DO
save PSP data as pickle file with 3 separate arrays for orbit, magnetic field and plasma data
'''
print('start PSP')
psp_sat = heliosat.PSP()
t_start = datetime.datetime(2018, 10, 14,14,14, 30)
#t_end = datetime.datetime(2018, 12, 12,23,59,30)
t_end = datetime.datetime(2019, 4, 23,23,59,30)
#t_end = datetime.datetime(2019, 5, 31,23,59,30)
#t_end = datetime.datetime(2019, 5, 1,23,59,30)
timeb, mag = psp_sat.get_data_raw(t_start, t_end, "psp_fields_l2")
timep, pro = psp_sat.get_data_raw(t_start, t_end, "psp_spc_l3")
print('download complete')
#create an array with 1 minute resolution between t start and end for position
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
#make separate arrays for orbit plasma and mag
psp_orbit=np.zeros(np.size(time),dtype=[('time',object),('x', float),('y', float),\
('z', float),('r', float),('lat', float),('lon', float)])
#convert to recarray
psp_orbit = psp_orbit.view(np.recarray)
psp_mag=np.zeros(np.size(timeb),dtype=[('time',object),('bt', float),('bx', float),\
('by', float),('bz', float)])
psp_mag = psp_mag.view(np.recarray)
psp_plasma=np.zeros(np.size(timep),dtype=[('time',object),('vt', float),('vx', float),('vy', float),\
('vz', float),('np', float),('tp', float)])
psp_plasma = psp_plasma.view(np.recarray)
psp_orbit.time=time
psp_mag.time=parse_time(timeb,format='unix').datetime
psp_plasma.time=parse_time(timep,format='unix').datetime
print('time convert done')
print('position start')
frame='HEEQ'
spice.furnish(spicedata.get_kernel('psp_pred'))
psptra=spice.Trajectory('SPP')
psptra.generate_positions(time,'Sun',frame)
psptra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(psptra.x,psptra.y,psptra.z)
psp_orbit.x=psptra.x
psp_orbit.y=psptra.y
psp_orbit.z=psptra.z
psp_orbit.r=r
psp_orbit.lat=np.rad2deg(lat)
psp_orbit.lon=np.rad2deg(lon)
print('position end')
#fields
psp_mag.bx = mag[:,0]
psp_mag.by = mag[:,1]
psp_mag.bz = mag[:,2]
psp_mag.bt = np.sqrt(psp_mag.bx**2+psp_mag.by**2+psp_mag.bz**2)
#sweap
from astropy.constants import m_p,k_B
psp_plasma.np = pro[:,0]
#https://en.wikipedia.org/wiki/Thermal_velocity convert from km/s to K
psp_plasma.tp = np.pi*m_p*((pro[:,4]*1e3)**2)/(8*k_B)
psp_plasma.vx = pro[:,1]
psp_plasma.vy = pro[:,2]
psp_plasma.vz = pro[:,3]
psp_plasma.vt=np.sqrt(psp_plasma.vx**2+psp_plasma.vy**2+psp_plasma.vz**2)
header='PSP magnetic field (FIELDS instrument) and plasma data (SWEAP), ' + \
'obtained from https://spdf.gsfc.nasa.gov/pub/data/psp/ '+ \
'Timerange: '+psp_orbit.time[0].strftime("%Y-%b-%d %H:%M")+' to '+psp_orbit.time[-1].strftime("%Y-%b-%d %H:%M")+\
'. The data are put in 3 numpy recarrays; fields can be accessed by psp_plasma.time, psp_plasma.vt, etc. (plasma), psp_mag.time, psp_mag.bt, etc. (magnetic field), and psp_orbit.time, psp_orbit.r, psp_orbit.lon, etc. (position). '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(psp_plasma.size)+' (plasma), '+str(psp_mag.size)+' (mag). '+\
'Units are btxyz [nT, RTN], vtxyz [km/s, RTN], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.save_psp_data_non_merged (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([psp_orbit,psp_mag,psp_plasma,header], open(path+file, "wb"))
print('done psp')
print()
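#example usage of save_psp_data_non_merged (a sketch; path and filename are hypothetical, the time range is hardcoded above):
#data_path='/nas/helio/data/insitu_python/'
#filepsp_nm='psp_2018_2019_non_merged.p'
#save_psp_data_non_merged(data_path, filepsp_nm)
#[psp_orbit,psp_mag,psp_plasma,hpsp]=pickle.load(open(data_path+filepsp_nm, "rb"))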
def save_wind_data(path,file,start_date,end_date,heeq):
'''
description of data sources used in heliosat:
https://cdaweb.sci.gsfc.nasa.gov/misc/NotesW.html
'''
print('start wind update')
wind_sat = heliosat.WIND()
t_start = start_date
t_end = end_date
#create an array with 2 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=2*n) for n in range(int ((t_end - t_start).days*30*24))]
time_mat=mdates.date2num(time)
#print(parse_time(time[0:10]).iso)
tm, mag = wind_sat.get_data_raw(t_start, t_end, "wind_mfi_k0", extra_columns=["PGSE"])#, return_datetimes=True)
#tm, mag = wind_sat.get_data_raw(t_start, t_end, "wind_mfi_h0") k0
tp, pro = wind_sat.get_data_raw(t_start, t_end, "wind_swe_h1")#,return_datetimes=True)
tm=parse_time(tm,format='unix').datetime
tp=parse_time(tp,format='unix').datetime
#print(parse_time(tm[0:10]).iso)
#print(parse_time(tp[0:10]).iso)
tm=parse_time(tm).plot_date
tp=parse_time(tp).plot_date
print('download complete')
#linear interpolation to time_mat times
bx = np.interp(time_mat, tm, mag[:,0] )
by = np.interp(time_mat, tm, mag[:,1] )
bz = np.interp(time_mat, tm, mag[:,2] )
bt = np.sqrt(bx**2+by**2+bz**2)
den = np.interp(time_mat, tp, pro[:,0])
vt = np.interp(time_mat, tp, pro[:,1])
tp = np.interp(time_mat, tp, pro[:,2])
#interpolate the GSE position over full data range
x_gse = np.interp(time_mat, tm, mag[:,3])*6378.1/149597870.7*astropy.units.AU #earth radii to km to AU
y_gse = np.interp(time_mat, tm, mag[:,4])*6378.1/149597870.7*astropy.units.AU
z_gse = np.interp(time_mat, tm, mag[:,5])*6378.1/149597870.7*astropy.units.AU
print('position start')
frame='HEEQ'
planet_kernel=spicedata.get_kernel('planet_trajectories')
earth=spice.Trajectory('399') #399 for Earth
earth.generate_positions(time,'Sun',frame)
earth.change_units(astropy.units.AU) #from km to AU
#add gse position to Earth position
x=earth.x-x_gse #GSE position already converted from Earth radii to AU above
y=earth.y-y_gse
z=earth.z+z_gse
[r, lat, lon]=cart2sphere(x,y,z)
#wind_pos=heliosat.WIND().trajectory(time, frame="HEEQ")
#x=wind._pos[:,0]
#y=wind_pos[:,1]
#z=wind_pos[:,2]
#[r, lat, lon]=hd.cart2sphere(wind_pos[:,0],wind_pos[:,1],wind_pos[:,2])
#lon=np.rad2deg(lon) #convert to degree
#lat=np.rad2deg(lat)
print('position end ')
#make array
win=np.zeros(np.size(bx),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('np', float),('vt', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
win = win.view(np.recarray)
#fill with data
win.time=time
win.bx=bx
win.by=by
win.bz=bz
win.bt=bt
win.x=x
win.y=y
win.z=z
win.r=r
win.lat=np.degrees(lat)
win.lon=np.degrees(lon)
win.np=den
win.vt=vt
#https://en.wikipedia.org/wiki/Thermal_velocity convert from km/s to K
from astropy.constants import m_p,k_B
win.tp=np.pi*m_p*((tp*1e3)**2)/(8*k_B)
############ spike removal
#plasma
win.np[np.where(win.np> 500)]=1000000
#get rid of all single spikes with scipy signal find peaks
peaks, properties = scipy.signal.find_peaks(win.np, height=500,width=(1, 250))
#go through all of them and set to nan according to widths
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
win.np[peaks[i]-width-2:peaks[i]+width+2]=np.nan
win.tp[np.where(win.tp> 1e8)]=1e11
#get rid of all single spikes with scipy signal find peaks
peaks, properties = scipy.signal.find_peaks(win.tp, height=1e8,width=(1, 250))
#go through all of them and set to nan according to widths
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
win.tp[peaks[i]-width-2:peaks[i]+width+2]=np.nan
win.vt[np.where(win.vt> 3000)]=1e11
#get rid of all single spikes with scipy signal find peaks
peaks, properties = scipy.signal.find_peaks(win.vt, height=1e8,width=(1, 250))
#go through all of them and set to nan according to widths
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
win.vt[peaks[i]-width-2:peaks[i]+width+2]=np.nan
#magnetic field
peaks, properties = scipy.signal.find_peaks(win.bt, prominence=30,width=(1, 10))
#go through all of them and set to nan according to widths
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths'])[i])
#remove data
win.bt[peaks[i]-width-5:peaks[i]+width+5]=np.nan
peaks, properties = scipy.signal.find_peaks(abs(win.bx), prominence=30,width=(1, 10))
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
win.bx[peaks[i]-width-5:peaks[i]+width+5]=np.nan
peaks, properties = scipy.signal.find_peaks(abs(win.by), prominence=30,width=(1, 10))
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
win.by[peaks[i]-width-5:peaks[i]+width+5]=np.nan
peaks, properties = scipy.signal.find_peaks(abs(win.bz), prominence=30,width=(1, 10))
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
win.bz[peaks[i]-width-5:peaks[i]+width+5]=np.nan
#manual spike removal for magnetic field
if t_start < datetime.datetime(2018, 7, 19, 16, 25):
if t_end > datetime.datetime(2018, 7, 19, 16, 25):
remove_start=datetime.datetime(2018, 7, 19, 16, 25)
remove_end=datetime.datetime(2018, 7, 19, 17, 35)
remove_start_ind=np.where(remove_start<win.time)[0][0]
remove_end_ind=np.where(remove_end<win.time)[0][0]
win.bt[remove_start_ind:remove_end_ind]=np.nan
win.bx[remove_start_ind:remove_end_ind]=np.nan
win.by[remove_start_ind:remove_end_ind]=np.nan
win.bz[remove_start_ind:remove_end_ind]=np.nan
if t_start < datetime.datetime(2018, 8, 29, 19, 00):
if t_end > datetime.datetime(2018, 8, 29, 19, 00):
remove_start=datetime.datetime(2018, 8, 29, 19, 00)
remove_end=datetime.datetime(2018,8, 30, 5, 00)
remove_start_ind=np.where(remove_start<win.time)[0][0]
remove_end_ind=np.where(remove_end<win.time)[0][0]
win.bt[remove_start_ind:remove_end_ind]=np.nan
win.bx[remove_start_ind:remove_end_ind]=np.nan
win.by[remove_start_ind:remove_end_ind]=np.nan
win.bz[remove_start_ind:remove_end_ind]=np.nan
if t_start < datetime.datetime(2019, 8, 8, 22, 45):
if t_end > datetime.datetime(2019, 8, 8, 22, 45):
remove_start=datetime.datetime(2019, 8, 8, 22, 45)
remove_end=datetime.datetime(2019, 8, 9, 17, 00)
remove_start_ind=np.where(remove_start<win.time)[0][0]
remove_end_ind=np.where(remove_end<win.time)[0][0]
win.bt[remove_start_ind:remove_end_ind]=np.nan
win.bx[remove_start_ind:remove_end_ind]=np.nan
win.by[remove_start_ind:remove_end_ind]=np.nan
win.bz[remove_start_ind:remove_end_ind]=np.nan
if t_start < datetime.datetime(2019, 8, 21, 22, 45):
if t_end > datetime.datetime(2019, 8, 21, 22, 45):
remove_start=datetime.datetime(2019, 8, 20, 18, 0)
remove_end=datetime.datetime(2019, 8, 21, 12, 0)
remove_start_ind=np.where(remove_start<win.time)[0][0]
remove_end_ind=np.where(remove_end<win.time)[0][0]
win.bt[remove_start_ind:remove_end_ind]=np.nan
win.bx[remove_start_ind:remove_end_ind]=np.nan
win.by[remove_start_ind:remove_end_ind]=np.nan
win.bz[remove_start_ind:remove_end_ind]=np.nan
if t_start < datetime.datetime(2019, 8, 21, 22, 45):
if t_end > datetime.datetime(2019, 8, 21, 22, 45):
remove_start=datetime.datetime(2019, 8, 22, 1, 0)
remove_end=datetime.datetime(2019, 8, 22, 9, 0)
remove_start_ind=np.where(remove_start<win.time)[0][0]
remove_end_ind=np.where(remove_end<win.time)[0][0]
win.bt[remove_start_ind:remove_end_ind]=np.nan
win.bx[remove_start_ind:remove_end_ind]=np.nan
win.by[remove_start_ind:remove_end_ind]=np.nan
win.bz[remove_start_ind:remove_end_ind]=np.nan
coord='GSE'
#convert magnetic field components from GSE to HEEQ
if heeq==True:
win=convert_GSE_to_HEEQ(win)
coord='HEEQ'
######################
header='Wind magnetic field (MFI instrument) and plasma data (SWE), ' + \
'obtained from https://spdf.gsfc.nasa.gov/pub/data/wind/ '+ \
'Timerange: '+win.time[0].strftime("%Y-%b-%d %H:%M")+' to '+win.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(win.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by win.time, win.bx, win.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(win.size)+'. '+\
'Units are btxyz [nT, '+coord+'], vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.save_wind_data (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([win,header], open(path+file, "wb"))
print('wind update done')
print()
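#example usage of save_wind_data (a sketch; filename and dates are hypothetical):
#filewin='wind_2018_now_heeq.p'
#save_wind_data(data_path, filewin, datetime.datetime(2018,1,1), datetime.datetime.utcnow(), heeq=True)
#[win,hwin]=pickle.load(open(data_path+filewin, "rb"))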
def load_stereoa_science_1min():
varnames = ['Epoch', 'Vp', 'Vr_Over_V_RTN', 'Np', 'Tp', 'BFIELDRTN']
alldata = {k: [] for k in varnames}
if not os.path.exists(heliosat_data_path+'sta_magplasma_outside_heliosat'):
os.mkdir(heliosat_data_path+'sta_magplasma_outside_heliosat')
for year in ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014','2015','2016','2017','2018','2019']:
print('get STEREO-A yearly 1min data file for ',year)
cdf_write = heliosat_data_path+'sta_magplasma_outside_heliosat/STA_L2_MAGPLASMA_1m_{}_V01.cdf'.format(year)
if not os.path.exists(cdf_write):
cdf_url = ("https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/magplasma/STA_L2_MAGPLASMA_1m_{}_V01.cdf".format(year))
cdf_file = requests.get(cdf_url)
open(cdf_write, 'wb').write(cdf_file.content)
cdf = cdflib.CDF(cdf_write)
#cdf.cdf_info() shows all variable names and attributes
for var in varnames:
data = cdf[var][...]
data[np.where(data < cdf.varattsget(var)['VALIDMIN'][0])] = np.NaN
data[np.where(data > cdf.varattsget(var)['VALIDMAX'][0])] = np.NaN
alldata[var].append(data)
arrays = {}
for var in varnames:
arrays[var] = np.concatenate(alldata[var])
return arrays
def save_all_stereoa_science_data(path,file,sceq):
'''
saves all STEREO-Ahead science data btxyz
vt np tp x y z r lat lon 1 min resolution as pickle
sceq=True -> convert RTN to SCEQ coordinates for magnetic field components
filesta_all='stereoa_2007_2019_sceq.p'
hd.save_all_stereoa_science_data(data_path, filesta_all,sceq=True)
filesta_all='stereoa_2007_2019_rtn.p'
hd.save_all_stereoa_science_data(data_path, filesta_all,sceq=False)
[sta_t,hsta_t]=pickle.load(open(data_path+filesta_all, "rb" ) )
'''
#load all data with function
s1=load_stereoa_science_1min()
print('download complete')
#make array
sta=np.zeros(len(s1['Epoch']),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
#parse Epoch time to datetime objects
sta.time=parse_time(s1['Epoch'],format='cdf_epoch').datetime
print('time conversion complete')
sta.bx=s1['BFIELDRTN'][:,0]
sta.by=s1['BFIELDRTN'][:,1]
sta.bz=s1['BFIELDRTN'][:,2]
sta.bt=np.sqrt(sta.bx**2+sta.by**2+sta.bz**2)
sta.vt=s1['Vp']
sta.np=s1['Np']
sta.tp=s1['Tp']
print('parameters into array complete')
print('position start')
frame='HEEQ'
kernels = spicedata.get_kernel('stereo_a')
kernels += spicedata.get_kernel('stereo_a_pred')
spice.furnish(kernels)
statra=spice.Trajectory('-234') #STEREO-A SPICE NAIF code
statra.generate_positions(sta.time,'Sun',frame)
statra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(statra.x,statra.y,statra.z)
sta.x=statra.x
sta.y=statra.y
sta.z=statra.z
sta.r=r
sta.lat=np.degrees(lat)
sta.lon=np.degrees(lon)
print('position end ')
#remove spike in magnetic field in 2015
spike_ind=np.where(sta.bt >300)[0]
if len(spike_ind) > 0:
sta.bt[spike_ind[0]-77:spike_ind[-1]+5]=np.nan
sta.bx[spike_ind[0]-77:spike_ind[-1]+5]=np.nan
sta.by[spike_ind[0]-77:spike_ind[-1]+5]=np.nan
sta.bz[spike_ind[0]-77:spike_ind[-1]+5]=np.nan
coord='RTN'
#convert magnetic field to SCEQ
if sceq==True:
print('convert RTN to SCEQ ')
coord='SCEQ'
sta=convert_RTN_to_SCEQ(sta,'STEREO-A')
header='STEREO-A magnetic field (IMPACT instrument) and plasma data (PLASTIC, science), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/magplasma '+ \
'Timerange: '+sta.time[0].strftime("%Y-%b-%d %H:%M")+' to '+sta.time[-1].strftime("%Y-%b-%d %H:%M")+\
', with an average time resolution of '+str(np.mean(np.diff(sta.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by sta.time, sta.bx, sta.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(sta.size)+'. '+\
'Units are btxyz [nT, '+coord+'], vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats '+\
'and https://github.com/heliopython/heliopy. '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, <NAME> and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
print('save pickle file')
pickle.dump([sta,header], open(path+file, "wb"))
print('done sta')
print()
def load_stereob_science_1min():
varnames = ['Epoch', 'Vp', 'Vr_Over_V_RTN', 'Np', 'Tp', 'BFIELDRTN']
alldata = {k: [] for k in varnames}
if not os.path.exists(heliosat_data_path+'stb_magplasma_outside_heliosat'):
os.mkdir(heliosat_data_path+'stb_magplasma_outside_heliosat')
for year in ['2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014']:
#for year in ['2007']:
print('get STEREO-B yearly 1min data file for ',year)
cdf_write = heliosat_data_path+'stb_magplasma_outside_heliosat/STB_L2_MAGPLASMA_1m_{}_V01.cdf'.format(year)
if not os.path.exists(cdf_write):
cdf_url = ("https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/behind/magplasma/STB_L2_MAGPLASMA_1m_{}_V01.cdf".format(year))
cdf_file = requests.get(cdf_url)
open(cdf_write, 'wb').write(cdf_file.content)
cdf = cdflib.CDF(cdf_write)
#cdf.cdf_info() shows all variable names and attributes
for var in varnames:
data = cdf[var][...]
#fillval = cdf[var].attrs['FILLVAL']
#fillval=cdf.varattsget(var)['FILLVAL'][0]
data[np.where(data < cdf.varattsget(var)['VALIDMIN'][0])] = np.NaN
data[np.where(data > cdf.varattsget(var)['VALIDMAX'][0])] = np.NaN
alldata[var].append(data)
arrays = {}
for var in varnames:
arrays[var] = np.concatenate(alldata[var])
return arrays
def save_all_stereob_science_data(path,file,sceq):
'''
saves all STEREO-Behind science data btxyz
vt np tp x y z r lat lon 1 min resolution as pickle
sceq=True -> convert RTN to SCEQ coordinates for magnetic field components
use as:
filestb_all='stereob_2007_2014_sceq.p'
hd.save_all_stereob_science_data(data_path, filestb_all,sceq=True)
filestb_all='stereob_2007_2014.p'
hd.save_all_stereob_science_data(data_path, filestb_all,sceq=False)
[stb_t,hstb_t]=pickle.load(open(data_path+filestb_all, "rb" ) )
'''
#load all data with function
s1=load_stereob_science_1min()
print('download complete')
#make array
stb=np.zeros(len(s1['Epoch']),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
stb = stb.view(np.recarray)
#parse Epoch time to datetime objects
stb.time=parse_time(s1['Epoch'],format='cdf_epoch').datetime
print('time conversion complete')
stb.bx=s1['BFIELDRTN'][:,0]
stb.by=s1['BFIELDRTN'][:,1]
stb.bz=s1['BFIELDRTN'][:,2]
stb.bt=np.sqrt(stb.bx**2+stb.by**2+stb.bz**2)
stb.vt=s1['Vp']
stb.np=s1['Np']
stb.tp=s1['Tp']
print('parameters into array complete')
print('position start')
frame='HEEQ'
kernels = spicedata.get_kernel('stereo_b')
spice.furnish(kernels)
stbtra=spice.Trajectory('-235') #STEREO-B SPICE NAIF code
stbtra.generate_positions(stb.time,'Sun',frame)
stbtra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(stbtra.x,stbtra.y,stbtra.z)
stb.x=stbtra.x
stb.y=stbtra.y
stb.z=stbtra.z
stb.r=r
stb.lat=np.rad2deg(lat)
stb.lon=np.rad2deg(lon)
print('position end ')
coord='RTN'
#convert magnetic field to SCEQ
if sceq==True:
print('convert RTN to SCEQ ')
coord='SCEQ'
stb=convert_RTN_to_SCEQ(stb,'STEREO-B')
header='STEREO-B magnetic field (IMPACT instrument) and plasma data (PLASTIC, science), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/behind/magplasma '+ \
'Timerange: '+stb.time[0].strftime("%Y-%b-%d %H:%M")+' to '+stb.time[-1].strftime("%Y-%b-%d %H:%M")+\
', with an average time resolution of '+str(np.mean(np.diff(stb.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by stb.time, stb.bx, stb.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(stb.size)+'. '+\
'Units are btxyz [nT, '+coord+'], vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats '+\
'and https://github.com/heliopython/heliopy. '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, <NAME> and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
print('save pickle file')
pickle.dump([stb,header], open(path+file, "wb"))
print('done stb')
print()
def get_sdo_realtime_image():
"""Downloads latest SDO image."""
sdo_latest='https://sdo.gsfc.nasa.gov/assets/img/latest/latest_1024_0193.jpg'
#PFSS
#sdo_latest='https://sdo.gsfc.nasa.gov/assets/img/latest/latest_1024_0193pfss.jpg'
try: urllib.request.urlretrieve(sdo_latest,data_path_sun+'latest_1024_0193.jpg')
except urllib.error.URLError as e:
print('Failed downloading ', sdo_latest,' ',e)
print('saved ',data_path_sun+'latest_1024_0193.jpg')
sdo_latest='https://sdo.gsfc.nasa.gov/assets/img/latest/latest_1024_HMIB.jpg'
try: urllib.request.urlretrieve(sdo_latest,data_path_sun+'latest_1024_HMIB.jpg')
except urllib.error.URLError as e:
print('Failed downloading ', sdo_latest,' ',e)
print('saved ',data_path_sun+'latest_1024_HMIB.jpg')
'''
#convert to png
#check if ffmpeg is available locally in the folder or systemwide
if os.path.isfile('ffmpeg'):
os.system('./ffmpeg -i latest_1024_0193.jpg latest_1024_0193.png -loglevel quiet -y')
ffmpeg_avail=True
logger.info('downloaded SDO latest_1024_0193.jpg converted to png')
os.system('rm latest_1024_0193.jpg')
else:
os.system('ffmpeg -i latest_1024_0193.jpg latest_1024_0193.png -loglevel quiet -y')
os.system('rm latest_1024_0193.jpg')
'''
def save_noaa_rtsw_data(data_path,noaa_path,filenoaa):
print(' ')
print('convert NOAA real time solar wind archive to pickle file')
items=os.listdir(noaa_path)
newlist = []
for names in items:
if names.endswith(".json"):
newlist.append(names)
#print(newlist)
a=sorted(newlist) #sort alphabetically so that mag and plasma files are grouped and date-ordered
#print(a)
nr_of_files=int(np.size(a)/2)#******************
mag=a[0:nr_of_files]
pla=a[nr_of_files:-1]
#make array for 10 years
noaa=np.zeros(5000000,dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
k=0
for i in np.arange(nr_of_files)-1:
#read in data of corresponding files
#print(noaa_path+mag[i])
m1=open(noaa_path+mag[i],'r')
p1=open(noaa_path+pla[i],'r')
try:
d1=get_noaa_realtime_data(m1, p1)
#save in large array
noaa[k:k+np.size(d1)]=d1
k=k+np.size(d1)
except:
print(mag[i], ' ', pla[i], ' json not working')
#cut zeros, sort, convert to recarray, and find unique times and data
noaa_cut=noaa[0:k]
noaa_cut.sort()
nu=noaa_cut.view(np.recarray)
[dum,ind]=np.unique(nu.time,return_index=True)
nf=nu[ind]
header='Real time solar wind magnetic field and plasma data from NOAA, ' + \
'obtained daily from https://services.swpc.noaa.gov/products/solar-wind/ '+ \
'Timerange: '+nf.time[0].strftime("%Y-%b-%d %H:%M")+' to '+nf.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(nf.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by nf.time, nf.bx, nf.vt etc. '+\
'Total number of data points: '+str(nf.size)+'. '+\
'Units are btxyz [nT, RTN], vt [km s^-1], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats save_noaa_rtsw_data '+\
'By <NAME> (twitter @chrisoutofspace) and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([nf,header], open(data_path+filenoaa, "wb"))
#to read file
#import pickle
#filenoaa='noaa_rtsw_2020.p'
#data_path='/nas/helio/data/insitu_python/'
#[n,hn]=pickle.load(open(data_path+filenoaa, "rb" ) )
print('NOAA done')
def save_noaa_rtsw_data_predstorm(data_path,noaa_path,filenoaa):
print(' ')
print('convert NOAA real time solar wind from predstorm h5 file to pickle file')
hf = h5py.File(noaa_path, 'r')
#make array
noaa=np.zeros(len(np.array(hf.get('time'))),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
noaa = noaa.view(np.recarray)
noaa.time = mdates.num2date( np.array(hf.get('time')))
noaa.bt=np.array(hf.get('bt'))
noaa.bx=np.array(hf.get('bx_gsm'))
noaa.by=np.array(hf.get('by_gsm'))
noaa.bz=np.array(hf.get('bz_gsm'))
noaa.vt=np.array(hf.get('speed'))
noaa.np=np.array(hf.get('density'))
noaa.tp=np.array(hf.get('temperature'))
header='Real time solar wind magnetic field and plasma data from NOAA, ' + \
'obtained daily from https://services.swpc.noaa.gov/products/solar-wind/ '+ \
'Timerange: '+noaa.time[0].strftime("%Y-%b-%d %H:%M")+' to '+noaa.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(noaa.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by nf.time, nf.bx, nf.vt etc. '+\
'Total number of data points: '+str(noaa.size)+'. '+\
'Units are btxyz [nT, RTN], vt [km s^-1], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats save_noaa_rtsw_data_predstorm '+\
'By <NAME> (twitter @chrisoutofspace) and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([noaa,header], open(data_path+filenoaa, "wb"))
print('NOAA from predstorm done')
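#example usage of save_noaa_rtsw_data_predstorm (a sketch; paths and filename are hypothetical):
#noaa_h5='/nas/helio/data/predstorm/noaa_rtsw.h5'
#filenoaa='noaa_rtsw_predstorm.p'
#save_noaa_rtsw_data_predstorm(data_path, noaa_h5, filenoaa)
#[noaa,hnoaa]=pickle.load(open(data_path+filenoaa, "rb"))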
def get_noaa_realtime_data(magfile, plasmafile):
"""
Downloads and returns noaa real time solar wind data
data from http://services.swpc.noaa.gov/products/solar-wind/
if needed replace with ACE
http://legacy-www.swpc.noaa.gov/ftpdir/lists/ace/
get 3 or 7 day data
url_plasma='http://services.swpc.noaa.gov/products/solar-wind/plasma-3-day.json'
url_mag='http://services.swpc.noaa.gov/products/solar-wind/mag-3-day.json'
Author: <NAME>, modified for heliocats by <NAME>
Parameters
==========
None
Returns: recarray with interpolated data
=======
"""
# Read plasma data:
dp = json.loads(plasmafile.read())
dpn = [[np.nan if x is None else x for x in d] for d in dp] # replace None with NaN
dtype=[(x, 'float') for x in dp[0]]
datesp = [datetime.datetime.strptime(x[0], "%Y-%m-%d %H:%M:%S.%f") for x in dpn[1:]]
#convert datetime to matplotlib times
mdatesp=mdates.date2num(datesp)
dp_ = [tuple([d]+[float(y) for y in x[1:]]) for d, x in zip(mdatesp, dpn[1:])]
DSCOVR_P = np.array(dp_, dtype=dtype)
# Read magnetic field data:
dm = json.loads(magfile.read())
dmn = [[np.nan if x is None else x for x in d] for d in dm] # replace None with NaN
dtype=[(x, 'float') for x in dmn[0]]
datesm = [datetime.datetime.strptime(x[0], "%Y-%m-%d %H:%M:%S.%f") for x in dmn[1:]]
mdatesm=mdates.date2num(datesm)
dm_ = [tuple([d]+[float(y) for y in x[1:]]) for d, x in zip(mdatesm, dmn[1:])] #use the None-cleaned data so float() cannot fail on None
DSCOVR_M = np.array(dm_, dtype=dtype)
#first_timestep = np.max([mdatesp[-1], mdatesm[-1]])
#last_timestep = np.min([mdatesp[-1], mdatesm[-1]])
#nminutes = int((num2date(last_timestep)-num2date(first_timestep)).total_seconds()/60.)
#itime = np.asarray([date2num(num2date(first_timestep) + timedelta(minutes=i)) for i in range(nminutes)], dtype=np.float64)
#use mag for times
t_start=datesm[0]
t_end=datesm[-1]
#1 minute res
itime = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int (((t_end - t_start).days+1)*60*24))] #*******BUG everywhere with this line for last day
itimeint=mdates.date2num(itime)
rbtot_m = np.interp(itimeint, DSCOVR_M['time_tag'], DSCOVR_M['bt'])
rbxgsm_m = np.interp(itimeint, DSCOVR_M['time_tag'], DSCOVR_M['bx_gsm'])
rbygsm_m = np.interp(itimeint, DSCOVR_M['time_tag'], DSCOVR_M['by_gsm'])
rbzgsm_m = np.interp(itimeint, DSCOVR_M['time_tag'], DSCOVR_M['bz_gsm'])
rpv_m = np.interp(itimeint, DSCOVR_P['time_tag'], DSCOVR_P['speed'])
rpn_m = np.interp(itimeint, DSCOVR_P['time_tag'], DSCOVR_P['density'])
rpt_m = np.interp(itimeint, DSCOVR_P['time_tag'], DSCOVR_P['temperature'])
#make array
dscovr_data=np.zeros(np.size(rbtot_m),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
dscovr_data = dscovr_data.view(np.recarray)
dscovr_data.time=itime
dscovr_data.bt=rbtot_m
dscovr_data.bx=rbxgsm_m
dscovr_data.by=rbygsm_m
dscovr_data.bz=rbzgsm_m
dscovr_data.vt=rpv_m
dscovr_data.np=rpn_m
dscovr_data.tp=rpt_m
#print('position start')
frame='HEEQ'
planet_kernel=spicedata.get_kernel('planet_trajectories')
earth=spice.Trajectory('399') #399 for Earth, not barycenter (because of moon)
earth.generate_positions(itime,'Sun',frame)
#from km to AU
earth.change_units(astropy.units.AU)
#add gse position to Earth position
x=earth.x-1.5*1e6*astropy.units.km
y=earth.y
z=earth.z
[r, lat, lon]=cart2sphere(x,y,z)
#*****with astropy lagrange points exact value? L1 position with 0.01 AU
#[r, lat, lon]=cart2sphere(earth.x-0.01*astropy.units.AU,earth.y,earth.z)
#print('position end ')
dscovr_data.x=x
dscovr_data.y=y
dscovr_data.z=z
dscovr_data.r=r
dscovr_data.lat=np.rad2deg(lat)
dscovr_data.lon=np.rad2deg(lon)
print('NOAA data read completed for file with end time: ',itime[-1])
return dscovr_data
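#example usage of get_noaa_realtime_data (a sketch; the json filenames below are hypothetical):
#m1=open(noaa_path+'2020-04-01_mag-7-day.json','r')
#p1=open(noaa_path+'2020-04-01_plasma-7-day.json','r')
#d1=get_noaa_realtime_data(m1, p1)
#print(d1.time[-1], d1.bz[-1], d1.vt[-1])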
def save_stereob_beacon_data(path,file,start_time,end_time):
print('start STB')
stb_sat = heliosat.STB()
t_start = start_time
t_end = end_time
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
time_mat=mdates.date2num(time)
tm, mag = stb_sat.get_data_raw(t_start, t_end, "mag_beacon")
tp, pro = stb_sat.get_data_raw(t_start, t_end, "proton_beacon")
print('download complete')
tm=parse_time(tm,format='unix').datetime
tp=parse_time(tp,format='unix').datetime
#convert to matplotlib time for linear interpolation
tm_mat=mdates.date2num(tm)
tp_mat=mdates.date2num(tp)
print('time convert done')
print('position start')
frame='HEEQ'
spice.furnish(spicedata.get_kernel('stereo_b'))
stbtra=spice.Trajectory('-235') #STEREO-B SPICE NAIF code
stbtra.generate_positions(time,'Sun',frame)
stbtra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(stbtra.x,stbtra.y,stbtra.z)
print('position end ')
#linear interpolation to time_mat times
bx = np.interp(time_mat, tm_mat, mag[:,0] )
by = np.interp(time_mat, tm_mat, mag[:,1] )
bz = np.interp(time_mat, tm_mat, mag[:,2] )
bt = np.sqrt(bx**2+by**2+bz**2)
den = np.interp(time_mat, tp_mat, pro[:,0])
vt = np.interp(time_mat, tp_mat, pro[:,1])
tp = np.interp(time_mat, tp_mat, pro[:,2])
#make array
stb=np.zeros(np.size(bx),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
stb = stb.view(np.recarray)
#fill with data
stb.time=time
stb.bx=bx
stb.by=by
stb.bz=bz
stb.bt=bt
stb.x=stbtra.x
stb.y=stbtra.y
stb.z=stbtra.z
stb.r=r
stb.lat=np.rad2deg(lat)
stb.lon=np.rad2deg(lon)
stb.np=den
stb.tp=tp
stb.vt=vt
#remove spikes from plasma data
#median filter
stb.vt=scipy.signal.medfilt(stb.vt,9)
#set nans to a high number
stb.vt[np.where(np.isfinite(stb.vt) == False)]=1e5
#get rid of all single spikes with scipy signal find peaks (cannot use nan)
peaks,properties = scipy.signal.find_peaks(stb.vt, prominence=200,width=(1,200))
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
stb.vt[peaks[i]-width-2:peaks[i]+width+2]=np.nan
#set nan again
stb.vt[np.where(stb.vt == 1e5)]=np.nan
stb.tp[np.where(np.isfinite(stb.vt) == False)]=np.nan
stb.np[np.where(np.isfinite(stb.vt) == False)]=np.nan
#remove spikes from magnetic field data
#median filter
#set nans to a high number
stb.bt[np.where(np.isfinite(stb.bt) == False)]=1e5
#get rid of all single spikes with scipy signal find peaks (cannot use nan)
peaks,properties = scipy.signal.find_peaks(stb.bt, height=40,width=(1,20))
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths'])[i])
#remove data
stb.bt[peaks[i]-width-2:peaks[i]+width+2]=np.nan
stb.bx[peaks[i]-width-2:peaks[i]+width+2]=np.nan
stb.by[peaks[i]-width-2:peaks[i]+width+2]=np.nan
stb.bz[peaks[i]-width-2:peaks[i]+width+2]=np.nan
#set nan again
stb.bt[np.where(stb.bt == 1e5)]=np.nan
#manual spike removal for speed
remove_start=datetime.datetime(2007, 7, 18,22, 00)
remove_end=datetime.datetime(2007, 7, 19, 16, 00)
remove_start_ind=np.where(remove_start==stb.time)[0][0]
remove_end_ind=np.where(remove_end==stb.time)[0][0]
stb.vt[remove_start_ind:remove_end_ind]=np.nan
header='BEACON STEREO-B magnetic field (IMPACT instrument) and plasma data (PLASTIC), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/beacon/behind/ '+ \
'Timerange: '+stb.time[0].strftime("%Y-%b-%d %H:%M")+' to '+stb.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(stb.time)).seconds)+' seconds. '+\
'A median filter has been applied (plasma data only) and then spikes were removed with scipy.signal.find_peaks (plasma and field). '+\
'The data are available in a numpy recarray, fields can be accessed by stb.time, stb.bx, stb.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(stb.size)+'. '+\
'Units are btxyz [nT, RTN], vt [km s^-1], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.save_stereob_beacon_data (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([stb,header], open(path+file, "wb"))
print('done stb')
print()
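#example usage of save_stereob_beacon_data (a sketch; filename and dates are hypothetical):
#filestb='stereob_beacon_2013_2014.p'
#save_stereob_beacon_data(data_path, filestb, datetime.datetime(2013,1,1), datetime.datetime(2014,1,1))
#[stb,hstb]=pickle.load(open(data_path+filestb, "rb"))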
def save_stereoa_beacon_data(path,file,start_time,end_time,sceq):
print('start STA')
sta_sat = heliosat.STA()
t_start = start_time
t_end = end_time
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
time_mat=mdates.date2num(time)
tm, mag = sta_sat.get_data_raw(t_start, t_end, "mag_beacon")
tp, pro = sta_sat.get_data_raw(t_start, t_end, "proton_beacon")
print('download complete')
tm1=parse_time(tm,format='unix').datetime
tp1=parse_time(tp,format='unix').datetime
#convert to matplotlib time for linear interpolation
tm1_mat=mdates.date2num(tm1)
tp1_mat=mdates.date2num(tp1)
print('time convert done')
print('position start')
frame='HEEQ'
spice.furnish(spicedata.get_kernel('stereo_a_pred'))
statra=spice.Trajectory('-234') #STEREO-A SPICE NAIF code
statra.generate_positions(time,'Sun',frame)
statra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(statra.x,statra.y,statra.z)
print('position end ')
#linear interpolation to time_mat times
bx = np.interp(time_mat, tm1_mat, mag[:,0] )
by = np.interp(time_mat, tm1_mat, mag[:,1] )
bz = np.interp(time_mat, tm1_mat, mag[:,2] )
bt = np.sqrt(bx**2+by**2+bz**2)
den = np.interp(time_mat, tp1_mat, pro[:,0])
vt = np.interp(time_mat, tp1_mat, pro[:,1])
tp = np.interp(time_mat, tp1_mat, pro[:,2])
#round first each original time to full minutes
troundm=copy.deepcopy(tm1)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(tm1)):
troundm[k] = datetime.datetime.strptime(datetime.datetime.strftime(tm1[k], format_str), format_str)
tm_round=parse_time(troundm).plot_date
troundp=copy.deepcopy(tp1)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(tp1)):
troundp[k] = datetime.datetime.strptime(datetime.datetime.strftime(tp1[k], format_str), format_str)
tp_round=parse_time(troundp).plot_date
#check
#print('-----')
#print(tm1)
#print(tp1)
#print(troundm)
#print(troundp)
isin=np.isin(time_mat,tm_round)
setnan=np.where(isin==False)
#set to nan where no original data exists
bx[setnan]=np.nan
by[setnan]=np.nan
bz[setnan]=np.nan
bt = np.sqrt(bx**2+by**2+bz**2)
isin=np.isin(time_mat,tp_round)
setnan=np.where(isin==False)
#set to nan where no original data exists
den[setnan]=np.nan
tp[setnan]=np.nan
vt[setnan]=np.nan
#make array
sta=np.zeros(np.size(bx),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
#fill with data
sta.time=time
sta.bx=bx
sta.by=by
sta.bz=bz
sta.bt=bt
sta.x=statra.x
sta.y=statra.y
sta.z=statra.z
sta.r=r
sta.lat=np.rad2deg(lat)
sta.lon=np.rad2deg(lon)
sta.np=den
sta.tp=tp
sta.vt=vt
#remove spikes from plasma data
#median filter
sta.vt=scipy.signal.medfilt(sta.vt,9)
#set nans to a high number
sta.vt[np.where(np.isfinite(sta.vt) == False)]=1e5
#get rid of all single spikes with scipy signal find peaks (cannot use nan)
peaks,properties = scipy.signal.find_peaks(sta.vt, prominence=200,width=(1,200))
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
sta.vt[peaks[i]-width-2:peaks[i]+width+2]=np.nan
#set nan again
sta.vt[np.where(sta.vt == 1e5)]=np.nan
sta.tp[np.where(np.isfinite(sta.vt) == False)]=np.nan
sta.np[np.where(np.isfinite(sta.vt) == False)]=np.nan
#manual spike removal for magnetic field
#remove_start=datetime.datetime(2018, 9, 23, 11, 00)
#remove_end=datetime.datetime(2018, 9, 25, 00, 00)
#remove_start_ind=np.where(remove_start==sta.time)[0][0]
#remove_end_ind=np.where(remove_end==sta.time)[0][0]
#sta.bt[remove_start_ind:remove_end_ind]=np.nan
#sta.bx[remove_start_ind:remove_end_ind]=np.nan
#sta.by[remove_start_ind:remove_end_ind]=np.nan
#sta.bz[remove_start_ind:remove_end_ind]=np.nan
coord='RTN'
#convert magnetic field to SCEQ
if sceq==True:
sta=convert_RTN_to_SCEQ(sta,'STEREO-A')
coord='SCEQ'
header='BEACON STEREO-A magnetic field (IMPACT instrument) and plasma data (PLASTIC), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/beacon/ahead/ '+ \
'Timerange: '+sta.time[0].strftime("%Y-%b-%d %H:%M")+' to '+sta.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(sta.time)).seconds)+' seconds. '+\
'A median filter has been applied as scipy.signal.medfilt(sta.vt,9) and then spikes were removed with '+\
'scipy.signal.find_peaks(sta.vt, prominence=200,width=(1,200)). '+\
'The data are available in a numpy recarray, fields can be accessed by sta.time, sta.bx, sta.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(sta.size)+'. '+\
'Units are btxyz [nT, '+coord+'], vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.save_stereoa_beacon_data (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([sta,header], open(path+file, "wb"))
print('done sta')
print()
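#example usage of save_stereoa_beacon_data (a sketch; filename and dates are hypothetical):
#filesta='stereoa_beacon_2020_sceq.p'
#save_stereoa_beacon_data(data_path, filesta, datetime.datetime(2020,1,1), datetime.datetime.utcnow(), sceq=True)
#[sta,hsta]=pickle.load(open(data_path+filesta, "rb"))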
def save_stereoa_science_data_old(path,file,t_start, t_end,sceq):
'''** TO DO
note: the plasma download (tp, pro) is currently commented out below, so this
old version of the function does not run as is
'''
print('start STA')
sta_sat = heliosat.STA()
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
time_mat=mdates.date2num(time)
#tp, pro = sta_sat.get_data_raw(t_start, t_end, "sta_plastic_l2")
#tm, mag = sta_sat.get_data_raw(t_start, t_end, "sta_impact_beacon")
tm, mag = sta_sat.get_data_raw(t_start, t_end, "sta_impact_l1")
print('download complete')
tm=parse_time(tm,format='unix').datetime
tp=parse_time(tp,format='unix').datetime
#convert to matplotlib time for linear interpolation
tm_mat=mdates.date2num(tm)
tp_mat=mdates.date2num(tp)
print('time convert done')
print('position start')
frame='HEEQ'
spice.furnish(spicedata.get_kernel('stereo_a_pred'))
statra=spice.Trajectory('-234') #STEREO-A SPICE NAIF code
statra.generate_positions(time,'Sun',frame)
statra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(statra.x,statra.y,statra.z)
print('position end ')
#linear interpolation to time_mat times
bx = np.interp(time_mat, tm_mat, mag[:,0] )
by = np.interp(time_mat, tm_mat, mag[:,1] )
bz = np.interp(time_mat, tm_mat, mag[:,2] )
bt = np.sqrt(bx**2+by**2+bz**2)
den = np.interp(time_mat, tp_mat, pro[:,0])
vt = np.interp(time_mat, tp_mat, pro[:,1])
tp = np.interp(time_mat, tp_mat, pro[:,2])
#make array
sta=np.zeros(np.size(bx),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
#fill with data
sta.time=time
sta.bx=bx
sta.by=by
sta.bz=bz
sta.bt=bt
sta.x=statra.x
sta.y=statra.y
sta.z=statra.z
sta.r=r
sta.lat=np.rad2deg(lat)
sta.lon=np.rad2deg(lon)
sta.vt=vt
sta.np=den
sta.tp=tp
#remove spikes from plasma data
#median filter
sta.vt=scipy.signal.medfilt(sta.vt,9)
#set nans to a high number
sta.vt[np.where(np.isfinite(sta.vt) == False)]=1e5
#get rid of all single spikes with scipy signal find peaks (cannot use nan)
peaks,properties = scipy.signal.find_peaks(sta.vt, prominence=200,width=(1,200))
for i in np.arange(len(peaks)):
#get width of current peak
width=int(np.ceil(properties['widths']/2)[i])
#remove data
sta.vt[peaks[i]-width-2:peaks[i]+width+2]=np.nan
#set nan again
sta.vt[np.where(sta.vt == 1e5)]=np.nan
sta.tp[np.where(np.isfinite(sta.vt) == False)]=np.nan
sta.np[np.where(np.isfinite(sta.vt) == False)]=np.nan
#manual spike removal for magnetic field
remove_start=datetime.datetime(2018, 9, 23, 11, 00)
remove_end=datetime.datetime(2018, 9, 25, 00, 00)
remove_start_ind=np.where(remove_start==sta.time)[0][0]
remove_end_ind=np.where(remove_end==sta.time)[0][0]
sta.bt[remove_start_ind:remove_end_ind]=np.nan
sta.bx[remove_start_ind:remove_end_ind]=np.nan
sta.by[remove_start_ind:remove_end_ind]=np.nan
sta.bz[remove_start_ind:remove_end_ind]=np.nan
#convert magnetic field to SCEQ
if sceq==True:
sta=convert_RTN_to_SCEQ(sta,'STEREO-A')
header='STEREO-A magnetic field (IMPACT instrument, beacon) and plasma data (PLASTIC, science), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/ins_data/ '+ \
'Timerange: '+sta.time[0].strftime("%Y-%b-%d %H:%M")+' to '+sta.time[-1].strftime("%Y-%b-%d %H:%M")+\
', linearly interpolated to a time resolution of '+str(np.mean(np.diff(sta.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by sta.time, sta.bx, sta.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(sta.size)+'. '+\
'Units are btxyz [nT, RTN], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([sta,header], open(path+file, "wb"))
print('done sta')
print()
def omni_loader():
'''
downloads all omni2 data into the "data" folder
'''
#if overwrite>0:
# print('download OMNI2 again')
# if os.path.exists('data/omni2_all_years.dat'): os.remove('data/omni2_all_years.dat')
#if not os.path.exists('data/omni2_all_years.dat'):
#see http://omniweb.gsfc.nasa.gov/html/ow_data.html
print('load OMNI2 .dat into "data" directory from')
omni2_url='https://spdf.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat'
print(omni2_url)
try: urllib.request.urlretrieve(omni2_url, 'data/omni2_all_years.dat')
except urllib.error.URLError as e:
print(' ', omni2_url,' ',e.reason)
sys.exit()
def save_omni_data(path,file):
'''
save variables from OMNI2 dataset as pickle
documentation https://spdf.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2.text
omni2_url='https://spdf.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat'
'''
print('start omni')
omni_loader()
#check how many rows exist in this file
f=open('data/omni2_all_years.dat')
dataset= len(f.readlines())
#make array
o=np.zeros(dataset,dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bygsm', float),('bzgsm', float),('bt', float),\
('vt', float),('np', float),('tp', float),('alpha', float),\
('dst', float),('kp', float),('spot', float),\
('ae', float),('ap', float),('f107', float),\
('pcn', float),('al', float),('au', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
o=o.view(np.recarray)
print(dataset, ' datapoints') #for reading data from OMNI file
j=0
with open('data/omni2_all_years.dat') as f:
for line in f:
line = line.split() # to deal with blank
#time - need to convert from year doy hour to datetime object
o.time[j]=datetime.datetime(int(line[0]), 1, 1) + datetime.timedelta(int(line[1]) - 1) \
+ datetime.timedelta(hours=int(line[2]))
#25 is bulkspeed F6.0, in km/s
o.vt[j]=line[24]
if o.vt[j] == 9999: o.vt[j]=np.NaN
#24 in file, index 23 proton density /ccm
o.np[j]=line[23]
if o.np[j] == 999.9: o.np[j]=np.NaN
#23 in file, index 22 proton temperature, in K
o.tp[j]=line[22]
if o.tp[j] == 9999999.: o.tp[j]=np.NaN
#28 in file, index 27 alpha to proton ratio
o.alpha[j]=line[27]
if o.alpha[j] == 9.999: o.alpha[j]=np.NaN
#total B F6.1 at index 9, fill value is 999.9, in nT
o.bt[j]=line[9]
if o.bt[j] == 999.9: o.bt[j]=np.NaN
#GSE components from 13 to 15, so 12 to 14 index, in nT
o.bx[j]=line[12]
if o.bx[j] == 999.9: o.bx[j]=np.NaN
o.by[j]=line[13]
if o.by[j] == 999.9: o.by[j]=np.NaN
o.bz[j]=line[14]
if o.bz[j] == 999.9: o.bz[j]=np.NaN
#GSM
o.bygsm[j]=line[15]
if o.bygsm[j] == 999.9: o.bygsm[j]=np.NaN
o.bzgsm[j]=line[16]
if o.bzgsm[j] == 999.9: o.bzgsm[j]=np.NaN
o.kp[j]=line[38]
if o.kp[j] == 99: o.kp[j]=np.nan
o.spot[j]=line[39]
if o.spot[j] == 999: o.spot[j]=np.nan
o.dst[j]=line[40]
if o.dst[j] == 99999: o.dst[j]=np.nan
o.ae[j]=line[41]
if o.ae[j] == 9999: o.ae[j]=np.nan
o.ap[j]=line[49]
if o.ap[j] == 999: o.ap[j]=np.nan
o.f107[j]=line[50]
if o.f107[j] == 999.9 : o.f107[j]=np.nan
o.pcn[j]=line[51]
if o.pcn[j] == 999.9 : o.pcn[j]=np.nan
o.al[j]=line[52]
if o.al[j] == 99999 : o.al[j]=np.nan
o.au[j]=line[53]
if o.au[j] == 99999 : o.au[j]=np.nan
j=j+1
print('position start')
frame='HEEQ'
planet_kernel=spicedata.get_kernel('planet_trajectories')
earth=spice.Trajectory('399') #399 for Earth, not barycenter (because of moon)
earth.generate_positions(o.time,'Sun',frame)
earth.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(earth.x,earth.y,earth.z)
print('position end ')
o.x=earth.x
o.y=earth.y
o.z=earth.z
o.r=r
o.lat=np.rad2deg(lat)
o.lon=np.rad2deg(lon)
header='Near Earth OMNI2 1 hour solar wind and geomagnetic indices data since 1963. ' + \
'Obtained from https://spdf.gsfc.nasa.gov/pub/data/omni/low_res_omni/ '+ \
'Timerange: '+o.time[0].strftime("%Y-%b-%d %H:%M")+' to '+o.time[-1].strftime("%Y-%b-%d %H:%M")+'. '+\
'The data are available in a numpy recarray, fields can be accessed by o.time, o.bx, o.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(o.size)+'. '+\
'For units and documentation see: https://spdf.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2.text, the '+\
'heliospheric position of Earth was added and is given in x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.save_omni_data (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([o,header], open(path+file, 'wb') )
print('done omni')
print()
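#example usage of save_omni_data (a sketch; the filename is hypothetical):
#fileomni='omni_1963_now.p'
#save_omni_data(data_path, fileomni)
#[o,ho]=pickle.load(open(data_path+fileomni, "rb"))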
def convert_MAVEN_mat_original(file_input,filename):
print('load MAVEN from MAT')
file=file_input
mavraw = scipy.io.loadmat(file)
#make array
mav=np.zeros(np.size(mavraw['BT']),dtype=[('time',object),('bt', float),('bx', float),\
('by', float),('bz', float),('vt', float),('vx', float),('vy', float),\
('vz', float),('tp', float),('np', float),('r', float),('lat', float),\
('lon', float),('x', float),('y', float),('z', float),\
('ro', float), ('lato', float), ('lono', float),\
('xo', float), ('yo', float), ('zo', float)])
#convert to recarray
mav = mav.view(np.recarray)
#convert time from matlab to python
t=mavraw['timeD'][:,0]
for p in np.arange(np.size(t)):
mav.time[p]= datetime.datetime.fromordinal(t[p].astype(int) ) + \
datetime.timedelta(days=t[p]%1) - datetime.timedelta(days = 366)
mav.bx=mavraw['Bx'][:,0]
mav.by=mavraw['By'][:,0]
mav.bz=mavraw['Bz'][:,0]
mav.bt=mavraw['BT'][:,0]
mav.vx=mavraw['Vx'][:,0]
mav.vy=mavraw['Vy'][:,0]
mav.vz=mavraw['Vz'][:,0]
mav.vt=mavraw['VT'][:,0]
mav.tp=mavraw['Tp'][:,0]*(1.602176634*1e-19)/(1.38064852*1e-23) #from ev to K
mav.np=mavraw['np'][:,0]
#add position with respect to Mars center in km in MSO
print('orbit position start')
insertion=datetime.datetime(2014,9,22,2,24,0)
#these are the indices of the times for the cruise phase
tc=np.where(mdates.date2num(mav.time) < mdates.date2num(insertion))
mars_radius=3389.5
mav.xo=mavraw['Xsc'][:,0]*mars_radius
mav.yo=mavraw['Ysc'][:,0]*mars_radius
mav.zo=mavraw['Zsc'][:,0]*mars_radius
#set to nan for cruise phase
mav.xo[tc]=np.nan
mav.yo[tc]=np.nan
mav.zo[tc]=np.nan
[mav.ro,mav.lato,mav.lono]=cart2sphere(mav.xo,mav.yo,mav.zo)
mav.lono=np.rad2deg(mav.lono)
mav.lato=np.rad2deg(mav.lato)
print('HEEQ position start')
frame='HEEQ'
#add position in HEEQ for cruise phase and orbit
#cruise phase
#use heliopy to load own bsp spice file from MAVEN
#obtained through https://naif.jpl.nasa.gov/pub/naif/pds/pds4/maven/maven_spice/spice_kernels/spk/
spice.furnish(data_path+'input/maven_cru_rec_131118_140923_v1.bsp')
cruise=spice.Trajectory('MAVEN') #or NAIF CODE -202
cruise.generate_positions(mav.time[tc],'Sun',frame)
cruise.change_units(astropy.units.AU)
mav.x[tc]=cruise.x
mav.y[tc]=cruise.y
mav.z[tc]=cruise.z
[mav.r[tc], mav.lat[tc], mav.lon[tc]]=cart2sphere(mav.x[tc],mav.y[tc],mav.z[tc])
#times in orbit
to=np.where(mdates.date2num(mav.time) > mdates.date2num(insertion))
planet_kernel=spicedata.get_kernel('planet_trajectories')
mars=spice.Trajectory('MARS BARYCENTER')
mars.generate_positions(mav.time[to],'Sun',frame)
mars.change_units(astropy.units.AU)
mav.x[to]=mars.x
mav.y[to]=mars.y
mav.z[to]=mars.z
[mav.r[to], mav.lat[to], mav.lon[to]]=cart2sphere(mav.x[to],mav.y[to],mav.z[to])
#convert to degree
mav.lon=np.rad2deg(mav.lon)
mav.lat=np.rad2deg(mav.lat)
print('position end ')
print('save MAVEN as pickle')
header='MAVEN merged magnetic field and plasma data, obtained from Toulouse and <NAME>. '+\
'Timerange: '+mav.time[0].strftime("%Y-%b-%d %H:%M")+' to '+mav.time[-1].strftime("%Y-%b-%d %H:%M")+'.'+\
'Mean time resolution: '+str(np.mean(np.diff(mav.time)).seconds)+' seconds. '+\
'The data are put in a numpy recarray, fields can be accessed by mav.time, mav.bx, mav.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(mav.size)+'. '+\
'Units are btxyz [nT, MSO], vtxyz [km/s, MSO], np[cm^-3], tp [K], orbital position: '+ \
'xo/yo/zo/ro/lono/lato [km, degree, MSO], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.convert_MAVEN_mat_original (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>by. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([mav,header], open(filename, "wb"))
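#example usage of convert_MAVEN_mat_original (a sketch; the .mat input and output filenames are hypothetical):
#file_input=data_path+'input/MAVEN_2014to2018_cyril.mat'
#filemav=data_path+'maven_2014_2018.p'
#convert_MAVEN_mat_original(file_input, filemav)
#[mav,hmav]=pickle.load(open(filemav, "rb"))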
def convert_MAVEN_mat_removed(file_input,filename):
print('load MAVEN from MAT')
file=file_input
mavraw = scipy.io.loadmat(file)
#load time data extra
file_input=data_path+'input/Data-MAVEN-MAG_SolarWind_102014-012021_time.mat'
mavraw_time = scipy.io.loadmat(file_input)
#mavraw_time['time']
#make array
mav=np.zeros(np.size(mavraw['BT']),dtype=[('time',object),('bt', float),('bx', float),\
('by', float),('bz', float),('vt', float),('vx', float),('vy', float),\
('vz', float),('tp', float),('np', float),('r', float),('lat', float),\
('lon', float),('x', float),('y', float),('z', float),\
('ro', float), ('lato', float), ('lono', float),\
('xo', float), ('yo', float), ('zo', float)])
#convert to recarray
mav = mav.view(np.recarray)
#convert time from matlab to python
#t=mavraw['timeD'][:,0]
t=mavraw_time['time']
for p in np.arange(np.size(t)):
mav.time[p]= datetime.datetime.fromordinal(t[p][0].astype(int) ) + \
datetime.timedelta(days=t[p][0]%1) - datetime.timedelta(days = 366)
mav.bx=mavraw['Bx'][:,0]
mav.by=mavraw['By'][:,0]
mav.bz=mavraw['Bz'][:,0]
mav.bt=mavraw['BT'][:,0]
### TO DO *** add plasma data
#file_input=data_path+'input/MAVEN_2014to2018_cyril.mat'
#mavraw2 = scipy.io.loadmat(file_input)
#mav.vx[0:len(mavraw2)]=mavraw2['Vx'][:,0]
#mav.vy[0:len(mavraw2)]=mavraw2['Vy'][:,0]
#mav.vz[0:len(mavraw2)]=mavraw2['Vz'][:,0]
#mav.vt[0:len(mavraw2)]=mavraw2['VT'][:,0]
#mav.tp[0:len(mavraw2)]=mavraw2['Tp'][:,0]*(1.602176634*1e-19)/(1.38064852*1e-23) #from ev to K
#mav.np[0:len(mavraw2)]=mavraw2['np'][:,0]
#add position with respect to Mars center in km in MSO
print('orbit position start')
insertion=datetime.datetime(2014,9,22,2,24,0)
#these are the indices of the times for the cruise phase
#tc=np.where(mdates.date2num(mav.time) < mdates.date2num(insertion))
mars_radius=3389.5
mav.xo=mavraw['Xsc'][:,0]*mars_radius
mav.yo=mavraw['Ysc'][:,0]*mars_radius
mav.zo=mavraw['Zsc'][:,0]*mars_radius
#set to nan for cruise phase
#mav.xo[tc]=np.nan
#mav.yo[tc]=np.nan
#mav.zo[tc]=np.nan
[mav.ro,mav.lato,mav.lono]=cart2sphere(mav.xo,mav.yo,mav.zo)
mav.lono=np.rad2deg(mav.lono)
mav.lato=np.rad2deg(mav.lato)
print('HEEQ position start')
frame='HEEQ'
#add position in HEEQ for cruise phase and orbit
#cruise phase for plasma file only
#use heliopy to load own bsp spice file from MAVEN
#obtained through https://naif.jpl.nasa.gov/pub/naif/pds/pds4/maven/maven_spice/spice_kernels/spk/
#spice.furnish(data_path+'input/maven_cru_rec_131118_140923_v1.bsp')
#cruise=spice.Trajectory('MAVEN') #or NAIF CODE -202
#cruise.generate_positions(mav.time[tc],'Sun',frame)
#cruise.change_units(astropy.units.AU)
#mav.x[tc]=cruise.x
#mav.y[tc]=cruise.y
#mav.z[tc]=cruise.z
#[mav.r[tc], mav.lat[tc], mav.lon[tc]]=cart2sphere(mav.x[tc],mav.y[tc],mav.z[tc])
#times in orbit
to=np.where(mdates.date2num(mav.time) > mdates.date2num(insertion))
planet_kernel=spicedata.get_kernel('planet_trajectories')
mars=spice.Trajectory('MARS BARYCENTER')
mars.generate_positions(mav.time[to],'Sun',frame)
mars.change_units(astropy.units.AU)
mav.x[to]=mars.x
mav.y[to]=mars.y
mav.z[to]=mars.z
[mav.r[to], mav.lat[to], mav.lon[to]]=cart2sphere(mav.x[to],mav.y[to],mav.z[to])
#convert to degree
mav.lon=np.rad2deg(mav.lon)
mav.lat=np.rad2deg(mav.lat)
print('position end ')
header='MAVEN merged magnetic field and plasma data, obtained from Toulouse. ' + \
'The magnetosphere is removed with the Gruesbeck et al. 3D model (by <NAME>). '+ \
'Timerange: '+mav.time[0].strftime("%Y-%b-%d %H:%M")+' to '+mav.time[-1].strftime("%Y-%b-%d %H:%M")+'. '+\
'Mean time resolution: '+str(np.mean(np.diff(mav.time)).seconds)+' seconds. '+\
'The data are put in a numpy recarray, fields can be accessed by mav.time, mav.bx, mav.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(mav.size)+'. '+\
'Units are btxyz [nT, MSO], vtxyz [km/s, MSO], np[cm^-3], tp [K], orbital position: '+ \
'xo/yo/zo/ro/lono/lato [km, degree, MSO], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.convert_MAVEN_mat_removed (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([mav,header], open(filename, "wb"))
def MAVEN_smooth_orbit(filemav,filename):
[mav,hmav]=pickle.load(open(filemav, 'rb' ) )
print('loaded ',filemav)
############# smooth over each orbit to extract solar wind signal
#determine apogees
#get rid of nans
mav.ro[np.isnan(mav.ro)]=-1e3
peaks,properties = scipy.signal.find_peaks(mav.ro,height=5000,width=(100))
#bring nans back
mav.ro[mav.ro==-1e3]=np.nan
print('Nr. of orbits in dataset: ',len(peaks))
#make array
mavs=np.zeros(np.size(peaks),dtype=[('time',object),('bt', float),('bx', float),\
('by', float),('bz', float),('vt', float),('vx', float),('vy', float),\
('vz', float),('tp', float),('np', float),('r', float),('lat', float),\
('lon', float),('x', float),('y', float),('z', float),\
('ro', float), ('lato', float), ('lono', float),\
('xo', float), ('yo', float), ('zo', float)])
#convert to recarray
mavs = mavs.view(np.recarray)
#2h on each side
window=121
for i in np.arange(len(peaks)):
mavs.bt[i]=np.nanmedian(mav.bt[peaks[i]-window:peaks[i]+window])
mavs.bx[i]=np.nanmedian(mav.bx[peaks[i]-window:peaks[i]+window])
mavs.by[i]=np.nanmedian(mav.by[peaks[i]-window:peaks[i]+window])
mavs.bz[i]=np.nanmedian(mav.bz[peaks[i]-window:peaks[i]+window])
mavs.vt[i]=np.nanmedian(mav.vt[peaks[i]-window:peaks[i]+window])
mavs.vx[i]=np.nanmedian(mav.vx[peaks[i]-window:peaks[i]+window])
mavs.vy[i]=np.nanmedian(mav.vy[peaks[i]-window:peaks[i]+window])
mavs.vz[i]=np.nanmedian(mav.vz[peaks[i]-window:peaks[i]+window])
mavs.np[i]=np.nanmedian(mav.np[peaks[i]-window:peaks[i]+window])
mavs.tp[i]=np.nanmedian(mav.tp[peaks[i]-window:peaks[i]+window])
mavs.time[i]=mav.time[peaks[i]]
mavs.r[i]=mav.r[peaks[i]]
mavs.lat[i]=mav.lat[peaks[i]]
mavs.lon[i]=mav.lon[peaks[i]]
mavs.x[i]=mav.x[peaks[i]]
mavs.y[i]=mav.y[peaks[i]]
mavs.z[i]=mav.z[peaks[i]]
mavs.ro[i]=mav.ro[peaks[i]]
mavs.lato[i]=mav.lato[peaks[i]]
mavs.lono[i]=mav.lono[peaks[i]]
mavs.xo[i]=mav.xo[peaks[i]]
mavs.yo[i]=mav.yo[peaks[i]]
mavs.zo[i]=mav.zo[peaks[i]]
'''
for testing:
plt.figure(1)
ax1 = plt.subplot(121)
#ax1.plot_date(mav.time,mav.bt,'bo')
#ax1.plot_date(mav.time,mav.bt,'-r')
ax1.plot_date(mav.time[peaks],bt1,'-r')
ax1.plot_date(mav.time[peaks],vt1,'-k')
#ax1.plot_date(mav.time[peaks],bt2,'bo')
#ax1.plot_date(mav.time,g,'-b')
#ax1.set_xlim(timeset-days_window*10,timeset+days_window*10)
ax2 = plt.subplot(122)
ax2.plot_date(mav.time,mav.ro,'-k')
ax2.plot_date(mav.time[peaks],mav.ro[peaks],'bo')
#ax2.set_xlim(timeset-days_window,timeset+days_window)
ax2.set_ylim(7000,10000)
plt.show()
plt.figure(2)
plt.plot_date(mav.time,mav.vt,'-r')
plt.plot_date(mav.time[peaks],vt1,'ob')
'''
#pickle.dump(mavs, open(data_path+filename, "wb"))
header='MAVEN solar wind merged magnetic field and plasma data. ' + \
'The magnetosphere was removed with the Gruesbeck et al. 3D model (by <NAME>), '+\
'and a +/-2h median filter around the apogee is used for 1 data point per orbit. '+ \
'Timerange: '+mavs.time[0].strftime("%Y-%b-%d %H:%M")+' to '+mavs.time[-1].strftime("%Y-%b-%d %H:%M")+'. '+\
'Mean time resolution: '+str(np.mean(np.diff(mavs.time)).seconds)+' seconds. '+\
'The data are put in a numpy recarray, fields can be accessed by mav.time, mav.bx, mav.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(mavs.size)+'. '+\
'Units are btxyz [nT, MSO], vtxyz [km/s, MSO], np[cm^-3], tp [K], orbital position: '+ \
'xo/yo/zo/ro/lono/lato [km, degree, MSO], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats heliocats.data.convert_MAVEN_mat_removed_orbit (uses https://github.com/ajefweiss/HelioSat '+\
'and https://github.com/heliopython/heliopy). '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
pickle.dump([mavs,header], open(filename, "wb"))
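#Illustrative usage sketch for MAVEN_smooth_orbit; the file names below are
#hypothetical placeholders. The function expects a pickle written by the
#conversion routine above and saves one median-filtered data point per orbit.
# filemav = data_path+'maven_2014_2018_removed.p'              # hypothetical input
# filesmooth = data_path+'maven_2014_2018_removed_smoothed.p'  # hypothetical output
# MAVEN_smooth_orbit(filemav, filesmooth)
# [mavs, hmavs] = pickle.load(open(filesmooth, 'rb'))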
########################################## load HISTORIC DATA ############################
def save_helios_data(file):
'''
**TO DO
'''
print('start Helios')
t_start = datetime.datetime(1975, 1, 1)
t_end = datetime.datetime(1980, 12, 31)
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
#h1=heliosdata.corefit(1,t_start,t_end)
#h2=heliosdata.corefit(2,t_start,t_end)
h1=heliosdata.merged(1,t_start,t_end)
print('end Helios')
def save_cassini_data(file):
'''
**TO DO
'''
print('start Cassini')
t_start = datetime.datetime(1999, 8, 16)
t_end = datetime.datetime(2016, 12, 31)
#create an array with 1 minute resolution between t start and end
time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
coords='RTN'
#Cassini Orbiter Magnetometer Calibrated MAG data in 1 minute averages available
#covering the period 1999-08-16 (DOY 228) to 2016-12-31 (DOY 366).
#The data are provided in RTN coordinates throughout the mission, with Earth, Jupiter,
#and Saturn centered coordinates for the respective flybys of those planets.
cas=cassinidata.mag_hires(t_start,t_end, coords)
def save_ulysses_data(data_path):
print('read Ulysses data from cdf and convert to pickle')
datacat_path='/nas/helio/data/DATACAT/'
#load cdf
ulycdf = cdflib.CDF(datacat_path+'ulysses_merged_1990_2009_CDAWEB.cdf')
#check variables
#ulycdf.cdf_info()
#time conversion to datetime
time=ulycdf.varget('Epoch')
t=parse_time(time,format='cdf_epoch').datetime
#cutoff time and later data so that it starts with available position on Oct 6 1990
t=t[6696:-1]
print('Ulysses position start')
#position starts on Oct 6 1990
frame='HEEQ'
spice.furnish(spicedata.get_kernel('ulysses'))
upos=spice.Trajectory('-55')
upos.generate_positions(t,'Sun',frame)
upos.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(upos.x,upos.y,upos.z)
print('position end ')
#make custom array
uly=np.zeros(len(t),dtype=[('time',object),('bx', float),('by', float), \
('bz', float), ('bt', float),('vt', float),('np', float),('tp', float), \
('x', float),('y', float), ('z', float),('r', float),('lat', float), \
('lon', float)])
#convert to recarray
uly = uly.view(np.recarray)
uly.time=t
uly.bx=ulycdf.varget('BR')[6696:-1]
uly.by=ulycdf.varget('BT')[6696:-1]
uly.bz=ulycdf.varget('BN')[6696:-1]
uly.bt=ulycdf.varget('ABS_B')[6696:-1]
uly.vt=ulycdf.varget('plasmaFlowSpeed')[6696:-1]
uly.np=ulycdf.varget('protonDensity')[6696:-1]
uly.tp=ulycdf.varget('protonTempLarge')[6696:-1]
uly.x=upos.x
uly.y=upos.y
uly.z=upos.z
uly.r=r
uly.lat=np.rad2deg(lat)
uly.lon=np.rad2deg(lon)
badmag=np.where(uly.bt < -10000)
uly.bt[badmag]=np.nan
uly.bx[badmag]=np.nan
uly.by[badmag]=np.nan
uly.bz[badmag]=np.nan
badv=np.where(uly.vt < -100000)
uly.vt[badv]=np.nan
badn=np.where(uly.np < -100000)
uly.np[badn]=np.nan
badt=np.where(uly.tp < -100000)
uly.tp[badt]=np.nan
header='Ulysses merged magnetic field and plasma data, obtained from CDAWEB. '+ \
'Timerange: '+uly.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+uly.time[-1].strftime("%d-%b-%Y %H:%M:%S") +\
'. Units are btxyz [nT, RTN], vt [km/s], np [cm-3], tp[K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats and https://github.com/heliopython/heliopy. By <NAME> (twitter @chrisoutofspace) and <NAME>.'
file=data_path+'ulysses_1990_2009_rtn.p'
pickle.dump([uly,header], open(file, "wb"))
print('Ulysses done')
############################# HELCATS DATA into single file ###############################
#for reading .sav files
def getcat(filename):
print('reading CAT '+filename)
cat=scipy.io.readsav(filename, verbose='true')
print('done reading CAT')
return cat
def convert_sav_to_p():
start=time.time()
wind=getcat('/nas/helio/data/DATACAT/WIND_2007to2016_HEEQ.sav')
end=time.time()
print('load wind end. time in minutes:', (end-start)/60)
#save time and components as pickle
pickle.dump(wind.wind, open( "/nas/helio/data/insitu_python/WIND_2007to2016_HEEQ.p", "wb" ) )
def despike_helcats_speed_wind(vt,vx,vy,vz):
#set all nan to 0 in the v gradient array
v1=copy.deepcopy( abs( np.gradient(vt) ))
vnan_ind=np.where(np.isnan(v1)==True)[0]
v1[vnan_ind]=0
peaks, properties = scipy.signal.find_peaks(v1, prominence=40, width=(1, 10))
vt[np.where(v1>100)]=np.nan
vx[np.where(v1>100)]=np.nan
vy[np.where(v1>100)]=np.nan
vz[np.where(v1>100)]=np.nan
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
#print(width)
vt[peaks[i]-width:peaks[i]+width]=np.nan
vx[peaks[i]-width:peaks[i]+width]=np.nan
vy[peaks[i]-width:peaks[i]+width]=np.nan
vz[peaks[i]-width:peaks[i]+width]=np.nan
#print(properties['widths'])
#plt.plot(win.vt[0:80000],'-b')
#plt.plot(win.vt[0:80000],'-k',linewidth=1)
#plt.plot(v,'-r',linewidth=1)
#plt.plot(v1,'-b',linewidth=1)
return vt,vx,vy,vz
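#Minimal synthetic check of the gradient-based despiking used above (illustrative
#only; the thresholds are the ones hard-coded in despike_helcats_speed_wind):
# v_demo = 400 + 20*np.sin(np.arange(1000)/50.)   # smooth synthetic speed [km/s]
# v_demo[500] = 900                               # artificial spike
# vt_c, vx_c, vy_c, vz_c = despike_helcats_speed_wind(
#        v_demo.copy(), v_demo.copy(), v_demo.copy(), v_demo.copy())
# print(np.sum(np.isnan(vt_c)))                   # samples around the spike are now nan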
def despike_helcats_density_wind(den):
#set all nan to 0 in the v gradient array
den1=copy.deepcopy( abs( np.gradient(den) ))
den1nan_ind=np.where(np.isnan(den1)==True)[0]
den1[den1nan_ind]=0
peaks, properties = scipy.signal.find_peaks(den1, prominence=10, width=(1, 10))
den[np.where(den1>10)]=np.nan
den1[np.where(den1>10)]=np.nan
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
#print(width)
den[peaks[i]-width:peaks[i]+width]=np.nan
#print(properties['widths'])
#plt.plot(win.vt[0:80000],'-b')
#plt.plot(win.np[1200000:1500000]+50,'-g',linewidth=5)
#plt.plot(den+1,'-k',linewidth=1)
#plt.plot(den1,'-b',linewidth=1)
return den
def despike_helcats_temperature_wind(den):
den=den/1e6
#set all nan to 0 in the v gradient array
den1=copy.deepcopy( abs( np.gradient(den) ))
den1nan_ind=np.where(np.isnan(den1)==True)[0]
den1[den1nan_ind]=0
peaks, properties = scipy.signal.find_peaks(den1, prominence=0.2, width=(1, 10))
#den[np.where(den>100)[0]]=np.nan
den[np.where(den1>0.2)]=np.nan
den1[np.where(den1>0.2)]=np.nan
for i in np.arange(len(peaks)):
width=int(np.ceil(properties['widths'])[i])
#print(width)
den[peaks[i]-width:peaks[i]+width]=np.nan
return den*1e6
def save_helcats_datacat(data_path,removed):
''' to save all of helcats DATACAT into a single file
use: hd.save_helcats_datacat(data_path,removed=True)
'''
print('save all helcats DATACAT into single file')
datacat_path='/nas/helio/data/DATACAT/'
print('all data in', datacat_path)
print( 'read Wind')
winin= pickle.load( open(datacat_path+ "WIND_2007to2018_HEEQ.p", "rb" ) )
winin_time=parse_time(winin.time,format='utime').datetime
winin=winin.astype([('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('vt', 'float64'), ('vx', 'float64'), ('vy', 'float64'), \
('vz', 'float64'), ('tp', 'float64'), ('np', 'float64'), \
('r', 'float64'),('lat', 'float64'), ('lon', 'float64')])
#make new array with xyz
win=np.zeros(np.size(winin),[('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('vt', 'float64'), ('vx', 'float64'), ('vy', 'float64'), \
('vz', 'float64'), ('tp', 'float64'), ('np', 'float64'),\
('x', 'float64'),('y', 'float64'), ('z', 'float64'),\
('r', 'float64'),('lat', 'float64'), ('lon', 'float64')])
win = win.view(np.recarray)
win.time=winin_time
win.bx=winin.bx
win.by=winin.by
win.bz=winin.bz
win.bt=winin.bt
win.vt=winin.vt
win.vx=winin.vx
win.vy=winin.vy
win.vz=winin.vz
win.np=winin.np
win.tp=winin.tp
[win.vt,win.vx,win.vy,win.vz]=despike_helcats_speed_wind(win.vt,win.vx,win.vy,win.vz)
win.np=despike_helcats_density_wind(win.np)
win.tp=despike_helcats_temperature_wind(win.tp)
win.r=winin.r/(astropy.constants.au.value/1e3)
win.lat=winin.lat
win.lon=winin.lon
[win.x, win.y, win.z]=sphere2cart(win.r,np.abs(win.lat-np.radians(90)),win.lon)
win.lon=np.rad2deg(win.lon)
win.lat=np.rad2deg(win.lat)
del(winin)
hwin='Wind merged magnetic field and plasma data, obtained from HELCATS (<NAME>). '+ \
'Timerange: '+win.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+win.time[-1].strftime("%d-%b-%Y %H:%M:%S") +\
'Units are btxyz [nT, SCEQ], vtxyz [km/s, SCEQ], np [#/cm-3], tp[K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([win,hwin], open(data_path+ "helcats/wind_2007_2018_helcats.p", "wb" ) )
print( 'convert Wind done.')
print( 'read STEREO-A')
stain= pickle.load( open(datacat_path+ "STA_2007to2015_SCEQ.p", "rb" ) )
stain_time=parse_time(stain.time,format='utime').datetime
stain=stain.astype([('time', 'object'), ('bt', 'float'),\
('bx', 'float'), ('by', 'float'), ('bz', 'float'), \
('vt', 'float'), ('vx', 'float'), ('vy', 'float'), \
('vz', 'float'), ('tp', 'float'), ('np', 'float'), \
('r', 'float'),('lat', 'float'), ('lon', 'float')])
sta=np.zeros(np.size(stain),[('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('vt', 'float64'), ('vx', 'float64'), ('vy', 'float64'), \
('vz', 'float64'), ('tp', 'float64'), ('np', 'float64'),\
('x', 'float64'),('y', 'float64'), ('z', 'float64'),\
('r', 'float64'),('lat', 'float64'), ('lon', 'float64')])
sta = sta.view(np.recarray)
sta.time=stain_time
sta.bx=stain.bx
sta.by=stain.by
sta.bz=stain.bz
sta.bt=stain.bt
sta.vt=stain.vt
sta.vx=stain.vx
sta.vy=stain.vy
sta.vz=stain.vz
sta.np=stain.np
sta.tp=stain.tp
sta.r=stain.r/(astropy.constants.au.value/1e3)
sta.lat=stain.lat
sta.lon=stain.lon
[sta.x, sta.y, sta.z]=sphere2cart(sta.r,np.abs(sta.lat-np.radians(90)),sta.lon)
sta.lon=np.rad2deg(sta.lon)
sta.lat=np.rad2deg(sta.lat)
del(stain)
hsta='STEREO-A merged magnetic field and plasma data, obtained from HELCATS (<NAME>). '+ \
'Timerange: '+sta.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+sta.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'Units are btxyz [nT, SCEQ], vtxyz [km/s, SCEQ], np [#/cm-3], tp[K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([sta,hsta], open(data_path+ "helcats/stereoa_2007_2015_helcats.p", "wb" ) )
print( 'read STA done.')
print( 'read STEREO-B')
stbin= pickle.load( open(datacat_path+ "STB_2007to2014_SCEQ.p", "rb" ) )
stbin_time=parse_time(stbin.time,format='utime').datetime
stbin=stbin.astype([('time', 'object'), ('bt', 'float'),\
('bx', 'float'), ('by', 'float'), ('bz', 'float'), \
('vt', 'float'), ('vx', 'float'), ('vy', 'float'), \
('vz', 'float'), ('tp', 'float'), ('np', 'float'), \
('r', 'float'),('lat', 'float'), ('lon', 'float')])
stb=np.zeros(np.size(stbin),[('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('vt', 'float64'), ('vx', 'float64'), ('vy', 'float64'), \
('vz', 'float64'), ('tp', 'float64'), ('np', 'float64'),\
('x', 'float64'),('y', 'float64'), ('z', 'float64'),\
('r', 'float64'),('lat', 'float64'), ('lon', 'float64')])
stb = stb.view(np.recarray)
stb.time=stbin_time
stb.bx=stbin.bx
stb.by=stbin.by
stb.bz=stbin.bz
stb.bt=stbin.bt
stb.vt=stbin.vt
stb.vx=stbin.vx
stb.vy=stbin.vy
stb.vz=stbin.vz
stb.np=stbin.np
stb.tp=stbin.tp
stb.r=stbin.r/(astropy.constants.au.value/1e3)
stb.lat=stbin.lat
stb.lon=stbin.lon
[stb.x, stb.y, stb.z]=sphere2cart(stb.r,np.abs(stb.lat-np.radians(90)),stb.lon)
stb.lon=np.rad2deg(stb.lon)
stb.lat=np.rad2deg(stb.lat)
#replace missing 2014 plasma data for STEREO-B
filestb2='stereob_2013_2014.p'
[stb2,hstb2]=pickle.load(open(data_path+filestb2, "rb" ) )
stb_time_mat=parse_time(stb.time).plot_date
stb2_time_mat=parse_time(stb2.time).plot_date
#interpolate times onto stb.time
dumvt=np.interp(stb_time_mat, stb2_time_mat,stb2.vt)
dumtp=np.interp(stb_time_mat, stb2_time_mat,stb2.tp)
dumnp=np.interp(stb_time_mat, stb2_time_mat,stb2.np)
#get indices of 1-1-2014 to end
begin=np.where(stb_time_mat > parse_time('2014-1-1').plot_date )[0][0]
end=np.size(stb.vt)
stb.vt[begin:end]=dumvt[begin:end]
stb.tp[begin:end]=dumtp[begin:end]
stb.np[begin:end]=dumnp[begin:end]
del(stbin)
del(dumvt)
del(dumtp)
del(dumnp)
hstb='STEREO-B merged magnetic field and plasma data, obtained from HELCATS (<NAME>). '+ \
'Timerange: '+stb.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+stb.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'Units are btxyz [nT, SCEQ], vtxyz [km/s, SCEQ], np [#/cm-3], tp[K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([stb,hstb], open(data_path+ "helcats/stereob_2007_2014_helcats.p", "wb" ) )
print( 'read STB done.')
print( 'read MESSENGER')
#get insitu data from helcats, converted from IDL .sav to pickle
if removed == True:
mesin= pickle.load( open( datacat_path+"MES_2007to2015_SCEQ_removed.p", "rb" ) )
#non removed dataset
if removed == False:
mesin= pickle.load( open( datacat_path+"MES_2007to2015_SCEQ_non_removed.p", "rb" ) )
#time conversion
mesin_time=parse_time(mesin.time,format='utime').datetime
#replace mes.time with datetime object
#new variable names
mes=np.zeros(np.size(mesin),[('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('x', 'float64'),('y', 'float64'), ('z', 'float64'),\
('r', 'float64'),('lat', 'float64'), ('lon', 'float64')])
#convert to recarray
mes = mes.view(np.recarray)
#set time new
mes.time=mesin_time
mes.bx=mesin.bx
mes.by=mesin.by
mes.bz=mesin.bz
mes.bt=mesin.btot
#convert distance from Sun from km to AU, astropy constant is given in m
mes.r=mesin.mes_radius_in_km_heeq/(astropy.constants.au.value/1e3)
mes.lon=np.degrees(mesin.mes_longitude_in_radians_heeq.astype('float64'))
mes.lat=np.degrees(mesin.mes_latitude_in_radians_heeq.astype('float64'))
[mes.x, mes.y, mes.z]=sphere2cart(mes.r,np.radians(np.abs(mes.lat-90)),np.radians(mes.lon))
if removed == True:
hmes='MESSENGER magnetic field data, obtained from NASA PDS. '+ \
'Timerange: '+mes.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+mes.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'. The magnetosphere is removed with a manual magnetopause crossings list (<NAME>, <NAME>, <NAME>). '+ \
'Units are btxyz [nT, SCEQ], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([mes,hmes], open(data_path+ "helcats/messenger_2007_2015_helcats_removed.p", "wb" ) )
if removed == False:
hmes='MESSENGER magnetic field data, obtained from NASA PDS. '+ \
'Timerange: '+mes.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+mes.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'. The magnetosphere is removed with a manual magnetopause crossings list (<NAME>, <NAME>, <NAME>). '+ \
'Units are btxyz [nT, SCEQ], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([mes,hmes], open(data_path+ "helcats/messenger_2007_2015_helcats.p", "wb" ) )
print('convert MESSENGER done.')
print ('read VEX')
if removed == True:
vexin= pickle.load( open(datacat_path+ "VEX_2007to2014_SCEQ_removed.p", "rb" ) )
if removed == False:
vexin= pickle.load( open(datacat_path+ "VEX_2007to2014_SCEQ.p", "rb" ) )
#time conversion
vexin_time=parse_time(vexin.time,format='utime').datetime
vex=np.zeros(np.size(vexin),[('time', 'object'), ('bt', 'float64'),\
('bx', 'float64'), ('by', 'float64'), ('bz', 'float64'), \
('x', 'float64'),('y', 'float64'), ('z', 'float64'),\
('r', 'float64'),('lat', 'float64'), ('lon', 'float64'),\
('xo', 'float64'),('yo', 'float64'), ('zo', 'float64'),\
('ro', 'float64'),('lato', 'float64'), ('lono', 'float64')])
vex = vex.view(np.recarray)
vex.r=vexin.vex_radius_in_km_heeq/(astropy.constants.au.value/1e3)
vex.lon=np.rad2deg(vexin.vex_longitude_in_radians_heeq)
vex.lat=np.rad2deg(vexin.vex_latitude_in_radians_heeq)
[vex.x, vex.y, vex.z]=sphere2cart(vex.r,np.radians(np.abs(vex.lat-90)),np.radians(vex.lon))
#convert to degree
vex.time=vexin_time
vex.bx=vexin.bx
vex.by=vexin.by
vex.bz=vexin.bz
vex.bt=vexin.btot
#add orbit position
#https://www.cosmos.esa.int/web/spice/spice-for-vex
#or from VEX_2007to2014_VSO.p
vex2in=pickle.load( open( datacat_path+"VEX_2007to2014_VSO.p", "rb" ) )
vex.xo=vex2in.x
vex.yo=vex2in.y
vex.zo=vex2in.z
[vex.ro, vex.lato, vex.lono]=cart2sphere(vex.xo,vex.yo,vex.zo)
vex.lono=np.rad2deg(vex.lono)
vex.lato=np.rad2deg(vex.lato)
del(vexin)
del(vex2in)
if removed == True:
hvex='VEX magnetic field data, obtained from the VEX magnetometer PI T. Zhang IWF Graz, Austria. '+ \
'Timerange: '+vex.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+vex.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'. The magnetosphere was removed with the model from Zhang et al. (2008), see Moestl et al. (2017, doi: 10.1002/2017SW001614) for details. '+ \
'Units are btxyz [nT, SCEQ], orbital position: '+ \
'xo/yo/zo/ro/lono/lato [km, degree, VSO], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([vex,hvex], open(data_path+ "helcats/vex_2007_2014_helcats_removed.p", "wb" ) )
if removed == False:
hvex='VEX magnetic field data, obtained from the VEX magnetometer PI T. Zhang IWF Graz, Austria. '+ \
'Timerange: '+vex.time[0].strftime("%d-%b-%Y %H:%M:%S")+' to '+vex.time[-1].strftime("%d-%b-%Y %H:%M:%S")+\
'. Units are btxyz [nT, SCEQ], orbital position: '+ \
'xo/yo/zo/ro/lono/lato [km, degree, VSO], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]'
pickle.dump([vex,hvex], open(data_path+ "helcats/vex_2007_2014_helcats.p", "wb" ) )
print( 'convert VEX done.')
#the Ulysses file has been generated by selecting the merged Ulysses data in CDAWEB
#and then saved as one cdf 2.7 file
print('read Ulysses from CDAWEB cdf')
save_ulysses_data(data_path)
#fileuly=data_path+'ulysses_1990_2009_helcats.p'
#[uly,huly]=pickle.load(open(fileuly, 'rb' ) )
#if removed==True:
# pickle.dump([vex,win,mes,sta,stb,uly,hvex,hwin,hmes,hsta,hstb,huly], open(data_path+ "helcats_all_data_removed.p", "wb" ) )
# print('saved as ' +data_path+ 'helcats_all_data_removed.p')
#if removed==False:
# pickle.dump([vex,win,mes,sta,stb,uly,hvex,hwin,hmes,hsta,hstb,huly], open(data_path+ "helcats_all_data_non_removed.p", "wb" ) )
# print('saved as ' +data_path+ 'helcats_all_data_non_removed.p')
def load_helcats_datacat(file):
''' to load all of helcats DATACAT from a single file'''
print('load all helcats DATACAT from single file: ', file)
[vex,win,mes,sta,stb,uly,hvex,hwin,hmes,hsta,hstb,huly]=pickle.load( open(file, "rb" ) )
print('Use vex,win,sta,stb,mes,uly to access data and position, hvex,hwin, hmes, hsta, hstb, huly for headers.')
return [vex,win,mes,sta,stb,uly,hvex,hwin,hmes,hsta,hstb,huly]
def recarray_to_numpy_array(rec):
'''convert data recarray to numpy structured array with matplotlib time '''
#recarr.time=parse_time(recarr.time).plot_date
#numarr = pd.DataFrame(recarr).to_numpy()
num=copy.deepcopy(np.array(rec))
num['time']=parse_time(num['time']).plot_date
return num
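#Illustrative use (assumes `win` is one of the recarrays built in
#save_helcats_datacat above):
# win_num = recarray_to_numpy_array(win)
# print(win_num['time'][0])    # matplotlib date number instead of datetime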
#################################### MATH ################################################
@njit
def cart2sphere(x,y,z):
r = np.sqrt(x**2+ y**2 + z**2)
theta = np.arctan2(z,np.sqrt(x**2+ y**2))
phi = np.arctan2(y,x)
return (r, theta, phi)
@njit
def sphere2cart(r,theta,phi):
x = r * np.sin( theta ) * np.cos( phi )
y = r * np.sin( theta ) * np.sin( phi )
z = r * np.cos( theta )
return (x, y,z)
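#Quick round-trip check of the two helpers above (illustrative only). Note the
#convention: cart2sphere returns a latitude-like theta (angle from the x-y plane),
#while sphere2cart expects a colatitude-like theta (angle from +z), which is why
#the calling code passes 90 degrees minus the latitude into sphere2cart.
# r, th, ph = cart2sphere(1.0, 1.0, 1.0)
# x, y, z = sphere2cart(r, np.pi/2 - th, ph)
# print(np.allclose([x, y, z], [1.0, 1.0, 1.0]))   # True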
def convert_GSE_to_HEEQ(sc_in):
'''
for Wind magnetic field components: convert GSE to HEE to HAE to HEEQ
'''
sc=copy.deepcopy(sc_in)
print('conversion GSE to HEEQ start')
jd=np.zeros(len(sc))
mjd=np.zeros(len(sc))
for i in np.arange(0,len(sc)):
jd[i]=parse_time(sc.time[i]).jd
mjd[i]=float(int(jd[i]-2400000.5)) #use modified julian date
#GSE to HEE
#Hapgood 1992 rotation by 180 degrees, or simply change sign in bx by
#rotangle=np.radians(180)
#c, s = np.cos(rotangle), np.sin(rotangle)
#T1 = np.array(((c,s, 0), (-s, c, 0), (0, 0, 1)))
#[bx_hee,by_hee,bz_hee]=T1[sc.bx[i],sc.by[i],sc.bz[i]]
b_hee=[-sc.bx[i],-sc.by[i],sc.bz[i]]
#HEE to HAE
#define T00 and UT
T00=(mjd[i]-51544.5)/36525.0
dobj=sc.time[i]
UT=dobj.hour + dobj.minute / 60. + dobj.second / 3600. #time in UT in hours
#lambda_sun in Hapgood, equation 5, here in rad
M=np.radians(357.528+35999.050*T00+0.04107*UT)
LAMBDA=280.460+36000.772*T00+0.04107*UT
lambda_sun=np.radians( (LAMBDA+(1.915-0.0048*T00)*np.sin(M)+0.020*np.sin(2*M)) )
#S-1 Matrix equation 12 hapgood 1992, change sign in lambda angle for inversion HEE to HAE instead of HAE to HEE
c, s = np.cos(-(lambda_sun+np.radians(180))), np.sin(-(lambda_sun+np.radians(180)))
Sm1 = np.array(((c,s, 0), (-s, c, 0), (0, 0, 1)))
b_hae=np.dot(Sm1,b_hee)
#HAE to HEEQ
iota=np.radians(7.25)
omega=np.radians((73.6667+0.013958*((mjd[i]+3242)/365.25)))
theta=np.arctan(np.cos(iota)*np.tan(lambda_sun-omega))
#quadrant of theta must be opposite lambda_sun minus omega; Hapgood 1992 end of section 5
#get lambda-omega angle in degree mod 360 and theta in degrees
lambda_omega_deg=np.mod(np.degrees(lambda_sun)-np.degrees(omega),360)
theta_node_deg=np.degrees(theta)
"""
Functions for preprocessing products from the MAP Framework Team
"""
import os
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from gisutils.raster import get_values_at_points, write_raster
from mfsetup import load_modelgrid
from mfsetup.discretization import voxels_to_layers, fill_cells_vertically
from mfsetup.testing import point_is_on_nhg
from mfsetup.units import convert_length_units
def get_layer(botm_array, i, j, elev):
"""Return the botm_array for elevations at i, j locations.
Parameters
----------
botm_array : 3D numpy array
layer bottom elevations
i : scaler or sequence
row index (zero-based)
j : scaler or sequence
column index
elev : scaler or sequence
elevation (in same units as model)
Returns
-------
k : np.ndarray (1-D) or scalar
zero-based layer index
"""
def to_array(arg):
if not isinstance(arg, np.ndarray):
return np.array([arg])
else:
return arg
i = to_array(i)
j = to_array(j)
nlay = botm_array.shape[0]
elev = to_array(elev)
botms = botm_array[:, i, j] # .tolist()
# identify layer botms that are above and below the elevations
differences = np.round((botms - elev), 2)
isabove = differences >= 0
# the layer index is the number of layer bottoms that are at or above the elevation
layers = np.sum(isabove, axis=0)
# force elevations below model bottom into bottom layer
layers[layers > nlay - 1] = nlay - 1
layers = np.atleast_1d(np.squeeze(layers))
if len(layers) == 1:
layers = layers[0]
return layers
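#Illustrative example with a tiny synthetic grid (hypothetical values): three
#layer bottoms at 0, -10 and -20; an elevation of -5 falls in layer 1 (zero-based),
#and an elevation below the model bottom is forced into the last layer.
# botm_demo = np.stack([np.full((2, 2), 0.),
#                       np.full((2, 2), -10.),
#                       np.full((2, 2), -20.)])
# print(get_layer(botm_demo, 0, 0, -5.))                   # 1
# print(get_layer(botm_demo, [0, 1], [0, 1], [5., -25.]))  # [0 2]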
def plot_slice(layer_elevations, property_data=None,
row=0, column=slice(None),
voxel_start_layer=0, voxel_zones=None, cmap='copper',
voxel_cmap='viridis', unit_labels=None, add_surfaces=None):
"""Plot a single cross section slice
Parameters
----------
layer_elevations : 3D numpy array
Array of layer elevations, starting with the model top.
(first dimension equal to the number of layers + 1)
property_data : 3D numpy array
Array of zone numbers generated by setup_model_layers.
row : int or slice instance
If a cross section along a row is desired, row should be a integer,
and column should be a slice instance indicating the range of columns to include.
by default, 0.
column : int or slice instance
If a cross section along a column is desired, column should be a integer,
and row should be a slice instance indicating the range of rows to include.
by default, slice(None), which includes all columns.
voxel_start_layer : int, optional
First layer with voxel data, by default 0
voxel_zones : sequence, optional
Zone numbers within property_data that are voxel-based,
by default None
cmap : str, optional
Matplotlib colormap for non-voxel zone numbers, by default 'copper',
to contrast with colormap for voxel-based zone numbers.
voxel_cmap : str, optional
Matplotlib colormap for voxel-based zone numbers, by default 'viridis'.
unit_labels : dict, optional
Dictionary mapping non-voxel zone numbers to hydrogeologic units,
by default None
Returns
-------
ax : matplotlib AxesSubplot instance for figure
"""
# cross section code
nlay, nrow, ncol = layer_elevations.shape
# create meshgrid for rows or columns
# along a row
if isinstance(column, slice):
# x, z = np.meshgrid(range(ncol), np.array(z_edges))
# x = grid.xcellcenters[row, column]
ncells = ncol
title = 'Row {}'.format(row)
xlabel = 'Column in model'
# along a column
else:
# x, z = np.meshgrid(range(nrow), np.array(z_edges))
# x = grid.ycellcenters[row, column]
ncells = nrow
title = 'Column {}'.format(column)
xlabel = 'Row in model'
x = np.arange(ncells)
z = layer_elevations[:, row, column].copy()
# since z is used to define cell edges in the pcolormesh (below)
# z cannot be masked or have nan values
# set missing data values (outside of the model footprint) in z
# to -9999
# pcolormesh will still skip these cells, as they are defined
# as no data by the mask for the property array
z_nodata = -9999
z[np.isnan(z)] = z_nodata
# zero values will result in pcolormesh edges that dip to zero
# on the edge of nodata areas
# fill these with previous value in either direction
# first drop any indices along the edges
for side in -1, 1:
k, j = np.where(z == z_nodata)
interior_zeros = (j > 0) & (j < z.shape[1] - 1)
j = j[interior_zeros]
k = k[interior_zeros]
# then reassign the zero elevations
z[k, j] = z[k, j+side]
#z = np.ma.masked_where(np.isnan(z), z)
thicknesses = np.diff(z, axis=0) * -1
thicknesses[thicknesses <= 0] = 0.
fig, ax = plt.subplots(figsize=(11, 8.5))
# optionally plot a property such as resistivity facies
if property_data is not None:
# drop na values
# (areas with no voxel data at any depth)
#loc = ~np.all(z.mask, axis=0)
data = property_data[:, row, column].copy()
vmin, vmax = property_data.min(), property_data.max()
#x = np.squeeze(x[loc]) # + [x[-1] + 1]
#x = np.ma.masked_array(x, mask=~loc)
zstart = voxel_start_layer
zend = voxel_start_layer + property_data.shape[0] + 1
z = np.squeeze(z[zstart:zend, :])
#if not np.any(z):
# return
#data = np.squeeze(data[:, loc])
#thicknesses = np.squeeze(thicknesses[:, loc])
if np.any(z):
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 10:12:34 2020
@author: Erick
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
import confidence as cf
from scipy.linalg import svd
import matplotlib as mpl
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
from scipy.linalg import norm
import platform
import os
root_folder = r'G:\Shared drives\FenningLab2\Projects\PVRD1\ExpData\DLCP\SiNx\D233-p5'
area_mm = 1.3
area_cm = area_mm*1E-4
area_cm_err = 0.0
color_palette = 'winter'
ndl_type = np.dtype([('eA/C (cm)', 'd'),
('NDL (cm^-3)', 'd'),
('NDL_err', 'd')])
def poly_model(x: [float,np.ndarray], b: np.ndarray):
"""
A polynomial model for the capacitance
Parameters
----------
b: np.ndarray
The coefficients of the polynomial
x: np.ndarray
The x values to evaluate the polynomial
Returns
-------
np.ndarray
The polynomial evaluated at the point x
"""
return b[0] + b[1]*x + b[2]*np.power(x,2) + b[3]*np.power(x,3)
def n_dl(C0: float, C1: float, er: float, area_cm: float):
"""
Estimates the drive level from the fitted values C0 and C1
C = C0 + C1 dV + C2 (dV)^2 + ...
Parameters
----------
C0: float
The fitted C0 in pico Farads (10^-12 C^2/J)
C1: float
The fitted C1 in pico Farads/V (10^-12 C^3/J^2)
er: float
The relative permittivity of the dielectric
area_cm: float
The area of the device in cm^2
Returns
-------
float
NDL
"""
# e0 = 8.854187817620389e-14 C^2 / J / cm
# q = 1.6021766208e-19
qe = er*8.854187817620389*1.6021766208 # x 1E-33
NDL = -1.0E9*np.power(C0,3.0)/(2*qe*area_cm*C1)
return NDL
def xvariation(C0: float, er: float, area_cm: float):
"""
Estimates the quantity
eps*A / C0 = x_e + eps*F_e/rho_e
which corresponds to variations in the depletion width over approximately
the same distance scale.
Parameters
----------
C0: float
The fitted value of C0 in pico Farads (C^2/J)
er: float
The relative permittivity of the dielectric
area_cm: float
The area of the device in cm^2
Returns
-------
float:
eps*A/C0 in cm
"""
# e0 = 8.854187817620389e-14 C^2 / J / cm
# q = 1.6021766208e-19
x = er*8.854187817620389*area_cm/C0/100
return x
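#Illustrative calls with made-up fit parameters (C0 in pF, C1 in pF/V); the
#permittivity er=7.5 is a hypothetical value, area_cm is the device area defined above.
# ndl_demo = n_dl(C0=50.0, C1=-2.0, er=7.5, area_cm=area_cm)   # drive level in cm^-3
# x_demo = xvariation(C0=50.0, er=7.5, area_cm=area_cm)        # eps*A/C0 in cm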
def files_with_extension(path: str,extension: str):
"""
Gives a list of the files in the given directory that have the given extension
Parameters
----------
path: str
The full path to the folder where the files are stored
extension: str
The extension of the files
Returns
-------
List[str]
A list containing the files
"""
from os import listdir
return [f for f in listdir(path) if f.endswith(extension)]
defaultPlotStyle = {'font.size': 14,
'font.family': 'Arial',
'font.weight': 'regular',
'legend.fontsize': 14,
'mathtext.fontset': 'custom',
'mathtext.rm': 'Times New Roman',
'mathtext.it': 'Times New Roman:italic',#'Arial:italic',
'mathtext.cal': 'Times New Roman:italic',#'Arial:italic',
'mathtext.bf': 'Times New Roman:bold',#'Arial:bold',
'xtick.direction' : 'in',
'ytick.direction' : 'in',
'xtick.major.size' : 4.5,
'xtick.major.width' : 1.75,
'ytick.major.size' : 4.5,
'ytick.major.width' : 1.75,
'xtick.minor.size' : 2.75,
'xtick.minor.width' : 1.0,
'ytick.minor.size' : 2.75,
'ytick.minor.width' : 1.0,
'ytick.right' : False,
'lines.linewidth' : 2.5,
'lines.markersize' : 10,
'lines.markeredgewidth' : 0.85,
'axes.labelpad' : 5.0,
'axes.labelsize' : 16,
'axes.labelweight' : 'regular',
'axes.linewidth': 1.25,
'axes.titlesize' : 16,
'axes.titleweight' : 'bold',
'axes.titlepad' : 6,
'figure.titleweight' : 'bold',
'figure.dpi': 100}
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
if __name__ == '__main__':
if platform.system() == 'Windows':
root_folder = u'\\\?\\' + root_folder
mpl.rcParams.update(defaultPlotStyle)
files = files_with_extension(path=root_folder, extension='csv')
nfiles = len(files)
sorted_files = []
nominal_biases = np.empty(nfiles)
# Order the file list by nominal bias
for i,fn in enumerate(files):
csv_file = os.path.join(root_folder,fn)
print('Reading file: \'{0}\''.format(fn))
df = pd.read_csv(filepath_or_buffer=csv_file, delimiter=',',
index_col=0)
try:
nominal_biases[i] = df['nominal_bias'][0]
sorted_files.append(dict(nominal_bias = df['nominal_bias'][0],
filename = fn))
except:
print('nominal bias not found in \'{0}\'.'.format(fn))
nominal_biases = np.delete(nominal_biases, i)  # np.delete returns a copy, so reassign
sorted_files = sorted(sorted_files, key = lambda i: i['nominal_bias'])
nominal_biases = np.sort(nominal_biases, axis=0)
normalize = mpl.colors.Normalize(vmin=np.amin(nominal_biases),
vmax=np.amax(nominal_biases))
cm = mpl.cm.get_cmap(color_palette)
nb_colors = [cm(normalize(bb)) for bb in nominal_biases]
scalar_maps = mpl.cm.ScalarMappable(cmap=cm, norm=normalize)
fig = plt.figure()
fig.set_size_inches(6.5,3.0,forward=True)
fig.subplots_adjust(hspace=0.15, wspace=0.5)
gs0 = gridspec.GridSpec(ncols=1, nrows=1, figure=fig, width_ratios=[1])
gs00 = gridspec.GridSpecFromSubplotSpec(nrows=1, ncols=2,
subplot_spec = gs0[0])
ax1 = fig.add_subplot(gs00[0,0])
ax2 = fig.add_subplot(gs00[0,1])
ndl = np.empty(nfiles)
ndl_err = np.empty(nfiles)
xvar = np.empty(nfiles)
import tensorflow as tf
from tensorflow.keras.losses import (
sparse_categorical_crossentropy,
binary_crossentropy,
)
import logging
from logging import handlers
from time import perf_counter
import os
import numpy as np
import pandas as pd
from xml.etree.ElementTree import SubElement
from xml.etree import ElementTree
from lxml import etree
def get_logger():
"""
Initialize logger configuration.
Returns:
logger.
"""
formatter = logging.Formatter(
'%(asctime)s %(name)s.%(funcName)s +%(lineno)s: '
'%(levelname)-8s [%(process)d] %(message)s'
)
logger = logging.getLogger('session_log')
logger.setLevel(logging.DEBUG)
file_title = os.path.join('Logs', 'session.log')
if 'Logs' not in os.listdir():
file_title = os.path.join('..', file_title)
file_handler = handlers.RotatingFileHandler(file_title, backupCount=10)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger
default_logger = get_logger()
def timer(logger):
"""
Timer wrapper.
logger: logging.RootLogger object
Returns:
timed
"""
def timed(func):
def wrapper(*args, **kwargs):
start_time = perf_counter()
result = func(*args, **kwargs)
total_time = perf_counter() - start_time
if logger is not None:
logger.info(
f'{func.__name__} execution time: ' f'{total_time} seconds'
)
if result is not None:
return result
return wrapper
return timed
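#Hedged usage sketch of the decorator factory above; `dummy_job` is a hypothetical
#function used only to show where the timing log ends up.
# @timer(default_logger)
# def dummy_job():
#     return sum(range(1000))
# dummy_job()   # logs "dummy_job execution time: ... seconds"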
def ratios_to_coordinates(bx, by, bw, bh, width, height):
"""
Convert relative coordinates to actual coordinates.
Args:
bx: Relative center x coordinate.
by: Relative center y coordinate.
bw: Relative box width.
bh: Relative box height.
width: Image batch width.
height: Image batch height.
Return:
x1: x coordinate.
y1: y coordinate.
x2: x1 + Bounding box width.
y2: y1 + Bounding box height.
"""
w, h = bw * width, bh * height
x, y = bx * width + (w / 2), by * height + (h / 2)
return x, y, x + w, y + h
def transform_images(x_train, size):
"""
Resize image tensor.
Args:
x_train: Image tensor.
size: new (width, height)
"""
x_train = tf.image.resize(x_train, (size, size))
return x_train / 255
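#Minimal illustrative call (synthetic tensor, hypothetical target size):
# dummy_batch = tf.zeros((1, 640, 480, 3))
# resized = transform_images(dummy_batch, 416)   # shape (1, 416, 416, 3), values in [0, 1]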
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
n = tf.shape(y_true)[0]
y_true_out = tf.zeros(
(n, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6)
)
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(n):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(
anchor_idxs, tf.cast(y_true[i][j][5], tf.int32)
)
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_xy = tf.cast(box_xy // (1 / grid_size), tf.int32)
indexes = indexes.write(
idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]]
)
updates = updates.write(
idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]]
)
idx += 1
return tf.tensor_scatter_nd_update(
y_true_out, indexes.stack(), updates.stack()
)
def transform_targets(y_train, anchors, anchor_masks, size):
y_outs = []
grid_size = size // 32
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(
tf.expand_dims(box_wh, -2), (1, 1, tf.shape(anchors)[0], 1)
)
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * tf.minimum(
box_wh[..., 1], anchors[..., 1]
)
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_outs.append(
transform_targets_for_output(y_train, grid_size, anchor_idxs)
)
grid_size *= 2
return tuple(y_outs)
def broadcast_iou(box_1, box_2):
box_1 = tf.expand_dims(box_1, -2)
box_2 = tf.expand_dims(box_2, 0)
new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
box_1 = tf.broadcast_to(box_1, new_shape)
box_2 = tf.broadcast_to(box_2, new_shape)
int_w = tf.maximum(
tf.minimum(box_1[..., 2], box_2[..., 2])
- tf.maximum(box_1[..., 0], box_2[..., 0]),
0,
)
int_h = tf.maximum(
tf.minimum(box_1[..., 3], box_2[..., 3])
- tf.maximum(box_1[..., 1], box_2[..., 1]),
0,
)
int_area = int_w * int_h
box_1_area = (box_1[..., 2] - box_1[..., 0]) * (
box_1[..., 3] - box_1[..., 1]
)
box_2_area = (box_2[..., 2] - box_2[..., 0]) * (
box_2[..., 3] - box_2[..., 1]
)
return int_area / (box_1_area + box_2_area - int_area)
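#Small sanity check for broadcast_iou (illustrative only): identical unit boxes
#give IoU 1, disjoint boxes give IoU 0.
# b1 = tf.constant([[0., 0., 1., 1.]])
# b2 = tf.constant([[0., 0., 1., 1.], [2., 2., 3., 3.]])
# print(broadcast_iou(b1, b2))   # approximately [[1., 0.]]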
def get_boxes(pred, anchors, classes):
grid_size = tf.shape(pred)[1]
box_xy, box_wh, object_probability, class_probabilities = tf.split(
pred, (2, 2, 1, classes), axis=-1
)
box_xy = tf.sigmoid(box_xy)
object_probability = tf.sigmoid(object_probability)
class_probabilities = tf.sigmoid(class_probabilities)
pred_box = tf.concat((box_xy, box_wh), axis=-1)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(
grid_size, tf.float32
)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, object_probability, class_probabilities, pred_box
def calculate_loss(anchors, classes=80, ignore_thresh=0.5):
def yolo_loss(y_true, y_pred):
pred_box, pred_obj, pred_class, pred_xywh = get_boxes(
y_pred, anchors, classes
)
pred_xy = pred_xywh[..., 0:2]
pred_wh = pred_xywh[..., 2:4]
true_box, true_obj, true_class_idx = tf.split(
y_true, (4, 1, 1), axis=-1
)
true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
true_wh = true_box[..., 2:4] - true_box[..., 0:2]
box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
grid_size = tf.shape(y_true)[1]
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(
grid, tf.float32
)
true_wh = tf.math.log(true_wh / anchors)
true_wh = tf.where(
tf.math.is_inf(true_wh), tf.zeros_like(true_wh), true_wh
)
obj_mask = tf.squeeze(true_obj, -1)
best_iou = tf.map_fn(
lambda x: tf.reduce_max(
broadcast_iou(
x[0], tf.boolean_mask(x[1], tf.cast(x[2], tf.bool))
),
axis=-1,
),
(pred_box, true_box, obj_mask),
tf.float32,
)
ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
xy_loss = (
obj_mask
* box_loss_scale
* tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
)
wh_loss = (
obj_mask
* box_loss_scale
* tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
)
obj_loss = binary_crossentropy(true_obj, pred_obj)
obj_loss = (
obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss
)
class_loss = obj_mask * sparse_categorical_crossentropy(
true_class_idx, pred_class
)
xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
return xy_loss + wh_loss + obj_loss + class_loss
return yolo_loss
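#Hedged usage sketch in the usual YOLOv3 style: one loss per output scale.
#`yolo_anchors`, `yolo_anchor_masks` and `model` are hypothetical names standing
#in for the anchor definitions and the Keras model that accompany this module.
# loss = [calculate_loss(yolo_anchors[mask], classes=80)
#         for mask in yolo_anchor_masks]
# model.compile(optimizer='adam', loss=loss)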
def add_xml_path(xml_file, path):
"""
Add a path element to the xml file and save.
Args:
xml_file: .xml file path.
path: str, path to add.
Returns:
None
"""
tree = ElementTree.parse(xml_file)
top = tree.getroot()
folder_tag = tree.find('folder')
folder_tag.text = path
file_name_tag = tree.find('filename')
path_tag = SubElement(top, 'path')
path_tag.text = os.path.join(folder_tag.text, file_name_tag.text)
rough_string = ElementTree.tostring(top, 'utf8')
root = etree.fromstring(rough_string)
pretty = etree.tostring(root, pretty_print=True, encoding='utf-8').replace(
' '.encode(), '\t'.encode()
)
os.remove(xml_file)
with open(xml_file, 'wb') as output:
output.write(pretty)
def get_detection_data(image, image_name, outputs, class_names):
"""
Organize predictions of a single image into a pandas DataFrame.
Args:
image: Image as a numpy array.
image_name: str, name to write in the image column.
outputs: Outputs from inference_model.predict()
class_names: A list of object class names.
Returns:
data: pandas DataFrame with the detections.
"""
nums = outputs[-1]
boxes, scores, classes = 3 * [None]
if isinstance(outputs[0], np.ndarray):
boxes, scores, classes = [
item[0][: int(nums)] for item in outputs[:-1]
]
if not isinstance(outputs[0], np.ndarray):
boxes, scores, classes = [
item[0][: int(nums)].numpy() for item in outputs[:-1]
]
w, h = np.flip(image.shape[0:2])
import unittest
import numpy as np
import scipy.stats
import PySeismoSoil.helper_generic as hlp
import PySeismoSoil.helper_site_response as sr
import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Helper_Site_Response(unittest.TestCase):
def test_num_int(self):
accel, _ = hlp.read_two_column_stuff(_join(f_dir, 'two_column_data_example.txt'))
v, u = sr.num_int(accel)
v_bench = np.array(
[[0.1000, 0.1000],
[0.2000, 0.3000],
[0.3000, 0.6000],
[0.4000, 1.0000],
[0.5000, 1.5000],
[0.6000, 1.7000],
[0.7000, 2.0000],
[0.8000, 2.4000],
[0.9000, 2.9000],
[1.0000, 3.5000],
[1.1000, 3.8000],
[1.2000, 4.2000],
[1.3000, 4.7000],
[1.4000, 5.3000],
[1.5000, 6.0000]]
)
u_bench = np.array(
[[0.1000, 0.0100],
[0.2000, 0.0400],
[0.3000, 0.1000],
[0.4000, 0.2000],
[0.5000, 0.3500],
[0.6000, 0.5200],
[0.7000, 0.7200],
[0.8000, 0.9600],
[0.9000, 1.2500],
[1.0000, 1.6000],
[1.1000, 1.9800],
[1.2000, 2.4000],
[1.3000, 2.8700],
[1.4000, 3.4000],
[1.5000, 4.0000]]
)
self.assertTrue(np.allclose(v, v_bench))
self.assertTrue(np.allclose(u, u_bench))
def test_num_diff(self):
v_bench = np.array(
[[0.1000, 0.1000],
[0.2000, 0.3000],
[0.3000, 0.6000],
[0.4000, 1.0000],
[0.5000, 1.5000],
[0.6000, 1.7000],
[0.7000, 2.0000],
[0.8000, 2.4000],
[0.9000, 2.9000],
[1.0000, 3.5000]]
)
v_bench[0, 1] = 0 # because the "initial offset" info is lost in num_diff
displac = np.array(
[[0.1000, 0.0100],
[0.2000, 0.0400],
[0.3000, 0.1000],
[0.4000, 0.2000],
[0.5000, 0.3500],
[0.6000, 0.5200],
[0.7000, 0.7200],
[0.8000, 0.9600],
[0.9000, 1.2500],
[1.0000, 1.6000]]
)
veloc = sr.num_diff(displac)
self.assertTrue(np.allclose(veloc, v_bench))
def test_stratify(self):
prof1 = np.array([[3, 4, 5, 6, 0], [225, 225*2, 225*3, 225*2.4, 225*5]]).T
prof1_ = sr.stratify(prof1)
prof1_benchmark = np.array(
[[1, 1, 1, 2, 2, 2.5, 2.5, 2, 2, 2, 0],
[225, 225, 225, 450, 450, 675, 675, 540, 540, 540, 1125]]
).T
self.assertTrue(np.allclose(prof1_, prof1_benchmark))
def test_response_spectra(self):
accel, _ = hlp.read_two_column_stuff(_join(f_dir, 'two_column_data_example.txt'))
T_min = 0.01
T_max = 10
n_pts = 50
Tn, SA = sr.response_spectra(
accel, T_min=T_min, T_max=T_max, n_pts=n_pts, parallel=False,
)[:2]
Tn_bench = np.logspace(np.log10(T_min), np.log10(T_max), n_pts)
SA_bench = np.array(
[7.0000, 7.0000, 7.0000, 7.0000, 7.0001, 7.0002,
6.9995, 7.0007, 7.0024, 6.9941, 7.0176, 6.9908,
6.9930, 6.9615, 7.0031, 7.1326, 6.9622, 7.0992,
6.5499, 7.3710, 7.3458, 6.8662, 8.3708, 8.5229,
7.9719, 7.5457, 8.9573, 10.6608, 10.5915, 9.4506,
8.1594, 6.9023, 7.1242, 6.5462, 6.3940, 6.3472,
6.7302, 7.0554, 7.2901, 7.6946, 7.6408, 7.1073,
6.3034, 5.3997, 4.5102, 3.6991, 2.9946, 2.4023,
1.9156, 1.5218]
)
self.assertTrue(np.allclose(Tn, Tn_bench))
self.assertTrue(np.allclose(SA, SA_bench, rtol=0.0001, atol=0.0))
def test_find_f0(self):
data, _ = hlp.read_two_column_stuff(_join(f_dir, 'two_column_data_example.txt'))
f0 = sr.find_f0(data)
f0_benchmark = 0.5
self.assertAlmostEqual(f0, f0_benchmark)
f0_incr = sr.find_f0(np.array([[0.1, 0.2, 0.3], [1, 2, 3]]).T)
f0_decr = sr.find_f0(np.array([[0.1, 0.2, 0.3], [3, 2, 1]]).T)
self.assertAlmostEqual(f0_incr, 0.3) # monotonically increasing
self.assertAlmostEqual(f0_decr, 0.1) # monotonically decreasing
def test_get_xi_rho(self):
vs = np.array([100, 300, 500, 700, 900])
xi, rho = sr.get_xi_rho(vs, formula_type=1)
self.assertTrue(np.allclose(xi, [.05, .02, .02, .02, .01]))
self.assertTrue(np.allclose(rho, [1600, 1800, 1800, 1800, 2000]))
self.assertTrue(np.allclose(
sr.get_xi_rho(vs, formula_type=2)[0],
[0.0484, 0.0295, 0.0167, 0.0108, 0.0077],
atol=0.01,
rtol=0.0,
))
self.assertTrue(np.allclose(
sr.get_xi_rho(vs, formula_type=3)[0],
[0.0833, 0.0278, 0.0167, 0.0119, 0.0093],
atol=0.01,
rtol=0.0,
))
def test_calc_Vs30_and_VsZ(self):
vs_profile = np.array([[10, 10, 10, 10], [200, 300, 400, 500]]).T
vs30 = sr.calc_Vs30(vs_profile)
vs40 = sr.calc_VsZ(vs_profile, 40)
vs30_benchmark = scipy.stats.hmean(vs_profile[:, 1][:3])
vs40_benchmark = scipy.stats.hmean(vs_profile[:, 1])
self.assertAlmostEqual(vs30, vs30_benchmark)
self.assertAlmostEqual(vs40, vs40_benchmark)
vs_profile = np.array([[10, 10], [200, 300]]).T
vs30 = sr.calc_Vs30(vs_profile, option_for_profile_shallower_than_30m=1)
vs30_benchmark = scipy.stats.hmean([200, 300, 300])
self.assertAlmostEqual(vs30, vs30_benchmark)
vs_profile = np.array([[10, 10], [200, 300]]).T
vs30 = sr.calc_Vs30(vs_profile, option_for_profile_shallower_than_30m=2)
vs30_benchmark = scipy.stats.hmean(vs_profile[:, 1])
self.assertAlmostEqual(vs30, vs30_benchmark)
def test_calc_z1__normal_case__Vs_reaches_1000_meters_per_sec(self):
vs_prof_1 = np.array([[5, 4, 3, 2, 1], [200, 500, 700, 1000, 1200]]).T
self.assertAlmostEqual(sr.calc_z1(vs_prof_1), 12)
def test_calc_z1__abnormal_case__Vs_doesnt_reaches_1000_meters_per_sec(self):
# Abnormal case: Vs does not reach 1000 m/s ---> use total depth
vs_prof_2 = np.array([[5, 4, 3, 2, 1], [200, 500, 700, 800, 900]]).T
self.assertAlmostEqual(sr.calc_z1(vs_prof_2), 15)
def test_thk2dep_and_dep2thk(self):
thk = np.array([6, 5, 4, 3, 2, 0])
dep_mid = np.array([3, 8.5, 13, 16.5, 19])
dep_top = np.array([0, 6, 11, 15, 18, 20])
self.assertTrue(np.allclose(sr.dep2thk(dep_top), thk))
self.assertTrue(np.allclose(sr.dep2thk(dep_top, include_halfspace=False),
thk[:-1]))
self.assertTrue(np.allclose(sr.thk2dep(thk, midpoint=True), dep_mid))
self.assertTrue(np.allclose(sr.thk2dep(thk), dep_top))
def test_amplify_motion(self):
time = np.linspace(0, np.pi * 4, num=1000)
accel = np.sin(time)
import multiprocessing as mp
import numpy as np
import galsim
def _vk_seeing(r0_500, wavelength, L0):
# von Karman profile FWHM from Tokovinin fitting formula
kolm_seeing = galsim.Kolmogorov(r0_500=r0_500, lam=wavelength).fwhm
r0 = r0_500 * (wavelength/500)**1.2
arg = 1. - 2.183*(r0/L0)**0.356
factor = np.sqrt(arg)
''' CONFIDENTIAL
Copyright (c) 2021 <NAME>,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
try:
import pcl
from pyquaternion import Quaternion
except:
print('cannot import pcl -> change python version')
import matplotlib.cm as cmx
from scipy.spatial import distance_matrix
from scipy.optimize import leastsq
import matplotlib
import matplotlib.animation as animation
import open3d as o3d
import glob
import cv2
import cv2.aruco as aruco
import os
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
import pickle
from matplotlib.lines import Line2D
import pandas as pd
import random
from scipy.spatial import ConvexHull
from math import sqrt
from math import atan2, cos, sin, pi
from collections import namedtuple
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pyquaternion import Quaternion
np.set_printoptions(suppress=True)
def eulerAnglesToRotationMatrix2(theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
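#Quick check of the R = Rz*Ry*Rx composition above (illustrative only): a 90 degree
#rotation about z maps the x axis onto the y axis.
# Rz90 = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(90)])
# print(np.allclose(Rz90.dot([1., 0., 0.]), [0., 1., 0.]))   # True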
Rot_matrix = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(-90)])
InitLidar = True
InitLidar = False
global globalTrigger
globalTrigger = True
stereoRectify = False# True
#stereoRectify = True
class Annotation3D(Annotation):
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy = (xs, ys)
Annotation.draw(self, renderer)
def save_obj(obj, name):
with open('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
print('{}.pkl Object saved'.format(name))
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def showErros(_3DErros, IMageNames):
print('shape(_3DErros)->{}'.format(np.shape(_3DErros)))
if len(_3DErros)>1:
_3DErros = np.array(_3DErros).squeeze()
# norm_total = np.array(_3DErros[:,0]).squeeze()
norm_axis = np.array(_3DErros).squeeze() * 1000
index, bar_width = np.arange(len(IMageNames)), 0.24
fig, ax = plt.subplots()
X = ax.bar(index, norm_axis[:, 0], bar_width, label="X")
Y = ax.bar(index + bar_width, norm_axis[:, 1], bar_width, label="Y")
Z = ax.bar(index + bar_width + bar_width, norm_axis[:, 2], bar_width, label="Z")
ax.set_xlabel('images')
ax.set_ylabel('errors in mm')
ax.set_title('3D error')
ax.set_xticks(index + bar_width / 3)
ax.set_xticklabels(IMageNames)
ax.legend()
plt.show()
def triangulation(kp1, kp2, T_1w, T_2w):
"""Triangulation to get 3D points
Args:
kp1 (Nx2): keypoint in view 1 (normalized)
kp2 (Nx2): keypoints in view 2 (normalized)
T_1w (4x4): pose of view 1 w.r.t. world, i.e. T_1w (from w to 1)
T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
Returns:
X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
"""
kp1_3D = np.ones((3, kp1.shape[0]))
kp2_3D = np.ones((3, kp2.shape[0]))
kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
X /= X[3]
X1 = T_1w[:3].dot(X)
X2 = T_2w[:3].dot(X)
return X[:3].T, X1.T, X2.T
def triangulate(R1,R2,t1,t2,K1,K2,D1,D2, pts1, pts2):
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
_3d_points = []
for i,point in enumerate(pts1):
point3D = cv2.triangulatePoints(P1, P2, pts1[i], pts2[i]).T
point3D = point3D[:, :3] / point3D[:, 3:4]
_3d_points.append(point3D)
print('Triangulate _3d_points -> {}'.format(np.shape(_3d_points)))
return np.array(_3d_points).squeeze()
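#Hedged usage sketch (hypothetical calibration values): with stereo poses R1, t1,
#R2, t2, intrinsics K1, K2 (D1, D2 are accepted but unused here) and matched pixel
#arrays pts1, pts2 of shape (N, 2), triangulate() returns an (N, 3) array of 3D points.
# pts3d = triangulate(R1, R2, t1, t2, K1, K2, D1, D2, pts1, pts2)
# print(np.shape(pts3d))   # (N, 3)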
def mai(R1,R2,t1,t2,imagePoint1,imagePoint2, K2=None,K1=None, D2=None,D1=None):
# Set up two cameras near each other
if K1 is None:
K1 = K2 = np.array([
[718.856, 0., 607.1928],
[0., 718.856, 185.2157],
[0., 0., 1.],
])
R1 = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
R2 = np.array([
[0.99999183, -0.00280829, -0.00290702],
[0.0028008, 0.99999276, -0.00257697],
[0.00291424, 0.00256881, 0.99999245]
])
t1 = np.array([[0.], [0.], [0.]])
t2 = np.array([[-0.02182627], [0.00733316], [0.99973488]])
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
point3D = cv2.triangulatePoints(P1, P2, imagePoint1, imagePoint2).T
point3D = point3D[:, :3] / point3D[:, 3:4]
print('Triangulate point3D -> {}'.format(point3D))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(R1.T) # Change
rvec2, _ = cv2.Rodrigues(R2.T) # Change
p1, _ = cv2.projectPoints(point3D, rvec1, -t1, K1, distCoeffs=D1) # Change
p2, _ = cv2.projectPoints(point3D, rvec2, -t2, K2, distCoeffs=D2) # Change
# measure difference between original image point and reprojected image point
reprojection_error1 = np.linalg.norm(imagePoint1 - p1[0, :])
reprojection_error2 = np.linalg.norm(imagePoint2 - p2[0, :])
print('difference between original image point and reprojected image point')
print(reprojection_error1, reprojection_error2)
return p1,p2
class PointCloud_filter(object):
def __init__(self, file, img_file=None, img_file2=None, debug=True):
self.debug = debug
self.img_file = img_file
self.img_file2 = img_file2
self.name = os.path.basename(file).split('.')[0]
self.file = file
self.useVoxel, self.voxel_size = False, 0.15
self.lowerTemplate, self.showImage = False, True
self.showError = False
self.points_correspondences = None
self.OK = False
self.useInitialPointCloud = False  # use all points for the fit, or only the board margins
self.chessBoard = False
self.applyICP_directly = False
self.s = .1 # scale
self.plotInit, self.axis_on, self.colour, self.Annotate = False, True, False, False
self.chess, self.corn, self.p1, self.p2, self.p3, self.ICP_finetune_plot = None, None, None, None, None, None
if self.showImage:
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
self.ImageNames = []
self._3DErros = []
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.s
self.fig = plt.figure(figsize=plt.figaspect(0.5))
self.fig.suptitle('Data collection', fontsize=16)
self.ax = self.fig.add_subplot(1, 2, 1, projection='3d')
#self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
self.readCameraIntrin()
self.QueryImg = cv2.imread(img_file)
self.ImageNames.append(os.path.basename(img_file))
if self.img_file2: # use stereo case
self.QueryImg2 = cv2.imread(img_file2)
if stereoRectify:
self.QueryImg = cv2.remap(src=self.QueryImg, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
self.QueryImg2 = cv2.remap(src=self.QueryImg2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(self.QueryImg2, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_right and ret_left:
print('Found chessboard in both images')
self.chessBoard = True
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
self.corners2 = corners2_left
cv2.drawChessboardCorners(self.QueryImg, (10, 7), self.corners2, ret_left)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K_left, self.D_left)
imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.QueryImg = self.draw(self.QueryImg, corners=corners2_left, imgpts=imgpts)
self.pixelsPoints = np.asarray(corners2_left).squeeze()
self.pixels_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg2, (10, 7), corners2_right, ret_right)
self.pixels_right = np.asarray(corners2_right).squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
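# Illustrative check of the formula above (numbers made up, not taken from this rig):
# with baseline = 1.07 m, focal length = 1400 px and disparity = 50 px,
# depth = 1.07 * 1400 / 50 ≈ 29.96 m.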
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
'''print('TRIANGULATE HERE==========================================')
P_1 = np.vstack((np.hstack((np.eye(3), np.zeros(3)[:, np.newaxis])), [0, 0, 0, 1])) # left camera
P_2 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # right camera
print('P1_{}, P_2{}, x_left:{}, x_right:{}'.format(np.shape(P_1), np.shape(P_2),
np.shape(self.x_left), np.shape(self.x_right)))
X_w, X1, X2 = triangulation(self.x_left,self.x_right,P_1,P_2)
print('X_w:{}, X1:{}, X2:{}, '.format(np.shape(X_w), np.shape(X1), np.shape(X2)))
print(X_w[0])
print(X1[0])
print(X2[0])'''
'''R1 = np.eye(3)
R2 = self.R
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
imagePoint1 = self.x_left[0]
imagePoint2 = self.x_right[0]
print('imagePoint1:{}, imagePoint2:{}'.format(np.shape(imagePoint1), np.shape(imagePoint2)))
print('self.K_left ')
print(self.K_left)
print('self.K_right ')
print(self.K_right)
p1,p2 = test(R1,R2,t1,t2,imagePoint1,imagePoint2,K1=self.K_left,K2=self.K_right, D1=self.D_left,D2=self.D_right)
p1 = np.array(p1).squeeze().astype(int)
p2 = np.array(p2).squeeze().astype(int)
print('p1:{}, p2:{}'.format(np.shape(p1), np.shape(p2)))
#d2 = distance_matrix(X_w, X_w)
#print('d2:{}'.format(d2))
cv2.circle(self.QueryImg, (p1[0],p1[1]), 7, (255, 0, 0), 7)
cv2.circle(self.QueryImg2, (p2[0], p2[1]), 7, (255, 0, 0), 7)
cv2.imshow('QueryImg', cv2.resize(self.QueryImg,None,fx=.5,fy=.5))
cv2.imshow('QueryImg2', cv2.resize(self.QueryImg2, None, fx=.5, fy=.5))
cv2.waitKey(0)
cv2.destroyAllWindows()'''
else:
self.chessBoard = False
self.useVoxel = False
print('No chessboard found; falling back to charuco detection')
corners2_left, ids_left, rejectedImgPoints = aruco.detectMarkers(gray_left, self.ARUCO_DICT)
corners2_left, ids_left, _, _ = aruco.refineDetectedMarkers(image=gray_left,
board=self.calibation_board,
detectedCorners=corners2_left,
detectedIds=ids_left,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_left,
distCoeffs=self.D_left)
corners2_right, ids_right, rejectedImgPoints = aruco.detectMarkers(gray_right, self.ARUCO_DICT)
corners2_right, ids_right, _, _ = aruco.refineDetectedMarkers(image=gray_right,
board=self.calibation_board,
detectedCorners=corners2_right,
detectedIds=ids_right,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_right,
distCoeffs=self.D_right)
if np.all(ids_left != None) and np.all(ids_right != None):
print('Found charuco board in both images')
retval_left, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners2_left, ids_left,
self.calibation_board,
self.K_left, self.D_left, None,
None)
retval_right, self.rvecs_right, self.tvecs_right = aruco.estimatePoseBoard(corners2_right,
ids_right,
self.calibation_board,
self.K_right,
self.D_right, None,
None)
if retval_left and retval_right:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
self.tvecs, 0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners2_left, ids_left,
borderColor=(0, 0, 255))
b = 1
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs_right, self.tvecs_right, self.K_right,
self.D_right)
self.corners2_right = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
circle_tvec, 0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('Cannot estimate the charuco board pose in both images')
self.pixelsPoints = self.corners2.squeeze()
self.pixels_left = self.pixelsPoints
self.pixels_right = self.corners2_right.squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
# self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
print('disparity:{}'.format(np.shape(disparity)))
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
else:
print('No board found in either image!')
else:
# Undistortion
h, w = self.QueryImg.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (w, h), 1, (w, h))
dst = cv2.undistort(self.QueryImg, self.K, self.D, None, newcameramtx)
x, y, w, h = roi
self.QueryImg = dst[y:y + h, x:x + w]
gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
if ret: # found chessboard
print('Found chessboard')
self.chessBoard = True
self.corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg, (10, 7), corners, ret)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K, self.D)
# ret, self.rvecs, self.tvecs, inliers = cv2.solvePnPRansac(self.objp, self.corners2, self.K, self.D)
self.imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K, self.D)
self.QueryImg = self.draw(self.QueryImg, self.corners2, self.imgpts)
self.pixelsPoints = np.asarray(self.corners2).squeeze()
else: # check for charuco
self.chessBoard = False
self.useVoxel = False
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
self.chessBoard = False
if len(ids) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
if retval:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, self.tvecs,
0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, ids,
borderColor=(0, 0, 255))
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, circle_tvec,
0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('No board found')
self.image_ax = self.fig.add_subplot(1, 2, 2)
#self.image_ax = self.fig.add_subplot(1, 2, 1)
self.image_ax.imshow(self.QueryImg)
self.image_ax.set_axis_off()
self.image_ax.set_xlabel('Y')
self.image_ax.set_ylabel('Z')
else:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlabel('X', fontsize=10)
self.ax.set_ylabel('Y', fontsize=10)
self.ax.set_zlabel('Z', fontsize=10)
self.fig.tight_layout()
plt.subplots_adjust(left=.15, bottom=0.2)
#plt.subplots_adjust( bottom=0.2)
self.Rx, self.Ry, self.Rz = [np.deg2rad(-90), 0, np.deg2rad(-40)] if self.chessBoard else [0, 0, 0]
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.savePoints = Button(plt.axes([0.03, 0.45, 0.15, 0.04], ), 'filter points', color='white')
self.savePoints.on_clicked(self.getClosestPoints)
self.resetBtn = Button(plt.axes([0.03, 0.25, 0.15, 0.04], ), 'reset', color='white')
self.resetBtn.on_clicked(self.reset)
self.X_btn = Button(plt.axes([0.03, 0.9, 0.024, 0.04], ), 'X', color='red')
self.X_btn.on_clicked(self.Close)
self.OK_btn = Button(plt.axes([0.03, 0.83, 0.074, 0.04], ), 'OK', color='green')
self.OK_btn.on_clicked(self.OK_btnClick)
self.not_OK_btn = Button(plt.axes([0.105, 0.83, 0.074, 0.04], ), 'not OK', color='red')
self.not_OK_btn.on_clicked(self.not_OK_btnClick)
self.saveCorrespondences = Button(plt.axes([0.03, 0.76, 0.15, 0.04], ), 'Save points', color='white')
self.saveCorrespondences.on_clicked(self.savePointsCorrespondences)
self.fitChessboard = Button(plt.axes([0.03, 0.66, 0.15, 0.04], ), 'auto fit', color='white')
self.fitChessboard.on_clicked(self.auto_fitBoard)
# set up sliders
self.Rx_Slider = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Rx', -180, 180.0, valinit=np.degrees(self.Rx))
self.Ry_Slider = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'Ry', -180, 180.0, valinit=np.degrees(self.Ry))
self.Rz_Slider = Slider(plt.axes([0.25, 0.05, 0.65, 0.03]), 'Rz', -180, 180.0, valinit=np.degrees(self.Rz))
self.Rx_Slider.on_changed(self.update_R)
self.Ry_Slider.on_changed(self.update_R)
self.Rz_Slider.on_changed(self.update_R)
self.check = CheckButtons(plt.axes([0.03, 0.3, 0.15, 0.12]), ('Axes', 'Black', 'Annotate'),
(self.axis_on, self.colour, self.Annotate))
self.check.on_clicked(self.func_CheckButtons)
# set up translation buttons
self.step = .1 # m
self.trigger = True
self.Tx_btn_plus = Button(plt.axes([0.05, 0.15, 0.04, 0.045]), '+Tx', color='white')
self.Tx_btn_plus.on_clicked(self.Tx_plus)
self.Tx_btn_minus = Button(plt.axes([0.12, 0.15, 0.04, 0.045]), '-Tx', color='white')
self.Tx_btn_minus.on_clicked(self.Tx_minus)
self.Ty_btn_plus = Button(plt.axes([0.05, 0.1, 0.04, 0.045]), '+Ty', color='white')
self.Ty_btn_plus.on_clicked(self.Ty_plus)
self.Ty_btn_minus = Button(plt.axes([0.12, 0.1, 0.04, 0.045]), '-Ty', color='white')
self.Ty_btn_minus.on_clicked(self.Ty_minus)
self.Tz_btn_plus = Button(plt.axes([0.05, 0.05, 0.04, 0.045]), '+Tz', color='white')
self.Tz_btn_plus.on_clicked(self.Tz_plus)
self.Tz_btn_minus = Button(plt.axes([0.12, 0.05, 0.04, 0.045]), '-Tz', color='white')
self.Tz_btn_minus.on_clicked(self.Tz_minus)
self.Tx_flip = Button(plt.axes([0.17, 0.15, 0.04, 0.045]), 'FlipX', color='white')
self.Tx_flip.on_clicked(self.flipX)
self.Ty_flip = Button(plt.axes([0.17, 0.1, 0.04, 0.045]), 'FlipY', color='white')
self.Ty_flip.on_clicked(self.flipY)
self.Tz_flip = Button(plt.axes([0.17, 0.05, 0.04, 0.045]), 'FlipZ', color='white')
self.Tz_flip.on_clicked(self.flipZ)
self.radio = RadioButtons(plt.axes([0.03, 0.5, 0.15, 0.15], ), ('Final', 'Init'), active=0)
self.radio.on_clicked(self.colorfunc)
self.tag = None
self.circle_center = None
self.errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d.",
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible.",
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
self.legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Original pointcloud', markerfacecolor='g', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Corners', markerfacecolor='k', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Margins', markerfacecolor='r', markersize=4),
]
def setUp(self):
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.board()
self.ax.legend(handles=self.legend_elements, loc='best')
if self.showImage:
self.getDepth_Inside_Outside()
self.fitNewPlan()
def auto_fitBoard(self, args):
# estimate 3D-R and 3D-t between chess and PointCloud
# Initial guess of the transformation
x0 = np.array([np.degrees(self.Rx), np.degrees(self.Ry), np.degrees(self.Rz), self.Tx, self.Ty, self.Tz])
report = {"error": [], "template": []}
def f_min(x):
self.Rx, self.Ry, self.Rz = np.deg2rad(x[0]), np.deg2rad(x[1]), np.deg2rad(x[2])
self.Tx, self.Ty, self.Tz = x[3], x[4], x[5]
template = self.board(plot=False)
if self.useInitialPointCloud:
dist_mat = distance_matrix(template, self.point_cloud)
else:
dist_mat = distance_matrix(template, self.corners_)
err_func = dist_mat.sum(axis=1) # N x 1
# err_func = dist_mat.sum(axis=0) # N x 1
if self.debug:
print('errors = {}, dist_mat:{}, err_func:{}'.format(round(np.sum(err_func), 2), np.shape(dist_mat),
np.shape(err_func)))
report["error"].append(np.sum(err_func))
report["template"].append(template)
return err_func
maxIters = 700
sol, status = leastsq(f_min, x0, ftol=1.49012e-07, xtol=1.49012e-07, maxfev=maxIters)
print('sol:{}, status:{}'.format(sol, status))
print(self.errors[status])
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.ICP_finetune_plot:
self.ICP_finetune_plot.remove()
self.lowerTemplate = False
self.board()
point_cloud = np.asarray(self.point_cloud, dtype=np.float32)
template = np.asarray(report["template"][0], dtype=np.float32) if self.applyICP_directly else np.asarray(
self.template_cloud, dtype=np.float32)
converged, self.transf, estimate, fitness = self.ICP_finetune(template, point_cloud)
# converged, self.transf, estimate, fitness = self.ICP_finetune(point_cloud,template)
self.estimate = np.array(estimate)
if self.chessBoard:
self.ICP_finetune_plot = self.ax.scatter(self.estimate[:, 0], self.estimate[:, 1], self.estimate[:, 2],
c='k', marker='o', alpha=0.8, s=4)
else:
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = self.estimate[idx, :]
self.ICP_finetune_plot = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2],
c='k', marker='o', alpha=0.8, s=4)
self.trigger = False
# set values of sol to Sliders
self.Rx_Slider.set_val(np.rad2deg(self.Rx))
self.Ry_Slider.set_val(np.rad2deg(self.Ry))
self.Rz_Slider.set_val(np.rad2deg(self.Rz))
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.trigger = True
self.board()
self.AnnotateEdges()
self.fig.canvas.draw_idle()
if self.showError:
print('min error:{} , at index:{}'.format(np.min(report["error"]), np.argmin(report["error"])))
rep = plt.figure(figsize=(15, 8))
plt.xlim(0, len(report["error"]) + 1)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.yticks(color='w')
plt.plot(np.arange(len(report["error"])) + 1, report["error"])
print('Start animation gif')
def update_graph(num):
data = np.asarray(report["template"][num])
graph._offsets3d = (data[:, 0], data[:, 1], data[:, 2])
title.set_text('Iteration {}'.format(num))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
title = ax.set_title('3D Test')
data = report["template"][0]
graph = ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2])
ani = animation.FuncAnimation(fig, update_graph, 101, interval=2, blit=False, repeat=False)
ani.save('myAnimation.gif', writer='imagemagick', fps=30)
print('Animation done')
plt.show()
def flipX(self, event):
self.Rx_Slider.set_val(np.rad2deg(self.Rx + np.pi))
self.update_R(0)
def flipY(self, event):
self.Ry_Slider.set_val(np.rad2deg(self.Ry + np.pi))
self.update_R(0)
def flipZ(self, event):
self.Rz_Slider.set_val(np.rad2deg(self.Rz + np.pi))
self.update_R(0)
def update_R(self, val):
if self.trigger:
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.Rx = np.deg2rad(self.Rx_Slider.val)
self.Ry = np.deg2rad(self.Ry_Slider.val)
self.Rz = np.deg2rad(self.Rz_Slider.val)
self.board()
self.fig.canvas.draw_idle()
def board(self, plot=True, given_origin=None, angle=None):
self.board_origin = [self.Tx, self.Ty, self.Tz] if given_origin is None else given_origin
if self.chessBoard:
self.nCols, self.nRows, org = 7 + 2, 10 + 2, np.asarray(self.board_origin)
#org[0] -= self.nCols / 2
#org[1] -= self.nRows / 2
org[0] -= 4
org[1] -= 6
#org = np.zeros(3)
if self.lowerTemplate:
nrCols, nrRows = 2, 3
else:
nrCols, nrRows = self.nCols, self.nRows
#nrCols, nrRows = self.nCols+1, self.nRows+1 #remove later
print('org:{}, self.nCols - >{}, nrCols:{}'.format(org,self.nCols,nrCols))
X = np.linspace(org[0], org[0] + self.nCols - 1, num=nrCols)
Y = np.linspace(org[1], org[1] + self.nRows - 1, num=nrRows)
print('X:{}'.format(X))
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
colors, colortuple = np.empty(X.shape, dtype=str), ('k', 'w')
for y in range(nrCols):
for x in range(nrRows):
colors[x, y] = colortuple[(x + y) % len(colortuple)]
colors[0, 0] = 'r'
alpha = 0.65
else:
self.nCols, self.nRows, org = 10, 10, np.asarray(self.board_origin)
org[0] -= self.nCols / 2
org[1] -= self.nRows / 2
# nrCols, nrRows = 4, 4
nrCols, nrRows = self.nCols, self.nRows
# nrCols, nrRows = 20, 20
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,
num=nrRows)
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
alpha = 0.25
angles = np.array([self.Rx, self.Ry, self.Rz]) if angle is None else np.array(angle)
Rot_matrix = self.eulerAnglesToRotationMatrix(angles)
X, Y, Z = X * self.s, Y * self.s, Z * self.s
corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0))
init = corners.reshape(-1, 3)
print('corners-----------------------------------------------------')
#print(init)
print('corners -> {}'.format(np.shape(init)))
dist_Lidar = distance_matrix(init, init)
print('dist_Lidar corners---------------------------------------------------------')
print(dist_Lidar[0, :11])
translation = np.mean(init, axis=0) # get the mean point
corners = np.subtract(corners, translation) # substract it from all the other points
X, Y, Z = np.transpose(np.add(np.dot(corners, Rot_matrix), translation), (2, 0, 1))
# corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0)).reshape(-1, 3)
corners = np.transpose(np.array([X, Y, Z]), (2, 1, 0)).reshape(-1, 3)
if plot:
if self.chessBoard:
self.chess = self.ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0.2, cmap='gray', alpha=alpha)
else:
self.chess = self.ax.plot_surface(X, Y, Z, linewidth=0.2, cmap='gray', alpha=alpha)
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = corners[idx, :]
self.corn = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2], c='tab:blue',
marker='o', s=5)
self.template_cloud = corners
return np.array(corners)
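# Hedged usage sketch: `board` both renders the synthetic target into the 3D axes and
# returns its corner grid, so the optimiser in auto_fitBoard calls it with plot=False to get
# a fresh template cloud for the current pose parameters:
#   template = self.board(plot=False)                         # (nCols*nRows) x 3 corners
#   template = self.board(plot=False, angle=[0, 0, np.pi/2])  # same grid, rotated 90 deg about Z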
def getPointCoud(self, colorsMap='jet', skip=1, useRing = True):
# X, Y, Z, intensity, ring
if useRing:
originalCloud = np.array(np.load(self.file, mmap_mode='r'))[:,:5]
if InitLidar:
xyz = originalCloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
originalCloud[:, 0:3] = new_xyz
#mean_x = np.mean(originalCloud[:, 0])
#originalCloud[:, 0] = mean_x
df = pd.DataFrame(data=originalCloud, columns=["X", "Y", "Z","intens","ring"])
gp = df.groupby('ring')
keys = gp.groups.keys()
#groups = gp.groups
coolPoints, circlePoints = [],[]
for i in keys:
line = np.array(gp.get_group(i), dtype=float)
first, last = np.array(line[0], dtype=float)[:3], np.array(line[-1], dtype=float)[:3]
coolPoints.append(first)
coolPoints.append(last)
if self.chessBoard == False:
if len(line) > 50:
l = line[:,:3]
for i in range(2,len(l)-2,1):
d = np.linalg.norm(l[i]-l[i+1])
if d > 0.08: #half of the circle
circlePoints.append(l[i])
circlePoints.append(l[i+1])
self.coolPoints = np.array(coolPoints).squeeze()
self.ax.scatter(*self.coolPoints.T, color='r', marker='o', alpha=1, s=2)
print('coolPoints:{}, circlePoints:{}'.format(np.shape(self.coolPoints), np.shape(circlePoints)))
circlePoints = np.array(circlePoints)
if len(circlePoints)>0:
self.ax.scatter(*circlePoints.T, color='r', marker='o', alpha=1, s=5)
self.fitCircle(circlePoints)
#self.point_cloud = np.array(self.coolPoints, dtype=np.float32)
self.point_cloud = np.array(np.load(self.file, mmap_mode='r')[::skip, :3], dtype=np.float32)
if InitLidar:
xyz = self.point_cloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
self.point_cloud[:, 0:3] = new_xyz
# center the point_cloud
#mean_x = np.mean(self.point_cloud[:, 0])
#self.point_cloud[:, 0] = mean_x
self.point_cloud_mean = np.mean(self.point_cloud, axis=0)
self.Tx, self.Ty, self.Tz = self.point_cloud_mean
# self.point_cloud = self.point_cloud - self.point_cloud_mean
self.point_cloud_colors = np.array(np.load(self.file, mmap_mode='r'))[::skip, 3]
if self.plotInit:
cm = plt.get_cmap(colorsMap)
cNorm = matplotlib.colors.Normalize(vmin=min(self.point_cloud_colors), vmax=max(self.point_cloud_colors))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
self.p1 = self.ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2],
color=scalarMap.to_rgba(self.point_cloud_colors), s=0.2)
else:
self.p = pcl.PointCloud(self.point_cloud)
inlier, outliner, coefficients = self.do_ransac_plane_segmentation(self.p, pcl.SACMODEL_PLANE,
pcl.SAC_RANSAC, 0.01)
#self.planeEquation(coef=np.array(coefficients).squeeze())
self.point_cloud_init = self.point_cloud.copy()
if self.useVoxel:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.point_cloud)
self.point_cloud = np.array(pcd.voxel_down_sample(voxel_size=self.voxel_size).points)
# self.p1 = self.ax.scatter(outliner[:, 0], outliner[:, 1], outliner[:, 2], c='y', s=0.2)
self.p2 = self.ax.scatter(inlier[:, 0], inlier[:, 1], inlier[:, 2], c='g', s=0.2)
w, v = self.PCA(inlier)
point = np.mean(inlier, axis=0)
if self.chessBoard == False and self.circle_center:
#point[1:] = self.circle_center
point[[0,2]]= self.circle_center
w *= 2
if self.chessBoard==False and self.circle_center:
p = Circle(self.circle_center, self.circle_radius, alpha = .3, color='tab:blue')
self.ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=point[1], zdir="y")
self.p3 = self.ax.quiver([point[0]], [point[1]], [point[2]], [v[0, :] * np.sqrt(w[0])],
[v[1, :] * np.sqrt(w[0])],
[v[2, :] * np.sqrt(w[0])], linewidths=(1.8,))
def axisEqual3D(self, centers=None):
extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(extents, axis=1) if centers is None else centers
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def planeEquation(self, coef):
a, b, c, d = coef
mean = np.mean(self.point_cloud, axis=0)
normal = [a, b, c]
d2 = -mean.dot(normal)
# print('d2:{}'.format(d2))
# print('mean:{}'.format(mean))
# print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))
# plot the normal vector
startX, startY, startZ = mean[0], mean[1], mean[2]
startZ = (-normal[0] * startX - normal[1] * startY - d) * 1. / normal[2]
self.ax.quiver([startX], [startY], [startZ], [normal[0]], [normal[1]], [normal[2]], linewidths=(3,),edgecolor="red")
def PCA(self, data, correlation=False, sort=True):
# data = nx3
mean = np.mean(data, axis=0)
data_adjust = data - mean
#: the data is transposed due to np.cov/corrcoef syntax
if correlation:
matrix = np.corrcoef(data_adjust.T)
else:
matrix = np.cov(data_adjust.T)
eigenvalues, eigenvectors = np.linalg.eig(matrix)
if sort:
#: sort eigenvalues and eigenvectors
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
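# Hedged usage sketch (mirrors the call in getPointCoud): the eigen-decomposition of the
# 3x3 covariance gives the principal directions of the planar inlier cloud; with descending
# sort the last eigenvector is the direction of least variance, i.e. the plane normal.
#   w, v = self.PCA(inlier)      # w: eigenvalues (descending), v: eigenvectors as columns
#   plane_normal = v[:, -1]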
def eulerAnglesToRotationMatrix(self, theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
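# The composition above is R = Rz @ Ry @ Rx: rotate about X first, then Y, then Z (fixed axes).
# Quick illustrative check: a 90-degree rotation about Z maps the X axis onto the Y axis.
#   R = self.eulerAnglesToRotationMatrix([0, 0, np.pi / 2])
#   np.allclose(R.dot([1, 0, 0]), [0, 1, 0])   # -> True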
def do_ransac_plane_segmentation(self, pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
"""
Create the segmentation object
:param pcl_data: point could data subscriber
:param pcl_sac_model_plane: use to determine plane models
:param pcl_sac_ransac: RANdom SAmple Consensus
:param max_distance: Max distance for apoint to be considered fitting the model
:return: segmentation object
"""
seg = pcl_data.make_segmenter()
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
inlier_object = pcl_data.extract(inliers, negative=False)
outlier_object = pcl_data.extract(inliers, negative=True)
if len(inliers) <= 1:
outlier_object = [0, 0, 0]
inlier_object, outlier_object = np.array(inlier_object), np.array(outlier_object)
return inlier_object, outlier_object, coefficients
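# Hedged usage sketch (mirrors the call in getPointCoud, with a 1 cm inlier threshold):
#   cloud = pcl.PointCloud(np.asarray(points, dtype=np.float32))
#   inliers, outliers, coef = self.do_ransac_plane_segmentation(cloud, pcl.SACMODEL_PLANE,
#                                                               pcl.SAC_RANSAC, 0.01)
#   # coef holds [a, b, c, d] of the fitted plane a*x + b*y + c*z + d = 0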
def func_CheckButtons(self, label):
if label == 'Axes':
if self.axis_on:
self.ax.set_axis_off()
self.axis_on = False
else:
self.ax.set_axis_on()
self.axis_on = True
elif label == 'Black':
if self.colour:
self.colour = False
self.ax.set_facecolor((1, 1, 1))
else:
self.colour = True
self.ax.set_facecolor((0, 0, 0))
elif label == 'Annotate':
self.Annotate = not self.Annotate
self.AnnotateEdges()
self.fig.canvas.draw_idle()
def ICP_finetune(self, points_in, points_out):
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(points_in)
cloud_out.from_array(points_out)
# icp = cloud_in.make_IterativeClosestPoint()
icp = cloud_out.make_IterativeClosestPoint()
converged, transf, estimate, fitness = icp.icp(cloud_in, cloud_out)
print('fitness:{}, converged:{}, transf:{}, estimate:{}'.format(fitness, converged, np.shape(transf),
np.shape(estimate)))
return converged, transf, estimate, fitness
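# Hedged usage sketch (mirrors auto_fitBoard): align the generated board template onto the
# measured cloud and read back the 4x4 transform plus the transformed template points.
#   converged, transf, estimate, fitness = self.ICP_finetune(
#       np.asarray(self.template_cloud, dtype=np.float32),
#       np.asarray(self.point_cloud, dtype=np.float32))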
def colorfunc(self, label):
if label == 'Init':
self.plotInit = True
else:
self.plotInit = False
self.reset(0)
def OK_btnClick(self, args):
self.OK = True
plt.close()
def not_OK_btnClick(self, args):
self.OK = False
plt.close()
def Close(self, args):
global globalTrigger
globalTrigger = False
plt.close()
def reset(self, args):
self.ax.cla()
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.Rx, self.Ry, self.Rz = 0, 0, 0
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.board()
self.fig.canvas.draw_idle()
def getClosestPoints(self, arg):
dist_mat = distance_matrix(self.template_cloud, self.point_cloud_init)
self.neighbours = np.argsort(dist_mat, axis=1)[:, 0]
self.finaPoints = np.asarray(self.point_cloud_init[self.neighbours, :]).squeeze()
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.p3:
self.p3.remove()
if self.p2:
self.p2.remove()
if self.p1:
self.p1.remove()
self.scatter_finalPoints = self.ax.scatter(self.finaPoints[:, 0], self.finaPoints[:, 1], self.finaPoints[:, 2],
c='k', marker='x', s=1)
self.corn = self.ax.scatter(self.template_cloud[:, 0], self.template_cloud[:, 1], self.template_cloud[:, 2],
c='blue', marker='o', s=5)
self.fig.canvas.draw_idle()
def Tz_plus(self, event):
self.Tz += self.step
self.update_R(0)
def Tz_minus(self, event):
self.Tz -= self.step
self.update_R(0)
def Ty_plus(self, event):
self.Ty += self.step
self.update_R(0)
def Ty_minus(self, event):
self.Ty -= self.step
self.update_R(0)
def Tx_plus(self, event):
self.Tx += self.step
self.update_R(0)
def Tx_minus(self, event):
self.Tx -= self.step
self.update_R(0)
def readCameraIntrin(self):
name = 'outside'  # which calibration set to load: 'inside' or 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_left = self.camera_model['K_left']
self.K_right = self.camera_model['K_right']
self.D_left = self.camera_model['D_left']
self.D_right = self.camera_model['D_right']
# self.K_left = self.camera_model['K_right']
# self.K_right = self.camera_model['K_left']
# self.D_left = self.camera_model['D_right']
# self.D_right = self.camera_model['D_left']
# print('K_left')
# print(self.K_left)
# print('K_right')
# print(self.K_right)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.T = np.array([-0.98, 0., 0.12])[:, np.newaxis]
#self.T = np.array([-.75, 0., 0.])[:, np.newaxis]
#print('self T after {}'.format(np.shape(self.T)))
#angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
#self.R = euler_matrix(angles)
#Q = self.camera_model_rectify['Q']
#roi_left, roi_right = self.camera_model_rectify['roi_left'], self.camera_model_rectify['roi_right']
self.leftMapX, self.leftMapY = self.camera_model_rectify['leftMapX'], self.camera_model_rectify['leftMapY']
self.rightMapX, self.rightMapY = self.camera_model_rectify['rightMapX'], self.camera_model_rectify['rightMapY']
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
self.K = self.K_right
self.D = self.D_right
try:
N = 5
aruco_dict = aruco.custom_dictionary(0, N, 1)
aruco_dict.bytesList = np.empty(shape=(4, N - 1, N - 1), dtype=np.uint8)
A = np.array([[0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[0] = aruco.Dictionary_getByteListFromBits(A)
R = np.array([[1, 1, 1, 1, 0], [1, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
dtype=np.uint8)
aruco_dict.bytesList[1] = aruco.Dictionary_getByteListFromBits(R)
V = np.array([[1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0]],
dtype=np.uint8)
O = np.array([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[2] = aruco.Dictionary_getByteListFromBits(O)
aruco_dict.bytesList[3] = aruco.Dictionary_getByteListFromBits(V)
self.ARUCO_DICT = aruco_dict
self.calibation_board = aruco.GridBoard_create(
markersX=2, markersY=2,
markerLength=0.126, markerSeparation=0.74,
dictionary=self.ARUCO_DICT)
except Exception:
print('cv2.aruco is not available - install opencv-contrib-python')
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
return img
def annotate3D(self, ax, s, *args, **kwargs):
self.tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(self.tag)
def AnnotateEdges(self, giveAX=None, givenPoints=None):
if self.Annotate:
# add vertices annotation.
if giveAX is None:
if self.lowerTemplate or self.chessBoard == False:
if self.chessBoard == False:
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
self.templatePoints = [pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]
self.templatePoints = np.array(self.templatePoints).reshape(-1, 3)
cornersToPLot = self.estimate[idx, :]
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=12, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(self.template_cloud):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
try:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
except:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols+1, self.nRows+1, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
# templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nRows,self.nCols, 3)[1:self.nRows-1,1:self.nCols-1,:]
self.templatePoints = np.array(templatePoints).reshape(-1, 3)
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(givenPoints):
self.annotate3D(giveAX, s=str(j), xyz=xyz_, fontsize=10, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
if self.showImage:
# annotate image
points = np.asarray(self.corners2).squeeze()
font, lineType = cv2.FONT_HERSHEY_SIMPLEX, 2 if self.chessBoard else 10
for i, point in enumerate(points):
point = tuple(point.ravel())
cv2.putText(self.QueryImg, '{}'.format(i), point, font, 1 if self.chessBoard else 3, (0, 0, 0)
if self.chessBoard else (255, 0, 0), lineType)
self.image_ax.imshow(self.QueryImg)
def getCamera_XYZ_Stereo(self):
#cam_rot, jac = cv2.Rodrigues(self.rvecs)
#mR = np.matrix(cam_rot)
#mT = np.matrix(self.tvecs)
#cam_trans = -mR * mT
_3DPoints = []
for i, pixel in enumerate(self.x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = self.depth[i]
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - self.fxypxy[2]) / self.fxypxy[0]
pt[1] = pt[2] * (pt[1] - self.fxypxy[3]) / self.fxypxy[1]
# pt = pt.dot(cam_rot.T) + self.tvecs
_3DPoints.append(pt)
print('_3DPoints {}'.format(np.shape(_3DPoints)))
print('tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ_Stereo mean {}'.format(np.mean(_3DPoints, axis=0)))
_3DPoints = np.array(_3DPoints).squeeze()
print('from disparity getCamera_XYZ_Stereo ')
d = distance_matrix(_3DPoints,_3DPoints)
print(d)
return _3DPoints
def getCamera_XYZ(self):
R_mtx, jac = cv2.Rodrigues(self.rvecs)
inv_R_mtx = np.linalg.inv(R_mtx)
inv_K = np.linalg.inv(self.K)
def compute_XYZ(u, v): # from 2D pixels to 3D world
uv_ = np.array([[u, v, 1]], dtype=np.float32).T
suv_ = uv_
xyz_ = inv_K.dot(suv_) - self.tvecs
XYZ = inv_R_mtx.dot(xyz_)
pred = XYZ.T[0]
return pred
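# Note: compute_XYZ inverts u_h = K (R X + t) with the projective scale fixed to 1
# (suv_ = uv_), so the recovered points are only defined up to that unknown scale.
# getCamera_XYZ_Stereo resolves the scale explicitly from the disparity-based depth:
#   X = Z * (u - cx) / fx,   Y = Z * (v - cy) / fy,   Z = depth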
Camera_XYZ = []
for i, point in enumerate(self.pixelsPoints):
xyz = compute_XYZ(u=point[0], v=point[1])
# print 'xyz:{}'.format(xyz)
Camera_XYZ.append(xyz)
Camera_XYZ = np.array(Camera_XYZ)
print('init tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ mean {}'.format(np.mean(Camera_XYZ, axis=0)))
if self.img_file2 is None:
for i, point in enumerate(Camera_XYZ):
imgpts, jac = cv2.projectPoints(point, self.rvecs, self.tvecs, self.K, self.D)
imgpts = np.asarray(imgpts).squeeze()
cv2.circle(self.QueryImg, (int(imgpts[0]), int(imgpts[1])), 7, (255, 0, 0), 7)
self.image_ax.imshow(self.QueryImg)
return Camera_XYZ
def getImagePixels(self):
img = cv2.imread(self.img_file)    # left image
img2 = cv2.imread(self.img_file2)  # right image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pixelsPoints,pixelsPoints2, _3DreconstructedBoard = [],[],[]
if self.chessBoard:
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
ret2, corners2 = cv2.findChessboardCorners(gray2, (10, 7), None)
if ret and ret2: # found chessboard
print('Found chessboard')
corners_2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
corners2_2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), self.criteria)
pixelsPoints = np.asarray(corners_2).squeeze()
pixelsPoints2 = np.asarray(corners2_2).squeeze()
cv2.drawChessboardCorners(img, (10, 7), corners_2, ret)
cv2.drawChessboardCorners(img2, (10, 7), corners2_2, ret2)
# Find the rotation and translation vectors.
success, rvecs, tvecs, inliers = cv2.solvePnPRansac(self.objp, corners_2, self.K, self.D)
rvecs, _ = cv2.Rodrigues(rvecs)
_3Dpoints = self.objp
# project 3D points to image plane
_2Dpoints, jac = cv2.projectPoints(_3Dpoints, rvecs, tvecs, self.K, self.D)
_2Dpoints = np.array(_2Dpoints, dtype=np.float32).squeeze()
print('_2Dpoints -> {}'.format(np.shape(_2Dpoints)))
for i in range(len(_2Dpoints)):
cv2.circle(img, tuple(_2Dpoints[i]), 5, (0, 255, 0), 3)
_3Dpoints = rvecs.dot(_3Dpoints.T) + tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat[0, :11])
_3DreconstructedBoard = _3Dpoints
else:
return None,None
else:
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(gray2, self.ARUCO_DICT)
corners2, ids2, rejectedImgPoints2, recoveredIds2 = aruco.refineDetectedMarkers(
image=gray2, board=self.calibation_board, detectedCorners=corners2, detectedIds=ids2,
rejectedCorners=rejectedImgPoints2, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None) and np.all(ids2 != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
if len(ids) > 0 and len(ids2) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
retval2, self.rvecs2, self.tvecs2 = aruco.estimatePoseBoard(corners2, ids2,
self.calibation_board, self.K,
self.D, None, None)
img = aruco.drawDetectedMarkers(img, corners, ids,borderColor=(0, 0, 255))
img2 = aruco.drawDetectedMarkers(img2, corners2, ids2, borderColor=(0, 0, 255))
if retval and retval2:
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
self.dst2, jacobian = cv2.Rodrigues(self.rvecs2)
#self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0],[.5,.5,0]])
_3Dpoints = self.dst.T.dot(np.array(self.pts).squeeze().T) + self.tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat)
_3DreconstructedBoard = _3Dpoints
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img = cv2.line(img, top_right, bot_right, (0, 255, 0), 4)
img = cv2.line(img, bot_right, bot_left, (0, 255, 0), 4)
img = cv2.line(img, bot_left, top_left, (0, 255, 0), 4)
img = cv2.line(img, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img, tuple(corners2[-1]), 5, (0, 255, 0), 3)
cv2.circle(img, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints = np.asarray(corners2).squeeze()
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs2, self.tvecs2, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img2 = cv2.line(img2, top_right, bot_right, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_right, bot_left, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_left, top_left, (0, 255, 0), 4)
img2 = cv2.line(img2, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img2, tuple(corners2[-1]), 5, (0, 255, 0), 3)
#cv2.circle(img2, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints2 = np.asarray(corners2).squeeze()
else:
return None,None
else:
return None,None
else:
return None,None
scale = .4
_horizontal = np.hstack(
(cv2.resize(img, None, fx=scale, fy=scale), cv2.resize(img2, None, fx=scale, fy=scale)))
cv2.imshow('_horizontal', _horizontal)
cv2.waitKey(0)
cv2.destroyAllWindows()
return pixelsPoints,pixelsPoints2, _3DreconstructedBoard
def savePointsCorrespondences(self, args):
display = True
fig = plt.figure(figsize=plt.figaspect(1))
ax = plt.axes(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if self.chessBoard:
legend_elements = [
Line2D([0], [0], marker='o', label='board template', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='green', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='k', markersize=6),
Line2D([0], [0], marker='o', label='Camera_XYZ', markerfacecolor='red', markersize=6),
]
board_template = self.template_cloud
board_template_ICP_finetuned = self.estimate
closest_lidar_points = self.finaPoints
try:
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
except:
print('Reshape with (nCols, nRows) failed; retrying with (nCols+1, nRows+1)')
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
icp_finetuned_inside = np.array(icp_finetuned_inside).reshape(-1, 3)
board_template_inside = np.array(board_template_inside).reshape(-1, 3)
print('board_template_inside-----------------------------------------------------')
print(board_template_inside)
print('board_template_inside -> {}'.format(np.shape(board_template_inside)))
dist_Lidar = distance_matrix(board_template_inside, board_template_inside)
print('dist_Lidar---------------------------------------------------------')
print(dist_Lidar[0, :11])
closest_lidar_points_inside = np.array(closest_lidar_points_inside).reshape(-1, 3)
Camera_XYZ = self.getCamera_XYZ()
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
display = True
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('icp_finetuned_inside:{}'.format(np.shape(icp_finetuned_inside)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('closest_lidar_points_inside:{}'.format(np.shape(closest_lidar_points_inside)))
print('Camera_XYZ:{}'.format(np.shape(Camera_XYZ)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
#dist = distance_matrix(Camera_XYZ_Stereo, Camera_XYZ_Stereo)
#print('distance matrix Camera_XYZ_Stereo:{}'.format(dist))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*icp_finetuned_inside.T, color='g', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*closest_lidar_points_inside.T, color='k', marker='x', alpha=1, s=20)
ax.scatter(*Camera_XYZ.T, color='k', marker='x', alpha=1, s=30)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=1, s=3)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ_Stereo, axis=0) if self.img_file2 is not None else np.mean(board_template,axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft)<=0:
print('Cannot get pixel points!')
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('icp_finetuned_inside', icp_finetuned_inside),
('closest_lidar_points', closest_lidar_points),
('closest_lidar_points_inside', closest_lidar_points_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ', Camera_XYZ)])
# save_obj(self.points_correspondences, self.name)
else:
legend_elements = [
Line2D([0], [0], marker='o', label='board template all', markerfacecolor='b', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='red', markersize=6),
Line2D([0], [0], marker='o', label='board template inside', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='red', markersize=6),
]
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
board_template = np.array([pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1,
3)
pts = np.asarray(self.estimate.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.estimate[idx], axis=0)
board_template_ICP_finetuned = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
board_template_inside = self.templatePoints
pts = np.asarray(self.finaPoints.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.finaPoints[idx], axis=0)
closest_lidar_points = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=.8, s=20)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ, axis=0) if self.img_file2 is not None else np.mean(board_template, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
_3DreconstructedBoard = np.array(_3DreconstructedBoard).squeeze()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft) <= 0:
print('Cannot get pixel points!')
ax.scatter(*_3DreconstructedBoard.T, color='b', marker='x', alpha=1, s=20)
print('pixelsPointsLeft:{}'.format(np.shape(self.pixelsPointsLeft)))
print('pixelsPointsRight:{}'.format(np.shape(self.pixelsPointsRight)))
print('_3DreconstructedBoard:{}'.format(np.shape(_3DreconstructedBoard)))
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('closest_lidar_points', closest_lidar_points)])
# save_obj(self.points_correspondences, self.name)
ax.legend(handles=legend_elements, loc='best')
plt.show()
def getDepth_Inside_Outside(self):
calibrations = ['inside', 'outside']
output = []
for calib in calibrations:
camera_model = load_obj('{}_combined_camera_model'.format(calib))
camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(calib))
K_left = camera_model['K_right']
D_left = camera_model['D_right']
T = camera_model['T']
leftMapX, leftMapY = camera_model_rectify['leftMapX'], camera_model_rectify['leftMapY']
rightMapX, rightMapY = camera_model_rectify['rightMapX'], camera_model_rectify['rightMapY']
imgleft = cv2.imread(self.img_file)
imgright = cv2.imread(self.img_file2)
if stereoRectify:
imgleft = cv2.remap(src=imgleft, map1=leftMapX, map2=leftMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
imgright = cv2.remap(src=imgright, map1=rightMapX, map2=rightMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(imgleft, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(imgright, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_left and ret_right: # found chessboard
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
x_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
x_right = np.asarray(corners2_right).squeeze()
baseline = abs(T[0])
focal_length, cx, cy = K_left[0, 0], K_left[0, 2], K_left[1, 2]
disparity = np.sum(np.sqrt((x_left - x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
depth = (baseline * focal_length / disparity) # .reshape(10,7)
fxypxy = [K_left[0, 0], K_left[1, 1], cx, cy]
print('{} fx:{}, fy:{}'.format(calib, round(K_left[0, 0],2), round(K_left[1, 1],2)))
_3DPoints = []
for i, pixel in enumerate(x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = depth[i]
# print('u:{},v:{},distance:{}'.format(u,v, distance))
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - fxypxy[2]) / fxypxy[0]
pt[1] = pt[2] * (pt[1] - fxypxy[3]) / fxypxy[1]
_3DPoints.append(pt)
_3DPoints = np.array(_3DPoints)
output.append(_3DPoints)
else:
print('cannot detect board in both images')
if len(output)>1:
inside_3D = np.array(output[0]).squeeze()
outside_3D = np.array(output[1]).squeeze()
# get the error for each point
a_min_b = inside_3D - outside_3D
norm_total = np.linalg.norm(a_min_b) / 70
norm_axis = np.linalg.norm(a_min_b, axis=0) / 70
print('norm_total:{}, norm_axis:{}'.format(norm_total, norm_axis))
self._3DErros.append(norm_axis)
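# The divisor 70 above is the number of chessboard corners (10 x 7), so the reported values
# are the accumulated inside/outside reconstruction differences normalised per corner.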
def fitNewPlan(self):
coolPoints = self.coolPoints
def minimum_bounding_rectangle(points):
pi2 = np.pi / 2.
# get the convex hull for the points
hull = ConvexHull(points)
hull_points = points[hull.vertices]
y_saved = []
for simplex in hull.simplices:
y = coolPoints[simplex,1]
x = points[simplex, 0]
z = points[simplex, 1]
self.ax.plot(x, y, z, 'k-', alpha = .5)
y_saved.append(y)
y_saved = np.array(y_saved)
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
rotations = np.vstack([
np.cos(angles),np.cos(angles - pi2),
np.cos(angles + pi2),np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
rval = np.array(rval)
d_matrix = distance_matrix(rval, points)
neighbours = np.argsort(d_matrix, axis=1)[:, 0]
rval2 = np.asarray(coolPoints[neighbours, 1]).squeeze()
return rval, rval2
points = list(self.coolPoints[:, [0, -1]])
y = np.mean(self.coolPoints[:, 1])
c, c2 = minimum_bounding_rectangle(np.array(points))
self.corners_ = []
for i,point in enumerate(c):
#self.corners_.append([point[0],y, point[1]])
self.corners_.append([point[0],c2[i], point[1]])
if self.chessBoard==False and self.circle_center:
self.corners_.append([self.circle_center[0],y,self.circle_center[1]])
self.corners_ = np.array(self.corners_)
self.ax.scatter(*self.corners_.T, color='k', marker='x', alpha=1, s=50)
def fitCircle(self, points):
if len(points)>0:
def calc_R(x, y, xc, yc):
"""calculate the distance of each 2D points from the center (xc, yc)"""
return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
def f(c, x, y):
"""calculate the algebraic distance between the data points
and the mean circle centered at c=(xc, yc)"""
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def sigma(coords, x, y, r):
"""Computes Sigma for circle fit."""
dx, dy, sum_ = 0., 0., 0.
for i in range(len(coords)):
dx = coords[i][1] - x
dy = coords[i][0] - y
sum_ += (sqrt(dx * dx + dy * dy) - r) ** 2
return sqrt(sum_ / len(coords))
def hyper_fit(coords, IterMax=99, verbose=False):
"""
Fits coords to circle using hyperfit algorithm.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : s, sigma - variance of data wrt solution (float)
"""
X, Y = None, None
if isinstance(coords, np.ndarray):
X = coords[:, 0]
Y = coords[:, 1]
elif isinstance(coords, list):
X = np.array([x[0] for x in coords])
Y = np.array([x[1] for x in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
n = X.shape[0]
Xi = X - X.mean()
Yi = Y - Y.mean()
Zi = Xi * Xi + Yi * Yi
# compute moments
Mxy = (Xi * Yi).sum() / n
Mxx = (Xi * Xi).sum() / n
Myy = (Yi * Yi).sum() / n
Mxz = (Xi * Zi).sum() / n
Myz = (Yi * Zi).sum() / n
Mzz = (Zi * Zi).sum() / n
# computing the coefficients of characteristic polynomial
Mz = Mxx + Myy
Cov_xy = Mxx * Myy - Mxy * Mxy
Var_z = Mzz - Mz * Mz
A2 = 4 * Cov_xy - 3 * Mz * Mz - Mzz
A1 = Var_z * Mz + 4. * Cov_xy * Mz - Mxz * Mxz - Myz * Myz
A0 = Mxz * (Mxz * Myy - Myz * Mxy) + Myz * (Myz * Mxx - Mxz * Mxy) - Var_z * Cov_xy
A22 = A2 + A2
# finding the root of the characteristic polynomial
y = A0
x = 0.
for i in range(IterMax):
Dy = A1 + x * (A22 + 16. * x * x)
xnew = x - y / Dy
if xnew == x or not np.isfinite(xnew):
break
ynew = A0 + xnew * (A1 + xnew * (A2 + 4. * xnew * xnew))
if abs(ynew) >= abs(y):
break
x, y = xnew, ynew
det = x * x - x * Mz + Cov_xy
Xcenter = (Mxz * (Myy - x) - Myz * Mxy) / det / 2.
Ycenter = (Myz * (Mxx - x) - Mxz * Mxy) / det / 2.
x = Xcenter + X.mean()
y = Ycenter + Y.mean()
r = sqrt(abs(Xcenter ** 2 + Ycenter ** 2 + Mz))
s = sigma(coords, x, y, r)
iter_ = i
if verbose:
print('Regression complete in {} iterations.'.format(iter_))
print('Sigma computed: ', s)
return x, y, r, s
def least_squares_circle(coords):
"""Circle fit using least-squares solver.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : MSE of solution against training data (float)
"""
x, y = None, None
if isinstance(coords, np.ndarray):
x = coords[:, 0]
y = coords[:, 1]
elif isinstance(coords, list):
x = np.array([point[0] for point in coords])
y = np.array([point[1] for point in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, _ = leastsq(f, center_estimate, args=(x, y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R) ** 2)
return xc, yc, R, residu
def plot_data_circle(x, y, xc, yc, R):
"""
Plot data and a fitted circle.
Inputs:
x : data, x values (array)
y : data, y values (array)
xc : fit circle center (x-value) (float)
yc : fit circle center (y-value) (float)
                R : fit circle radius (float)
Output:
None (generates matplotlib plot).
"""
f = plt.figure(facecolor='white')
plt.axis('equal')
theta_fit = np.linspace(-pi, pi, 180)
x_fit = xc + R * np.cos(theta_fit)
y_fit = yc + R * np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-', label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.scatter(x, y, c='red', label='data')
plt.legend(loc='best', labelspacing=0.1)
plt.grid()
plt.title('Fit Circle')
x1, y1, r1, resid1 = hyper_fit(points[:,[0,2]])
x2, y2, r2, resid2 = least_squares_circle(points[:,[0,2]])
#plot_data_circle(points[:,1], points[:,2],x,y,r)
if resid1>resid2:
x, y, r = x2, y2, r2
else:
x, y, r = x1, y1, r1
self.circle_center = (x, y)
self.circle_radius = r
def getData(chess=True):
pcl_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/*.npy'.format('chess' if chess else 'charuco'))
imgleft_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/left/*.png'.format('chess' if chess else 'charuco'))
imgright_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/right/*.png'.format('chess' if chess else 'charuco'))
pcl_files.sort()
imgleft_files.sort()
imgright_files.sort()
GoodPoints,_3DErros, IMageNames = [],[],[]
for i, file in enumerate(pcl_files):
if globalTrigger:
print('work with {}'.format(file))
image_left = imgleft_files[i]
image_right = imgright_files[i]
filt = PointCloud_filter(file=file, img_file=image_left, img_file2=image_right, debug=False)
filt.setUp()
plt.show()
plt.close()
print('\n OK:{}, Save points_correspondences : {}'.format(filt.OK, np.shape(filt.points_correspondences)))
if filt.OK:
GoodPoints.append(filt.points_correspondences)
print('save data {} '.format(np.shape(GoodPoints)))
_3DErros.append(filt._3DErros)
IMageNames.append(os.path.basename(image_left))
else:
print('Close')
break
#save_obj(GoodPoints, 'GoodPoints2_{}'.format('chess' if chess else 'charuco'))
print('Data saved in GoodPoints')
showErros(_3DErros, IMageNames)
def euler_from_matrix(R):
beta = -np.arcsin(R[2, 0])
alpha = np.arctan2(R[2, 1] / np.cos(beta), R[2, 2] / np.cos(beta))
gamma = np.arctan2(R[1, 0] / np.cos(beta), R[0, 0] / np.cos(beta))
return np.array((alpha, beta, gamma))
def euler_matrix(theta):
R = np.array([[np.cos(theta[1]) * np.cos(theta[2]),
np.sin(theta[0]) * np.sin(theta[1]) * np.cos(theta[2]) - np.sin(theta[2]) * np.cos(theta[0]),
np.sin(theta[1]) * np.cos(theta[0]) * np.cos(theta[2]) + np.sin(theta[0]) * np.sin(
theta[2])],
[np.sin(theta[2]) * np.cos(theta[1]),
np.sin(theta[0]) * np.sin(theta[1]) * np.sin(theta[2]) + np.cos(theta[0]) * np.cos(theta[2]),
np.sin(theta[1]) * np.sin(theta[2]) * np.cos(theta[0]) - np.sin(theta[0]) * np.cos(
theta[2])],
[-np.sin(theta[1]), np.sin(theta[0]) * np.cos(theta[1]),
np.cos(theta[0]) * np.cos(theta[1])]])
return R
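def _euler_round_trip_check():
    # Illustrative consistency check (not part of the original script): away
    # from gimbal lock, euler_from_matrix should invert euler_matrix.
    angles = np.array([np.deg2rad(1.0), np.deg2rad(22.6), np.deg2rad(-1.0)])
    R = euler_matrix(angles)
    return np.allclose(euler_from_matrix(R), angles)  # expected: True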
class LiDAR_Camera_Calibration(object):
def __init__(self, file, chess = True, debug=True):
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * .1
self.debug = debug
self.file = file
self.chess = chess
if chess:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside',
'icp_finetuned_inside','closest_lidar_points','closest_lidar_points_inside',
'pixelsPoints','Camera_XYZ_Stereo','Camera_XYZ']
else:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside','pixelsPoints',
'Camera_XYZ_Stereo','closest_lidar_points']
self.readIntrinsics()
try:
self.load_points()
except:
print('cannot load data points')
'''self.Rotation = np.array([[ 0.94901505, 0.01681284, 0.3147821 ],
[-0.01003801, 0.99968204, -0.02313113],
[-0.31507091, 0.018792, 0.94888207]]).squeeze()
self.Translation = np.array([[-0.98078971],
[ 0.00600202],
[ 0.19497569]]).squeeze()
#self.Translation[0] = -.64
euler = euler_from_matrix(self.Rotation)
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.Rotation)
print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler[1] = np.deg2rad(22.598)
self.Rotation = euler_matrix(euler)'''
def rmse(self, objp, imgp, K, D, rvec, tvec):
print('objp:{}, imgp:{}'.format(np.shape(objp), np.shape(imgp)))
predicted, _ = cv2.projectPoints(objp, rvec, tvec, K, D)
print('rmse=====================================================')
print('predicted -> {}, type - >{}'.format(np.shape(predicted), type(predicted)))
predicted = cv2.undistortPoints(predicted, K, D, P=K)
predicted = predicted.squeeze()
pix_serr = []
for i in range(len(predicted)):
xp = predicted[i, 0]
yp = predicted[i, 1]
xo = imgp[i, 0]
yo = imgp[i, 1]
pix_serr.append((xp - xo) ** 2 + (yp - yo) ** 2)
ssum = sum(pix_serr)
return math.sqrt(ssum / len(pix_serr))
def readIntrinsics(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_right = self.camera_model['K_left']
self.K_left = self.camera_model['K_right']
self.D_right = self.camera_model['D_left']
self.D_left = self.camera_model['D_right']
print(' self.K_right')
print( self.K_right)
print(' self.K_left')
print(self.K_left)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.K = self.K_right
self.D = self.D_right
print('self T before {}'.format(np.shape(self.T)))
self.T = np.array([-0.96, 0., 0.12])[:, np.newaxis]
print('self T after {}'.format(np.shape(self.T)))
angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
self.R = euler_matrix(angles)
#-----------------------------------------------------
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#print(self.R)
print('translation is {}-----------------------------'.format(self.T))
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
#print('R1:{}'.format(R1))
#print('R2:{}'.format(R2))
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.R)
print('self.R: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R1)
#print('euler1->{}'.format(euler))
angles = euler_from_matrix(R1)
#print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R2)
#print('euler2->{}'.format(euler))
angles = euler_from_matrix(R2)
#print('rotation2: ', [(180.0 / math.pi) * i for i in angles])
self.R1 = R1
self.R2 = R2
self.P1 = P1
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
print('Got camera intrinsic')
print('Got camera-lidar extrinsics')
def load_points(self):
self.Lidar_3D, self.Image_2D,self.Image_2D2, self.Image_3D,self.Camera_XYZ = [],[],[],[],[]
with open(self.file, 'rb') as f:
self.dataPoinst = pickle.load(f, encoding='latin1')
#with open(self.file,'rb') as f:
#self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
#self.N = 1
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] #N x 3
#pixelsPoints = dictionary_data['pixelsPoints'] #N x 2
#StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] #N x 3
pixelsPointsLeft = dictionary_data['pixelsPointsLeft']
pixelsPointsRight = dictionary_data['pixelsPointsRight']
StereoCam_3D_points = dictionary_data['_3DreconstructedBoard'] #N x 3
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPointsLeft)
self.Image_2D2.append(pixelsPointsRight)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
#print('Cannot read data')
pass
#self.Lidar_3D = np.array(self.Lidar_3D).reshape(-1,3)
#self.Image_2D = np.array(self.Image_2D).reshape(-1,2)
#self.Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_2D:{}, Image_2D2:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),np.shape(self.Image_2D2),
np.shape(self.Image_3D)))
def plotData(self):
self.fig = plt.figure(figsize=plt.figaspect(0.33))
self.fig.tight_layout()
for i in range(self.N):
print('{}/{}'.format(i+1,self.N))
ax1 = self.fig.add_subplot(1, 3, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax2 = self.fig.add_subplot(1, 3, 2, projection='3d')
ax2.set_title('3D Stereo cameras')
ax2.set_xlabel('X', fontsize=8)
ax2.set_ylabel('Y', fontsize=8)
ax2.set_zlabel('Z', fontsize=8)
ax3 = self.fig.add_subplot(1, 3, 3, projection='3d')
ax3.set_title('2D pixels')
ax3.set_xlabel('X', fontsize=8)
ax3.set_ylabel('Y', fontsize=8)
ax3.set_zlabel('Z', fontsize=8)
_3d_LIDAR = np.array(self.Lidar_3D[i])
ax1.scatter(*_3d_LIDAR.T)
self.axisEqual3D(ax1, _3d_LIDAR)
_3d_cam = np.array(self.Image_3D[i])
ax2.scatter(*_3d_cam.T, c='r')
self.axisEqual3D(ax2,_3d_cam)
_2d_cam = np.array(self.Image_2D[i])
ax3.scatter(*_2d_cam.T, c='g')
self.axisEqual3D(ax3, _2d_cam)
plt.show()
def axisEqual3D(self,ax,data):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(data, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def get3D_3D_homography(self, src, dst): #both or Nx3 matrices
src_mean = np.mean(src, axis=0)
dst_mean = np.mean(dst, axis=0)
        # Compute covariance and recover the rigid transform via SVD
        # (Python 3 rewrite of the original commented-out code, which relied
        # on a Python 2 tuple-unpacking lambda inside reduce).
        H = np.zeros((3, 3))
        for a, b in zip(src - src_mean, dst - dst_mean):
            H += np.outer(a, b)
        u, s, v = np.linalg.svd(H)
        R = v.T.dot(u.T)  # Rotation
        T = -R.dot(src_mean) + dst_mean  # Translation
        H = np.hstack((R, T[:, np.newaxis]))
        return H, R.T, T
def calibrate_3D_3D_old(self):
print('3D-3D ========================================================================================')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_3D3D_{}.pkl'.format('chess')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format('chess')
self.Lidar_3D, self.Image_2D, self.Image_3D, self.Camera_XYZ = [], [], [], []
with open(file, 'rb') as f:
self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] # N x 3
pixelsPoints = dictionary_data['pixelsPoints'] # N x 2
StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] # N x 3
#StereoCam_3D_points = dictionary_data['point3D_trianguate']
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPoints)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
print('Cannot read data===================================================')
break
print('Lidar_3D:{}, Image_2D:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),
np.shape(self.Image_3D)))
Lidar_3D = np.array(self.Lidar_3D).reshape(-1, 3)
Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_3D:{}'.format(np.shape(Lidar_3D),np.shape(Image_3D)))
#-------------------------------------#-------------------------------------
c_, R_, t_ = self.estimate(Lidar_3D,Image_3D)
#import superpose3d as super
#(RMSD, R_, t_, c_) = super.Superpose3D(Lidar_3D, Image_3D)
#print('RMSD -> {}, t_{}, c_->{}'.format(RMSD, t_, c_))
# -------------------------------------#-------------------------------------
def similarity_transform(from_points, to_points):
assert len(from_points.shape) == 2, \
"from_points must be a m x n array"
assert from_points.shape == to_points.shape, \
"from_points and to_points must have the same shape"
N, m = from_points.shape
mean_from = from_points.mean(axis=0)
mean_to = to_points.mean(axis=0)
delta_from = from_points - mean_from # N x m
delta_to = to_points - mean_to # N x m
sigma_from = (delta_from * delta_from).sum(axis=1).mean()
sigma_to = (delta_to * delta_to).sum(axis=1).mean()
cov_matrix = delta_to.T.dot(delta_from) / N
U, d, V_t = np.linalg.svd(cov_matrix, full_matrices=True)
cov_rank = np.linalg.matrix_rank(cov_matrix)
S = np.eye(m)
if cov_rank >= m - 1 and np.linalg.det(cov_matrix) < 0:
S[m - 1, m - 1] = -1
elif cov_rank < m - 1:
raise ValueError("colinearility detected in covariance matrix:\n{}".format(cov_matrix))
R = U.dot(S).dot(V_t)
c = (d * S.diagonal()).sum() / sigma_from
t = mean_to - c * R.dot(mean_from)
print('R:{},t:{},c:{}'.format(R,t,c))
return c * R, t
print('similarity_transform===============================')
from_points = Lidar_3D
to_points = Image_3D
M_ans, t_ans = similarity_transform(from_points, to_points)
H, R, T = self.get3D_3D_homography(src = Lidar_3D, dst=Image_3D)
print('H:{}, R:{}, T:{}'.format(np.shape(H), np.shape(R), np.shape(T)))
print(H)
self.fig = plt.figure(figsize=plt.figaspect(1.))
ax1 = self.fig.add_subplot(1, 1, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax1.set_axis_off()
_3d_LIDAR = self.Lidar_3D[0]
ax1.scatter(*_3d_LIDAR.T, label = 'LiDAR')
_3d_Image = self.Image_3D[0]
ax1.scatter(*_3d_Image.T, s=25, label = 'Stereo Cam')
T = _3d_LIDAR.dot(c_ * R_) + t_
print('T -> {}'.format(np.shape(T)))
ax1.scatter(*T.T, marker='x', label='T')
d2 = distance_matrix(_3d_Image,_3d_Image)
print('d2:{}'.format(d2))
print('d2 shape :{}'.format(np.shape(d2)))
ones = np.ones(len(_3d_LIDAR))[:, np.newaxis]
transformed_ = np.hstack((_3d_LIDAR,ones))
transformed = np.dot(H, transformed_.T).T #transformation estimated with SVD
print(np.shape(transformed))
ax1.scatter(*transformed.T, s=25, label = 'ICP sol')
#ax1.set_axis_off()
primary = Lidar_3D# _3d_LIDAR
secondary = Image_3D# _3d_Image
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:, :-1]
X = pad(primary)
Y = pad(secondary)
# Solve the least squares problem X * A = Y # to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y)
transform = lambda x: unpad(np.dot(pad(x), A))
#print transform(primary)
print("Max error:", np.abs(secondary - transform(primary)).max())
trns2 = transform(_3d_LIDAR) #transformation estimated with LS
ax1.scatter(*trns2.T, label = 'least square sol')
to_points = M_ans.dot(_3d_LIDAR.T).T + t_ans
print('to_points ->{}'.format(np.shape(to_points)))
ax1.scatter(*to_points.T, label = 'to_points')
self.axisEqual3D(ax1, transformed)
ax1.legend()
plt.show()
#----------------------------------
if True:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_4.npy'
else:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_4.npy'
i = 12
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
#img, img2 = cv2.imread(l), cv2.imread(r)
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/cloud_{}.npy'.format(i)
if stereoRectify and True:
img = cv2.remap(src=img, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
img2 = cv2.remap(src=img2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
#Points in LiDAR frame
LiDAR_points3D = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3] #
print('LiDAR_points3D:{}'.format(np.shape(LiDAR_points3D)))
#converted in camera frame
ones = np.ones(len(LiDAR_points3D))[:, np.newaxis]
transformed_ = np.hstack((LiDAR_points3D, ones))
Camera_points3D = np.dot(H, transformed_.T).T
#Camera_points3D = transform(LiDAR_points3D)
#print('Camera_points3D:{}'.format(np.shape(Camera_points3D)))
#Camera_points3D = LiDAR_points3D.dot(c_ * R_) + t_
#Camera_points3D = LiDAR_points3D.dot(R_) + t_
#Camera_points3D = transform(LiDAR_points3D) #transformation estimated with LS
print('Camera_points3D -> {}'.format(Camera_points3D))
rvec, _ = cv2.Rodrigues(np.eye(3))
tvec = np.zeros(3)
#Camera_points3D = LiDAR_points3D#.dot(R_) + t_
#rvec = R_
#tran = t_
#tran[0] = -0.02
#tran[1] = -0.03
print('rvec -> {}, tvec->{}'.format(np.shape(rvec),np.shape(tvec)))
            print('Camera_points3D -> {}'.format(np.shape(Camera_points3D)))
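def _kabsch_alignment_demo():
    # Minimal, self-contained sanity check (illustrative only) of the SVD-based
    # 3D-3D rigid alignment used above: recover a known rotation and
    # translation from synthetic point sets. All numbers here are assumed.
    rng = np.random.RandomState(0)
    src = rng.rand(30, 3)
    angle = np.deg2rad(20.0)
    R_true = np.array([[np.cos(angle), -np.sin(angle), 0.],
                       [np.sin(angle), np.cos(angle), 0.],
                       [0., 0., 1.]])
    t_true = np.array([0.5, -0.2, 1.0])
    dst = src.dot(R_true.T) + t_true
    src_mean, dst_mean = src.mean(axis=0), dst.mean(axis=0)
    H = (src - src_mean).T.dot(dst - dst_mean)
    U, _, Vt = np.linalg.svd(H)
    R_est = Vt.T.dot(U.T)
    if np.linalg.det(R_est) < 0:  # guard against a reflection solution
        Vt[-1, :] *= -1
        R_est = Vt.T.dot(U.T)
    t_est = dst_mean - R_est.dot(src_mean)
    return np.allclose(R_est, R_true), np.allclose(t_est, t_true)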
# This file is part of pyTSEB for calculating the net radiation and its divergence
# Copyright 2016 <NAME> and contributors listed in the README.md file.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Apr 6 2015
@author: <NAME> (<EMAIL>).
Modified on Jan 27 2016
@author: <NAME> (<EMAIL>).
DESCRIPTION
===========
This package contains functions for estimating the net shortwave and longwave radiation
for soil and canopy layers. Additional packages needed are:
* :doc:`meteoUtils` for the estimation of meteorological variables.
* `foursail` for the estimation net radiation using the 4SAIL Radiative Transfer Model.
PACKAGE CONTENTS
================
* :func:`calc_difuse_ratio` estimation of fraction of diffuse shortwave radiation.
* :func:`calc_emiss_atm` Atmospheric emissivity.
* :func:`calc_K_be_Campbell` Beam extinction coefficient.
* :func:`calc_L_n_Kustas` Net longwave radiation for soil and canopy layers.
* :func:`calc_Rn_OSEB` Net radiation in a One Source Energy Balance model.
* :func:`calc_Rn_4SAIL` Soil and canopy net radiation using 4SAIL.
* :func:`calc_Sn_Campbell` Net shortwave radiation.
* :func:`calc_tau_below_Campbell` Radiation transmission through a canopy.
* :func:`calc_Sn_below_4SAIL` Soil net shortwave radiation using 4SAIL.
'''
#from FourSAIL import FourSAIL
import numpy as np
from .meteo_utils import calc_stephan_boltzmann
#==============================================================================
# List of constants used in the netRadiation Module
#==============================================================================
# Stephan Boltzmann constant (W m-2 K-4)
sb = 5.670373e-8
def calc_difuse_ratio(S_dn, sza, press=1013.25, SOLAR_CONSTANT=1320):
    '''Fraction of diffuse shortwave radiation.
    Partitions the incoming solar radiation into PAR and non-PAR and into the
    diffuse and direct beam components of the solar spectrum.
Parameters
----------
S_dn : float
Incoming shortwave radiation (W m-2).
sza : float
Solar Zenith Angle (degrees).
Wv : float, optional
Total column precipitable water vapour (g cm-2), default 1 g cm-2.
press : float, optional
atmospheric pressure (mb), default at sea level (1013mb).
Returns
-------
difvis : float
diffuse fraction in the visible region.
difnir : float
diffuse fraction in the NIR region.
fvis : float
        fraction of total visible radiation.
fnir : float
fraction of total NIR radiation.
References
----------
.. [Weiss1985] Weiss and Norman (1985) Partitioning solar radiation into direct and diffuse,
visible and near-infrared components, Agricultural and Forest Meteorology,
Volume 34, Issue 2, Pages 205-213,
http://dx.doi.org/10.1016/0168-1923(85)90020-6.
'''
# Convert input scalars to numpy arrays
S_dn, sza, press = map(np.asarray, (S_dn, sza, press))
difvis, difnir, fvis, fnir = [np.zeros(S_dn.shape) for i in range(4)]
fvis = fvis + 0.6
fnir = fnir + 0.4
# Calculate potential (clear-sky) visible and NIR solar components
# Weiss & Norman 1985
Rdirvis, Rdifvis, Rdirnir, Rdifnir = calc_potential_irradiance_weiss(
sza, press=press, SOLAR_CONSTANT=SOLAR_CONSTANT)
# Potential total solar radiation
potvis = np.asarray(Rdirvis + Rdifvis)
potvis[potvis <= 0] = 1e-6
potnir = np.asarray(Rdirnir + Rdifnir)
potnir[potnir <= 0] = 1e-6
fclear = S_dn / (potvis + potnir)
fclear = np.minimum(1.0, fclear)
# Partition S_dn into VIS and NIR
fvis = potvis / (potvis + potnir) # Eq. 7
fnir = potnir / (potvis + potnir) # Eq. 8
fvis = np.maximum(0, fvis)
fvis = np.minimum(1, fvis)
fnir = 1.0 - fvis
# Estimate direct beam and diffuse fractions in VIS and NIR wavebands
ratiox = np.asarray(fclear)
ratiox[fclear > 0.9] = 0.9
dirvis = (Rdirvis / potvis) * (1. - ((.9 - ratiox) / .7)**.6667) # Eq. 11
ratiox = np.asarray(fclear)
ratiox[fclear > 0.88] = 0.88
dirnir = (Rdirnir / potnir) * \
(1. - ((.88 - ratiox) / .68)**.6667) # Eq. 12
dirvis = np.maximum(0.0, dirvis)
dirnir = np.maximum(0.0, dirnir)
dirvis = np.minimum(1, dirvis)
dirnir = np.minimum(1, dirnir)
difvis = 1.0 - dirvis
difnir = 1.0 - dirnir
return np.asarray(difvis), np.asarray(
difnir), np.asarray(fvis), np.asarray(fnir)
def calc_emiss_atm(ea, T_A_K):
'''Atmospheric emissivity
Estimates the effective atmospheric emissivity for clear sky.
Parameters
----------
ea : float
atmospheric vapour pressure (mb).
T_A_K : float
air temperature (Kelvin).
Returns
-------
emiss_air : float
effective atmospheric emissivity.
References
----------
.. [Brutsaert1975] <NAME>. (1975) On a derivable formula for long-wave radiation
from clear skies, Water Resour. Res., 11(5), 742-744,
        http://dx.doi.org/10.1029/WR011i005p00742.'''
emiss_air = 1.24 * (ea / T_A_K)**(1. / 7.) # Eq. 11 in [Brutsaert1975]_
return np.asarray(emiss_air)
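def _example_emiss_atm():
    # Illustrative check with assumed inputs (not part of the original module):
    # for ea = 10 mb and T_A_K = 293.15 K, Brutsaert's formula gives
    # 1.24 * (10 / 293.15)**(1 / 7) ~= 0.77, a typical clear-sky emissivity.
    return calc_emiss_atm(ea=10.0, T_A_K=293.15)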
def calc_K_be_Campbell(theta, x_LAD=1):
''' Beam extinction coefficient
Calculates the beam extinction coefficient based on [Campbell1998]_ ellipsoidal
leaf inclination distribution function.
Parameters
----------
theta : float
incidence zenith angle (degrees).
x_LAD : float, optional
Chi parameter for the ellipsoidal Leaf Angle Distribution function,
use x_LAD=1 for a spherical LAD.
Returns
-------
K_be : float
beam extinction coefficient.
x_LAD: float, optional
x parameter for the ellipsoidal Leaf Angle Distribution function,
use x_LAD=1 for a spherical LAD.
References
----------
.. [Campbell1998] <NAME>. & <NAME>. (1998), An introduction to environmental
biophysics. Springer, New York
https://archive.org/details/AnIntroductionToEnvironmentalBiophysics.
'''
theta = np.radians(theta)
K_be = np.sqrt(x_LAD**2 + np.tan(theta)**2) / \
(x_LAD + 1.774 * (x_LAD + 1.182)**-0.733)
return np.asarray(K_be)
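def _example_K_be_nadir():
    # Illustrative check with assumed inputs: for a spherical leaf angle
    # distribution (x_LAD = 1) and nadir illumination (theta = 0 degrees)
    # the beam extinction coefficient evaluates to roughly 0.5.
    return calc_K_be_Campbell(0.0, x_LAD=1)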
def calc_L_n_Kustas(T_C, T_S, L_dn, LAI, emisVeg, emisGrd, x_LAD=1):
''' Net longwave radiation for soil and canopy layers
    Estimates the net longwave radiation for soil and canopy layers based on equation 2a
    from [Kustas1999]_ and incorporates the effect of the Leaf Angle Distribution based on [Campbell1998]_
Parameters
----------
T_C : float
Canopy temperature (K).
T_S : float
Soil temperature (K).
L_dn : float
Downwelling atmospheric longwave radiation (w m-2).
LAI : float
Effective Leaf (Plant) Area Index.
emisVeg : float
Broadband emissivity of vegetation cover.
emisGrd : float
Broadband emissivity of soil.
x_LAD: float, optional
x parameter for the ellipsoidal Leaf Angle Distribution function,
use x_LAD=1 for a spherical LAD.
Returns
-------
L_nC : float
Net longwave radiation of canopy (W m-2).
L_nS : float
Net longwave radiation of soil (W m-2).
References
----------
.. [Kustas1999] <NAME> (1999) Evaluation of soil and vegetation heat
flux predictions using a simple two-source model with radiometric temperatures for
partial canopy cover, Agricultural and Forest Meteorology, Volume 94, Issue 1,
Pages 13-29, http://dx.doi.org/10.1016/S0168-1923(99)00005-2.
'''
# Integrate to get the diffuse transmitance
taud = 0
for angle in range(0, 90, 5):
akd = calc_K_be_Campbell(angle, x_LAD) # Eq. 15.4
taub = np.exp(-akd * LAI)
taud = taud + taub * np.cos(np.radians(angle)) * \
            np.sin(np.radians(angle))
import pandas as pd
import numpy as np
def get_hardCoded_variableValues():
#Hard-coded values:
n = 3 #flow law exponent
A = 10**(-25) #rate factor
rho = 918 #density of the ice (kg/m3)
g = 3.710000 #gravitational acceleration on Mars (m/s2)
return (n, A, rho, g)
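def _example_flow_law_parameters():
    # Illustrative usage of the helper above (not part of the original script):
    # unpack the flow-law exponent (n), the rate factor (A), the ice density
    # and the Martian gravitational acceleration for later use in a flow model.
    n, A, rho, g = get_hardCoded_variableValues()
    return {'n': n, 'A': A, 'rho': rho, 'g': g}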
def read_shallowRadar_data():
sharad_data = pd.read_csv('Sharad.csv') #processed data in .csv format
a = np.array(sharad_data['a']).reshape(-1,1) #mass balance function values
dhdx = np.array(sharad_data['dhdx']).reshape(-1,1) #slope of the ice surface measurements
h_obs = np.array(sharad_data['H_obs']).reshape(-1,1) #thickness of ice surface measurements
    x = np.array(sharad_data['x'])
# Libraries
from keras.datasets import mnist
from matplotlib import pyplot as plt
import matplotlib
import random
import numpy as np
# Classes
class Network:
# Methods
def __init__(self, sizes):
"""
DESCRIPTION: class constructor.
"""
# Store the network information
self.num_layers = len(sizes)
self.sizes = sizes
# Random parameter initialisation
self.biases = [np.random.rand(size, 1) for size in sizes[1:]]
self.weights = [np.random.rand(y, x) for (x, y) in zip(sizes[:-1], sizes[1:])]
self.biases[0] = 0.001 * self.biases[0]
self.weights[0] = 0.001 * self.weights[0]
def feedforward(self, a):
"""
DESCRIPTION: a method to compute the network from the initial to the
last layers.
Equation: a' = wa + b
:param a: [np.ndarray] n x 1 input vector.
:return: [float] network outcome
"""
a = np.reshape(a, (a.shape[0] ** 2, 1))
for (b, w) in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a) + b)
return a
def update_mini_batch(self, mini_batch, eta):
"""
DESCRIPTION:
Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
:param mini_batch: [list] tuples (x, y), image to classify (numpy.array, x) and
correct classification (int, y).
:param eta: [float] learning rate.
"""
# Store space in memory for the parameter gradients
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# Update the parameters for every example in the mini batch
for x, y in mini_batch:
# Compute the gradients
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
# Sum the gradients for every batch
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]
self.weights = [w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
def backprop(self, x, y):
"""
DESCRIPTION:
Return a tuple "(nabla_b, nabla_w)" representing the
gradient for the cost function C_x. "nabla_b" and
"nabla_w" are layer-by-layer lists of numpy arrays, similar
to "self.biases" and "self.weights".
:param x: [np.array] the image to classify.
:param y: [int] the correct class of the image.
:return: [tuple] np.arrays with the gradient vectors of both
biases and weights.
"""
# My solution
# Feedforward
# Obtain the input vector z for the last layer
activations = [np.reshape(x, (x.shape[0] ** 2, 1))]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activations[-1]) + b
zs.append(z)
activations.append(sigmoid(z))
# Compute delta (error) for that layer
y_digit = y - 1
y = np.zeros(activations[-1].shape)
y[y_digit] = 1
nabla_c = activations[-1] - y
# Backpropagate the error in the other layers
n_layers = len(self.biases)
deltas = [0.0] * n_layers
deltas[-1] = nabla_c * sigmoid_prime(zs[-1])
for i in range(n_layers - 2, -1, -1):
deltas[i] = np.dot(self.weights[i + 1].transpose(), deltas[i + 1]) * sigmoid_prime(zs[i])
# Build the parameter update
nabla_b = deltas
nabla_w = [np.dot(deltas[i], activations[i].transpose()) for i in range(len(deltas))]
return (nabla_b, nabla_w)
"""
# Store information for the computing
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# Feedforward
x = np.reshape(x, (x.shape[0] ** 2, 1))
activation = x
activations = [x] # List to store all the activations, layer by layer
zs = [] # List to store all the z vectors, layer by layer
# Iterate over all the parameters for all the neurons
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation) + b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# Backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in range(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
"""
def evaluate(self, test_data):
"""
DESCRIPTION:
Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation.
:param test_data: [list] the numpy arrays representing the image list
to test the algorithm while training.
:return: [int] number of images correctly classified.
"""
n_corrects = sum([int((np.argmax(self.feedforward(image)) - 1) == correct_classification)
for image, correct_classification in test_data])
return n_corrects
"""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
"""
def cost_derivative(self, output_activations, y):
"""
DESCRIPTION:
Return the vector of partial derivatives \partial C_x /
\partial a for the output activations.
"""
return (output_activations-y)
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
"""
DESCRIPTION: stochastic gradient descent algorithm. Train the neural
network using mini-batch stochastic gradient descent. The
"training_data" is a list of tuples "(x, y)" representing the training
inputs and the desired outputs. Essentially, the images are divided in
batches and then the parameters are updated for every batch.
:param training_data: [list] the numpy arrays representing the image list
to train the algorithm.
:param test_data: [list] the numpy arrays representing the image list
to test the algorithm while training.
:param epochs: [int] number of times that the training_data should be
computed.
:param mini_batch_size: [int] number of images to compute the gradient.
:param eta: [float] the learning rate.
"""
# Go over the data in all the epochs
for i in range(epochs):
# Shuffle the data to take different combination in every epoch
random.shuffle(training_data)
# Create the mini batches to compute the gradient
mini_batches = [training_data[k:k+mini_batch_size]
for k in range(0, len(training_data), mini_batch_size)]
# Update the parameters for every mini batch
[self.update_mini_batch(mini_batch, eta) for mini_batch in mini_batches]
# Compare with test data
if test_data:
# Return the number of test examples from which the network returns
# the correct output
print(f'Epoch {i + 1}: {self.evaluate(test_data)} / {len(test_data)} correct')
else:
print(f'Epoch {i + 1} completed')
# Functions
def sigmoid(z):
"""
DESCRIPTION:
Sigmoid function.
"""
result = 1.0 / (1.0 + np.exp(-z))
return result
def sigmoid_prime(z):
"""
DESCRIPTION:
Derivative of the sigmoid function.
"""
return sigmoid(z)*(1-sigmoid(z))
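def check_sigmoid_prime(z=0.5, eps=1e-6):
    # Illustrative numerical check (not part of the original code): compare the
    # analytic derivative above with a central finite difference.
    numerical = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
    return sigmoid_prime(z), numerical  # both ~0.235 for z = 0.5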
if __name__ == '__main__':
# Load data
(train_x, train_y), (test_x, test_y) = mnist.load_data()
# # Convert to float numbers
# train_x = train_x / 255.0
# test_x = test_x / 255.0
# Convert to image list
train_x = np.split(train_x, train_x.shape[0], axis=0)
    train_x = list(map(lambda image: np.squeeze(image), train_x))
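    # A minimal way to finish wiring the training loop with the class above
    # (sketch only; the layer sizes and hyper-parameters below are assumed,
    # not taken from the original script):
    #
    #     train_x = [image / 255.0 for image in train_x]            # 28x28 arrays
    #     test_x = [np.squeeze(image) / 255.0
    #               for image in np.split(test_x, test_x.shape[0], axis=0)]
    #     training_data = list(zip(train_x, train_y))
    #     test_data = list(zip(test_x, test_y))
    #     net = Network([784, 30, 10])
    #     net.SGD(training_data, epochs=10, mini_batch_size=32, eta=3.0,
    #             test_data=test_data)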
"""
Module containing class representing a Gaussian distribution. Its PDF is
represented by: $$f(\\boldsymbol{x})=|2\\pi\\boldsymbol{\\Sigma}|^{-1/2}\\ \
\\exp{\\left[-\\frac{1}{2}(\\boldsymbol{x}-\\boldsymbol{\\mu})^T\
\\boldsymbol{\\Sigma}^{-1}(\\boldsymbol{x}-\\boldsymbol{\\mu})\\right]}$$
**File**: $DISTPY/distpy/distribution/GaussianDistribution.py
**Author**: <NAME>
**Date**: 31 May 2021
"""
from __future__ import division
import numpy as np
import numpy.random as rand
import numpy.linalg as npla
import scipy.linalg as scila
from scipy.special import erfinv
from ..util import numerical_types, int_types, sequence_types,\
create_hdf5_dataset, get_hdf5_value
from .Distribution import Distribution
natural_log_two_pi = np.log(2 * np.pi)
class GaussianDistribution(Distribution):
"""
Class representing a Gaussian distribution. Its PDF is represented by:
$$f(\\boldsymbol{x})=|2\\pi\\boldsymbol{\\Sigma}|^{-1/2}\\ \
\\exp{\\left[-\\frac{1}{2}(\\boldsymbol{x}-\\boldsymbol{\\mu})^T\
\\boldsymbol{\\Sigma}^{-1}(\\boldsymbol{x}-\\boldsymbol{\\mu})\\right]}$$
"""
def __init__(self, mean, covariance, metadata=None):
"""
Initializes a new `GaussianDistribution` with the given parameter
values.
Parameters
----------
mean : float or `numpy.ndarray`
- if this is a univariate Gaussian, `mean` is a real number giving
peak of distribution
- if this is a multivariate Gaussian, `mean` is a 1D array of real
numbers giving peak of distribution
covariance : float or `numpy.ndarray`
- if this is a univariate Gaussian, `covariance` is a real,
positive number giving size of distribution
- if this is a multivariate Gaussian, `covariance` is a square 2D
array giving covariance matrix of the distribution. Each dimension
should have the same length as `mean`
metadata : number or str or dict or `distpy.util.Savable.Savable`
data to store alongside this distribution.
"""
self.internal_mean = mean
self.covariance = covariance
self.metadata = metadata
@staticmethod
def combine(*distributions):
"""
Combines many `GaussianDistribution` objects into one by concatenating
their means and covariance matrices.
Parameters
----------
distributions : sequence
a sequence of `GaussianDistribution` objects to combine
Returns
-------
combined : `GaussianDistribution`
if the distributions in `distributions` have means
\\(\\boldsymbol{\\mu}_1,\\boldsymbol{\\mu}_2,\\ldots,\
\\boldsymbol{\\mu}_N\\) and covariances
\\(\\boldsymbol{\\Sigma}_1,\\boldsymbol{\\Sigma}_2,\\ldots,\
\\boldsymbol{\\Sigma}_N\\), then `combined` has mean
\\(\\begin{bmatrix} \\boldsymbol{\\mu}_1 \\\\\
\\boldsymbol{\\mu}_2 \\\\ \\vdots \\\\ \\boldsymbol{\\mu}_N\
\\end{bmatrix}\\) and covariance \\(\\begin{bmatrix}\
\\boldsymbol{\\Sigma}_1 & \\boldsymbol{0} & \\cdots &\
\\boldsymbol{0} \\\\ \\boldsymbol{0} & \\boldsymbol{\\Sigma}_2 &\
\\cdots & \\boldsymbol{0} \\\\ \\vdots & \\vdots & \\ddots &\
\\vdots \\\\ \\boldsymbol{0} & \\boldsymbol{0} & \\cdots &\
\\boldsymbol{\\Sigma}_N \\end{bmatrix}\\)
"""
if all([isinstance(distribution, GaussianDistribution)\
for distribution in distributions]):
new_mean = np.concatenate([distribution.internal_mean.A[0]\
for distribution in distributions])
new_covariance = scila.block_diag(*[distribution.covariance.A\
for distribution in distributions])
return GaussianDistribution(new_mean, new_covariance)
else:
raise TypeError("At least one of the distributions given to " +\
"the GaussianDistribution class' combine function was not " +\
"a GaussianDistribution.")
@property
def internal_mean(self):
"""
The mean of this `GaussianDistribution` in `numpy.matrix` form.
"""
if not hasattr(self, '_internal_mean'):
raise AttributeError("internal_mean was referenced before it " +\
"was set.")
return self._internal_mean
@internal_mean.setter
def internal_mean(self, value):
"""
Setter for `GaussianDistribution.internal_mean`.
Parameters
----------
value : float or `numpy.ndarray`
- if this distribution is univariate, `value` is a single number
- otherwise, `value` is a 1D numpy.ndarray of length
`GaussianDistribution.numparams`
"""
if type(value) in numerical_types:
value = [value]
if type(value) in sequence_types:
value = np.array(value)
if value.ndim != 1:
raise ValueError("The mean of a GaussianDistribution was " +\
"not 1 dimensional.")
elif value.size == 0:
raise ValueError("The mean of a GaussianDistribution was " +\
"set to something like an empty array.")
else:
self._internal_mean = np.matrix(value)
else:
raise ValueError("The mean of a GaussianDistribution is not of " +\
"a recognizable type.")
@property
def covariance(self):
"""
The covariance matrix of this `GaussianDistribution` in `numpy.matrix`
form.
"""
if not hasattr(self, '_covariance'):
raise AttributeError("covariance was referenced before it was " +\
"set.")
return self._covariance
@covariance.setter
def covariance(self, value):
"""
Setter for the `GaussianDistribution.covariance`.
Parameters
----------
value : float or numpy.ndarray
- if this distribution is univariate, then `value` can be a single
number representing the variance
- otherwise, this should be a square positive definite matrix of
rank numparams or a 1D array of variances (in which case the
variates are assumed independent)
"""
if type(value) in numerical_types:
if self.numparams == 1:
self._covariance = np.matrix([[value]])
else:
raise TypeError("covariance was set to a number even " +\
"though this Gaussian is multi-dimensional.")
elif type(value) in sequence_types:
value = np.array(value)
if np.any(np.isnan(value)):
raise ValueError(("For some reason, there are nan's in the " +\
"covariance matrix given to a GaussianDistribution, " +\
"which was:\n{}.").format(value))
elif value.shape == (self.numparams,):
self._covariance = np.matrix(np.diag(value))
elif value.shape == ((self.numparams,) * 2):
self._covariance = np.matrix((value + value.T) / 2)
else:
raise ValueError("The covariance given to a " +\
"GaussianDistribution was not castable to an array of " +\
"the correct shape. It should be a square shape with " +\
"the same side length as length of mean.")
else:
raise ValueError("The mean of a GaussianDistribution is " +\
"array-like but its covariance isn't matrix like.")
self.square_root_covariance
@property
def mean(self):
"""
The mean of this `GaussianDistribution`, \\(\\boldsymbol{\\mu}\\),
which is an array if this distribution is multivariate and a scalar if
it is univariate.
"""
if not hasattr(self, '_mean'):
if self.numparams == 1:
self._mean = self.internal_mean.A[0,0]
else:
self._mean = self.internal_mean.A[0]
return self._mean
@property
def variance(self):
"""
The (co)variance of this `GaussianDistribution`,
\\(\\boldsymbol{\\Sigma}\\).
"""
if not hasattr(self, '_variance'):
if self.numparams == 1:
self._variance = self.covariance.A[0,0]
else:
self._variance = self.covariance.A
return self._variance
@property
def log_determinant_covariance(self):
"""
The natural logarithm of the determinant of the covariance matrix,
given by \\(\\ln{|\\boldsymbol{\\Sigma}|}\\).
"""
if not hasattr(self, '_log_determinant_covariance'):
self._log_determinant_covariance = npla.slogdet(self.covariance)[1]
return self._log_determinant_covariance
@property
def inverse_covariance(self):
"""
The inverse of the covariance matrix, given by
\\(\\boldsymbol{\\Sigma}^{-1}\\).
"""
if not hasattr(self, '_inverse_covariance'):
if self.covariance_diagonal:
self._inverse_covariance =\
np.matrix(np.diag(1 / np.diag(self.covariance.A)))
else:
self._inverse_covariance = npla.inv(self.covariance)
return self._inverse_covariance
@property
def numparams(self):
"""
The number of parameters of this `GaussianDistribution`.
"""
if not hasattr(self, '_numparams'):
self._numparams = len(self.internal_mean.A[0])
return self._numparams
def __add__(self, other):
"""
Adds the Gaussian random variate described by this distribution
to the given object.
Parameters
----------
other : float or numpy.ndarray or `GaussianDistribution`
- if other is a constant, the returned Gaussian is the same as this
one with other added to the mean and the same covariance
- if other is a 1D `numpy.ndarray`, it must be of the same length
as the dimension of this `GaussianDistribution`. In this case, the
returned `GaussianDistribution` is the distribution of the sum of
this Gaussian variate with other
- if other is a `GaussianDistribution`, it must have the same
number of parameters as this one
Returns
-------
distribution : `GaussianDistribution`
distribution of the sum of this Gaussian variate and `other`
"""
if isinstance(other, GaussianDistribution):
if self.numparams == other.numparams:
new_mean = self.internal_mean.A[0] + other.internal_mean.A[0]
new_covariance = self.covariance.A + other.covariance.A
else:
raise ValueError("Cannot add two GaussianDistribution " +\
"objects with different numbers of parameters.")
elif type(other) in [list, tuple, np.ndarray]:
other = np.array(other)
if other.ndim == 1:
if len(other) == self.numparams:
new_mean = self.internal_mean.A[0] + other
new_covariance = self.covariance.A
                else:
                    raise ValueError("Cannot add a vector of a different " +\
                        "size to a Gaussian distributed random vector.")
            else:
                raise ValueError("Cannot add a tensor with more than 1 " +\
                    "index to a Gaussian distributed random vector.")
else:
# assume other is a constant
new_mean = self.internal_mean.A[0] + other
new_covariance = self.covariance.A
return GaussianDistribution(new_mean, new_covariance)
def __radd__(self, other):
"""
Adds the Gaussian random variate described by this distribution
to the given object.
Parameters
----------
other : float or numpy.ndarray or `GaussianDistribution`
- if other is a constant, the returned Gaussian is the same as this
one with other added to the mean and the same covariance
- if other is a 1D `numpy.ndarray`, it must be of the same length
as the dimension of this `GaussianDistribution`. In this case, the
returned `GaussianDistribution` is the distribution of the sum of
this Gaussian variate with other
- if other is a `GaussianDistribution`, it must have the same
number of parameters as this one
Returns
-------
distribution : `GaussianDistribution`
distribution of the sum of this Gaussian variate and `other`
"""
return self.__add__(other)
def __sub__(self, other):
"""
Subtracts the given object from the Gaussian random variate described
by this distribution.
Parameters
----------
other : float or numpy.ndarray or `GaussianDistribution`
- if other is a constant, the returned Gaussian is the same as this
one with other subtracted from the mean and the same covariance
- if other is a 1D `numpy.ndarray`, it must be of the same length
as the dimension of this `GaussianDistribution`. In this case, the
returned `GaussianDistribution` is the distribution of the
difference of this Gaussian variate with `other`
- if other is a `GaussianDistribution`, it must have the same
number of parameters as this one
Returns
-------
distribution : `GaussianDistribution`
distribution of the difference of this Gaussian variate and `other`
"""
return self.__add__(-other)
def __rsub__(self, other):
"""
Subtracts the Gaussian random variate described by this distribution
from `other`.
Parameters
----------
other : float or numpy.ndarray or `GaussianDistribution`
- if other is a constant, the returned Gaussian is the same as this
one with mean replaces with other-mean and the same covariance
- if other is a 1D `numpy.ndarray`, it must be of the same length
as the dimension of this `GaussianDistribution`. In this case, the
returned `GaussianDistribution` is the distribution of the
difference of this Gaussian variate with `other`
- if other is a `GaussianDistribution`, it must have the same
number of parameters as this one
Returns
-------
distribution : `GaussianDistribution`
distribution of the difference of this Gaussian variate and `other`
"""
return self.__sub__(other).__neg__()
def __neg__(self):
"""
Finds the distribution of the negated gaussian variate.
Returns
-------
distribution : `GaussianDistribution`
distribution with the same covariance but a negated mean
"""
return GaussianDistribution(-self.internal_mean.A[0],\
self.covariance.A)
def __mul__(self, other):
"""
Multiplies the Gaussian random variate described by this distribution
by the given constant.
Parameters
----------
other : float
any real number
Returns
-------
distribution : `GaussianDistribution`
distribution of the product of the random variable with this
distribution and the constant `other`
"""
new_mean = self.internal_mean.A[0] * other
new_covariance = self.covariance.A * (other ** 2)
return GaussianDistribution(new_mean, new_covariance)
def __rmul__(self, other):
"""
Multiplies the Gaussian random variate described by this distribution
by the given constant.
Parameters
----------
other : float
any real number
Returns
-------
distribution : `GaussianDistribution`
distribution of the product of the random variable with this
distribution and the constant `other`
"""
return self.__mul__(other)
def __div__(self, other):
"""
Divides the Gaussian random variate described by this distribution
by the given constant.
Parameters
----------
other : float
any real number
Returns
-------
distribution : `GaussianDistribution`
distribution of the quotient of the random variable with this
distribution and the constant `other`
"""
return self.__mul__(1 / other)
@property
def covariance_diagonal(self):
"""
A boolean describing whether the covariance matrix is exactly diagonal
or not.
"""
if not hasattr(self, '_covariance_diagonal'):
self._covariance_diagonal = np.all(\
self.covariance.A == np.diag(np.diag(self.covariance.A)))
return self._covariance_diagonal
def _make_square_root_and_inverse_square_root_covariance(self):
"""
Computes the square root and inverse square root of the covariance
matrix and stores them in internal properties, allowing
`GaussianDistribution.square_root_covariance` and
`GaussianDistribution.inverse_square_root_covariance` properties to be
referenced.
"""
if self.covariance_diagonal:
self._square_root_covariance =\
np.diag(np.sqrt(np.diag(self.covariance.A)))
self._inverse_square_root_covariance =\
np.diag(1 / np.sqrt(np.diag(self.covariance.A)))
else:
(eigenvalues, eigenvectors) = npla.eigh(self.covariance.A)
if np.any(eigenvalues <= 0):
raise ValueError(("Something went wrong, causing the " +\
"square root of the covariance matrix of this " +\
"GaussianDistribution to have at least one complex " +\
"element. The eigenvalues of the covariance matrix " +\
"are {!s}.").format(eigenvalues))
eigenvalues = np.sqrt(eigenvalues)
self._square_root_covariance =\
                np.dot(eigenvectors * eigenvalues[None,:], eigenvectors.T)
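def _example_gaussian_usage():
    # Illustrative sketch of the API defined above. It assumes the remainder of
    # the class (e.g. the square_root_covariance property referenced by the
    # covariance setter) is available, as in the full distpy package.
    univariate = GaussianDistribution(0., 1.)
    bivariate = GaussianDistribution([0., 1.], [[2., 0.5], [0.5, 1.]])
    combined = GaussianDistribution.combine(univariate, bivariate)  # 3 params
    shifted = (2 * bivariate) + np.array([1., -1.])  # scaled, then translated
    return combined.numparams, shifted.mean, shifted.variance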
import qiskit
import qtm.progress_bar
import qtm.constant
import qtm.qfim
import qtm.noise
import qtm.optimizer
import qtm.fubini_study
import numpy as np
import types, typing
def measure(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
"""Measuring the quantu circuit which fully measurement gates
Args:
- qc (QuantumCircuit): Measured circuit
- qubits (np.ndarray): List of measured qubit
Returns:
- float: Frequency of 00.. cbit
"""
n = len(qubits)
if cbits == []:
cbits = qubits.copy()
for i in range(0, n):
qc.measure(qubits[i], cbits[i])
if qtm.constant.noise_prob > 0:
noise_model = qtm.noise.generate_noise_model(
n, qtm.constant.noise_prob)
results = qiskit.execute(qc, backend=qtm.constant.backend,
noise_model=noise_model,
shots=qtm.constant.num_shots).result()
# Raw counts
counts = results.get_counts()
# Mitigating noise based on https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html
meas_filter = qtm.noise.generate_measurement_filter(
n, noise_model=noise_model)
# Mitigated counts
counts = meas_filter.apply(counts.copy())
else:
counts = qiskit.execute(
qc, backend=qtm.constant.backend,
shots=qtm.constant.num_shots).result().get_counts()
return counts.get("0" * len(qubits), 0) / qtm.constant.num_shots
def x_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
"""As its function name
Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.h(qubits[i])
qc.measure(qubits[i], cbits[i])
return qc
def y_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
"""As its function name
Args:
qc (qiskit.QuantumCircuit): measuremed circuit
qubits (np.ndarray): list of measuremed qubit
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.sdg(qubits[i])
qc.h(qubits[i])
qc.measure(qubits[i], cbits[i])
return qc
def z_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
"""As its function name
Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.measure(qubits[i], cbits[i])
return qc
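def _example_basis_measurements():
    # Illustrative sketch (assumed usage, not from the original code base):
    # attach X-, Y- and Z-basis measurements to simple one-qubit circuits
    # using the helpers defined above.
    circuits = []
    for basis_fn in (x_measurement, y_measurement, z_measurement):
        qc = qiskit.QuantumCircuit(1, 1)
        circuits.append(basis_fn(qc, [0], [0]))
    return circuits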
def get_u_hat(thetas: np.ndarray, create_circuit_func: types.FunctionType, num_qubits: int,
**kwargs):
"""Return inverse of reconstructed gate
Args:
- thetas (np.ndarray): Parameters
- num_qubits (Int): number of qubit
Returns:
- Statevector: The state vector of when applying u_1q gate
"""
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
if not kwargs:
qc = create_circuit_func(qc, thetas).inverse()
else:
qc = create_circuit_func(qc, thetas, **kwargs).inverse()
return qiskit.quantum_info.Statevector.from_instruction(qc)
def get_cry_index(create_circuit_func: types.FunctionType, thetas: np.ndarray, num_qubits, **kwargs):
"""Return a list where i_th = 1 mean thetas[i] is parameter of CRY gate
Args:
- func (types.FunctionType): The creating circuit function
- thetas (np.ndarray): Parameters
Returns:
- np.ndarray: The index list has length equal with number of parameters
"""
qc = qiskit.QuantumCircuit(num_qubits)
qc = create_circuit_func(qc, thetas, **kwargs)
layers = qtm.fubini_study.split_into_layers(qc)
index_list = []
for layer in layers:
for gate in layer[1]:
if gate[0] == 'cry':
index_list.append(1)
else:
index_list.append(0)
    return index_list
def grad_loss(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
thetas: np.ndarray, **kwargs):
"""Return the gradient of the loss function
L = 1 - |<psi~|psi>|^2 = 1 - P_0
=> nabla_L = - nabla_P_0 = - r (P_0(+s) - P_0(-s))
Args:
        - qc (QuantumCircuit): The quantum circuit whose gradient is being computed
        - create_circuit_func (Function): The circuit-creating function
        - thetas (np.ndarray): Parameters
- **kwargs: additional parameters for different create_circuit_func()
Returns:
- np.ndarray: the gradient vector
"""
index_list = get_cry_index(create_circuit_func, thetas,
num_qubits=qc.num_qubits, **kwargs)
grad_loss = np.zeros(len(thetas))
for i in range(0, len(thetas)):
if index_list[i] == 0:
# In equation (13)
thetas1, thetas2 = thetas.copy(), thetas.copy()
thetas1[i] += qtm.constant.two_term_psr['s']
thetas2[i] -= qtm.constant.two_term_psr['s']
qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
grad_loss[i] = -qtm.constant.two_term_psr['r'] * (
qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
qtm.base.measure(qc2, list(range(qc2.num_qubits))))
if index_list[i] == 1:
# In equation (14)
thetas1, thetas2 = thetas.copy(), thetas.copy()
thetas3, thetas4 = thetas.copy(), thetas.copy()
thetas1[i] += qtm.constant.four_term_psr['alpha']
thetas2[i] -= qtm.constant.four_term_psr['alpha']
thetas3[i] += qtm.constant.four_term_psr['beta']
thetas4[i] -= qtm.constant.four_term_psr['beta']
qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
qc3 = create_circuit_func(qc.copy(), thetas3, **kwargs)
qc4 = create_circuit_func(qc.copy(), thetas4, **kwargs)
grad_loss[i] = - (qtm.constant.four_term_psr['d_plus'] * (
qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
qtm.base.measure(qc2, list(range(qc2.num_qubits)))) - qtm.constant.four_term_psr['d_minus'] * (
qtm.base.measure(qc3, list(range(qc3.num_qubits))) -
qtm.base.measure(qc4, list(range(qc4.num_qubits)))))
return grad_loss
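# Illustrative sketch, not part of the original API: a single plain gradient-descent update built
# on grad_loss. Assumes create_circuit_func(qc, thetas, **kwargs) is any ansatz builder used
# elsewhere in this module; the learning rate is a hypothetical choice.
def _example_gradient_step(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
                           thetas: np.ndarray, learning_rate: float = 0.1, **kwargs):
    """Return thetas after one gradient-descent step on L = 1 - P_0."""
    grads = grad_loss(qc, create_circuit_func, thetas, **kwargs)
    return thetas - learning_rate * grads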
def grad_psi(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
thetas: np.ndarray, r: float, s: float, **kwargs):
"""Return the derivatite of the psi base on parameter shift rule
Args:
- qc (qiskit.QuantumCircuit): circuit
- create_circuit_func (types.FunctionType)
- thetas (np.ndarray): parameters
- r (float): in psr
- s (float): in psr
Returns:
- np.ndarray: N x N matrix
"""
gradient_psi = []
for i in range(0, len(thetas)):
thetas_copy = thetas.copy()
thetas_copy[i] += s
qc_copy = create_circuit_func(qc.copy(), thetas_copy, **kwargs)
psi_qc = qiskit.quantum_info.Statevector.from_instruction(qc_copy).data
psi_qc = np.expand_dims(psi_qc, 1)
gradient_psi.append(r * psi_qc)
    gradient_psi = np.array(gradient_psi)
    return gradient_psi
from .visual import CocoPart
import numpy as np
from helpers import *
from default_params import *
def match_ip(ip_set, new_ips, lstm_set, num_matched, consecutive_frames=DEFAULT_CONSEC_FRAMES):
len_ip_set = len(ip_set)
added = [False for _ in range(len_ip_set)]
new_len_ip_set = len_ip_set
for new_ip in new_ips:
if not is_valid(new_ip):
continue
# assert valid_candidate_hist(new_ip)
cmin = [MIN_THRESH, -1]
for i in range(len_ip_set):
if not added[i] and dist(last_ip(ip_set[i])[0], new_ip) < cmin[0]:
                # TODO: add some condition here, e.g. that last_ip(ip_set[i]) is recent enough (> -5 or similar)
cmin[0] = dist(last_ip(ip_set[i])[0], new_ip)
cmin[1] = i
if cmin[1] == -1:
ip_set.append([None for _ in range(consecutive_frames - 1)] + [new_ip])
lstm_set.append([None, 0, 0, 0]) # Initial hidden state of lstm is None
new_len_ip_set += 1
else:
added[cmin[1]] = True
pop_and_add(ip_set[cmin[1]], new_ip, consecutive_frames)
new_matched = num_matched
removed_indx = []
removed_match = []
for i in range(len(added)):
if not added[i]:
pop_and_add(ip_set[i], None, consecutive_frames)
if ip_set[i] == [None for _ in range(consecutive_frames)]:
if i < num_matched:
new_matched -= 1
removed_match.append(i)
new_len_ip_set -= 1
removed_indx.append(i)
for i in sorted(removed_indx, reverse=True):
ip_set.pop(i)
        lstm_set.pop(i)  # keep lstm_set aligned with ip_set
return new_matched, new_len_ip_set, removed_match
def extend_vector(p1, p2, l):
p1 += (p1-p2)*l/(2*np.linalg.norm((p1-p2), 2))
p2 -= (p1-p2)*l/(2*np.linalg.norm((p1-p2), 2))
return p1, p2
def perp(a):
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
# returns the intersection point of the two lines through those segments
def seg_intersect(a1, a2, b1, b2):
da = a2-a1
db = b2-b1
dp = a1-b1
dap = perp(da)
denom = np.dot(dap, db)
num = np.dot(dap, dp)
return (num / denom.astype(float))*db + b1
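# Quick numeric check (hypothetical values): the segments (0,0)-(2,2) and (0,2)-(2,0) cross at (1,1).
#     seg_intersect(np.array([0., 0.]), np.array([2., 2.]),
#                   np.array([0., 2.]), np.array([2., 0.]))
#     # -> array([1., 1.])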
def get_kp(kp):
threshold1 = 5e-3
# dict of np arrays of coordinates
inv_pend = {}
# print(type(kp[CocoPart.LEar]))
numx = (kp[CocoPart.LEar][2]*kp[CocoPart.LEar][0] + kp[CocoPart.LEye][2]*kp[CocoPart.LEye][0] +
kp[CocoPart.REye][2]*kp[CocoPart.REye][0] + kp[CocoPart.REar][2]*kp[CocoPart.REar][0])
numy = (kp[CocoPart.LEar][2]*kp[CocoPart.LEar][1] + kp[CocoPart.LEye][2]*kp[CocoPart.LEye][1] +
kp[CocoPart.REye][2]*kp[CocoPart.REye][1] + kp[CocoPart.REar][2]*kp[CocoPart.REar][1])
den = kp[CocoPart.LEar][2] + kp[CocoPart.LEye][2] + kp[CocoPart.REye][2] + kp[CocoPart.REar][2]
if den < HEAD_THRESHOLD:
inv_pend['H'] = None
else:
        inv_pend['H'] = np.array([numx/den, numy/den])
import GPy
import numpy as np
# Before running this test, ensure andro.arff, downloaded from
# http://mulan.sourceforge.net/datasets-mtr.html is in this folder.
# Run this test from the command line using
# `python3 slfm_experiment.py`
if __name__ == "__main__":
with open('andro.arff') as f:
for row in range(0,40):
f.readline()
i = -1
locs = []
dat = np.zeros((54,6))
for i in range(0,48):
s = f.readline()
vals = list(map(float,s.split(',')))
dat[i,0] = vals[0]
dat[i,1] = vals[1]
dat[i,2] = vals[2]
dat[i,3] = vals[3]
dat[i,4] = vals[4]
dat[i,5] = vals[5]
s = f.readline()
vals = list(map(float,s.split(',')))
for i in range(0,6):
for v in range(0,6):
dat[i+48,v] = vals[v + 6*i]
X = np.linspace(0,53,54)
train = np.copy(dat)
test = np.copy(dat)
K1 = GPy.kern.RBF(1)
K2 = GPy.kern.RBF(1)
K3 = GPy.kern.RBF(1)
K4 = GPy.kern.RBF(1)
K5 = GPy.kern.RBF(1)
K6 = GPy.kern.RBF(1)
# Best performance achieved with 6 kernels
LCM = GPy.util.multioutput.LCM(input_dim=1,num_outputs=6,kernels_list=[K1,K2,K3,K4,K5,K6])
XTemp = X[:,None]
YTemp = train[:,0][:,None]
XPH = X[:,None]
YPH = train[:,1][:,None]
XCond = X[:,None]
YCond = train[:,2][:,None]
XSal = np.concatenate((X[0:20],X[30:]),axis=0)[:,None]
YSal = np.concatenate((train[0:20,3],train[30:,3]),axis=0)[:,None]
    XOxy = np.concatenate((X[0:30],X[40:]),axis=0)[:,None]
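    # Hedged continuation sketch (assumptions: the remaining targets are stacked the same way as
    # XSal/YSal above and GPy's coregionalized regression wrapper is used; names are illustrative):
    #     X_list = [XTemp, XPH, XCond, XSal, XOxy, ...]
    #     Y_list = [YTemp, YPH, YCond, YSal, YOxy, ...]
    #     model = GPy.models.GPCoregionalizedRegression(X_list, Y_list, kernel=LCM)
    #     model.optimize(messages=True)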
import os
import numpy as np
import xarray as xr
import zarr
import time
from dask.distributed import wait, Client, progress, Future
from typing import Union, Callable, Tuple, Any
from itertools import groupby, count
import shutil
from HSTB.kluster import kluster_variables
from HSTB.kluster.backends._base import BaseBackend
class ZarrBackend(BaseBackend):
"""
Backend for writing data to disk, used with fqpr_generation.Fqpr and xarray_conversion.BatchRead.
"""
def __init__(self, output_folder: str = None):
super().__init__(output_folder)
def _get_zarr_path(self, dataset_name: str, sys_id: str = None):
"""
Get the path to the zarr folder based on the dataset name that we provide. Ping zarr folders are based on
the serial number of the system, and that must be provided here as the sys_id
"""
if self.output_folder is None:
return None
if dataset_name == 'ping':
if not sys_id:
raise ValueError('Zarr Backend: No system id provided, cannot build ping path')
return os.path.join(self.output_folder, 'ping_' + sys_id + '.zarr')
elif dataset_name == 'navigation':
return os.path.join(self.output_folder, 'navigation.zarr')
elif dataset_name == 'ppnav':
return os.path.join(self.output_folder, 'ppnav.zarr')
elif dataset_name == 'attitude':
return os.path.join(self.output_folder, 'attitude.zarr')
else:
raise ValueError('Zarr Backend: Not a valid dataset name: {}'.format(dataset_name))
def _get_zarr_indices(self, zarr_path: str, time_array: list, append_dim: str):
"""
        Get the chunk indices (based on the time dimension) using the provided time arrays
"""
return get_write_indices_zarr(zarr_path, time_array, append_dim)
def _get_chunk_sizes(self, dataset_name: str):
"""
Pull from kluster_variables to get the correct chunk size for each dataset
"""
if dataset_name == 'ping':
return kluster_variables.ping_chunks
elif dataset_name in ['navigation', 'ppnav']:
return kluster_variables.nav_chunks
elif dataset_name == 'attitude':
return kluster_variables.att_chunks
else:
raise ValueError('Zarr Backend: Not a valid dataset name: {}'.format(dataset_name))
def _autodetermine_times(self, data: list, time_array: list = None, append_dim: str = 'time'):
"""
Get the time arrays for the dataset depending on the dataset type.
"""
if time_array:
return time_array
elif any([isinstance(d, Future) for d in data]):
raise ValueError('Zarr Backend: cannot autodetermine times from Futures')
else:
return [d[append_dim] for d in data]
def delete(self, dataset_name: str, variable_name: str, sys_id: str = None):
"""
Delete the provided variable name from the datastore on disk. var_path will be a directory of chunked files, so
we use rmtree to remove all files in the var_path directory.
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
var_path = os.path.join(zarr_path, variable_name)
if not os.path.exists(var_path):
print('Unable to remove variable {}, path does not exist: {}'.format(variable_name, var_path))
else:
shutil.rmtree(var_path)
def write(self, dataset_name: str, data: Union[list, xr.Dataset, Future], time_array: list = None, attributes: dict = None,
sys_id: str = None, append_dim: str = 'time', skip_dask: bool = False):
"""
Write the provided data to disk, finding the correct zarr folder using dataset_name. We need time_array to get
the correct write indices for the data. If attributes are provided, we write those as well as xarray Dataset
attributes.
"""
if not isinstance(data, list):
data = [data]
if attributes is None:
attributes = {}
time_array = self._autodetermine_times(data, time_array, append_dim)
zarr_path = self._get_zarr_path(dataset_name, sys_id)
data_indices, final_size, push_forward = self._get_zarr_indices(zarr_path, time_array, append_dim)
chunks = self._get_chunk_sizes(dataset_name)
fpths = distrib_zarr_write(zarr_path, data, attributes, chunks, data_indices, final_size, push_forward, self.client,
skip_dask=skip_dask, show_progress=self.show_progress,
write_in_parallel=self.parallel_write)
return zarr_path, fpths
def write_attributes(self, dataset_name: str, attributes: dict, sys_id: str = None):
"""
If the data is written to disk, we write the attributes to the zarr store as attributes of the dataset_name record.
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
if zarr_path is not None:
zarr_write_attributes(zarr_path, attributes)
else:
print('Writing attributes is disabled for in-memory processing')
def remove_attribute(self, dataset_name: str, attribute: str, sys_id: str = None):
"""
Remove the attribute matching name provided in the dataset_name_sys_id folder
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
if zarr_path is not None:
zarr_remove_attribute(zarr_path, attribute)
else:
print('Removing attributes is disabled for in-memory processing')
def _get_indices_dataset_notexist(input_time_arrays):
"""
Build a list of [start,end] indices that match the input_time_arrays, starting at zero.
Parameters
----------
input_time_arrays
list of 1d xarray dataarrays or numpy arrays for the input time values
Returns
-------
list
list of [start,end] indexes for the indices of input_time_arrays
"""
running_total = 0
write_indices = []
for input_time in input_time_arrays:
write_indices.append([0 + running_total, len(input_time) + running_total])
running_total += len(input_time)
return write_indices
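# Example (hypothetical input): two incoming chunks of length 3 and 2 map to contiguous
# append ranges starting at zero:
#     _get_indices_dataset_notexist([np.arange(3), np.arange(2)])
#     # -> [[0, 3], [3, 5]]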
def _get_indices_dataset_exists(input_time_arrays: list, zarr_time: zarr.Array):
"""
I am so sorry for whomever finds this. I had this 'great' idea a while ago to concatenate all the multibeam
lines into daily datasets. Overall this has been a great thing, except for the sorting. We have to figure out
how to assemble daily datasets from lines applied in any order imaginable, with overlap and all kinds of things.
This function should provide the indices that allow this to happen.
Recommend examining the test_backend tests if you want to understand this a bit more
    build the indices for where the input_time_arrays fit within the existing zarr_time. We have four ways to proceed
within this function:
1. input time arrays are entirely within the existing zarr_time, we build a numpy array of indices that describe
where the input_time_arrays will overwrite the zarr_time
2. input time arrays are entirely outside the existing zarr_time, we just build a 2 element list describing the
start and end index to append the data to zarr_time
    3. input time arrays are before and might overlap existing data, we build a 2 element list starting with zero
describing the start and end index and return a push_forward value, letting us know how much the zarr data
needs to be pushed forward. If there is overlap, the last index is a numpy array of indices.
    4. input time arrays are after and might overlap existing data, we build a 2 element list starting with the
index of the start of overlap. If there is overlap, the last index is a numpy array of indices.
Parameters
----------
input_time_arrays
list of 1d xarray dataarrays or numpy arrays for the input time values
zarr_time
zarr array 1d for the existing time values saved to disk
Returns
-------
list
list of either [start,end] indexes or numpy arrays for the indices of input_time_arrays in zarr_time
list
list of [index of push, total amount to push] for each push
int
how many values need to be inserted to make room for this new data at the beginning of the zarr rootgroup
"""
running_total = 0
push_forward = []
total_push = 0
write_indices = []
# time arrays must be in order in case you have to do the 'partly in datastore' workaround
input_time_arrays.sort(key=lambda x: x[0])
min_zarr_time = zarr_time[0]
max_zarr_time = zarr_time[-1]
zarr_time_len = len(zarr_time)
for input_time in input_time_arrays: # for each chunk of data that we are wanting to write, look at the times to see where it fits
input_time_len = len(input_time)
input_is_in_zarr = np.isin(input_time, zarr_time) # where is there overlap with existing data
if isinstance(input_time, xr.DataArray):
input_time = input_time.values
if input_is_in_zarr.any(): # this input array is at least partly in this datastore already
if not input_is_in_zarr.all(): # this input array is only partly in this datastore
starter_indices = np.full_like(input_time, -1) # -1 for values that are outside the existing values
inside_indices = search_not_sorted(zarr_time, input_time[input_is_in_zarr]) # get the indices for overwriting where there is overlap
starter_indices[input_is_in_zarr] = inside_indices
count_outside = len(starter_indices) - len(inside_indices) # the number of values that do not overlap
if starter_indices[-1] == -1: # this input_time contains times after the existing values
max_inside_index = inside_indices[-1]
# now add in a range starting with the last index for all values outside the zarr time range
starter_indices[~input_is_in_zarr] = np.arange(max_inside_index + 1, max_inside_index + count_outside + 1)
if input_time[-1] < max_zarr_time: # data partially overlaps and is after existing data, but not at the end of the existing dataset
push_forward.append([max_inside_index + 1 + total_push, count_outside])
else:
running_total += count_outside
write_indices.append(starter_indices + total_push)
elif starter_indices[0] == -1: # this input_time contains times before the existing values
if input_time[0] < min_zarr_time:
starter_indices = np.arange(input_time_len)
push_forward.append([total_push, count_outside])
else:
min_inside_index = inside_indices[0]
starter_indices = np.arange(input_time_len) + min_inside_index
push_forward.append([min_inside_index + total_push, count_outside])
write_indices.append(starter_indices + total_push)
total_push += count_outside
else:
raise NotImplementedError('_get_indices_dataset_exists: Found a gap in the overlap between the data provided and the existing dataset on disk')
else:
# input data is entirely within the existing zarr data, the input_time is going to be sorted, but the zarr
# time will be in the order of lines received and saved to disk. Have to get indices of input_time in zarr_time
write_indices.append(search_not_sorted(zarr_time, input_time) + total_push)
else: # zarr datastore exists, but this data is not in it. Append to the existing datastore
if input_time[0] < min_zarr_time: # data is before existing data, have to push existing data up
write_indices.append([total_push, input_time_len + total_push])
push_forward.append([total_push, input_time_len])
total_push += input_time_len
elif input_time[0] > max_zarr_time: # data is after existing data, just tack it on
write_indices.append([zarr_time_len + running_total + total_push, zarr_time_len + input_time_len + running_total + total_push])
running_total += input_time_len
else: # data is in between existing data, but there is no overlap
next_value_index = np.where(zarr_time - input_time[0] > 0)[0][0]
write_indices.append([next_value_index + total_push, next_value_index + input_time_len + total_push])
push_forward.append([next_value_index + total_push, input_time_len])
total_push += input_time_len
return write_indices, push_forward, total_push
def get_write_indices_zarr(output_pth: str, input_time_arrays: list, index_dim='time'):
"""
In Kluster, we parallel process the multibeam data and write it out to zarr chunks. Here we need to figure out
if the input data should be appended or if it should overwrite existing data. This is controlled by the returned
list of data locations.
Take the dimension we are using as the index (usually time) and see where the input arrays fit in
the list of write indices could include:
- [startidx, endidx] when the written data is new and not in the zarr store yet
- np.array(0,1,2,3....) when the written data is in the zarr store and may not be continuous
Parameters
----------
output_pth
str, path to the zarr rootgroup
input_time_arrays
list of xarray DataArray, the time dimension for each input array
index_dim
str identifier for the dimension name that we are using as the index. Generally time.
Returns
-------
list
write indices to use to write the input_time_arrays to the zarr datastore at outputpth
int
final size of the rootgroup after the write, needed to resize zarr to the appropriate length
list
list of [index of push, total amount to push] for each push
"""
zarr_time = np.array([])
mintimes = [float(i.min()) for i in input_time_arrays]
if not (np.diff(mintimes) > 0).all(): # input arrays are not time sorted
raise NotImplementedError('get_write_indices_zarr: input arrays are out of order in time')
push_forward = []
total_push = 0
if os.path.exists(output_pth):
rootgroup = zarr.open(output_pth, mode='r') # only opens if the path exists
zarr_time = rootgroup[index_dim]
write_indices, push_forward, total_push = _get_indices_dataset_exists(input_time_arrays, zarr_time)
else: # datastore doesn't exist, we just return the write indices equal to the shape of the input arrays
write_indices = _get_indices_dataset_notexist(input_time_arrays)
final_size = np.max([write_indices[-1][-1], len(zarr_time)]) + total_push
return write_indices, final_size, push_forward
def search_not_sorted(base: np.ndarray, search_array: np.ndarray):
"""
Implement a way to find the indices where search_array is within base when base is not sorted. I've found that
simply sorting and running searchsorted is the fastest way to handle this. I even tested against iterating through
the array with Numba, and it was close, but this was faster.
Parameters
----------
base
the array you want to search against
search_array
the array to search with
Returns
-------
np.ndarray
indices of where search_array is within base
"""
if not set(search_array).issubset(set(base)):
        raise ValueError('search_array must be a subset of base')
sorti = np.argsort(base)
# get indices in sorted version
tmpind = np.searchsorted(base, search_array, sorter=sorti)
final_inds = sorti[tmpind]
return final_inds
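# Example (hypothetical values): indices of the search values within an unsorted base array.
#     base = np.array([10.0, 30.0, 20.0, 40.0])
#     search_not_sorted(base, np.array([20.0, 40.0]))
#     # -> array([2, 3])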
def retry_call(callabl: Callable, args=None, kwargs=None, exceptions: Tuple[Any, ...] = (),
retries: int = 200, wait: float = 0.1):
"""
Make several attempts to invoke the callable. If one of the given exceptions
is raised, wait the given period of time and retry up to the given number of
retries.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
for attempt in range(1, retries+1):
try:
return callabl(*args, **kwargs)
except exceptions:
if attempt < retries:
time.sleep(wait)
else:
print('WARNING: attempted {} retries at {} second interval, unable to complete process'.format(retries, wait))
return callabl(*args, **kwargs)
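# Hedged usage sketch: retry an operation that can transiently fail, e.g. opening a zarr store that
# another process briefly holds (PermissionError is an assumed failure mode, not taken from this file):
#     rootgroup = retry_call(zarr.open, args=(output_pth,), kwargs={'mode': 'r'},
#                            exceptions=(PermissionError,), retries=50, wait=0.2)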
class ZarrWrite:
"""
Class for handling writing xarray data to Zarr. I started off using the xarray to_zarr functions, but I found
that they could not handle changes in size/distributed writes very well, so I came up with my own. This class
currently supports:
| 1. writing to zarr from dask map function, see distrib_zarr_write
| 2. writing data with a larger expand dimension than currently exists in zarr (think new data has more beams)
| 3. writing new variable to existing zarr data store (must match existing data dimensions)
| 4. appending to existing zarr by filling in the last zarr chunk with data and then writing new chunks (only last
chunk of zarr array is allowed to not be of length equal to zarr chunk size)
"""
def __init__(self, zarr_path: str, desired_chunk_shape: dict = None, append_dim: str = 'time', expand_dim: str = 'beam',
float_no_data_value: float = np.nan, int_no_data_value: int = 999):
"""
Initialize zarr write class
Parameters
----------
zarr_path
str, full file path to where you want the zarr data store to be written to
desired_chunk_shape
dict, keys are dimension names, vals are the chunk size for that dimension
append_dim
str, dimension name that you are appending to (generally time)
expand_dim
str, dimension name that you need to expand if necessary (generally beam)
float_no_data_value
float, no data value for variables that are dtype float
int_no_data_value
int, no data value for variables that are dtype int
"""
self.zarr_path = zarr_path
self.desired_chunk_shape = desired_chunk_shape
self.append_dim = append_dim
self.expand_dim = expand_dim
self.float_no_data_value = float_no_data_value
self.int_no_data_value = int_no_data_value
self.rootgroup = None
self.zarr_array_names = []
self.merge_chunks = False
if self.zarr_path is not None:
self.open()
else:
print('Warning: starting zarr_write with an empty rootgroup, writing to disk not supported')
self.rootgroup = zarr.group()
def open(self):
"""
Open the zarr data store, will create a new one if it does not exist. Get all the existing array names.
"""
sync = zarr.ProcessSynchronizer(self.zarr_path + '.sync')
self.rootgroup = zarr.open(self.zarr_path, mode='a', synchronizer=sync)
self.get_array_names()
def get_array_names(self):
"""
Get all the existing array names as a list of strings and set self.zarr_array_names with that list
"""
self.zarr_array_names = [t for t in self.rootgroup.array_keys()]
def _attributes_only_unique_profile(self, attrs: dict):
"""
Given attribute dict from dataset (attrs) retain only unique sound velocity profiles
Parameters
----------
attrs
input attribution from converted dataset
Returns
-------
dict
attrs with only unique sv profiles
"""
try:
new_profs = [x for x in attrs.keys() if x[0:7] == 'profile']
curr_profs = [x for x in self.rootgroup.attrs.keys() if x[0:7] == 'profile']
current_vals = [self.rootgroup.attrs[p] for p in curr_profs]
for prof in new_profs:
val = attrs[prof]
if val in current_vals:
try: # find matching attribute key if exists
tstmp = prof.split('_')[1]
matching_attr = 'attributes_{}'.format(tstmp)
if matching_attr in attrs:
attrs.pop(matching_attr)
except:
pass
attrs.pop(prof)
except:
pass
return attrs
def _attributes_only_unique_runtime(self, attrs: dict):
"""
Given attribute dict from dataset (attrs) retain only unique runtime settings dicts
Parameters
----------
attrs
input attribution from converted dataset
Returns
-------
dict
attrs with only unique runtime settings dicts
"""
try:
new_settings = [x for x in attrs.keys() if x[0:7] == 'runtime']
curr_settings = [x for x in self.rootgroup.attrs.keys() if x[0:7] == 'runtime']
current_vals = [self.rootgroup.attrs[p] for p in curr_settings]
for sett in new_settings:
val = attrs[sett]
if val in current_vals:
attrs.pop(sett)
except:
pass
return attrs
def _attributes_only_unique_settings(self, attrs: dict):
"""
Given attribute dict from dataset (attrs) retain only unique settings dicts
Parameters
----------
attrs
input attribution from converted dataset
Returns
-------
dict
attrs with only unique settings dicts
"""
try:
new_settings = [x for x in attrs.keys() if x[0:7] == 'install']
curr_settings = [x for x in self.rootgroup.attrs.keys() if x[0:7] == 'install']
current_vals = [self.rootgroup.attrs[p] for p in curr_settings]
for sett in new_settings:
val = attrs[sett]
if val in current_vals:
attrs.pop(sett)
except:
pass
return attrs
def _attributes_only_unique_xyzrph(self, attrs: dict):
"""
Given attribute dict from dataset (attrs) retain only unique xyzrph constructs
xyzrph is constructed in processing as the translated settings
Parameters
----------
attrs
input attribution from converted dataset
Returns
-------
dict
attrs with only unique xyzrph timestamped records
"""
try:
new_xyz = attrs['xyzrph']
new_tstmps = list(new_xyz[list(new_xyz.keys())[0]].keys())
curr_xyz = self.rootgroup.attrs['xyzrph']
curr_tstmps = list(curr_xyz[list(curr_xyz.keys())[0]].keys())
curr_vals = []
for tstmp in curr_tstmps:
curr_vals.append([curr_xyz[x][tstmp] for x in curr_xyz])
for tstmp in new_tstmps:
new_val = [new_xyz[x][tstmp] for x in new_xyz]
if new_val in curr_vals:
for ky in new_xyz:
new_xyz[ky].pop(tstmp)
if not new_xyz[list(new_xyz.keys())[0]]:
attrs.pop('xyzrph')
# if not attrs:
# attrs = None
except:
pass
return attrs
def write_attributes(self, attrs: dict):
"""
Write out attributes to the zarr data store
Parameters
----------
attrs
attributes associated with this zarr rootgroup
"""
if attrs is not None:
attrs = self._attributes_only_unique_profile(attrs)
attrs = self._attributes_only_unique_settings(attrs)
attrs = self._attributes_only_unique_runtime(attrs)
attrs = self._attributes_only_unique_xyzrph(attrs)
_my_xarr_to_zarr_writeattributes(self.rootgroup, attrs)
def remove_attribute(self, attr: str):
if attr in self.rootgroup.attrs:
self.rootgroup.attrs.pop(attr)
def _check_fix_rootgroup_expand_dim(self, xarr: xr.Dataset):
"""
        Check if this xarr is greater in the expand dimension (probably beam) than the existing rootgroup beam array. If it is,
we'll need to expand the rootgroup to cover the max beams of the xarr.
Parameters
----------
xarr
data that we are trying to write to rootgroup
Returns
-------
bool
if True expand the rootgroup expand dimension
"""
if (self.expand_dim in self.rootgroup) and (self.expand_dim in xarr):
last_expand = self.rootgroup[self.expand_dim].size
if last_expand < xarr[self.expand_dim].shape[0]:
return True # last expand dim isn't long enough, need to fix the chunk
else:
return False # there is a chunk there, but it is of size equal to desired
else:
return False # first write
def _get_arr_nodatavalue(self, arr_dtype: np.dtype):
"""
Given the dtype of the array, determine the appropriate no data value. Fall back on empty string if not int or
float.
Parameters
----------
arr_dtype
numpy dtype, dtype of input array
Returns
-------
Union[str, int, float]
no data value, one of [self.float_no_data_value, self.int_no_data_value, '']
"""
isfloat = np.issubdtype(arr_dtype, np.floating)
if isfloat:
nodata = self.float_no_data_value
else:
isint = np.issubdtype(arr_dtype, np.integer)
if isint:
nodata = self.int_no_data_value
else:
nodata = ''
return nodata
def fix_rootgroup_expand_dim(self, xarr: xr.Dataset):
"""
Once we've determined that the xarr Dataset expand_dim is greater than the rootgroup expand_dim, expand the
rootgroup expand_dim to match the xarr. Fill the empty space with the appropriate no data value.
Parameters
----------
xarr
data that we are trying to write to rootgroup
"""
curr_expand_dim_size = self.rootgroup[self.expand_dim].size
for var in self.zarr_array_names:
newdat = None
newshp = None
if var == self.expand_dim:
newdat = np.arange(xarr[self.expand_dim].shape[0])
newshp = xarr[self.expand_dim].shape
elif self.rootgroup[var].ndim >= 2:
if self.rootgroup[var].shape[1] == curr_expand_dim_size: # you found an array with a beam dimension
nodata_value = self._get_arr_nodatavalue(self.rootgroup[var].dtype)
newdat = self._inflate_expand_dim(self.rootgroup[var], xarr[self.expand_dim].shape[0], nodata_value)
newshp = list(self.rootgroup[var].shape)
newshp[1] = xarr[self.expand_dim].shape[0]
newshp = tuple(newshp)
if newdat is not None:
self.rootgroup[var].resize(newshp)
self.rootgroup[var][:] = newdat
def _inflate_expand_dim(self, input_arr: Union[np.array, zarr.Array, xr.DataArray],
expand_dim_size: int, nodata: Union[int, float, str]):
"""
Take in the rootgroup and expand the beam dimension to the expand_dim_size, filling the empty space with the
nodata value.
Parameters
----------
input_arr
numpy like object, includes zarr.core.Array and xarray.core.dataarray.DataArray, data that we want to expand to match the expand dim size
expand_dim_size
size of the expand_dim (probably beam) that we need
nodata
one of [self.float_no_data_value, self.int_no_data_value, '']
Returns
-------
Union[np.array, zarr.Array, xr.DataArray]
input_arr with expanded beam dimension
"""
if input_arr.ndim == 3:
appended_data = np.full((input_arr.shape[0], expand_dim_size - input_arr.shape[1], input_arr.shape[2]), nodata)
else:
            appended_data = np.full((input_arr.shape[0], expand_dim_size - input_arr.shape[1]), nodata)
        # append the no-data fill along the expand (beam) dimension
        return np.concatenate([input_arr, appended_data], axis=1)
# <NAME>
# <EMAIL>
from rescalex.rescale import Rescale
import numpy as np
import copy
class Polynomial2D:
"""
#########################################
    ### Polynomial2D is a class containing data and methods to represent the 2D polynomial function y = f(x1,x2,norder,[cij]), i.e. the sum of cij * x1**i * x2**j over all pairs (i,j) with i + j <= norder.
### Polynomial2D().data = dict of data. These data are inputs, and will not be modified.
### Polynomial2D().model = dict of data associated to a specified 2D polynomial model.
### Polynomial2D().rescale = dict of specification for rescaling X1,X2,Y.
### This can be specified by Polynomial2D(rescale=(a,b,c,d)) where a = bool to specify whether rescaling would be performed.
### For b,c,d, each is a dict specifying arguments for rescalex.rescale.Rescale for X1,X2,Y respectively.
### For example, b = {'method':'linear','minmax':(-1.,1.),'params':'None'}.
### Set b = None for not performing rescale with X1, and vice versa.
### Polynomial2D().sigclip = dict of specification for sigma-cipping.
### This can be specified by Polynomial2D(sigclip=(bool,niter,sigma)) where
### bool to specify whether sigma-clipping would be performed.
### niter = numbers of iteration
### sigma = sigma clipping level
### Polynomial2D().sim = dict of X1,X2,Y for a simulation using COEF
### Polynomial2D().compute() = compute YFIT given a model and data.
    ### Polynomial2D().fit() = fit for 2D polynomial coefficients given a model and data. This method includes an iteration routine for outlier detection using outlierdetection.py.
### Polynomial2D().simulate() = simulate using COEF and obj.sim['X1'],obj.sim['X2']. Result is kept in obj.sim['Y'].
### Polynomial2D().test() = randomly simulate x1, x2, and coefficients given nsize and norder.
### Polynomial2D().data['MASK'] keeps the original mask of data with False as good data.
### Polynomial2D().model['MASKFIT'] updated mask from iterative fitting with sigclip
Note: if rescale is performed, Polynomial2D().model['COEF'] results from rescaled space
    while Polynomial2D().model['YFIT'] results in non-rescaled space.
#########################################
### call: obj = Polynomial2D(x1,x2,y,mask,norder,coef,rescale,sigclip)
### call: obj.data['key'] for key = 'X1','X2','Y','MASK'
### call: obj.model['key'] for key = 'NORDER','COEF',...(created using model)...
### call: obj.rescale for examining rescaling results
### call: obj.sigclip for examining sigma-clipping setting
x1,x2,y,mask must have the same dimensions.
mask, if not specified, is set to all False (= good data).
norder is non-negative integer representing the highest polynomial order.
coef is a dict of key:value = (i,j):cij where cij*x1**i*x2**j
#########################################
### call: obj.compute() for computing polynomial2D using x1,x2,coef
### call: obj.model['YFIT'] for the output
#########################################
### call: obj.fit() for fitting polynomial2D using x1,x2,y,maskfit,norder
Sigma clipping can be specify by obj.sigclip
obj.model['MASKFIT'] = obj.data['MASK'], if not specified.
maskfit = False for good data, and is used for the fitting.
maskfit, after iterations with sigma clipping, is updated as the last iteration.
#########################################
### call: obj.simulate() for simulating polynomial2D using obj.model['COEF'], obj.sim['X1'], obj.sim['X2'].
obj.sim['Y'] keeps the result.
#########################################
### call: obj.test(nsize,norder) for simulate x1,x2,coef
x1,x2 are 1d-vectors of size = nsize, each.
coef is obj.model['COEF'][(i,j)] = cij.
use obj.compute() to get yfit.
"""
def __init__(self,x1=None,x2=None,y=None,mask=None,
norder=None,coef=None,
rescale=(False,None,None,None),
sigclip=(False,None,None)
):
if (mask is None) and (x1 is not None):
mask = np.full_like(x1,False,dtype=bool)
self.data = {'X1':x1,
'X2':x2,
'Y':y,
'MASK':mask
}
self.model = {'NORDER':norder,
'COEF':coef,
'YFIT':None,
'MASKFIT':None
}
self.rescale = {'RESCALE':rescale,
'X1':None,
'X2':None,
'Y':None
}
self.sigclip = sigclip
self.sim = {'X1':None,'X2':None,'Y':None}
self._rescale()
##########
##########
def simulate(self):
x1,x2 = self.sim['X1'],self.sim['X2']
if self.rescale['RESCALE'][0]:
x1,x2 = self.rescale['X1'].transform(x1),self.rescale['X2'].transform(x2)
obj = Polynomial2D(x1=x1,x2=x2,y=None,mask=None)
obj.model = copy.deepcopy(self.model)
obj.compute(rescale=False)
y = obj.model['YFIT']
if self.rescale['RESCALE'][0]:
y = self.rescale['Y'].invtransform(y)
self.sim['Y'] = y.copy()
print('Simulate...')
##########
##########
def test(self,nsize=2,norder=2):
self.data['X1'] = np.random.uniform(low=-1.,high=1.,size=nsize)
self.data['X2'] = np.random.uniform(low=-1.,high=1.,size=nsize)
self.data['MASK'] = (self.data['X1']*0.).astype(bool)
self.model['NORDER'] = norder
tmp = np.random.uniform(low=-1.,high=1.,size=self._coefsize())
self.model['COEF'] = self._makecoef(tmp)
print('Simulate test')
##########
##########
def compute(self,rescale=True):
if rescale:
x1 = self.rescale['X1'].transform(self.data['X1'])
x2 = self.rescale['X2'].transform(self.data['X2'])
else:
x1 = self.data['X1']
x2 = self.data['X2']
norder = self.model['NORDER']
coef = self.model['COEF']
tmp = np.full_like(x1,0.,dtype=float)
for i in coef:
tmppowerx1,tmppowerx2 = i[0],i[1]
tmpcoef = coef[i]
tmpp = tmpcoef * np.power(x1,tmppowerx1) * np.power(x2,tmppowerx2)
tmp += tmpp
if rescale:
self.model['YFIT'] = self.rescale['Y'].invtransform(tmp)
else:
self.model['YFIT'] = tmp
print('Perform compute')
##########
##########
def fit(self):
if self.model['MASKFIT'] is None:
self.model['MASKFIT'] = self.data['MASK'].copy()
if self.rescale['RESCALE'][0]:
tmpx1 = self.rescale['X1'].transform(self.data['X1'])
tmpx2 = self.rescale['X2'].transform(self.data['X2'])
tmpy = self.rescale['Y'].transform(self.data['Y'])
tmpm = self.data['MASK'].copy()
newobj = Polynomial2D(x1=tmpx1,x2=tmpx2,y=tmpy,mask=tmpm)
newobj.model = copy.deepcopy(self.model)
else:
newobj = copy.deepcopy(self)
# sigclip
doclip,niter,sigma_level = self.sigclip
sentinel = True
counter = 0
while sentinel:
counter += 1
newobj._curvefit()
newobj.compute(rescale=False)
print('Sigma clipping = {0}, sigma level = {1}, iter #{2}'.format(doclip,sigma_level,counter))
if not doclip or counter>=niter:
break
else:
newobj._sigclip(sigma_level)
if self.rescale['RESCALE'][0]:
newobj.model['YFIT'] = self.rescale['Y'].invtransform(newobj.model['YFIT'])
self.model = copy.deepcopy(newobj.model)
##########
##########
def _sigclip(self,sigma_level):
tmp = self.data['Y'] - self.model['YFIT']
std = np.std(tmp)
n_sig = np.abs(tmp)/std
tmpmask = (n_sig > sigma_level) # good data == False
self.model['MASKFIT'] = (self.model['MASKFIT'] | tmpmask)
print('Update maskfit')
def _rescale(self):
rescale = self.rescale['RESCALE']
KEY = {1:'X1',2:'X2',3:'Y'}
print('Rescale = {0}'.format(rescale[0]))
if not rescale[0]:
return
x1,x2,y = self.data['X1'].copy(),self.data['X2'].copy(),self.data['Y'].copy()
for i in KEY:
print('')
tmpdata = self.data[KEY[i]].copy()
rescale_arg_x = rescale[i]
obj = Rescale(data=tmpdata,**rescale_arg_x)
obj.compute()
self.rescale[KEY[i]] = copy.deepcopy(obj)
def _curvefit(self):
from scipy.optimize import curve_fit
x1 = self.data['X1'].copy()
x2 = self.data['X2'].copy()
y = self.data['Y'].copy()
norder = self.model['NORDER']
m = self.model['MASKFIT']
# check nan
mnan = np.where(~np.isfinite(y))
# update mask
m[mnan] = True
# apply mask
xx = x1[~m],x2[~m]
yy = y[~m]
print('Perform fit')
if norder==0:
popt,pcov = curve_fit(self._n0,xx,yy)
elif norder==1:
popt,pcov = curve_fit(self._n1,xx,yy)
elif norder==2:
popt,pcov = curve_fit(self._n2,xx,yy)
elif norder==3:
popt,pcov = curve_fit(self._n3,xx,yy)
elif norder==4:
popt,pcov = curve_fit(self._n4,xx,yy)
elif norder==5:
popt,pcov = curve_fit(self._n5,xx,yy)
elif norder==6:
popt,pcov = curve_fit(self._n6,xx,yy)
elif norder==7:
popt,pcov = curve_fit(self._n7,xx,yy)
elif norder==8:
popt,pcov = curve_fit(self._n8,xx,yy)
else:
popt,pcov = None,None
print('Not support this norder')
self.model['COEF'] = self._makecoef(popt)
self.model['pcov'] = pcov
def _n0(self,x,c00):
return c00
def _n1(self,x,c00,c10,c01):
x1,x2=x
tmp = self._n0(x,c00)
tmp += c10*x1 + c01*x2
return tmp
def _n2(self,x,c00,c10,c01,c20,c11,c02):
x1,x2=x
tmp = self._n1(x,c00,c10,c01)
tmp += c20*np.power(x1,2) + c11*x1*x2 + c02*np.power(x2,2)
return tmp
def _n3(self,x,c00,c10,c01,c20,c11,c02,
c30,c21,c12,c03
):
x1,x2=x
tmp = self._n2(x,c00,c10,c01,c20,c11,c02)
        tmp += c30*np.power(x1,3) + c21*np.power(x1,2)*x2 + c12*x1*np.power(x2,2) + c03*np.power(x2,3)
        return tmp
import numpy as np
import config
from optimizers import get_optimizer
import copy
from scipy.special import expit as sigmoid # Sigmoid function
"""
NOTE:
- Only use dropout during training
"""
class FullyConnected:
def __init__(self,
input_dim,
output_dim,
weight_initializer,
weight_decay=0.,
use_bias=False,
use_weight_norm=False,
opt=config.OPT,
clip_gradients=False):
self.use_bias = use_bias
self.use_weight_norm = use_weight_norm
self.clip_gradients = clip_gradients # Option for clipping gradients
if weight_decay < 0:
raise ValueError('FullyConnected: negative weight_decay not allowed')
self.weight_decay = weight_decay
self.optimizer = get_optimizer(opt)
param_shapes = [] # For initializing the optimizer
self.input = None
self.b = np.zeros(output_dim, dtype='float64')
self.db = None
if use_weight_norm:
# There are `output_dim` number of weight vectors `w` with length `input_dim`
self.v_shape = (input_dim, output_dim)
self.g_shape = (output_dim,)
self.v = weight_initializer(self.v_shape) # Initialise using the given initialiser
self.g = np.linalg.norm(self.v, axis=0)
self.dv = None # No need to initialse gradients
self.dg = None
param_shapes.extend([self.v_shape, self.g_shape]) # Shapes for optimizer
else:
self.W_shape = (input_dim, output_dim)
self.W = weight_initializer(self.W_shape)
self.dW = None
param_shapes.append(self.W_shape)
if use_bias:
param_shapes.append(self.b.shape)
# Init optimizer using shape of used parameters; e.g. gradient velocities
self.optimizer.init_shape(param_shapes)
def clip_grad(self, gradient, mingrad=-1., maxgrad=1.):
''' Clip gradients in a range to prevent explosion '''
return np.maximum(mingrad, np.minimum(maxgrad, gradient))
def get_weight(self):
''' Return weights with shape (input_dim x output_dim), depending on weight_norm '''
if self.use_weight_norm:
v_norm = np.linalg.norm(self.v, axis=0)
return self.g * self.v / np.maximum(v_norm, config.EPSILON) # EPSILON for stability
else:
return self.W
def forward(self, input, training=None):
'''
Compute forward pass and save input for backprop
`training` parameter is ignored for conforming with interface
'''
self.input = input
return (self.input @ self.get_weight()) + (self.b if self.use_bias else 0)
def backward(self, backproped_grad):
'''
Use back-propagated gradient (n x out_dim) to compute this layer's gradient
This function saves dW and returns d(Loss)/d(input)
'''
assert backproped_grad.shape == (self.input.shape[0], self.get_weight().shape[1])
dweights = self.input.T @ backproped_grad # shape = (input_dim, output_dim)
if self.use_weight_norm:
v_norm = np.maximum(np.linalg.norm(self.v, axis=0), config.EPSILON) # Clip for numerical stability
self.dg = np.sum(dweights * self.v / v_norm, axis=0) # Use sum since g was broadcasted
self.dv = (self.g / v_norm * dweights) - (self.g * self.dg / np.square(v_norm) * self.v)
else:
self.dW = dweights
if self.use_bias:
self.db = np.sum(backproped_grad, axis=0) # Sum gradient since bias was broadcasted
dinput = backproped_grad @ self.get_weight().T # shape = (batch, input_dim)
return dinput
def update(self):
''' Update the weights using the optimizer using the latest weights/gradients '''
params_gradient = []
if self.use_weight_norm:
if self.clip_gradients:
self.dv = self.clip_grad(self.dv)
self.dg = self.clip_grad(self.dg)
params_gradient.extend([(self.v, self.dv), (self.g, self.dg)])
else:
if self.clip_gradients:
self.dW = self.clip_grad(self.dW)
params_gradient.append((self.W, self.dW))
if self.use_bias:
if self.clip_gradients:
self.db = self.clip_grad(self.db)
params_gradient.append((self.b, self.db))
# Let the optimizer to do optimization
self.optimizer.optimize(params_gradient, self.weight_decay)
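# Minimal sketch, not part of the original interface (assumptions: config.OPT names an optimizer
# that get_optimizer can build, and a small Gaussian initializer is acceptable). Shows one
# forward/backward/update round trip of the layer.
def _example_fully_connected_step():
    initializer = lambda shape: 0.01 * np.random.randn(*shape)
    layer = FullyConnected(input_dim=3, output_dim=2,
                           weight_initializer=initializer, use_bias=True)
    batch = np.random.randn(4, 3)
    out = layer.forward(batch, training=True)        # shape (4, 2)
    grad_input = layer.backward(np.ones_like(out))   # gradient w.r.t. the input, shape (4, 3)
    layer.update()                                   # optimizer step on W and b
    return out, grad_input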
class ReLU:
def __init__(self):
self.input = None
def forward(self, input, training=None):
''' input.shape = output.shape = (batch x input_dims) '''
self.input = input
return np.maximum(input, 0)
def backward(self, backproped_grad):
deriv = np.where(self.input < 0, 0, 1)
return backproped_grad * deriv
def update(self):
pass # Nothing to update
class LeakyReLU:
def __init__(self, alpha=0.01):
if alpha <= 0 or alpha > 1:
raise ValueError('LeakyReLU: alpha must be between 0 and 1')
self.alpha = alpha
self.input = None
def forward(self, input, training=None):
''' input.shape = output.shape = (batch x input_dims) '''
self.input = input
return np.maximum(input, self.alpha * input)
def backward(self, backproped_grad):
''' Compute gradient of LeakyReLU and backprop '''
deriv = np.where(self.input < 0, self.alpha, 1)
return backproped_grad * deriv
def update(self):
pass # Nothing to update
class Sigmoid:
def __init__(self):
self.output = None
def forward(self, input, training=None):
''' Compute the sigmoid function; training status is ignored '''
self.output = sigmoid(input) # Sigmoid from SciPy
return self.output
def backward(self, backproped_grad):
''' Compute the gradient w.r.t. input '''
deriv = self.output * (1. - self.output)
return deriv * backproped_grad
def update(self):
pass # Nothing to update
class Tanh:
def __init__(self):
self.output = None
def forward(self, input, training=None):
''' Compute the tanh function; training status is ignored '''
self.output = np.tanh(input)
return self.output
def backward(self, backproped_grad):
''' Compute gradient w.r.t. input '''
deriv = 1. - np.square(self.output)
return deriv * backproped_grad
def update(self):
pass # Nothing to update
class Dropout:
def __init__(self, drop_rate):
if drop_rate < 0 or drop_rate >= 1:
raise ValueError('Dropout: dropout rate must be >= 0 and < 1')
self.retain_rate = 1. - drop_rate
self.mask = None
self.input = None
def forward(self, input, training):
''' Drop units according to the drop_rate; rescale weights as needed '''
if not training:
return input # During test time, no dropout required
self.input = input
self.mask = np.random.binomial(1, self.retain_rate, input.shape)
self.mask = self.mask / self.retain_rate # divide rate, so no change for prediction
return input * self.mask
def backward(self, backproped_grad):
        ''' Mask gradients using the drop mask (the mask already carries the 1/retain_rate scaling) '''
        return backproped_grad * self.mask  # forward returned input * mask, so d(out)/d(in) = mask
def update(self):
pass # Nothing to update
class BatchNorm:
def __init__(self, input_dim, avg_decay=0.99, epsilon=1e-3, weight_decay=0., opt=config.OPT):
self.gamma = np.ones(input_dim, dtype='float64')
self.beta = np.zeros(input_dim, dtype='float64')
self.d_gamma = None
self.d_beta = None
self.running_avg_mean = np.zeros(input_dim, dtype='float64')
self.running_avg_std = np.zeros(input_dim, dtype='float64')
self.avg_decay = avg_decay
self.epsilon = epsilon
self.input_hat = None
self.std = None
self.optimizer = get_optimizer(opt)
if weight_decay < 0:
raise ValueError('FullyConnected: negative weight_decay not allowed')
self.weight_decay = weight_decay
param_shapes = [self.gamma.shape, self.beta.shape]
self.optimizer.init_shape(param_shapes)
def forward(self, input, training):
''' Compute forward pass of BatchNorm depending on whether we are training '''
if training:
# normalise input: 0 mean and unit std
self.std = np.sqrt(np.var(input, axis=0) + self.epsilon)
mean = np.mean(input, axis=0)
self.input_hat = (input - mean) / self.std
# Compute Exponentially Weighted Averages
self.running_avg_mean = self.avg_decay * self.running_avg_mean + (1 - self.avg_decay) * mean
self.running_avg_std = self.avg_decay * self.running_avg_std + (1 - self.avg_decay) * self.std
return self.gamma * self.input_hat + self.beta
else:
# Use running average and std to normalise
input_hat = (input - self.running_avg_mean) / self.running_avg_std
return self.gamma * input_hat + self.beta
def backward(self, backproped_grad):
''' Backprop of BatchNorm, computes gradients of dx (input), d_gamma, d_beta '''
# Compute derivative w.r.t. input
d_xhat = backproped_grad * self.gamma
numerator = len(self.input_hat) * d_xhat - np.sum(d_xhat, axis=0)
numerator -= self.input_hat * np.sum(d_xhat * self.input_hat, axis=0)
dx = (1. / len(self.input_hat)) * numerator / self.std
# Compute derivative w.r.t. gamma and beta
self.d_gamma = np.sum(backproped_grad * self.input_hat, axis=0)
self.d_beta = np.sum(backproped_grad, axis=0)
return dx
def update(self):
params_gradient = [(self.gamma, self.d_gamma), (self.beta, self.d_beta)]
self.optimizer.optimize(params_gradient, self.weight_decay)
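# Hedged usage note (illustrative values): BatchNorm is meant to sit between a FullyConnected layer
# and its activation; pass training=True so the running mean/std get updated.
#     bn = BatchNorm(input_dim=2)
#     normed = bn.forward(np.random.randn(8, 2), training=True)
#     dx = bn.backward(np.ones_like(normed))
#     bn.update()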
class SoftmaxCrossEntropy:
def __init__(self):
self.y_pred = None
self.y_true = None
def softmax(self, input, training=None):
''' Compute the softmax (prediction) given input '''
input -= np.max(input, axis=-1, keepdims=True) # For numerical stability
exps = np.exp(input)
return exps / np.sum(exps, axis=-1, keepdims=True)
def cross_entropy(self, y_pred, y_true):
'''
Compute CrossEntropy loss given predictions and labels;
Calls self.softmax() for prediction
'''
y_pred = np.copy(y_pred) # Copy to ensure not corrupting original predictions
# negative log likelihood of the right class
logs = -np.log(y_pred[range(len(y_pred)), np.argmax(y_true, axis=-1)])
loss = np.mean(logs) # Real valued average loss over batch
self.y_true = y_true
self.y_pred = y_pred
return loss
def backward(self):
'''
Compute gradient of loss directly with respect to self.input (batch before softmax)
across Softmax and CrossEntropy; this way is more numerically stable
'''
grad = self.y_pred
# gradient = y_pred - y_true, and y_true == 1 only for the right classes
        grad[range(len(grad)), np.argmax(self.y_true, axis=-1)] -= 1.
        return grad / len(grad)  # average over the batch to match the mean loss
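# Hedged end-to-end sketch (illustrative values only): softmax + cross-entropy on a tiny batch,
# then the gradient w.r.t. the pre-softmax logits.
#     loss_layer = SoftmaxCrossEntropy()
#     logits = np.array([[2.0, 1.0, 0.1], [0.5, 2.5, 0.3]])
#     y_true = np.array([[1, 0, 0], [0, 1, 0]])
#     probs = loss_layer.softmax(logits)
#     loss = loss_layer.cross_entropy(probs, y_true)
#     dlogits = loss_layer.backward()   # shape (2, 3)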
import argparse
import os
import pandas as pd
import glob
import numpy as np
import cv2 as cv
from lib.data_formats.read_events import read_memmap_events, read_h5_events_dict
if __name__ == "__main__":
"""
Quick demo
"""
parser = argparse.ArgumentParser()
parser.add_argument("path", help="events path")
parser.add_argument("flow_path", help="flow path")
parser.add_argument("--output_path", type=str, default="/tmp/visualization", help="Where to save image outputs")
parser.add_argument('--plot_method', default='between_frames', type=str,
help='which method should be used to visualize',
choices=['between_frames', 'k_events', 't_seconds'])
parser.add_argument('--w_width', type=float, default=0.01,
help='new plot is formed every t seconds (required if voxel_method is t_seconds)')
parser.add_argument('--sw_width', type=float,
help='sliding_window size in seconds (required if voxel_method is t_seconds)')
parser.add_argument("--num_bins", type=int, default=6, help="How many bins voxels should have.")
parser.add_argument('--show_plot', action='store_true', help='If true, will also display the plot in an interactive window.\
Useful for selecting the desired orientation.')
parser.add_argument("--num_show", type=int, default=-1, help="How many events to show per plot. If -1, show all events.")
parser.add_argument("--event_size", type=float, default=2, help="Marker size of the plotted events")
parser.add_argument("--ts_scale", type=int, default=10000, help="Scales the time axis. Only applicable for mayavi rendering.")
parser.add_argument("--elev", type=float, default=0, help="Elevation of plot")
parser.add_argument("--azim", type=float, default=45, help="Azimuth of plot")
parser.add_argument("--stride", type=int, default=1, help="Downsample stride for plotted images.")
parser.add_argument("--skip_frames", type=int, default=1, help="Amount of frames to place per plot.")
parser.add_argument("--start_frame", type=int, default=0, help="On which frame to start.")
parser.add_argument('--hide_skipped', action='store_true', help='Do not draw skipped frames into plot.')
parser.add_argument('--hide_events', action='store_true', help='Do not draw events')
parser.add_argument('--hide_frames', action='store_true', help='Do not draw frames')
parser.add_argument('--show_axes', action='store_true', help='Draw axes')
parser.add_argument("--num_compress", type=int, default=0, help="How many events to draw compressed. If 'auto'\
will automatically determine.", choices=['value', 'auto'])
parser.add_argument('--compress_front', action='store_true', help='If set, will put the compressed events at the _start_\
of the event volume, rather than the back.')
parser.add_argument('--invert', action='store_true', help='If the figure is for a black background, you can invert the \
colors for better visibility.')
parser.add_argument("--crop", type=str, default=None, help="Set a crop of both images and events. Uses 'imagemagick' \
syntax, eg for a crop of 10x20 starting from point 30,40 use: 10x20+30+40.")
parser.add_argument("--renderer", type=str, default="matplotlib", help="Which renderer to use (mayavi is faster)", choices=["matplotlib", "mayavi"])
args = parser.parse_args()
events = read_h5_events_dict(args.path)
xs = events['xs']
ys = events['ys']
ts = events['ts']
ps = events['ps']
t0 = ts[0]
ts = ts-t0
frames = [np.flip(np.flip(x/255., axis=0), axis=1) for x in events['frames']]
frame_ts = events['frame_timestamps'][1:]-t0
frame_end = events['frame_event_indices'][1:]
frame_start = np.concatenate((np.array([0]), frame_end))
frame_idx = np.stack((frame_end, frame_start[0:-1]), axis=1)
ys = frames[0].shape[0]-ys
xs = frames[0].shape[1]-xs
flow_paths = sorted(glob.glob(os.path.join(args.flow_path, "*.npy")))
flow_img_paths = sorted(glob.glob(os.path.join(args.flow_path, "*.png")))
flow_ts = pd.read_csv(os.path.join(args.flow_path, "timestamps.txt"), delimiter=" ", names=["fname", "timestamp"])
flow_ts = np.array(flow_ts["timestamp"])
#flows = [-np.flip(np.flip(np.load(fp), axis=1), axis=2) for fp in flow_paths]
    flows = [-np.load(fp) for fp in flow_paths]
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 12:03:31 2019
@author: rocco
"""
#open coincidence file and load variables
import os
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import datetime
files = [i for i in os.listdir("../data/mipas_pd")]
#files = files[19:24]
files = files[21:22]
ind_arr = np.empty([0,1])
ind = np.empty([0,1])
ind_cal = np.empty([0,1])
lab_cl = np.empty([0,1])
lab_cl_2 = np.empty([0,1])
lab_cl_3 = np.empty([0,1])
for file in files:
#load mipas df
df_reduced = pd.read_hdf(os.path.join('../data/mipas_pd', file),'df_reduced')
s0 = df_reduced.shape[0]
cl_max_class = np.ones(s0)*(-1)
cl_dense = np.ones(s0)*(-1)
df_reduced = df_reduced.assign(cal_max_cl=cl_max_class)
df_reduced = df_reduced.assign(caliop_class=cl_max_class)
df_reduced = df_reduced.assign(caliop_class_dense=cl_dense)
#load coinc data
file_coinc = "coinc_MipCalV2_" +file.split("_")[0] +file.split("_")[1] + "_main.nc"
dataset = Dataset("../data/classification/coinc/" + file_coinc)
cal_max_cl = dataset["caliop_max_class"][:]
caliop_class = dataset["caliop_class"][:]
nclass_cal4mip = dataset["nclass_cal4mip"][:]
htang_caliop = dataset["htang"][:]
time_caliop = dataset.variables["time"][:]
lat_caliop = dataset["latitude"][:]
lon_caliop = dataset["longitude"][:]
dataset.close()
ind_arr = np.empty([0,1])
ind_cal = np.empty([0,1])
for i in range(0, len(caliop_class)):
        if nclass_cal4mip[i, :].max() / nclass_cal4mip[i, :].sum() > 0.7:
            cl_dense[i] = np.argmax(nclass_cal4mip[i, :], axis=0)
import os
import sys
file_dir = os.path.dirname(os.path.abspath(__file__))
tools_dir_path = os.path.dirname(file_dir)
sys.path.insert(0, tools_dir_path)
from tools import ProgressIterator, DataML, classified_error, allexcept, a_vs_b, output
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score, KFold
import numpy as np
def trial_all_except(training_set, testing_set, digit, kernel, c, degree=None):
training_set, testing_set = allexcept(digit, training_set, testing_set)
if degree == None:
svm = SVC(kernel=kernel, C=c)
else:
svm = SVC(kernel=kernel, C=c, degree=degree)
svm.fit(training_set.z, training_set.y)
training_predicted = svm.predict(training_set.z)
in_sample_error = classified_error(training_predicted, training_set.y)
testing_predicted = svm.predict(testing_set.z)
out_of_sample_error = classified_error(testing_predicted, testing_set.y)
return svm.n_support_, (in_sample_error, out_of_sample_error)
def trial_a_vs_b(training_set, testing_set, a, b, kernel, c, degree=None):
training_set, testing_set = a_vs_b(a, b, training_set, testing_set)
if degree == None:
svm = SVC(kernel=kernel, C=c)
else:
svm = SVC(kernel=kernel, C=c, degree=degree)
svm.fit(training_set.z, training_set.y)
training_predicted = svm.predict(training_set.z)
in_sample_error = classified_error(training_predicted, training_set.y)
testing_predicted = svm.predict(testing_set.z)
out_of_sample_error = classified_error(testing_predicted, testing_set.y)
return svm.n_support_, (in_sample_error, out_of_sample_error)
def best_c(training_set):
training_set = a_vs_b(1, 5, training_set)[0]
svcs = [ SVC(kernel='poly', C=c, degree=2)
for c in [0.0001, 0.001, 0.01, 0.1, 1] ]
cv = KFold(n=len(training_set.y), n_folds=10, shuffle=True)
score_c = [ np.mean(
cross_val_score(polysvm, training_set.z, training_set.y, cv=cv))
for polysvm in svcs ]
return np.argmax(score_c), score_c
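# Hedged usage sketch: pick the soft-margin constant for the 1-vs-5 polynomial SVM by 10-fold
# cross-validation; the returned index maps back into [0.0001, 0.001, 0.01, 0.1, 1].
#     best_idx, scores = best_c(training_set)
#     best_C = [0.0001, 0.001, 0.01, 0.1, 1][best_idx]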
def main():
output(simulations)
def simulations():
que = {}
training_data = np.genfromtxt(os.path.join(file_dir, "features.train"))
testing_data = np.genfromtxt(os.path.join(file_dir, "features.test"))
def convert_raw(t_data):
return DataML((t_data[:,1:], np.array(t_data[:,0], dtype="int")))
training_set = convert_raw(training_data)
testing_set = convert_raw(testing_data)
progress_iterator = ProgressIterator(5)
progress_iterator.next()
results_even = [ trial_all_except(training_set, testing_set, digit, 'poly', 0.1, 2)
for digit in range(0,9,2) ]
in_sample_error_list_even = [ result[1][0] for result in results_even ]
que[2] = ("digit with highest in sample error :", (np.argmax(in_sample_error_list_even) * 2 , np.max(in_sample_error_list_even)) )
results_odd = [ trial_all_except(training_set, testing_set, digit, 'poly', 0.1, 2)
for digit in range(1,10,2) ]
in_sample_error_list_odd = [ result[1][0] for result in results_odd ]
que[3] = ("digit with lowest in sample error :", (np.argmin(in_sample_error_list_odd) * 2 + 1 , np.min(in_sample_error_list_odd)) )
support_vector_difference = abs(
        sum(results_even[np.argmax(in_sample_error_list_even)
import warnings
import numpy as np
from numba import jit
"""
This code is from scipy project with following license:
SciPy license
Copyright © 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright © 2003-2019 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
_AXIS_TO_IND = {"x": 0, "y": 1, "z": 2}
def _elementary_basis_vector(axis):
b = np.zeros(3)
b[_AXIS_TO_IND[axis]] = 1
return b
def compute_euler_from_matrix(matrix, seq, extrinsic=False):
# The algorithm assumes intrinsic frame transformations. The algorithm
# in the paper is formulated for rotation matrices which are transpositions
# of the rotation matrices used within Rotation.
# Adapt the algorithm for our case by
# 1. Instead of transposing our representation, use the transpose of the
# O matrix as defined in the paper, and be careful to swap indices
# 2. Reversing both axis sequence and angles for extrinsic rotations
if extrinsic:
seq = seq[::-1]
if matrix.ndim == 2:
matrix = matrix[None, :, :]
num_rotations = matrix.shape[0]
# Step 0
# Algorithm assumes axes as column vectors, here we use 1D vectors
n1 = _elementary_basis_vector(seq[0])
n2 = _elementary_basis_vector(seq[1])
n3 = _elementary_basis_vector(seq[2])
# Step 2
sl = np.dot(np.cross(n1, n2), n3)
cl = np.dot(n1, n3)
# angle offset is lambda from the paper referenced in [2] from docstring of
# `as_euler` function
offset = np.arctan2(sl, cl)
c = np.vstack((n2, np.cross(n1, n2), n1))
# Step 3
rot = np.array([[1, 0, 0], [0, cl, sl], [0, -sl, cl]])
res = np.einsum("...ij,...jk->...ik", c, matrix)
matrix_transformed = np.einsum("...ij,...jk->...ik", res, c.T.dot(rot))
# Step 4
angles = np.empty((num_rotations, 3))
# Ensure less than unit norm
positive_unity = matrix_transformed[:, 2, 2] > 1
negative_unity = matrix_transformed[:, 2, 2] < -1
matrix_transformed[positive_unity, 2, 2] = 1
matrix_transformed[negative_unity, 2, 2] = -1
angles[:, 1] = np.arccos(matrix_transformed[:, 2, 2])
# Steps 5, 6
eps = 1e-7
safe1 = np.abs(angles[:, 1]) >= eps
safe2 = np.abs(angles[:, 1] - np.pi) >= eps
# Step 4 (Completion)
angles[:, 1] += offset
# 5b
safe_mask = np.logical_and(safe1, safe2)
angles[safe_mask, 0] = np.arctan2(
matrix_transformed[safe_mask, 0, 2], -matrix_transformed[safe_mask, 1, 2]
)
angles[safe_mask, 2] = np.arctan2(
matrix_transformed[safe_mask, 2, 0], matrix_transformed[safe_mask, 2, 1]
)
if extrinsic:
# For extrinsic, set first angle to zero so that after reversal we
# ensure that third angle is zero
# 6a
angles[~safe_mask, 0] = 0
# 6b
angles[~safe1, 2] = np.arctan2(
matrix_transformed[~safe1, 1, 0] - matrix_transformed[~safe1, 0, 1],
matrix_transformed[~safe1, 0, 0] + matrix_transformed[~safe1, 1, 1],
)
# 6c
angles[~safe2, 2] = -(
np.arctan2(
matrix_transformed[~safe2, 1, 0] + matrix_transformed[~safe2, 0, 1],
matrix_transformed[~safe2, 0, 0] - matrix_transformed[~safe2, 1, 1],
)
)
else:
        # For intrinsic, set third angle to zero
# 6a
angles[~safe_mask, 2] = 0
# 6b
angles[~safe1, 0] = np.arctan2(
matrix_transformed[~safe1, 1, 0] - matrix_transformed[~safe1, 0, 1],
matrix_transformed[~safe1, 0, 0] + matrix_transformed[~safe1, 1, 1],
)
# 6c
angles[~safe2, 0] = np.arctan2(
matrix_transformed[~safe2, 1, 0] + matrix_transformed[~safe2, 0, 1],
matrix_transformed[~safe2, 0, 0] - matrix_transformed[~safe2, 1, 1],
)
# Step 7
if seq[0] == seq[2]:
# lambda = 0, so we can only ensure angle2 -> [0, pi]
adjust_mask = np.logical_or(angles[:, 1] < 0, angles[:, 1] > np.pi)
else:
# lambda = + or - pi/2, so we can ensure angle2 -> [-pi/2, pi/2]
adjust_mask = np.logical_or(angles[:, 1] < -np.pi / 2, angles[:, 1] > np.pi / 2)
    # Don't adjust gimbal-locked angle sequences
adjust_mask = np.logical_and(adjust_mask, safe_mask)
angles[adjust_mask, 0] += np.pi
angles[adjust_mask, 1] = 2 * offset - angles[adjust_mask, 1]
angles[adjust_mask, 2] -= np.pi
angles[angles < -np.pi] += 2 * np.pi
angles[angles > np.pi] -= 2 * np.pi
# Step 8
if not np.all(safe_mask):
warnings.warn(
"Gimbal lock detected. Setting third angle to zero since"
" it is not possible to uniquely determine all angles."
)
# Reverse role of extrinsic and intrinsic rotations, but let third angle be
# zero for gimbal locked cases
if extrinsic:
angles = angles[:, ::-1]
return angles
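# Minimal usage sketch (added for illustration; not part of the original
# SciPy-derived code). Exact angle values depend on the chosen sequence
# convention, so none are asserted here:
#     th = 0.3
#     Rz = np.array([[np.cos(th), -np.sin(th), 0.0],
#                    [np.sin(th),  np.cos(th), 0.0],
#                    [0.0,          0.0,       1.0]])
#     ang = compute_euler_from_matrix(Rz, "zyx", extrinsic=False)
#     # ang has shape (1, 3); the gimbal-lock warning above is only raised
#     # when the middle angle is degenerate.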
def compute_q_from_matrix(matrix):
is_single = False
matrix = np.asarray(matrix, dtype=float)
if matrix.ndim not in [2, 3] or matrix.shape[-2:] != (3, 3):
raise ValueError(
"Expected `matrix` to have shape (3, 3) or "
"(N, 3, 3), got {}".format(matrix.shape)
)
# If a single matrix is given, convert it to 3D 1 x 3 x 3 matrix but
# set self._single to True so that we can return appropriate objects in
# the `to_...` methods
if matrix.shape == (3, 3):
matrix = matrix.reshape((1, 3, 3))
is_single = True
num_rotations = matrix.shape[0]
decision_matrix = np.empty((num_rotations, 4))
decision_matrix[:, :3] = matrix.diagonal(axis1=1, axis2=2)
decision_matrix[:, -1] = decision_matrix[:, :3].sum(axis=1)
choices = decision_matrix.argmax(axis=1)
quat = np.empty((num_rotations, 4))
ind = np.nonzero(choices != 3)[0]
i = choices[ind]
j = (i + 1) % 3
k = (j + 1) % 3
quat[ind, i] = 1 - decision_matrix[ind, -1] + 2 * matrix[ind, i, i]
quat[ind, j] = matrix[ind, j, i] + matrix[ind, i, j]
quat[ind, k] = matrix[ind, k, i] + matrix[ind, i, k]
quat[ind, 3] = matrix[ind, k, j] - matrix[ind, j, k]
    ind = np.nonzero(choices == 3)[0]
import sys, os
project_path = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
if project_path not in sys.path:
sys.path.append(project_path)
import argparse
import numpy as np
from copy import deepcopy
from sklearn.utils import shuffle
from typing import Callable, Optional, Sequence, Iterable
from commonly_used_code import config, helper_fn
class Batch:
def __init__(self):
self.init_all()
def init_all(self):
self.init_oovs()
self.init_src_tar()
self.init_fact()
self.init_conv()
def init_oovs(self):
# each sample has an oov ids and its max oov lens
# oov dict
self.oov_ids_dict = []
def init_src_tar(self):
# normal ids
self.src_ids = []
self.tar_ids = []
self.tar_loss_ids = []
# for multi-task, take the first fact as the answer
self.fact_tar_ids = []
self.fact_tar_loss_ids = []
# OOV expand for each data set
self.src_ids_exp = []
self.tar_ids_exp = []
self.tar_loss_ids_exp = []
def init_fact(self):
self.fact_ids = []
self.fact_ids_exp = []
def init_conv(self):
self.conv_ids = []
self.conv_ids_exp = []
def np_format(self):
self.src_ids = np.asarray(self.src_ids)
self.tar_ids = np.asarray(self.tar_ids)
self.tar_loss_ids = np.asarray(self.tar_loss_ids)
self.tar_loss_ids = np.reshape(self.tar_loss_ids, (self.tar_loss_ids.shape[0], self.tar_loss_ids.shape[1], 1))
self.fact_ids = np.asarray(self.fact_ids)
if len(self.conv_ids) != 0:
self.conv_ids = np.asarray(self.conv_ids)
# used for multi-task
self.fact_tar_ids = np.asarray(self.fact_tar_ids)
self.fact_tar_loss_ids = np.asarray(self.fact_tar_loss_ids)
self.fact_tar_loss_ids = np.reshape(self.fact_tar_loss_ids, (self.fact_tar_loss_ids.shape[0], self.fact_tar_loss_ids.shape[1], 1))
self.src_ids_exp = np.asarray(self.src_ids_exp)
self.tar_ids_exp = np.asarray(self.tar_ids_exp)
self.tar_loss_ids_exp = np.asarray(self.tar_loss_ids_exp)
self.tar_loss_ids_exp = np.reshape(self.tar_loss_ids_exp, (self.tar_loss_ids_exp.shape[0], self.tar_loss_ids_exp.shape[1], 1))
self.fact_ids_exp = np.asarray(self.fact_ids_exp)
if len(self.conv_ids_exp) != 0:
            self.conv_ids_exp = np.asarray(self.conv_ids_exp)
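        # Usage sketch (added): after the per-sample id lists have been filled,
        #     batch = Batch(); batch.src_ids.append(ids); ...; batch.np_format()
        # converts every list to an np.ndarray and reshapes the *_loss_ids
        # targets to (batch, time, 1) for per-token loss computation.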
"""This module provides the analytical solution for computing the hessian matrix of our
loglikelihood function
"""
import numpy as np
from scipy.stats import norm
def compute_hessian(x0, X1, X0, Z1, Z0, Y1, Y0):
"""This function wraps all subroutines and returns the hessian matrix of our
log-likelihood function
"""
# def auxiliary parameters
num_obs = X1.shape[0] + X0.shape[0]
n_col_X1 = X1.shape[1]
n_col_X0 = X0.shape[1]
n_col_Z = Z1.shape[1]
# parameters
num_col_X1X0 = n_col_X1 + n_col_X0
num_col_X1X0Z1 = num_col_X1X0 + n_col_Z
beta1, beta0, gamma = (
x0[:n_col_X1],
x0[n_col_X1:num_col_X1X0],
x0[num_col_X1X0:-4],
)
sd1, sd0, rho1v, rho0v = x0[-4], x0[-2], x0[-3], x0[-1]
# aux_params
nu1 = (Y1 - np.dot(beta1, X1.T)) / sd1
lambda1 = (np.dot(gamma, Z1.T) - rho1v * nu1) / (np.sqrt(1 - rho1v ** 2))
nu0 = (Y0 - np.dot(beta0, X0.T)) / sd0
lambda0 = (np.dot(gamma, Z0.T) - rho0v * nu0) / (np.sqrt(1 - rho0v ** 2))
eta1 = (
-lambda1 * norm.pdf(lambda1) * norm.cdf(lambda1) - norm.pdf(lambda1) ** 2
) / (norm.cdf(lambda1) ** 2)
eta0 = (
lambda0 * norm.pdf(lambda0) * (1 - norm.cdf(lambda0)) - norm.pdf(lambda0) ** 2
) / (1 - norm.cdf(lambda0)) ** 2
# combinations of obs
X1X1 = np.einsum("ij, i ->ij", X1, eta1).T @ X1
X1Z1 = np.einsum("ij, i ->ij", X1, eta1).T @ Z1
X0X0 = np.einsum("ij, i ->ij", X0, eta0).T @ X0
X0Z0 = np.einsum("ij, i ->ij", X0, eta0).T @ Z0
Z1Z1 = np.einsum("ij, i ->ij", Z1, eta1).T @ Z1
Z0Z0 = np.einsum("ij, i ->ij", Z0, eta0).T @ Z0
# beginning with derivations of beta1
derv_beta1 = calc_hess_beta1(
X1X1, X1Z1, X1, sd1, rho1v, nu1, lambda1, eta1, n_col_X1, n_col_X0, num_obs
)
derv_beta0 = calc_hess_beta0(
X0X0, X0Z0, X0, sd0, rho0v, nu0, lambda0, eta0, n_col_X1, n_col_X0, num_obs
)
derv_gamma = calc_hess_gamma(
Z1Z1,
Z0Z0,
Z1,
X1,
Z0,
X0,
sd0,
sd1,
rho0v,
rho1v,
eta1,
eta0,
nu0,
nu1,
lambda0,
lambda1,
num_col_X1X0,
num_obs,
)
derv_dist = calc_hess_dist(
Z1,
Z0,
gamma,
sd1,
sd0,
rho1v,
rho0v,
lambda1,
lambda0,
nu1,
nu0,
eta1,
eta0,
num_col_X1X0Z1,
num_obs,
)
# convert results to a symmetric hessian matrix
hessian_upper = np.triu(
np.concatenate((derv_beta1, derv_beta0, derv_gamma, derv_dist), axis=0)
)
aux = hessian_upper.copy()
for i in range(hessian_upper.shape[0]):
hessian_upper[:, i][i + 1 :] = hessian_upper[i][i + 1 :]
return hessian_upper, aux
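# Illustrative call sketch (added; shapes are hypothetical): with n1 treated
# and n0 untreated observations, kx1 covariates in X1, kx0 in X0 and kz
# instruments in Z1/Z0, x0 stacks [beta1 (kx1), beta0 (kx0), gamma (kz),
# sd1, rho1v, sd0, rho0v], so len(x0) == kx1 + kx0 + kz + 4.  The helpers
# calc_hess_gamma and calc_hess_dist used above are assumed to be defined
# elsewhere in this module.
#     hess, aux = compute_hessian(x0, X1, X0, Z1, Z0, Y1, Y0)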
def calc_hess_beta1(
X1X1, X1Z1, X1, sd1, rho1v, nu1, lambda1, eta1, n_col_X1, n_col_X0, num_obs
):
"""This function computes the derivatives of the first order conditions of beta1 wrt
all other parameters.
"""
# define some auxiliary variables
rho_aux1 = lambda1 * rho1v / (1 - rho1v ** 2) - nu1 / (1 - rho1v ** 2) ** 0.5
rho_aux2 = rho1v ** 2 / ((1 - rho1v ** 2) ** (3 / 2)) + 1 / (1 - rho1v ** 2) ** 0.5
sd_aux1 = rho1v ** 2 / (1 - rho1v ** 2)
sd_aux2 = rho1v / np.sqrt(1 - rho1v ** 2)
# derivation wrt beta1
der_b1_beta1 = -(
X1X1 * (rho1v ** 2 / (1 - rho1v ** 2)) * 1 / sd1 ** 2 - X1.T @ X1 / sd1 ** 2
)
# add zeros for derv beta 0
der_b1_beta1 = np.concatenate(
(der_b1_beta1, np.zeros((n_col_X1, n_col_X0))), axis=1
)
# derivation wrt gamma
der_b1_gamma = -(X1Z1 * rho1v / (sd1 * (1 - rho1v ** 2)))
der_b1_gamma = np.concatenate((der_b1_beta1, der_b1_gamma), axis=1)
# derv wrt sigma 1
der_b1_sd = (
-1
/ sd1
* (
(
(eta1 * sd_aux1 * nu1 - norm.pdf(lambda1) / norm.cdf(lambda1) * sd_aux2)
- 2 * nu1
)
* 1
/ sd1
)
)
# expand_dimensions and add
der_b1_sd = np.expand_dims((der_b1_sd.T @ X1), 1)
der_b1_sd = np.concatenate((der_b1_gamma, der_b1_sd), axis=1)
# derv wrt rho1
der_b1_rho = (
-(
eta1 * rho_aux1 * rho1v / ((1 - rho1v ** 2) ** 0.5)
+ norm.pdf(lambda1) / norm.cdf(lambda1) * rho_aux2
)
* 1
/ sd1
)
# expand_dimensions and add
der_b1_rho = np.expand_dims((der_b1_rho.T @ X1), 1)
der_b1_rho = np.concatenate((der_b1_sd, der_b1_rho), axis=1)
# add zeros for sigma0 and rho0
der_b1 = np.concatenate((der_b1_rho, np.zeros((n_col_X1, 2))), axis=1)
der_beta1 = der_b1 / num_obs
return der_beta1
def calc_hess_beta0(
X0X0, X0Z0, X0, sd0, rho0v, nu0, lambda0, eta0, n_col_X1, n_col_X0, num_obs
):
"""This function computes the derivatives of the first order conditions of beta0 wrt
all other parameters.
"""
# define some aux_vars
rho_aux1 = lambda0 * rho0v / (1 - rho0v ** 2) - nu0 / (1 - rho0v ** 2) ** 0.5
rho_aux2 = rho0v ** 2 / ((1 - rho0v ** 2) ** (3 / 2)) + 1 / (1 - rho0v ** 2) ** 0.5
sd_aux1 = rho0v ** 2 / (1 - rho0v ** 2)
sd_aux2 = rho0v / (np.sqrt(1 - rho0v ** 2))
# add zeros for beta0
der_b0_beta1 = np.zeros((n_col_X1, n_col_X0))
# beta0
der_b0_beta0 = (
-(X0X0 * (rho0v ** 2 / (1 - rho0v ** 2)) * 1 / sd0 ** 2) + X0.T @ X0 / sd0 ** 2
)
der_b0_beta0 = np.concatenate((der_b0_beta1, der_b0_beta0), axis=1)
# gamma
der_b0_gamma = -X0Z0 * rho0v / (1 - rho0v ** 2) * 1 / sd0
    der_b0_gamma = np.concatenate((der_b0_beta0, der_b0_gamma), axis=1)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from functools import partial
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
def split(data, segment_ids, indices=None):
"""
Given:
data[M1 x M2 x ... x Md]
the input data
indices[N] the index of each entry of segment_ids into data,
where 0 <= index[i] < M1,
with default indices=[0,1,...N]
segment_ids[N] the segment_id for each entry of indices,
returns K outputs, each one containing data entries corresponding
to one of the segments present in `segment_ids`.
"""
K = max(segment_ids) + 1
outputs = [
np.zeros(
(np.count_nonzero(segment_ids == seg_id),) + data.shape[1:],
dtype=data.dtype)
for seg_id in range(0, K)]
    counts = np.zeros(K, dtype=int)
for i, seg_id in enumerate(segment_ids):
data_idx = i if indices is None else indices[i]
outputs[seg_id][counts[seg_id]] = data[data_idx]
counts[seg_id] += 1
return outputs
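# Illustrative example (added; not part of the original test module):
#     data        = np.array([[1, 2], [3, 4], [5, 6]])
#     segment_ids = np.array([0, 1, 0])
#     split(data, segment_ids)
#     -> [array([[1, 2], [5, 6]]), array([[3, 4]])]
# i.e. rows are grouped by segment id, preserving order within each segment.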
def unsplit(inputs, segment_ids):
""" Inverse operation to `split`, with indices=None """
output = np.zeros((len(segment_ids),) + inputs[0].shape[1:])
K = max(segment_ids) + 1
    counts = np.zeros(K, dtype=int)
"""
Collection of functions to calculate lag correlations
and significance following Ebisuzaki 97 JCLIM
"""
def phaseran(recblk, nsurr,ax):
""" Phaseran by <NAME>: http://www.mathworks.nl/matlabcentral/fileexchange/32621-phase-randomization/content/phaseran.m
Args:
recblk (2D array): Row: time sample. Column: recording.
An odd number of time samples (height) is expected.
If that is not the case, recblock is reduced by 1 sample before the surrogate data is created.
The class must be double and it must be nonsparse.
nsurr (int): is the number of image block surrogates that you want to generate.
Returns:
surrblk: 3D multidimensional array image block with the surrogate datasets along the third dimension
Reference:
<NAME>., <NAME>. Generating Surrogate Data for Time Series with Several Simultaneously Measured Variables (1994)
Physical Review Letters, Vol 73, Number 7
NOTE: Extended to xy data and converted to python by <NAME>
"""
import numpy as np
from ds21grl.misc import AxRoll
# make sure time dimension is axis=0
recblk = AxRoll(recblk,ax)
# Get time length
nfrms = recblk.shape[0]
# force data to have odd time length
if nfrms % 2 == 0:
nfrms = nfrms-1
recblk = recblk[0:nfrms]
# define fft frequency intervals
len_ser = int((nfrms-1)/2)
interv1 = np.arange(1, len_ser+1)
interv2 = np.arange(len_ser+1, nfrms)
# Fourier transform of the original dataset
fft_recblk = np.fft.fft(recblk,axis=0)
# Create nsurr timeseries of random numbers (0,1)
# Also tile fft array for later
if np.ndim(recblk) == 1:
ph_rnd = np.random.rand(len_ser,nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1))
elif np.ndim(recblk) == 2:
ph_rnd = np.random.rand(len_ser,recblk.shape[1],nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1,1))
elif np.ndim(recblk) == 3:
ph_rnd = np.random.rand(len_ser,recblk.shape[1],recblk.shape[2],nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1,1,1))
fft_recblk_surr = np.moveaxis(fft_recblk_surr,0,-1)
# Create the random phases for all the time series
ph_interv1 = np.exp(2*np.pi*1j*ph_rnd)
    ph_interv2 = np.conj(np.flipud(ph_interv1))
##########################################################################
##########################################################################
##
## What are you doing looking at this file?
##
##########################################################################
##########################################################################
#
# Just kidding. There is some useful stuff in here that will help you complete
# some of the labs and your project. Feel free to adapt it.
#
# (Sorry about the awful commenting though. Do as I say, not as I do, etc...)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from ipywidgets import interact, fixed, interactive_output, HBox, Button, VBox, Output, IntSlider, Checkbox, FloatSlider, FloatLogSlider, Dropdown
TEXTSIZE = 16
from IPython.display import clear_output
import time
from scipy.optimize import curve_fit
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as colmap
from copy import copy
from scipy.stats import multivariate_normal
# commenting in here is pretty shocking tbh
# wairakei model
def wairakei_data():
# load some data
tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
# plot some data
f,ax1 = plt.subplots(1,1,figsize=(12,6))
ax1.plot(tq,q,'b-',label='production')
ax1.plot([],[],'ro',label='pressure')
ax1.set_xlabel('time [yr]',size=TEXTSIZE)
ax1.set_ylabel('production rate [kg/s]',size=TEXTSIZE)
ax2 = ax1.twinx()
ax2.plot(tp,p,'ro')
v = 2.
for tpi,pi in zip(tp,p):
ax2.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
ax2.set_ylabel('pressure [bar]',size=TEXTSIZE);
for ax in [ax1,ax2]:
ax.tick_params(axis='both',labelsize=TEXTSIZE)
ax.set_xlim([None,1980])
ax1.legend(prop={'size':TEXTSIZE})
plt.show()
def lpm_plot(i=1):
f,ax = plt.subplots(1,1, figsize=(12,6))
ax.axis('off')
ax.set_xlim([0,1])
ax.set_ylim([0,1])
r = 0.3
cx,cy = [0.5,0.35]
h = 0.3
dh = -0.13
dh2 = 0.05
e = 4.
th = np.linspace(0,np.pi,101)
col = 'r'
ax.fill_between([0,1],[0,0],[1,1],color='b',alpha=0.1, zorder = 0)
ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e, color = col, ls = '-')
ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e, color = col, ls = '-')
ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h, color = col, ls = '--')
ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h, color = col, ls = '--')
ax.plot([cx+r,cx+r],[cy,cy+h],color=col,ls='--')
ax.plot([cx-r,cx-r],[cy,cy+h],color=col,ls='--')
ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color = col, ls = '-')
ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color = col, ls = '-')
ax.plot([cx+r,cx+r],[cy,cy+h+(i>0)*dh+(i>1)*dh2],color=col,ls='-')
ax.plot([cx-r,cx-r],[cy,cy+h+(i>0)*dh+(i>1)*dh2],color=col,ls='-')
ax.fill_between(cx + r*np.cos(th),cy - r*np.sin(th)/e,cy + r*np.sin(th)/e+h+(i>0)*dh+(i>1)*dh2, color='r', alpha = 0.1)
if i > 0:
cube(ax, 0.90, 0.8, 0.025, 'r')
ax.arrow(cx+1.05*r,cy+1.2*(h+dh)+0.05, 0.05, 0.14, color = 'r', head_width=0.02, head_length=0.04, length_includes_head=True)
if i > 1:
cube(ax, 0.85, 0.5, 0.015, 'b')
cube(ax, 0.15, 0.5, 0.015, 'b')
cube(ax, 0.85, 0.35, 0.015, 'b')
cube(ax, 0.15, 0.35, 0.015, 'b')
cube(ax, 0.25, 0.23, 0.015, 'b')
cube(ax, 0.50, 0.18, 0.015, 'b')
cube(ax, 0.75, 0.23, 0.015, 'b')
ax.arrow(0.17,0.5,0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
ax.arrow(0.83,0.5,-0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
ax.arrow(0.17,0.35,0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
ax.arrow(0.83,0.35,-0.02,0.0, color = 'b', head_width=0.02, head_length=0.01, length_includes_head=True)
ax.arrow(0.50,0.21,0.,0.04, color = 'b', head_width=0.01, head_length=0.02, length_includes_head=True)
ax.arrow(0.26,0.25,0.015,0.025, color = 'b', head_width=0.015, head_length=0.01, length_includes_head=True)
ax.arrow(0.74,0.25,-0.015,0.025, color = 'b', head_width=0.015, head_length=0.01, length_includes_head=True)
if i > 2:
for fr in [0.35,0.70,0.90]:
ax.plot(cx + r*np.cos(th), cy + r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', ls = '--')
ax.plot(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', ls = '--')
ax.fill_between(cx + r*np.cos(th), cy - r*np.sin(th)/e+h+fr*(dh+dh2), cy + r*np.sin(th)/e+h+fr*(dh+dh2), color = 'k', alpha = 0.1)
ax.arrow(0.18, cy+h, 0, dh+dh2, color = 'k', head_width=0.01, head_length=0.02, length_includes_head=True)
ax.text(0.17, cy+h+0.5*(dh+dh2), 'lowers\nover time', color='k', ha = 'right', va='center', size=TEXTSIZE-1, fontstyle = 'italic')
xt1,xt2,xt3,xt4 = [0.2,0.06,0.07,0.07]
yt = 0.85
yt2 = 0.05
ax.text(xt1,yt,r'$\dot{P}$ =', color = 'k', size = TEXTSIZE+4)
if i == 0:
ax.text(xt1+xt2,yt,r'$0$', color = 'k', size = TEXTSIZE+4)
if i > 0:
ax.text(xt1+xt2,yt,r'$-aq$', color = 'r', size = TEXTSIZE+4)
if i > 1:
ax.text(xt1+xt2+xt3,yt,r'$-bP$', color = 'b', size = TEXTSIZE+4)
if i > 2:
ax.text(xt1+xt2+xt3+xt4,yt,r'$-c\dot{q}$', color = 'k', size = TEXTSIZE+4)
if i == 0:
ax.text(0.5, yt2, 'reservoir initially at pressure equilibrium', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
elif i == 1:
ax.text(0.5, yt2, 'extraction from reservoir at rate, $q$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
elif i == 2:
ax.text(0.5, yt2, 'recharge from surrounding rock, proportional to $P$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
elif i == 3:
ax.text(0.5, yt2, 'response to extraction not instantaneous: "slow drainage", $\dot{q}$', size = TEXTSIZE+4, ha = 'center', va = 'bottom', fontstyle = 'italic')
plt.show()
def cube(ax,x0,y0,dx,col):
dy = dx*2.
s2 = 2
ax.plot([x0+dx/s2,x0, x0-dx,x0-dx,x0,x0],[y0+dy/s2,y0,y0,y0-dy,y0-dy,y0],color=col,ls='-')
ax.plot([x0-dx,x0-dx+dx/s2,x0+dx/s2,x0+dx/s2,x0],[y0,y0+dy/s2,y0+dy/s2,y0+dy/s2-dy,y0-dy],color=col,ls='-')
ax.fill_between([x0-dx,x0-dx+dx/s2,x0,x0+dx/s2],[y0-dy,y0-dy,y0-dy,y0-dy+dy/s2],[y0,y0+dy/s2,y0+dy/s2,y0+dy/s2],color=col,alpha=0.1)
def lpm_demo():
sldr = IntSlider(value=0, description='slide me!', min = 0, max = 3, step = 1, continuous_update = False, readout=False)
return VBox([sldr, interactive_output(lpm_plot, {'i':sldr})])
def plot_lpm_models(a,b,c):
# load some data
tq,q = np.genfromtxt('wk_production_history.csv', delimiter = ',')[:28,:].T
tp,p = np.genfromtxt('wk_pressure_history.csv', delimiter = ',')[:28,:].T
dqdt = 0.*q # allocate derivative vector
dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
# plot the data with error bars
f,ax = plt.subplots(1,1,figsize=(12,6))
ax.set_xlabel('time [yr]',size=TEXTSIZE)
ax.plot(tp,p,'ro', label = 'observations')
v = 2.
for tpi,pi in zip(tp,p):
ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
# define derivative function
def lpm(pi,t,a,b,c): # order of variables important
qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
dqdti = np.interp(t,tq,dqdt) # interpolate derivative
return -a*qi - b*pi - c*dqdti # compute derivative
# implement an improved Euler step to solve the ODE
def solve_lpm(t,a,b,c):
pm = [p[0],] # initial value
for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
pp = pm[-1] + dpdt1*(t1-t0) # predictor step
dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
return np.interp(t, tp, pm) # interp onto requested times
# solve and plot model
pm = solve_lpm(tp,a,b,c)
ax.plot(tp, pm, 'k-', label='model')
# axes upkeep
ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
ax.tick_params(axis='both',labelsize=TEXTSIZE)
ax.legend(prop={'size':TEXTSIZE})
plt.show()
def lpm_model():
# load flow rate data and compute derivative
tq,q = np.genfromtxt('wk_production_history.csv', delimiter = ',')[:28,:].T
tp,p = np.genfromtxt('wk_pressure_history.csv', delimiter = ',')[:28,:].T
dqdt = 0.*q # allocate derivative vector
dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
# define derivative function
def lpm(pi,t,a,b,c): # order of variables important
qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
dqdti = np.interp(t,tq,dqdt) # interpolate derivative
return -a*qi - b*pi - c*dqdti # compute derivative
    # implement an improved Euler step to solve the ODE
def solve_lpm(t,a,b,c):
pm = [p[0],] # initial value
for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
pp = pm[-1] + dpdt1*(t1-t0) # predictor step
dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
return np.interp(t, tp, pm) # interp onto requested times
# use CURVE_FIT to find "best" model
from scipy.optimize import curve_fit
pars = curve_fit(solve_lpm, tp, p, [1,1,1])[0]
# plot the best solution
pm = solve_lpm(tp,*pars)
f,ax = plt.subplots(1,1,figsize=(12,6))
ax.plot(tp, p, 'ro', label = 'observations')
ax.plot(tp, pm, 'k-', label='model')
ax.set_ylabel("pressure [bar]",size=14); ax.set_xlabel("time",size=14)
ax.legend(prop={'size':14})
ax.set_ylim([25,60])
ax.set_title('a={:2.1e}, b={:2.1e}, c={:2.1e}'.format(*pars),size=14);
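# Added illustration (hypothetical helper, not part of the original notebook
# utilities): the same predictor-corrector (improved Euler / Heun) update used
# inside solve_lpm above, applied to dy/dt = -y with y(0) = 1, whose exact
# solution is exp(-t).
def _heun_demo(dt=0.1, nsteps=50):
    f = lambda y, t: -y                      # simple test ODE
    t, y = 0., 1.
    ys = [y]
    for _ in range(nsteps):
        dydt1 = f(y, t)                      # predictor gradient
        yp = y + dt*dydt1                    # predictor step
        dydt2 = f(yp, t + dt)                # corrector gradient
        y = y + 0.5*dt*(dydt1 + dydt2)       # corrector step
        t += dt
        ys.append(y)
    return np.array(ys)                      # compare with np.exp(-dt*np.arange(nsteps+1))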
def lpm_models():
a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
dlog = 0.1
a = FloatLogSlider(value=a0, base=10, description=r'$a$', min = np.log10(a0)-dlog, max = np.log10(a0)+dlog, step = dlog/10, continuous_update = False)
b = FloatLogSlider(value=b0, base=10, description=r'$b$', min = np.log10(b0)-dlog, max = np.log10(b0)+dlog, step = dlog/10, continuous_update = False)
dlog*=5
c = FloatLogSlider(value=c0, base=10, description=r'$c$', min = np.log10(c0)-dlog, max = np.log10(c0)+dlog, step = dlog/10, continuous_update = False)
io = interactive_output(plot_lpm_models, {'a':a,'b':b,'c':c})
return VBox([HBox([a,b,c]),io])
def plot_lpm_posterior(sa,sb,sc,Nmods):
# load some data
tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
dqdt = 0.*q # allocate derivative vector
dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
dlog = 0.1
Nmods = int(Nmods)
a = np.random.randn(Nmods)*sa+a0
b = np.random.randn(Nmods)*sb+b0
c = np.random.randn(Nmods)*sc+c0
# plot the data with error bars
f = plt.figure(figsize=(12,6))
ax = plt.axes([0.15,0.15,0.5,0.7])
ax1 = plt.axes([0.70,0.69,0.2,0.15])
ax2 = plt.axes([0.70,0.42,0.2,0.15])
ax3 = plt.axes([0.70,0.15,0.2,0.15])
for m0,sm,axi,mv in zip([a0,b0,c0],[sa,sb,sc],[ax1,ax2,ax3],[a,b,c]):
axi.set_yticks([])
if sm < 1.e-6:
axi.plot([m0-3*dlog*m0, m0,m0,m0,m0+3*dlog*m0],[0,0,1,0,0],'r-',zorder=2)
else:
x = np.linspace(m0-3*dlog*m0, m0+3*dlog*m0, 101)
y = np.exp(-(x-m0)**2/(2*sm**2))/np.sqrt(2*np.pi*sm**2)
axi.plot(x,y,'r-',zorder=2)
bins = np.linspace(m0-3*dlog*m0, m0+3*dlog*m0, int(4*np.sqrt(Nmods))+1)
h,e = np.histogram(mv, bins)
h = h/(np.sum(h)*(e[1]-e[0]))
axi.bar(e[:-1],h,e[1]-e[0], color = [0.5,0.5,0.5])
if axi is ax2: dlog*=5
ax1.set_xlabel('$a$',size=TEXTSIZE)
ax2.set_xlabel('$b$',size=TEXTSIZE)
ax3.set_xlabel('$c$',size=TEXTSIZE)
ax.set_xlabel('time [yr]',size=TEXTSIZE)
ax.plot(tp,p,'ro', label = 'observations')
v = 2.
for tpi,pi in zip(tp,p):
ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
# define derivative function
def lpm(pi,t,a,b,c): # order of variables important
qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
dqdti = np.interp(t,tq,dqdt) # interpolate derivative
return -a*qi - b*pi - c*dqdti # compute derivative
# implement an improved Euler step to solve the ODE
def solve_lpm(t,a,b,c):
pm = [p[0],] # initial value
for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
pp = pm[-1] + dpdt1*(t1-t0) # predictor step
dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
return np.interp(t, tp, pm) # interp onto requested times
# solve and plot model
alpha = np.min([0.5,10./Nmods])
lw = 0.5
for ai,bi,ci in zip(a,b,c):
pm = solve_lpm(tp,ai,bi,ci)
ax.plot(tp, pm, 'k-', alpha = alpha, lw = lw)
ax.plot([],[],'k-',alpha=alpha,lw=lw,label='possible models')
# axes upkeep
pm = solve_lpm(tp,a0,b0,c0)
ax.plot(tp, pm, 'k-', lw = 2, label = 'best model')
ax.set_ylabel('pressure [bar]',size=TEXTSIZE);
ax.tick_params(axis='both',labelsize=TEXTSIZE)
ax.legend(prop={'size':TEXTSIZE})
ax.set_xlim([None,1980])
ax.set_title(r'$\sigma_a='+'{:2.1e}'.format(sa)+r'$, $\sigma_b='+'{:2.1e}'.format(sb)+r'$, $\sigma_c='+'{:2.1e}'.format(sc)+'$',size=TEXTSIZE);
plt.show()
def lpm_posterior():
a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
dlog = 0.1
sa = FloatSlider(value=dlog*a0/2, description=r'$\sigma_a$', min = 0., max = dlog*a0, step = dlog*a0/10., continuous_update = False)
sb = FloatSlider(value=dlog*b0/2, description=r'$\sigma_b$', min = 0., max = dlog*b0, step = dlog*b0/10., continuous_update = False)
dlog*=5
sc = FloatSlider(value=dlog*c0/2, description=r'$\sigma_c$', min = 0., max = dlog*c0, step = dlog*c0/10., continuous_update = False)
Nmods = FloatLogSlider(value = 4, base=2, description='samples', min = 0, max = 8, step = 1, continuous_update=False)
io = interactive_output(plot_lpm_posterior, {'sa':sa,'sb':sb,'sc':sc,'Nmods':Nmods})
return VBox([HBox([sa,sb,sc,Nmods]),io])
def plot_lpm_prediction(Nmods, reveal, sa, sb, sc):
# load some data
tq, q = np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True)
tp, p = np.genfromtxt('wk_pressure_history.csv', delimiter=',', unpack=True)
dqdt = 0.*q # allocate derivative vector
dqdt[1:-1] = (q[2:]-q[:-2])/(tq[2:]-tq[:-2]) # central differences
dqdt[0] = (q[1]-q[0])/(tq[1]-tq[0]) # forward difference
dqdt[-1] = (q[-1]-q[-2])/(tq[-1]-tq[-2]) # backward difference
if not reveal:
iq = np.argmin(abs(tq-1981))
ip = np.argmin(abs(tp-1981))
else:
iq = len(tq)
ip = len(tp)
a0,b0,c0 = [2.2e-3,1.1e-1,6.8e-3]
dlog = 0.1
Nmods = int(Nmods)
np.random.seed(13)
a = np.random.randn(Nmods)*sa+a0
b = np.random.randn(Nmods)*sb+b0
c = np.random.randn(Nmods)*sc+c0
# plot the data with error bars
f = plt.figure(figsize=(15,5))
ax = plt.axes([0.15,0.15,0.5,0.7])
ax2 = plt.axes([0.75,0.15,0.20,0.7])
ax.set_xlabel('time [yr]',size=TEXTSIZE)
ax.plot(tp[:ip],p[:ip],'ro', label = 'observations')
v = 2.
for tpi,pi in zip(tp[:ip],p[:ip]):
ax.plot([tpi,tpi],[pi-v,pi+v], 'r-', lw=0.5)
# define derivative function
def lpm(pi,t,a,b,c): # order of variables important
qi = np.interp(t,tq,q) # interpolate (piecewise linear) flow rate
dqdti = np.interp(t,tq,dqdt) # interpolate derivative
return -a*qi - b*pi - c*dqdti # compute derivative
# implement an improved Euler step to solve the ODE
def solve_lpm(t,a,b,c):
pm = [p[0],] # initial value
for t0,t1 in zip(tp[:-1],tp[1:]): # solve at pressure steps
dpdt1 = lpm(pm[-1]-p[0], t0, a, b, c) # predictor gradient
pp = pm[-1] + dpdt1*(t1-t0) # predictor step
dpdt2 = lpm(pp-p[0], t1, a, b, c) # corrector gradient
pm.append(pm[-1] + 0.5*(t1-t0)*(dpdt2+dpdt1)) # corrector step
return np.interp(t, tp, pm) # interp onto requested times
# solve and plot model
alpha = np.min([0.5,10./Nmods])
lw = 0.5
pmf = []
for ai,bi,ci in zip(a,b,c):
pm = solve_lpm(tp,ai,bi,ci)
ax.plot(tp, pm, 'k-', alpha = alpha, lw = lw)
pmf.append(pm[-1])
ax.plot([],[],'k-',alpha=0.5,lw=lw,label='possible models')
pm = solve_lpm(tp,a0,b0,c0)
ax.plot(tp, pm, 'k-', lw = 2, label = 'best model')
ax.axvline(tp[-1], color = 'k', linestyle = ':', label='predict future')
bins = np.linspace(np.min(pmf)*0.999, np.max(pmf)*1.001, int(np.sqrt(Nmods))+1)
h,e = np.histogram(pmf, bins)
h = h/(np.sum(h)*(e[1]-e[0]))
ax2.bar(e[:-1],h,e[1]-e[0], color = [0.5,0.5,0.5])
ax2.set_xlim([30,45])
ax2.set_ylim([0,1])
if Nmods>10:
ax2.axvline(pm[-1], label='best model',color = 'k', linestyle = '-')
if reveal:
ax2.axvline(p[-1], label='true process',color = 'r', linestyle = '-')
ax2.fill_between([p[-1]-v, p[-1]+v], [0,0], [1,1], color='r', alpha=0.5)
            yf5, yf95 = np.percentile(pmf, [5, 95])
"""
network for learning
"""
import tensorflow as tf
import numpy as np
from config import CFG
import random
class value_network:
def __init__(self, sess, game, filter_size = 256):
self.sess = sess
self.name = 'value_network'
self.rows = game.rows
self.columns = game.columns
self.action_size = game.action_size
self.filter_size = filter_size
self.num_actions = game.columns*(game.columns-1)
self._build_net()
def _build_net(self):
with tf.variable_scope(self.name):
self.state = tf.placeholder(tf.float32, shape=[None, self.rows * 2 * self.columns])
input_layer = tf.reshape(self.state, [-1, self.rows * 2, self.columns, 1])
self.value = tf.placeholder(tf.float32, shape=[None])
conv1 = tf.layers.conv2d(inputs=input_layer, filters=self.filter_size, kernel_size=[2, 2], padding='SAME',
kernel_initializer=tf.contrib.layers.xavier_initializer(), strides=1)
conv1_batch_norm = tf.layers.batch_normalization(inputs=conv1)
resnet_in_out = tf.nn.relu(conv1_batch_norm)
for _ in range(CFG.value_head_layers):
res_conv1 = tf.layers.conv2d(inputs=resnet_in_out, filters=self.filter_size, kernel_size=[2, 2],
kernel_initializer=tf.contrib.layers.xavier_initializer(), padding='SAME', strides=1)
batch_norm1 = tf.layers.batch_normalization(inputs=res_conv1)
activate_batch_norm1 = tf.nn.relu(batch_norm1)
res_conv2 = tf.layers.conv2d(inputs=activate_batch_norm1, filters=self.filter_size, kernel_size=[2, 2],
kernel_initializer=tf.contrib.layers.xavier_initializer(), padding='SAME', strides=1)
batch_norm2 = tf.layers.batch_normalization(inputs=res_conv2)
added_layer = tf.add(batch_norm2, resnet_in_out)
resnet_in_out = tf.nn.relu(added_layer)
flatten_resnet = tf.reshape(resnet_in_out, [-1, self.filter_size * self.rows * 2 * self.columns])
dense1 = tf.layers.dense(inputs=flatten_resnet, units=self.filter_size * self.rows * self.columns/4, activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
dense2 = tf.layers.dense(inputs=dense1, units=self.filter_size * self.rows * self.columns / 16,
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
vs = tf.layers.dense(inputs=dense2, units=1)
self.vs = tf.nn.sigmoid(vs)
self.v_cost = tf.losses.mean_squared_error(self.vs, tf.reshape(self.value, shape=[-1, 1]))
# l2_regularization
total_vars = tf.compat.v1.trainable_variables()
weights_name_list = [var for var in total_vars if "kernel" in var.name]
loss_holder = []
for w in range(len(weights_name_list)):
l2_loss = tf.nn.l2_loss(weights_name_list[w])
loss_holder.append(l2_loss)
self.regular_cost = tf.reduce_mean(loss_holder) * CFG.l2_val
self.total_cost = self.v_cost + self.regular_cost
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=CFG.learning_rate).minimize(self.total_cost)
class value_pi_network:
def __init__(self, sess, game, filter_size = 256):
self.sess = sess
self.name = 'value_pi_network'
self.rows = game.rows
self.columns = game.columns
self.action_size = game.action_size
self.filter_size = filter_size
self.num_actions = game.columns*(game.columns-1)
self._build_net()
def _build_net(self):
with tf.variable_scope(self.name):
self.state = tf.placeholder(tf.float32, shape=[None, self.rows * 2 * self.columns])
input_layer = tf.reshape(self.state, [-1, self.rows * 2, self.columns, 1])
self.value_pi = tf.placeholder(tf.float32, shape=[None, self.num_actions])
conv1 = tf.layers.conv2d(inputs=input_layer, filters=self.filter_size, kernel_size=[2, 2], padding='SAME',
kernel_initializer=tf.contrib.layers.xavier_initializer(), strides=1)
conv1_batch_norm = tf.layers.batch_normalization(inputs=conv1)
resnet_in_out = tf.nn.relu(conv1_batch_norm)
for _ in range(CFG.value_pi_head_layers):
res_conv1 = tf.layers.conv2d(inputs=resnet_in_out, filters=self.filter_size, kernel_size=[2, 2],
kernel_initializer=tf.contrib.layers.xavier_initializer(), padding='SAME', strides=1)
batch_norm1 = tf.layers.batch_normalization(inputs=res_conv1)
activate_batch_norm1 = tf.nn.relu(batch_norm1)
res_conv2 = tf.layers.conv2d(inputs=activate_batch_norm1, filters=self.filter_size, kernel_size=[2, 2],
kernel_initializer=tf.contrib.layers.xavier_initializer(), padding='SAME', strides=1)
batch_norm2 = tf.layers.batch_normalization(inputs=res_conv2)
added_layer = tf.add(batch_norm2, resnet_in_out)
resnet_in_out = tf.nn.relu(added_layer)
flatten_resnet = tf.reshape(resnet_in_out, [-1, self.filter_size * self.rows * self.columns * 2])
dense1 = tf.layers.dense(inputs=flatten_resnet, units=self.filter_size * self.rows * self.columns/4, activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
dense2 = tf.layers.dense(inputs=dense1, units=self.filter_size * self.rows * self.columns / 16,
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
value_policy = tf.layers.dense(inputs=dense2, units=self.num_actions, kernel_initializer=tf.contrib.layers.xavier_initializer())
self.value_policy = tf.nn.sigmoid(value_policy)
self.value_pi_cost = tf.losses.mean_squared_error(self.value_policy, self.value_pi)
# l2_regularization
total_vars = tf.compat.v1.trainable_variables()
weights_name_list = [var for var in total_vars if "kernel" in var.name]
loss_holder = []
for w in range(len(weights_name_list)):
l2_loss = tf.nn.l2_loss(weights_name_list[w])
loss_holder.append(l2_loss)
self.regular_cost = tf.reduce_mean(loss_holder) * CFG.l2_val
self.total_cost = self.value_pi_cost + self.regular_cost
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=CFG.learning_rate).minimize(self.total_cost)
class NeuralNetworkWrapper:
def __init__(self, game, sess):
self.game = game
self.value_net = value_network(sess, self.game)
self.value_pi_net = value_pi_network(sess, self.game)
self.sess = sess
def predict_value(self, state):
state = np.reshape(state, newshape=[-1, self.game.rows * 2 * self.game.columns])
v = self.sess.run(self.value_net.vs, feed_dict={self.value_net.state: state})
return v[0][0]
def predict_value_policy(self, state):
        state = np.reshape(state, newshape=[-1, self.game.rows * 2 * self.game.columns])
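    # Usage sketch (added): given a game object exposing rows, columns and
    # action_size, and a tf.compat.v1.Session with initialized variables,
    #     wrapper = NeuralNetworkWrapper(game, sess)
    #     v = wrapper.predict_value(state)           # scalar in (0, 1)
    #     vp = wrapper.predict_value_policy(state)   # per-action sigmoid values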
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# Added optional external roi loader for Fast R-CNN
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast/er R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import cv2
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
assert (cfg.TRAIN.BATCH_SIZE % num_images == 0
), 'num_images ({}) must divide BATCH_SIZE ({})'.format(
num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
# For the COCO ground truth boxes,
# exclude the ones that are ''iscrowd''
gt_inds = np.where(
roidb[0]['gt_classes'] !=
0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
if not cfg.TRAIN.HAS_RPN: # not using RPN
# Now, build the region of interest blob
        roi_inds = np.where(roidb[0]['gt_classes'] == 0)
import itertools
import numpy as np
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
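# Quick illustrative check (added):
#     list(pairwise([1, 2, 3, 4]))  ->  [(1, 2), (2, 3), (3, 4)]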
def struct_to_ndarray(array):
"""Turns returns a view of a structured array as a regular ndarray."""
return array.view(array.dtype[0]).reshape((array.shape[0], -1))
def reindex_vertices(arrays=None):
all_arrays = np.hstack(arrays)
array_ncols = tuple(array.shape[1] for array in arrays)
# Build a new array list, composed of only the unique combinations (no redundant data)
row_searchable_array = all_arrays.view(all_arrays.dtype.descr * all_arrays.shape[1])
    unique_combs = np.sort(np.unique(row_searchable_array))
from jax.scipy.signal import convolve2d as conv2
import jax, jax.numpy as jnp
import tqdm
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import gridspec
from .helpers import reconstruct, reconstruct_numpy, shift_factors, compute_loadings_percent_power, get_shapes, shifted_matrix_product, trim_shapes
def update_W(W, H, X, Lambda, M, L, K, smooth_kernel, eps, lambda_OrthW, lambda_L1W):
X_hat = reconstruct(W, H)
X = jnp.where(M==0, X_hat, X)
XHT = shifted_matrix_product(X,H.T,jnp.arange(L)-1,None,0)
X_hat_HT = shifted_matrix_product(X_hat,H.T,jnp.arange(L)-1,None,0)
XS = conv2(X, smooth_kernel, 'same')
XS_HT = shifted_matrix_product(XS, H.T, jnp.arange(L)-1,None,0)
dWWdW = jnp.dot(lambda_OrthW * jnp.sum(W, axis=2), 1. - jnp.eye(K))
dRdW = Lambda * jax.vmap(lambda x: jnp.dot(x, 1-jnp.eye(K)))(XS_HT) + lambda_L1W + dWWdW
return W * jnp.moveaxis(jnp.divide(XHT, X_hat_HT + dRdW + eps),0,2)
def seqnmf_iter(W, H, X, X_hat, Lambda, M, L, K, smooth_kernel, shift, eps,
W_fixed, lambda_OrthW, lambda_OrthH, lambda_L1W, lambda_L1H):
WTX = shifted_matrix_product(W.T,X,-jnp.arange(L)+1,0,1).sum(0)
WTX_hat = shifted_matrix_product(W.T,X_hat,-jnp.arange(L)+1,0,1).sum(0)
dRdH = jnp.dot(Lambda * (1 - jnp.eye(K)), conv2(WTX, smooth_kernel, 'same'))
dHHdH = jnp.dot(lambda_OrthH * (1 - jnp.eye(K)), conv2(H, smooth_kernel, 'same'))
dRdH += lambda_L1H + dHHdH
H = H * jnp.divide(WTX, WTX_hat + dRdH + eps)
W,H = jax.lax.cond(shift, shift_factors, lambda WH: WH, (W,H))
W = W + eps*shift
norms = jnp.sqrt(jnp.sum(jnp.power(H, 2), axis=1)).T
H = jnp.dot(jnp.diag(jnp.divide(1., norms + eps)), H)
W = jax.vmap(jnp.dot, in_axes=(2,None), out_axes=2)(W,jnp.diag(norms))
update = lambda w: update_W(w, H, X, Lambda, M, L, K, smooth_kernel, eps, lambda_OrthW, lambda_L1W)
W = jax.lax.cond(not W_fixed, update, lambda w: w, W)
X_hat = reconstruct(W, H)
X = jnp.where(M==0, X_hat, X)
cost = jnp.sqrt(jnp.mean(jnp.power(X - X_hat, 2)))
return W, H, X, X_hat, cost
def seqnmf(X, K=10, L=100, Lambda=.001, W_init=None, H_init=None,
plot_it=False, max_iter=100, tol=-np.inf, shift=True, sort_factors=True,
lambda_L1W=0, lambda_L1H=0, lambda_OrthH=0, lambda_OrthW=0, M=None, W_fixed=False):
'''
:param X: an N (features) by T (timepoints) data matrix to be factorized using seqNMF
:param K: the (maximum) number of factors to search for; any unused factors will be set to all zeros
:param L: the (maximum) number of timepoints to consider in each factor; any unused timepoints will be set to zeros
:param Lambda: regularization parameter (default: 0.001)
:param W_init: initial factors (if unspecified, use random initialization)
:param H_init: initial per-timepoint factor loadings (if unspecified, initialize randomly)
:param plot_it: if True, display progress in each update using a plot (default: False)
:param max_iter: maximum number of iterations/updates
:param tol: if cost is within tol of the average of the previous 5 updates, the algorithm will terminate (default: tol = -inf)
:param shift: allow timepoint shifts in H
:param sort_factors: sort factors by time
:param lambda_L1W: regularization parameter for W (default: 0)
:param lambda_L1H: regularization parameter for H (default: 0)
:param lambda_OrthH: regularization parameter for H (default: 0)
:param lambda_OrthW: regularization parameter for W (default: 0)
:param M: binary mask of the same size as X, used to ignore a subset of the data during training (default: use all data)
:param W_fixed: if true, fix factors (W), e.g. for cross validation (default: False)
:return:
:W: N (features) by K (factors) by L (per-factor timepoints) tensor of factors
:H: K (factors) by T (timepoints) matrix of factor loadings (i.e. factor timecourses)
:cost: a vector of length (number-of-iterations + 1) containing the initial cost and cost after each update (i.e. the reconstruction error)
:loadings: the per-factor loadings-- i.e. the explanatory power of each individual factor
:power: the total power (across all factors) explained by the full reconstruction
'''
assert np.all(X >= 0), 'all data values must be positive!'
N = X.shape[0]
T = X.shape[1] + 2 * L
X = jnp.concatenate((jnp.zeros([N, L]), X, jnp.zeros([N, L])), axis=1)
if W_init is None:
W_init = jnp.array(np.max(X) * np.random.rand(N, K, L))
if H_init is None:
H_init = jnp.array(np.max(X) * np.random.rand(K, T) / np.sqrt(T / 3))
if M is None:
M = jnp.ones([N, T])
W = W_init
H = H_init
X_hat = reconstruct(W, H)
X = jnp.where(M==0, X_hat, X)
smooth_kernel = jnp.ones([1, (2 * L) - 1])
eps = jnp.max(X) * 1e-6
last_time = False
costs = np.zeros(max_iter + 1)
costs[0] = jnp.sqrt(jnp.mean(jnp.power(X - X_hat, 2)))
update = jax.jit(lambda W,H,X,X_hat,Lambda: seqnmf_iter(
W, H, X, X_hat, Lambda, M, L, K, smooth_kernel, shift, eps,
W_fixed, lambda_OrthW, lambda_OrthH, lambda_L1W, lambda_L1H))
for i in tqdm.trange(max_iter):
if (i == max_iter - 1) or ((i > 6) and (costs[i + 1] + tol) > np.mean(costs[i - 6:i])):
costs = costs[:(i + 2)]
last_time = True
if i > 0: Lambda = 0
W, H, X, X_hat, cost = update(W, H, X, X_hat, Lambda)
costs[i] = cost
if plot_it:
if i > 0:
try:
h.close()
except:
pass
h = plot(W, H)
h.suptitle(f'iteration {i}', fontsize=8)
h.show()
if last_time:
break
X = X[:, L:-L]
X_hat = X_hat[:, L:-L]
H = H[:, L:-L]
power = jnp.divide(jnp.sum(jnp.power(X, 2)) - jnp.sum(jnp.power(X - X_hat, 2)), jnp.sum(jnp.power(X, 2)))
loadings = compute_loadings_percent_power(X, W, H)
W = np.array(W)
H = np.array(H)
power = np.array(power)
    loadings = np.array(loadings)
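    # Usage sketch (added; per the docstring above, seqnmf returns
    # W, H, cost, loadings and power):
    #     W, H, cost, loadings, power = seqnmf(X, K=5, L=50, Lambda=0.001)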
import logging
logging.basicConfig(filename='logs.log',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info("let's get it started")
import numpy as np
from neuron import h
h.load_file('nrngui.hoc')
#paralleling NEURON staff
pc = h.ParallelContext()
rank = int(pc.id())
nhost = int(pc.nhost())
#param
ees_fr = 40 # frequency of EES
nMN = 20
nAff = 12
nInt = 19
N = 50
exnclist = []
inhnclist = []
eesnclist = []
stimnclist = []
from interneuron import interneuron
from motoneuron import motoneuron
from bioaff import bioaff
from bioaffrat import bioaffrat
import random
'''
network creation
see topology https://github.com/research-team/memristive-spinal-cord/blob/master/doc/diagram/cpg_generator_FE_paper.png
and all will be clear
'''
class RA:
def __init__(self, ees_fr, N):
self.interneurons = []
self.motoneurons = []
self.afferents = []
self.stims = []
self.ncell = N
self.groups = []
self.motogroups = []
self.affgroups = []
self.Ia_aff_E = self.addpool(nAff, "Ia_aff_E", "aff")
self.Ia_aff_F = self.addpool(nAff, "Ia_aff_F", "aff")
self.mns_E = self.addpool(nMN, "mns_E", "moto")
self.mns_F = self.addpool(nMN, "mns_F", "moto")
'''reflex arc'''
self.Ia_E = self.addpool(nInt, "Ia_E", "int")
self.R_E = self.addpool(nInt, "R_E", "int")
self.Ia_F = self.addpool(nInt, "Ia_F", "int")
self.R_F = self.addpool(nInt, "R_F", "int")
# self.Iagener_E = []
# self.Iagener_F = []
'''ees'''
self.ees = self.addgener(1, ees_fr, 10000, False)
self.C1 = self.addgener(50, 200, 15)
self.C0 = self.addgener(150, 200, 15)
self.Iagener_E = self.addIagener(self.mns_E)
self.Iagener_F = self.addIagener(self.mns_F)
genconnect(self.ees, self.Ia_aff_E, 0.65, 2)
genconnect(self.ees, self.Ia_aff_F, 0.5, 2)
genconnect(self.Iagener_E, self.Ia_aff_E, 0.5, 2)
genconnect(self.Iagener_F, self.Ia_aff_F, 0.5, 2)
connectcells(self.Ia_aff_E, self.mns_E, 0.65, 2)
connectcells(self.Ia_aff_F, self.mns_F, 0.65, 2)
genconnect(self.C1, self.mns_E, 0.5, 3)
genconnect(self.C0, self.mns_F, 0.5, 3)
genconnect(self.C1, self.Ia_aff_F, 0.8, 1, True)
genconnect(self.C0, self.Ia_aff_E, 0.8, 1, True)
'''reflex arc'''
connectcells(self.Ia_aff_E, self.Ia_E, 0.08, 1)
connectcells(self.mns_E, self.R_E, 0.00025, 1)
connectcells(self.Ia_E, self.mns_F, 0.08, 1, True)
connectcells(self.R_E, self.mns_E, 0.0005, 1, True)
connectcells(self.R_E, self.Ia_E, 0.001, 1, True)
connectcells(self.Ia_aff_F, self.Ia_F, 0.08, 1)
connectcells(self.mns_F, self.R_F, 0.0004, 1)
connectcells(self.Ia_F, self.mns_E, 0.04, 1, True)
connectcells(self.R_F, self.mns_F, 0.0005, 1, True)
connectcells(self.R_F, self.Ia_F, 0.001, 1, True)
connectcells(self.R_E, self.R_F, 0.04, 1, True)
connectcells(self.R_F, self.R_E, 0.04, 1, True)
connectcells(self.Ia_E, self.Ia_F, 0.08, 1, True)
connectcells(self.Ia_F, self.Ia_E, 0.08, 1, True)
def addpool(self, num, name="test", neurontype="int"):
'''
Creates interneuronal pool and returns gids of pool
Parameters
----------
num: int
neurons number in pool
neurontype: string
int: interneuron
delay: interneuron with 5ht
moto: motoneuron
aff: afferent
Returns
-------
gids: list
the list of neurons gids
'''
gids = []
gid = 0
if neurontype.lower() == "delay":
delaytype = True
else:
delaytype = False
if neurontype.lower() == "moto":
diams = motodiams(num)
for i in range(rank, num, nhost):
if neurontype.lower() == "moto":
cell = motoneuron(diams[i])
self.motoneurons.append(cell)
elif neurontype.lower() == "aff":
cell = bioaffrat()
self.afferents.append(cell)
else:
cell = interneuron(delaytype)
self.interneurons.append(cell)
while pc.gid_exists(gid) != 0:
gid += 1
gids.append(gid)
pc.set_gid2node(gid, rank)
nc = cell.connect2target(None)
pc.cell(gid, nc)
# ToDo remove me (Alex code) - NO
if neurontype.lower() == "moto":
self.motogroups.append((gids, name))
elif neurontype.lower() == "aff":
self.affgroups.append((gids, name))
else:
self.groups.append((gids, name))
return gids
def addgener(self, start, freq, nums, r=True):
'''
Creates generator and returns generator gid
Parameters
----------
start: int
generator start up
freq: int
generator frequency
nums: int
signals number
Returns
-------
gid: int
generator gid
'''
gid = 0
stim = h.NetStim()
stim.number = nums
if r:
stim.noise = 0.1
else:
stim.noise = 0.0
stim.interval = 1000 / freq
stim.start = start
#skinstim.noise = 0.1
self.stims.append(stim)
while pc.gid_exists(gid) != 0:
gid += 1
pc.set_gid2node(gid, rank)
ncstim = h.NetCon(stim, None)
pc.cell(gid, ncstim)
return gid
def addIagener(self, mn):
'''
Creates self.Ia generators and returns generator gids
Parameters
----------
start: int
generator start up
num: int
number in pool
Returns
-------
gids: list
generators gids
'''
gid = 0
srcgid = random.randint(mn[0], mn[-1])
moto = pc.gid2cell(srcgid)
print(moto)
stim = h.IaGenerator(0.5)
h.setpointer(moto.muscle.muscle_unit(0.5)._ref_F_fHill, 'fhill', stim)
self.stims.append(stim)
while pc.gid_exists(gid) != 0:
gid += 1
pc.set_gid2node(gid, rank)
ncstim = h.NetCon(stim, None)
pc.cell(gid, ncstim)
print(gid)
return gid
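# Usage sketch (added; requires the compiled NEURON mechanisms used above,
# e.g. motoneuron, bioaffrat and IaGenerator, so shown as comments only):
#     cpg = RA(ees_fr, N)
#     v_ext = spike_record(cpg.mns_E)   # one h.Vector per extensor motoneuron
#     h.tstop = 250
#     h.run()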
def connectcells(pre, post, weight, delay, inhtype = False):
''' Connects with excitatory synapses
Parameters
----------
pre: list
list of presynase neurons gids
post: list
list of postsynapse neurons gids
weight: float
weight of synapse
used with Gaussself.Ian distribution
delay: int
synaptic delay
used with Gaussself.Ian distribution
nsyn: int
numder of synapses
inhtype: bool
is this connection inhibitory?
'''
nsyn = random.randint(3, 5)
for i in post:
if pc.gid_exists(i):
for j in range(nsyn):
srcgid = random.randint(pre[0], pre[-1])
target = pc.gid2cell(i)
if inhtype:
syn = target.synlistinh[j]
nc = pc.gid_connect(srcgid, syn)
inhnclist.append(nc)
# str nc.weight[0] = 0
else:
syn = target.synlistex[j]
nc = pc.gid_connect(srcgid, syn)
exnclist.append(nc)
# str nc.weight[0] = random.gauss(weight, weight / 10)
nc.weight[0] = random.gauss(weight, weight / 10)
nc.delay = random.gauss(delay, delay / 9)
def genconnect(gen_gid, afferents_gids, weight, delay, inhtype = False):
''' Connects with generator
Parameters
----------
afferents_gids: list
list of presynase neurons gids
gen_gid: int
generator gid
weight: float
weight of synapse
used with Gaussian distribution
delay: int
synaptic delay
used with Gaussian distribution
nsyn: int
numder of synapses
inhtype: bool
is this connection inhibitory?
'''
nsyn = random.randint(3, 5)
for i in afferents_gids:
if pc.gid_exists(i):
for j in range(nsyn):
target = pc.gid2cell(i)
if inhtype:
syn = target.synlistinh[j]
else:
syn = target.synlistex[j]
nc = pc.gid_connect(gen_gid, syn)
stimnclist.append(nc)
nc.delay = random.gauss(delay, delay / 7)
nc.weight[0] = random.gauss(weight, weight / 10)
def spike_record(pool):
''' Records spikes from gids
Parameters
----------
pool: list
list of neurons gids
version: int
test number
Returns
-------
v_vec: list of h.Vector()
recorded voltage
'''
v_vec = []
for i in pool:
cell = pc.gid2cell(i)
vec = h.Vector()
vec.record(cell.soma(0.5)._ref_v)
v_vec.append(vec)
return v_vec
def motodiams(number):
nrn_number = number
standby_percent = 70
active_percent = 100 - standby_percent
standby_size = int(nrn_number * standby_percent / 100)
active_size = nrn_number - standby_size
loc_active, scale_active = 27, 3
loc_stanby, scale_stanby = 44, 4
x2 = np.concatenate([np.random.normal(loc=loc_active, scale=scale_active, size=active_size),
                         np.random.normal(loc=loc_stanby, scale=scale_stanby, size=standby_size)])
import sys
import numpy as np
import pandas as pd
import pmdarima as pm
from timeseries.transform.transformer import Transformer
class IHSTransformer(Transformer):
def __init__(self, ts, interval=None, d="auto", lmb="auto",
difference_first=True,
save_loglikelihood_deriv=False, verbose=False):
self.verbose = verbose
self.d = d
if lmb is not None and lmb != "auto":
lmb = float(lmb)
self.lmb = lmb
self.ihs_after_diff = difference_first
self.save_loglikelihood_deriv = save_loglikelihood_deriv
self.std = None
self.mean = None
self.transform(ts, interval)
def __next_val__(self, diff):
self.prev_val = self.prev_val + diff
return self.prev_val
def transform(self, ts, interval=None):
def difference(x):
if self.d >= 1:
x = np.diff(x, 1)
if self.d >= 2:
x = np.diff(x, self.d - 1)
return x
def ihs_trans(x):
if self.lmb == "auto":
if self.save_loglikelihood_deriv:
self.lmb, self.loglikelihood_deriv = \
calc_mle_of_lmb(x, get_loglikelihood_deriv=True)
else:
self.lmb = calc_mle_of_lmb(x)
if self.verbose:
if self.lmb is None:
print(f"MLE of IHS lambda cannot be found",
file=sys.stderr)
else:
print(f"MLE of IHS lambda: {self.lmb:e}",
file=sys.stderr)
if type(self.lmb) is float:
x = np.arcsinh(x * self.lmb) / self.lmb
return x
ts, interval = self.__get_ts_and_interval__(ts, interval)
if np.any(ts.isnull()):
raise Exception("Series has missing value")
if not self.ihs_after_diff:
# if self.d == "auto":
# x = interval.view(x) if interval is not None else x
# x, interval = self.__get_ts_and_interval__(x, interval)
ts = ihs_trans(ts)
if self.d == "auto":
ts = interval.view(ts) if interval is not None else ts
try:
self.d = pm.arima.ndiffs(ts)
except:
self.d = 2
if self.verbose:
print(f"Order of differencing: {self.d}", file=sys.stderr)
ts, interval = self.__get_ts_and_interval__(ts, interval)
assert (interval is not None)
ts = interval.view(ts, prevs=self.d)
x = difference(ts)
if self.ihs_after_diff:
x = ihs_trans(x)
if self.mean is None:
self.mean = np.mean(x)
x = x - self.mean
if self.std is None:
self.std = np.sqrt(np.var(x))
if int(self.std) == 0:
self.std = 1
x /= self.std
if type(ts) is pd.Series:
index = ts.index[self.d:]
x = pd.Series(x, index=index)
return x
def detransform(self, diffs_ts, prev_original_values, index=None):
def dedifference(ts, prev_original_values):
if self.d == 2:
self.prev_val = prev_original_values[-1] - \
prev_original_values[-2]
ts = ts.apply(self.__next_val__)
if self.d >= 1:
self.prev_val = prev_original_values[-1]
ts = ts.apply(self.__next_val__)
return ts
def ihs_trans(x):
if type(self.lmb) is float:
x = np.arcsinh(x * self.lmb) / self.lmb
return x
def ihs_detrans(ts):
if type(self.lmb) is float:
ts = (ts * self.lmb).apply(np.sinh) / self.lmb
return ts
if type(prev_original_values) is not np.ndarray:
prev_original_values = np.array(prev_original_values)
assert len(prev_original_values) >= self.d
if index is None:
if type(diffs_ts) is pd.Series:
index = diffs_ts.index
else:
index = pd.Index(np.arange(len(diffs_ts)))
ts = pd.Series(diffs_ts, index=index)
ts = (ts * self.std)
ts += self.mean
if self.ihs_after_diff:
ts = ihs_detrans(ts)
ts = dedifference(ts, prev_original_values)
else:
prev_original_values = ihs_trans(prev_original_values)
ts = dedifference(ts, prev_original_values)
ts = ihs_detrans(ts)
return ts
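# Hedged usage sketch (the series below is illustrative, not from the original module):
# ts = pd.Series(np.random.lognormal(size=200))
# tr = IHSTransformer(ts, d="auto", lmb="auto")
# z = tr.transform(ts)                                  # difference -> IHS -> standardize (defaults)
# back = tr.detransform(z, prev_original_values=ts.values[:tr.d], index=z.index)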
def calc_mle_of_lmb(x, get_loglikelihood_deriv=False):
lmbs = pd.Series(dtype=np.float64)
for v in np.power(10., np.arange(-8, 30)):
lmbs = lmbs.append(pd.Series(np.arange(v, 10 * v, 0.1 * v)))
lmbs = lmbs.values
derivs = []
used_lmbs = []
for i, lmb in enumerate(lmbs):
try:
d = derivative_of_concentrated_loglikelihood(x, lmb)
if not np.any(np.isnan(d)):
used_lmbs.append(lmb)
derivs.append(d)
except:
pass
used_lmbs = np.array(used_lmbs)
derivs = np.array(derivs)
differential_quotients = []
for i in range(len(used_lmbs) - 1):
d = np.abs(derivs[i + 1] - derivs[i])
        if np.sign(derivs[i + 1]) * np.sign(derivs[i]) < 0:
# -*- coding: utf-8 -*-
'''
Project: Product Aesthetic Design: A Machine Learning Augmentation
Authors: <NAME>, Yale University
Email: <EMAIL>
License: MIT License
OSS Code Attribution (see Licensing Inheritance):
Portions of Code From or Modified from Open Source Projects:
https://github.com/tkarras/progressive_growing_of_gans
https://github.com/AaltoVision/pioneer
https://github.com/DmitryUlyanov/AGE
https://github.com/akanimax/attn_gan_pytorch/
'''
import h5py
import numpy as np
import torch
import config
c = config.c
class Chairs(torch.utils.data.Dataset):
"""
This is the chair dataset for the open source / open data code release.
It is different than the car dataset (primary dataset) in the paper due to
data mapping, such that this code may not be as efficient as possible for the
chair dataset.
The dataset is built on wrapping around the Torch Dataset object as well as HDF5
for the underlying dataformat. This is a very fast data format and supports both
loading into RAM or directly off disk.
Make sure your HDF5 installation is updated to support SWMR mode for parallel
access, as most default OS packages are older than this support.
"""
def __init__(self,
use_RAM,
train_x=None,
train_y=None,
valid_x=None,
valid_y=None,
test_x=None,
test_y=None,
c=None):
self.use_RAM = use_RAM
if train_x is not None:
self.train_x = train_x
self.train_y = train_y
self.valid_x = valid_x
self.valid_y = valid_y
self.test_x = test_x
self.test_y = test_y
assert c is not None
self.c = c
resolution = ['IMG_8', 'IMG_16', 'IMG_32', 'IMG_64', 'IMG_128', 'IMG_256', 'IMG_512']
self._base_key = 'IMG_'
self._base_masks_key = 'IMG_'
if self.use_RAM:
print('Loading Images into RAM...')
self.dataset = h5py.File(self.c.images_dir, 'r', driver='core')
if self.c.use_masks:
self.masks = h5py.File(self.c.masks_dir, 'r', driver='core')
print('Done loading Images into RAM...')
else:
self.dataset = h5py.File(self.c.images_dir, 'r')
if self.c.use_masks:
self.masks = h5py.File(self.c.masks_dir, 'r')
self.chair_full_inds = np.loadtxt(self.c.dining_room_chair_full_inds_dir, dtype=int)
print('{} chairs in the dataset'.format(self.chair_full_inds.shape[0]))
self.chair_labeled_inds = np.loadtxt(self.c.dining_room_chair_labeled_inds_dir, dtype=int)
self._len = {k: len(self.dataset[k]) for k in resolution}
# self.num_data = self._len['data8x8']
self.num_data = self.chair_full_inds.shape[0]
assert all([resol in self.dataset.keys() for resol in resolution])
# Training image inds
# image_ids outside train, valid, test - take % of that
# then concat back with train_image_inds as training_image_inds
# Note: For chairs there is a different mapping scheme than vehicles dataset, so this code is unnecessarily complex
# self.design_ids_of_images = design_ids
self.design_ids_of_images = self.chair_full_inds
train_design_ids, valid_design_ids, test_design_ids = np.unique(self.train_x), np.unique(self.valid_x), np.unique(self.test_x)
# train_bool_array = np.isin(self.design_ids_of_images, train_design_ids)
# self.training_image_inds = np.nonzero(train_bool_array)[0]
# num_training_inds = len(self.training_image_inds)
# labeled_design_ids = np.concatenate((train_design_ids, valid_design_ids, test_design_ids))
        unlabeled_bool_array = ~np.isin(self.chair_full_inds, self.chair_labeled_inds)
#This script combines and transposes CovCountyHospitalTimeSeries.csv and StateTestingTimeSeries.csv
# into MasterTimeSeries.csv: a days x 3142 (number of US counties + DC) time series with variables stored as a proportion of population
#<NAME>
import sys
import time
import numpy as np
import pandas as pd
import datetime
sys.path.append('..')
import lib
#load data
CCHTS = lib.loadCCHTimeSeries()
STTS = lib.loadSTTS()
#get date data
CCHTS_startDate = np.datetime64(((CCHTS.columns).to_numpy()[10])[-10:])
CCHTS_endDate = np.datetime64(((CCHTS.columns).to_numpy()[-1])[-10:])
#create range from start and end date
dateRange = np.arange(CCHTS_startDate, CCHTS_endDate+np.timedelta64(1,'D'), dtype='datetime64[D]')
numDays = len(dateRange)
#get county level data
counties = CCHTS['county'].to_numpy()
county_states = CCHTS['state'].to_numpy()
county_populations = CCHTS['population'].to_numpy()
beds = CCHTS['beds'].to_numpy()
helipads = CCHTS['helipads'].to_numpy()
nonProf = CCHTS['nonProf'].to_numpy()
private = CCHTS['private'].to_numpy()
governm = CCHTS['governm'].to_numpy()
lat = CCHTS['lat'].to_numpy()
lon = CCHTS['lon'].to_numpy()
#grab cases/deaths as a matrix of shape (counties+dc, days in dataset)
casesMatrix = CCHTS.iloc[:,10:10+numDays].to_numpy()
deathsMatrix = CCHTS.iloc[:,10+numDays:10+numDays+numDays].to_numpy()
#calculate variables as a proportion of population
bedsPerPop = beds/county_populations
helipadsPerPop = helipads/county_populations
nonProfPerPop = nonProf/county_populations
privatePerPop = private/county_populations
governmPerPop = governm/county_populations
casesPerPopMatrix = np.zeros(casesMatrix.shape,dtype=np.float64)
deathsPerPopMatrix = np.zeros(deathsMatrix.shape,dtype=np.float64)
np.divide(casesMatrix, county_populations.reshape((len(county_populations),1)), out=casesPerPopMatrix)
np.divide(deathsMatrix, county_populations.reshape((len(county_populations),1)), out=deathsPerPopMatrix)
#get state level data
states = STTS['state'].to_numpy()
state_pop = STTS['population'].to_numpy()
#grab cases/deaths as a matrix of shape (states+dc, days in dataset)
testMatrix = STTS.iloc[:,2:2+numDays].to_numpy()
tPosMatrix = STTS.iloc[:,2+numDays:2+numDays+numDays].to_numpy()
tNegMatrix = STTS.iloc[:,2+numDays+numDays:2+numDays+numDays+numDays].to_numpy()
#calculate variables as a proportion of population
testPerPopMatrix = np.zeros(testMatrix.shape,dtype=np.float64)
tPosPerPopMatrix = np.zeros(tPosMatrix.shape,dtype=np.float64)
tNegPerPopMatrix = np.zeros(tNegMatrix.shape,dtype=np.float64)
np.divide(testMatrix, state_pop.reshape((len(state_pop), 1)), out=testPerPopMatrix)
np.divide(tPosMatrix, state_pop.reshape((len(state_pop), 1)), out=tPosPerPopMatrix)
np.divide(tNegMatrix, state_pop.reshape((len(state_pop), 1)), out=tNegPerPopMatrix)
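#note: reshaping the population vectors to column vectors lets np.divide broadcast across days (row-wise per county/state)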
#get testing variables as shape(counties+dc, days in dataset)
cTestPerPop = np.zeros(casesMatrix.shape, dtype=np.float64)
cTPosPerPop = np.zeros(casesMatrix.shape, dtype=np.float64)
cTNegPerPop = np.zeros(casesMatrix.shape, dtype=np.float64)
countyIndex = np.arange(len(counties), dtype=np.intc)
statesIndex = np.arange(len(states), dtype=np.intc)
start = np.intc(0)
# data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import os
import cv2
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
# img = transform.resize(image,(new_h,new_w),mode='constant')
# lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
img = transform.resize(image,(self.output_size,self.output_size),mode='constant')
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
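# Hedged usage sketch (sizes and the sample dict are illustrative):
# tfm = transforms.Compose([RescaleT(320), RandomCrop(288), ToTensorLab(flag=0)])
# out = tfm({'imidx': np.array([0]), 'image': image, 'label': label})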
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
# #resize the image to new_h x new_w and convert image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant')
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True)
return {'imidx':imidx, 'image':img,'label':lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
image = image/np.max(image)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
        # reorder from HWC to CHW for PyTorch
#transforms.Normalize(mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225))
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label =sample['imidx'], sample['image'], sample['label']
tmpLbl = np.zeros(label.shape)
if(np.max(label)<1e-6):
label = label
else:
label = label/np.max(label)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
            # normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
            tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 05 11:55:37 2018
@author: hugonnet
DDEM LIBRARY:
Library of Python functions for manipulating DEM differences
"""
import os, sys, shutil
import csv
import numpy as np
import pandas as pd
from numpy.polynomial.polynomial import polyfit, polyval
import random
from vectlib import simplify_shp_fn, buffer_shp_fn, inters_shp_fn, union_shp_fn, poi_polygon, isempty_firstfeat, copy_shp_fn, extent_shp_ref
from rastlib import rasterize_shp, proximity_shp, polygonize_fn, write_nanarray, read_nanarray, proximity_rast_fn, pixel_size, create_mem_raster_on_ref
from shlib import create_tmp_dir_for_outfile, remove_tmp_dir_for_outfile
from scipy.ndimage.filters import convolve
import scipy.stats as st
from fillalglib import floodfill_discontinuous
import matplotlib.pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
from subprocess import Popen
from pybob.GeoImg import GeoImg
from pybob.plot_tools import plot_ddem_results, plot_polygon_df
import geopandas as gpd
def ddem_discrete_hypso(ddem,dem,mask,gsd,proxi=None,bin_type='fixed',bin_val=50.,filt='5NMAD'):
final_mask = np.logical_and(np.logical_and(np.isfinite(ddem), np.isfinite(dem)),mask)
dem_on_mask = dem[final_mask]
ddem_on_mask = ddem[final_mask]
if proxi is not None:
proxi_on_mask = proxi[final_mask]
ddem_out = np.copy(ddem)
min_elev = np.min(dem_on_mask) - (np.min(dem_on_mask) % bin_val)
max_elev = np.max(dem_on_mask) + 1
if bin_type == 'fixed':
bin_final = bin_val
elif bin_type == 'percentage':
bin_final = np.ceil(bin_val / 100. * (max_elev - min_elev))
else:
sys.exit('Bin type not recognized.')
bins_on_mask = np.arange(min_elev, max_elev, bin_final)
nb_bin = len(bins_on_mask)
elev_bin = np.zeros(nb_bin)*np.nan
nmad_bin = np.zeros(nb_bin)*np.nan
med_bin = np.zeros(nb_bin)*np.nan
std_bin = np.zeros(nb_bin)*np.nan
area_tot_bin = np.zeros(nb_bin)*np.nan
area_meas_bin = np.zeros(nb_bin)*np.nan
prox = np.zeros(nb_bin)*np.nan
for i in np.arange(nb_bin):
idx_bin = np.array(dem_on_mask >= bins_on_mask[i]) & np.array(
dem_on_mask < (bins_on_mask[i] + bin_final))
idx_orig = np.array(dem >= bins_on_mask[i]) & np.array(
dem < (bins_on_mask[i] + bin_final)) & mask
area_tot_bin[i] = np.count_nonzero(idx_orig)*gsd**2
area_meas_bin[i] = np.count_nonzero(idx_bin)*gsd**2
elev_bin[i] = bins_on_mask[i] + bin_final / 2.
dh_bin = ddem_on_mask[idx_bin]
if proxi is not None:
proxi_bin = proxi_on_mask[idx_bin]
if len(proxi_bin[~np.isnan(proxi_bin)])>0:
prox[i] = np.nanmax(proxi_bin)
if len(dh_bin[~np.isnan(dh_bin)]) > 0:
std_bin[i] = np.nanstd(dh_bin)
med_bin[i] = np.nanmedian(dh_bin)
if filt:
median_temp = np.nanmedian(dh_bin)
MAD_temp = np.nanmedian(np.absolute(dh_bin[~np.isnan(dh_bin)] - median_temp))
NMAD_temp = 1.4826 * MAD_temp
nmad_bin[i] = NMAD_temp
# dh_bin[np.absolute(dh_bin - median_temp) > 5 * NMAD_temp] = np.NaN
ddem_out[idx_orig & np.array(np.absolute(ddem_out - median_temp) > 5 * NMAD_temp)] = np.nan
return ddem_out, elev_bin, med_bin, std_bin, nmad_bin, area_tot_bin, area_meas_bin, prox
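# Hedged usage sketch (input arrays are illustrative; gsd is the ground sampling distance in metres):
# ddem_filt, elev, med, std, nmad, area_tot, area_meas, prox = \
#     ddem_discrete_hypso(ddem, dem, glacier_mask, gsd=30., bin_type='fixed', bin_val=50.)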
def plot_hypso_fit(ddem_masked,dem_masked,elev,med,nmad,std,elevfit,poly_order,pp):
mykeep = np.logical_and(np.isfinite(ddem_masked),np.isfinite(dem_masked))
H = dem_masked[mykeep]
dH = ddem_masked[mykeep]
sampsize = 25000
if H.size > sampsize:
mysamp = np.random.randint(0, H.size, sampsize)
else:
mysamp = np.arange(0, H.size)
newelev=np.arange(min(elev),max(elev),1)
interp_dH = polyval(newelev,elevfit)
first_der=np.polyder(elevfit)
second_der=np.polyder(first_der)
    der1_dH = polyval(newelev,first_der)
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
def cameraCalibration():
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img_size = (gray.shape[1], gray.shape[0])
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Use all found corners and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return mtx, dist
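# Hedged usage sketch (image path is illustrative): calibrate once, then reuse mtx/dist per frame.
# mtx, dist = cameraCalibration()
# undist = undistort(cv2.imread('test_images/test1.jpg'), mtx, dist)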
def undistort(img, mtx, dist):
# Use generated parameters to undistort each image without recalculating coefficients
undistorted = cv2.undistort(img, mtx, dist, None, mtx)
return undistorted
# Define a function that takes an image, number of x and y points,
# camera matrix and distortion coefficients
def perspectiveTransform(img):
# Estimated source and destination values for a bird's eye view of the road
# Values were determined using estimates from two straight line images
h = img.shape[0]
w = img.shape[1]
img_size = (w,h)
mid_offset = 90
# Top left, top right, bottom left, bottom right
src = np.float32([[w/2-mid_offset, 460], [w/2+mid_offset, 460], [0, h-15], [w, h-15]])
dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
# Warp the image using OpenCV warpPerspective()
transformed = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
# Return the resulting image and matrix
return transformed
def inversePerspectiveTransform(img):
# Estimated source and destination values for a bird's eye view of the road
    # Values were determined using estimates from two straight line images
h = img.shape[0]
w = img.shape[1]
img_size = (w,h)
mid_offset = 90
# Top left, top right, bottom left, bottom right
src = np.float32([[w/2-mid_offset, 460], [w/2+mid_offset, 460], [0, h-15], [w, h-15]])
dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
# Given src and dst points, calculate the perspective transform matrix
Minv = cv2.getPerspectiveTransform(dst, src)
# Warp the image using OpenCV warpPerspective()
transformed = cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_LINEAR)
# Return the resulting image and matrix
return transformed
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Stack each channel
color_binary = np.zeros_like(s_channel)
color_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return color_binary
def measure_curvature_real():
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Start by generating our fake example data
# Make sure to feed in your real data instead in your project!
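    # Note: generate_data is assumed to be provided elsewhere; it is not defined in this file.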
ploty, left_fit_cr, right_fit_cr = generate_data(ym_per_pix, xm_per_pix)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
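    # For x = A*y**2 + B*y + C, the radius of curvature at y is (1 + (2*A*y + B)**2)**1.5 / abs(2*A).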
return left_curverad, right_curverad
def find_lane_pixels(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
from coremltools.models.neural_network import flexible_shape_utils
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
        This utility function is used to validate random distribution layers.
        It validates the first 10 moments of the predicted and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=(1, 10, 1, 1),
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
@unittest.skip('TO FIX')
def test_shape_flexibility_enumeration(self):
input_features = [('data', datatypes.Array(*(3,4,6)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [(1, 5, 7), (60, 5, 2), (22, 4, 9), (5, 3, 56)]
flexible_shape_utils.add_multiarray_ndshape_enumeration(spec, feature_name='data', enumerated_shapes=shapes)
shapes.append((3,4,6))
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_batched_mat_mul_cpu(self):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input, expected, useCPUOnly=True,
output_name_shape_dict=shape_dict)
def test_batched_mat_mul_with_transposes_cpu(self):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_batched_mat_mul_single_input_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=True)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_concat_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_like_cpu(self):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_static_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_fill_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_broadcast_to_like_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_broadcast_to_static_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_broadcast_to_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_trigonometry_cpu(self):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=True)
def test_exp2_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_elementwise_binary_cpu(self):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
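                # Align the trailing dims so A and B are broadcast-compatible: at each
                # trailing position both shapes are reset to either 1 or a shared size
                # drawn from the original dims.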
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_elementwise_boolean_unary_cpu(self):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_logical_not_cpu(self):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_stack_cpu(self):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_ceil_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_floor_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_round_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_sign_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
            x = np.random.choice([-np.random.rand(), 0.0, np.random.rand()],
                                 tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_clip_cpu(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_split_nd_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
value = np.random.choice(range(2, 5))
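                # When almost_equal is set, the first `remainder` outputs get one extra
                # element along `axis`, matching how np.array_split distributes an
                # uneven split.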
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
                    output_shapes[-1][axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
) # Explicitly trying to compare against both versions of numpy split
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_split_nd_with_split_sizes_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_slice_static_cpu(self):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
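                # For every dimension draw a random stride, begin/end index and
                # begin/end mask, resampling until the resulting slice is non-empty;
                # `objs` keeps the equivalent Python slice objects used to build the
                # NumPy reference below.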
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                    objs.append(obj)
                    strides.append(stride)
                    begin_masks.append(begin_mask)
                    end_masks.append(end_mask)
                    begin_ids.append(begin_id)
                    end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_slice_dynamic_cpu(self):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                objs.append(obj)
                strides.append(stride)
                begin_masks.append(begin_mask)
                end_masks.append(end_mask)
                begin_ids.append(begin_id)
                end_ids.append(end_id)
# test different number of inputs, from 2 inputs up to 6 inputs
# when num_inputs == 2, begin_ids are inputs, rest are read from parameters
# when num_inputs == 6, all read from inputs, none are read from parameters
for num_inputs in [2, 3, 4, 5, 6]:
x = np.random.rand(*input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
input_names = ['data']
inputs = dict()
inputs['data'] = x
if num_inputs == 2:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids)))]
input_names = ['data', 'begin_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
elif num_inputs == 3:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids)))]
input_names = ['data', 'begin_ids', 'end_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
elif num_inputs == 4:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
elif num_inputs == 5:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides', 'begin_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
elif num_inputs == 6:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks))),
('end_masks', datatypes.Array(len(end_masks)))]
input_names = ['data', 'begin_ids', 'end_ids',
'strides', 'begin_masks', 'end_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
inputs['end_masks'] = np.array(end_masks, dtype=np.int32)
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 2:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 3:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
strides=strides, begin_masks=begin_masks,
end_masks=end_masks)
elif num_inputs == 4:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 5:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_masks=end_masks)
elif num_inputs == 6:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output')
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
@unittest.skip('fix')
def test_tile_cpu(self):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=5, size=rank)
reps = list(np.random.randint(low=1, high=4, size=rank))
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_tile('Tile', 'data', 'output', reps)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.tile(x, reps)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_sliding_windows_cpu(self):
def numpy_sliding_windows(a, np_axis, np_size, np_step):
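            # NumPy reference for the sliding_windows layer: a strided view with an
            # extra window axis inserted right after `np_axis`. The outer axis steps
            # by `np_step` elements, the window axis keeps the original element
            # stride. Since this is a view, the input must not be written to.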
n = (a.shape[np_axis] - np_size) // np_step + 1
shape = list(a.shape)
shape[np_axis] = n
if np_axis < 0:
np_axis += len(shape)
shape.insert(np_axis + 1, np_size)
strides = list(a.strides)
effstride = strides[np_axis] * np_step
strides.insert(np_axis, effstride)
return np.lib.stride_tricks.as_strided(a, shape, strides)
for rank in range(1, 5):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
output_shape = list(input_shape)
window_size = np.random.randint(low=1, high=input_shape[axis])
length = 0
while length <= 0:
step = np.random.randint(low=1, high=input_shape[axis])
length = (input_shape[axis] - window_size) // step + 1
output_shape[axis] = length
pos_axis = axis if axis >= 0 else axis + rank
output_shape.insert(pos_axis + 1, window_size)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sliding_windows('sliding_windows',
input_name='data',
output_name='output',
axis=axis,
window_size=window_size,
step=step)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': numpy_sliding_windows(x, axis, window_size, step)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_range_static_cpu(self):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
for param in params:
start, end, step = param
input_features = [('multiplicative_input', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_range_static('range_static', 'output_range',
end=end, start=start, step=step)
builder.add_multiply_broadcastable(
name='multiply_broadcastable',
input_names=['multiplicative_input', 'output_range'],
output_name='output')
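            # The all-ones 'multiplicative_input' is presumably there only so the
            # network has a regular input to feed; multiplying the generated range
            # by it leaves the values unchanged.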
# save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
inputs = dict()
inputs['multiplicative_input'] = np.ones((1,), dtype=np.float64)
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_range_dynamic_cpu(self):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
# input size == 1: end is input, start and step are read from parameters
# input size == 2: end, start are inputs, step is read from parameters
# input size == 3: start, end, step are all inputs, none of the parameters are used.
for num_inputs in [1, 2, 3]:
for param in params:
inputs = dict()
start, end, step = param
if num_inputs == 1:
input_features = [('end', datatypes.Array(1))]
elif num_inputs == 2:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1))]
elif num_inputs == 3:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1)),
('step', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 1:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end'],
start=start, step=step)
elif num_inputs == 2:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start'],
step=step)
elif num_inputs == 3:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
inputs['step'] = step * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start', 'step'])
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_linear_activation_different_ranks_cpu(self):
for input_dim in [(10, 15), (10, 15, 2, 3),
(10, 2, 4, 15, 1, 4), (6,)]:
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_topk(self):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
K = [3, 5]
axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]]
for ii, input_shape in enumerate(test_input_shapes):
for k in K:
for n_inputs in [1, 2]:
for bottom_k_flag in [False, True]:
for axis in axes[ii]:
for negative_axis in [False, True]:
if negative_axis:
axis = axis - len(input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('values', None), ('indices', None)]
input_names = ['data']
output_names = ['values', 'indices']
if n_inputs == 2:
input_names.append('k_in')
input_features.append(('k_in', datatypes.Array(1)))
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_topk('topk', input_names, output_names,
k=k, axis=axis, use_bottom_k=bottom_k_flag)
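                                # With n_inputs == 2 the 'k_in' input fed below
                                # presumably supplies k at runtime; it carries the
                                # same value, so the NumPy reference is unchanged.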
data = np.random.randint(low=0, high=int(np.prod(input_shape)), size=input_shape)
data = data.astype(np.float32)
input = {'data': data}
if n_inputs == 2:
input['k_in'] = k * np.ones([1], dtype=np.float32)
# numpy reference values
if bottom_k_flag:
ref_indices = np.argsort(data, axis=axis)
else:
ref_indices = np.argsort(-data, axis=axis)
slc = [slice(None)] * len(input_shape)
slc[axis] = slice(0, k)
ref_indices = ref_indices[tuple(slc)]
ref_values = np.take_along_axis(data, ref_indices, axis=axis)
expected = {'values': ref_values, 'indices': ref_indices}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_rank_preserving_reshape(self):
input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)]
target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)]
output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_rank_preserving_reshape(
name='rank_preserving_reshape', input_name='data',
output_name='output', output_shape=target_shapes[i])
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_expand_dims(self):
input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)]
axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)]
output_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (1, 10, 1, 5), (10, 5, 1, 1), (1, 1, 1, 10)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_expand_dims(
name='expand_dims', input_name='data', output_name='output',
axes=axes[i]
)
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_squeeze(self):
input_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1),
(10, 5, 1, 1), (1,), (10, 5, 1, 1), (3, 1, 7)]
axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)]
output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 5), (3, 7)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', axes=list(axes[i]))
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_squeeze_all(self):
input_shapes = [
(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1), (10, 5, 1, 1), (1,),
(10, 5, 1, 1), (3, 1, 7), (3,), (5, 6)
]
for input_shape in input_shapes:
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', squeeze_all=True)
x = np.random.rand(*input_shape)
input = {'data': x}
reference = np.squeeze(x)
if not reference.shape:
reference = np.reshape(reference, (1,))
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_argmax_argmin(self):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
# (1+2+3+4+5) * 2^3 = 120 test cases
for input_shape in test_input_shapes:
for negative_axis in [False, True]:
for mode in ['argmax', 'argmin']:
for keep_dims in [True, False]:
for axis in np.arange(len(input_shape)):
if negative_axis:
axis_val = axis - len(input_shape)
else:
axis_val = axis
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
x = np.random.rand(*input_shape)
if mode == 'argmax':
builder.add_argmax('argmax', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmax(x, axis=axis_val)
else:
builder.add_argmin('argmin', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmin(x, axis=axis_val)
if keep_dims:
np_out = np.expand_dims(np_out, axis=axis_val)
elif len(input_shape) == 1:
np_out = np.expand_dims(np_out, axis=axis_val)
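                            # The rank-1 case above is expanded even without
                            # keep_dims, presumably because the layer never emits a
                            # rank-0 output.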
input = {'data': x}
expected = {'output': np_out}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_get_shape(self):
dims = [1, 2, 3, 4, 5]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_get_shape(name='get_shape_layer', input_name='data',
output_name='output')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': np.array(input_shape)}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
def test_load_constant_nd(self):
dims = [2, 3, 4, 5, 6]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_load_constant_nd('load_const_nd_layer', 'tmp',
constant_value=np.ones(input_shape),
shape=input_shape)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output',
mode='ADD')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': feed['data'] + 1}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
@unittest.skip('fix')
def test_simple_array_alloc_scatter(self):
alloc_shape = [2, 3, 4]
value_shape = [1, 3, 4]
input_features = [('alloc_shape', datatypes.Array(len(alloc_shape))),
('value', datatypes.Array(*value_shape)),
('index', datatypes.Array(1))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic_layer', input_name='alloc_shape',
                                 output_name='array', value=0.0)
# CoreML input order: container (array), indices, slices (value)
builder.add_scatter(name='scatter_layer',
input_names=['array', 'index', 'value'],
output_name='output')
value = np.random.rand(*value_shape).astype('float')
feed = {'alloc_shape': np.array(alloc_shape, dtype='float'),
'value': value,
'index': np.array([1], dtype='float')}
ref = np.zeros(alloc_shape)
ref[1, :, :] = value
expected = {'output': ref}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
def test_erf_activation(self):
input_features = [('data', datatypes.Array(10, 45))]
output_features = [('output', datatypes.Array(10, 45))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_erf(name='erf', input_name='data',
output_name='output')
x = np.random.rand(10, 45)
input = {'data': x}
expected = {
'output': np.asarray([math.erf(i) for i in
x.flatten().tolist()]).reshape(10, 45)
}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_gelu_activation(self):
for mode in ['EXACT', 'TANH_APPROXIMATION', 'SIGMOID_APPROXIMATION']:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_gelu(name='gelu', input_name='data',
output_name='output', mode=mode)
x = np.random.rand(*shape)
input = {'data': x}
exact = np.asarray([0.5 * i * (1.0 + math.erf(i / math.sqrt(2)))
for i in x.flatten().tolist()]).reshape(*shape)
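                # The exact erf-based GELU serves as the reference for all three
                # modes; the tanh/sigmoid approximations presumably stay within the
                # test tolerance.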
expected = {'output': exact}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_lower_triangular_cpu(self):
for rank in range(2, 6):
for k in range(-7, 8):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_lower_triangular('tril', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.tril(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_upper_triangular_cpu(self):
for rank in range(2, 6):
for k in range(-7, 8):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_upper_triangular('triu', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.triu(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_where_broadcastable_cpu(self):
for _ in range(150):
rank_cond = np.random.randint(low=1, high=6)
rank_true = np.random.randint(low=1, high=6)
rank_false = np.random.randint(low=1, high=6)
rank_out = max(rank_cond, rank_true, rank_false)
shape_cond = np.random.randint(low=2, high=8, size=rank_cond)
shape_true = np.random.randint(low=2, high=8, size=rank_true)
shape_false = np.random.randint(low=2, high=8, size=rank_false)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_cond: dims.append(shape_cond[i])
if -i <= rank_true: dims.append(shape_true[i])
if -i <= rank_false: dims.append(shape_false[i])
dim = np.random.choice(dims)
if -i <= rank_cond: shape_cond[i] = np.random.choice([1, dim])
if -i <= rank_true: shape_true[i] = np.random.choice([1, dim])
if -i <= rank_false: shape_false[i] = np.random.choice([1, dim])
input_features = [
('cond', datatypes.Array(*shape_cond)),
('true', datatypes.Array(*shape_true)),
('false', datatypes.Array(*shape_false))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_where_broadcastable('if_broadcastable', input_names=['cond', 'true', 'false'],
output_name='output')
cond = np.random.choice([1.0, 0.0], size=shape_cond)
true = np.random.rand(*shape_true)
false = np.random.rand(*shape_false)
input = {'cond': cond, 'true': true, 'false': false}
expected = {'output': np.where(cond, true, false)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_random_normal_like_cpu(self):
mean, stddev, seed = 0., 1., 42
for rank in range(5, -1, -1):
if rank > 0:
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
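                # Each dim is sized from roughly 1000**(1/rank) to 2000**(1/rank), so
                # the tensor has at least on the order of a thousand elements, enough
                # for comparing the first two sample moments.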
            else:  # one extra case: a large tensor so higher moments can be checked
shape = np.array([10, 10, 10, 10, 10000])
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_like(name='random_normal_like',
input_name='tensor',
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'tensor': np.random.rand(*shape)}
expected = {'output': np.random.normal(mean, stddev, shape)}
if rank > 0:
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
            else:  # the large-tensor case: check higher moments as well
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=6)
def test_random_normal_static_cpu(self):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_static(name='random_normal_static',
output_name='tmp',
output_shape=list(shape),
mean=mean, stddev=stddev, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_normal_dynamic_cpu(self):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_dynamic(name='random_normal_dynamic',
input_names=['shape'],
output_name='output',
mean=mean, stddev=stddev, seed=seed)
            inputs = {'shape': np.array(shape, dtype=float)}
expected = {'output': np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_uniform_like_cpu(self):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_like(name='random_uniform_like',
input_name='tensor',
output_name='output',
minval=minval, maxval=maxval, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_uniform_static_cpu(self):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_static(name='random_uniform_static',
output_name='tmp',
output_shape=list(shape),
minval=minval, maxval=maxval, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_uniform_dynamic_cpu(self):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_dynamic(name='random_uniform_dynamic',
input_names=['shape'],
output_name='output',
minval=minval, maxval=maxval, seed=seed)
            inputs = {'shape': np.array(shape, dtype=float)}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_bernoulli_like_cpu(self):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_like(name='random_bernoulli_like',
input_name='tensor',
output_name='output',
prob=prob, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_bernoulli_static_cpu(self):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_static(name='random_bernoulli_static', output_name='tmp',
output_shape=list(shape), prob=prob, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_random_bernoulli_dynamic_cpu(self):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_dynamic(name='random_bernoulli_dynamic',
input_names=['shape'],
output_name='output',
prob=prob, seed=seed)
            inputs = {'shape': np.array(shape, dtype=float)}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_categorical_distribution_cpu_shapes(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
num_samples = np.random.randint(low=10, high=1000)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name='data',
output_name='output',
num_samples=num_samples)
x = np.random.randint(low=0, high=20, size=shape).astype(np.float32)
inputs = {'data': x}
shape[-1] = num_samples
expected = {'output': np.random.rand(*shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True, validate_shapes_only=True)
def test_categorical_distribution_cpu_logits(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
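        # Strategy: the model's own samples are fed back as `expected`, so the final
        # _test_model call is essentially a shape/consistency check; correctness is
        # asserted below by comparing the sampled class frequencies against the
        # softmax probabilities and a NumPy multinomial draw, within 1e-2.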
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=True,
seed=42)
x = np.random.rand(*shape)
inputs = {input_name: x}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
logits = x.reshape(2, num_class)
probs = [softmax(logits[0]), softmax(logits[1])]
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
            pre0 = np.bincount(np.array(pre0).astype(int), minlength=num_class)
            pre1 = np.bincount(np.array(pre1).astype(int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_categorical_distribution_cpu_probs(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=False,
seed=42)
x = np.random.rand(*shape)
probs = x.reshape(2, num_class)
probs[0], probs[1] = softmax(probs[0]), softmax(probs[1])
inputs = {input_name: np.reshape(probs, shape)}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
probs = probs.reshape(2, num_class)
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
            pre0 = np.bincount(np.array(pre0).astype(int), minlength=num_class)
            pre1 = np.bincount(np.array(pre1).astype(int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_reverse_cpu(self):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
reverse_dim = [np.random.choice([True, False]) for _ in range(rank)]
                axes = [i for i in range(rank) if reverse_dim[i]]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_reverse('reverse', 'data', 'output', reverse_dim)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.flip(x, axis=axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_matrix_band_part_cpu(self):
for rank in range(2, 6):
for _ in range(20):
num_lower = np.random.randint(low=-7, high=8)
num_upper = np.random.randint(low=-7, high=8)
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_matrix_band_part('matrix_band_part', 'data', 'output',
num_lower=num_lower, num_upper=num_upper)
x = np.random.rand(*shape)
input = {'data': x}
rows, cols = shape[-2:]
band = np.ones((rows, cols))
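                # Reference mask: keep element (m, n) if it lies within num_lower
                # sub-diagonals and num_upper super-diagonals of the main diagonal;
                # a negative bound keeps that entire triangle.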
for m in range(rows):
for n in range(cols):
band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and (num_upper < 0 or (n - m) <= num_upper)
expected = {'output': np.multiply(band, x)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_flatten_to_2d_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank + 1):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_flatten_to_2d('flatten_to_2d', 'data', 'output', axis=axis)
x = np.random.rand(*shape)
np_axis = axis + rank if axis < 0 else axis
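                # Reference: dims before the (normalized) axis collapse into the
                # first output dim, the rest into the second, so axis == 0 yields
                # shape (1, prod(shape)).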
pl, pr = 1, 1
for i in range(0, np_axis):
pl *= shape[i]
for i in range(np_axis, len(shape)):
pr *= shape[i]
new_shape = [pl, pr]
ref = x.reshape(new_shape)
input = {'data': x}
expected = {'output': ref}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reshape_like_cpu(self):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
                target_shape[0] = n // np.prod(target_shape)
np.random.shuffle(target_shape)
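                # target_shape is built from divisors of the element count so its
                # product equals n exactly; the leading entry absorbs the remaining
                # factor before the order is shuffled.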
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_like(name='reshape_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_reshape_static_cpu(self):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_static(name='reshape_static',
input_name='data',
output_name='output',
output_shape=target_shape)
data = np.random.rand(*input_shape)
inputs = {'data': data}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_reshape_dynamic_cpu(self):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_dynamic(name='reshape_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True)
def test_reduce_sum_cpu(self):
for rank in range(1, 6):
            axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
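            # axes_list covers every combination of axes of every length, plus None
            # (reduce over all dims); below, each chosen axis is randomly flipped to
            # its negative equivalent to exercise both indexing conventions.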
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sum('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.add.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_prod_cpu(self):
for rank in range(1, 6):
            axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_prod('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.multiply.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_mean_cpu(self):
for rank in range(1, 6):
            axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_mean('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.mean(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_max_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_max('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.maximum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_min_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_min('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.minimum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_l2_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l2('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sqrt(np.sum(np.square(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_l1_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l1('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.abs(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_sumsquare_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sumsquare('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.square(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_logsum_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsum('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(x, axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reduce_logsumexp_cpu(self):
for rank in range(1, 6):
axes_list = [axes for len in range(1, rank + 1) for axes in itertools.combinations(range(rank), len)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsumexp('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(np.exp(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_reverse_sequence_cpu(self):
for rank in range(2, 6):
for i in range(20):
input_shape = np.random.randint(low=2, high=6, size=rank)
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
while pos_batch_axis >= pos_seq_axis:
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
input_features = [('data', datatypes.Array(*input_shape)),
('lengths', datatypes.Array(input_shape[batch_axis]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reverse_sequence('reverse_sequence', ['data', 'lengths'],
'output', batch_axis=batch_axis,
seq_axis=seq_axis)
data = np.random.rand(*input_shape)
lengths = np.random.randint(low=0, high=input_shape[seq_axis], size=input_shape[batch_axis])
input = {'data': data, 'lengths': lengths.astype(np.float32)}
with tf.Graph().as_default(), tf.Session() as sess:
tf_op = tf.reverse_sequence(input=data, seq_lengths=lengths,
seq_axis=pos_seq_axis, batch_axis=pos_batch_axis)
expected = {'output': sess.run(tf_op)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_where_nonzero_cpu(self):
for rank in range(1, 6):
for i in range(10):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_where_nonzero('multi_indices', 'data', 'output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.transpose(np.nonzero(x)).astype(np.float)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_gather_cpu(self):
for rankParams, rankIndices in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for axis in range(-rankParams, rankParams):
shapeParams = np.random.randint(low=2, high=5, size=rankParams)
shapeIndices = np.random.randint(low=2, high=5,
size=rankIndices)
input_shapes = [shapeParams, shapeIndices]
posAxis = axis if axis >= 0 else axis + rankParams
output_shape = list(shapeParams[:posAxis]) + list(
shapeIndices) + list(shapeParams[posAxis + 1:])
if len(output_shape) > 5:
continue
input_names = ['params', 'indices']
input_features = [
('params', datatypes.Array(*input_shapes[0])),
('indices', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_gather(name='gather', input_names=input_names,
output_name='output', axis=axis)
a = np.random.rand(*input_shapes[0])
b = np.random.randint(-shapeParams[axis], shapeParams[axis],
size=shapeIndices)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': np.take(a, b, axis=axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_gather_along_axis_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
for _ in range(5):
params_shape = np.random.randint(low=2, high=8, size=rank)
indices_shape = np.copy(params_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_gather_along_axis('gather_along_axis', ['params', 'indices'], 'output', axis=axis)
a = np.random.rand(*params_shape)
b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': np.take_along_axis(a, b, axis=axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_gather_nd_cpu(self):
for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
params_shape = np.random.randint(low=2, high=8, size=params_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
indices_shape[-1] = np.random.randint(low=1, high=params_rank + 1)
for _ in range(5):
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
output_shape = list(indices_shape[:-1]) + list(params_shape[indices_shape[-1]:])
if len(output_shape) > 5:
continue
builder.add_gather_nd('gather_nd', ['params', 'indices'], 'output')
a = np.random.rand(*params_shape)
indices_list = []
for i in range(indices_shape[-1]):
indices_list.append(np.random.randint(0, params_shape[i], size=indices_shape[:-1]))
indices = np.stack(indices_list, axis=-1)
input = {'params': a, 'indices': indices.astype(np.float)}
with tf.Graph().as_default(), tf.Session() as sess:
tf_op = tf.gather_nd(a, indices)
expected = {'output': sess.run(tf_op)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_scatter_cpu(self):
for ref_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for accumulate_mode in ["UPDATE", "ADD", "SUB", "MUL", "DIV", "MAX", "MIN"]:
for _ in range(5):
ref_shape = np.random.randint(low=2, high=8, size=ref_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
updates_shape = list(indices_shape) + list(ref_shape[1:])
input_features = [('ref', datatypes.Array(*ref_shape)),
('indices', datatypes.Array(*indices_shape)),
('updates', datatypes.Array(*updates_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
if len(updates_shape) > 5:
continue
builder.add_scatter('scatter', ['ref', 'indices', 'updates'], 'output', axis=0,
mode=accumulate_mode)
ref = np.random.rand(*ref_shape)
updates = np.random.rand(*updates_shape)
indices = np.random.randint(0, ref_shape[0], size=indices_shape)
input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}
with tf.Graph().as_default(), tf.Session() as sess:
tf_output = tf.Variable(ref)
sess.run(tf.global_variables_initializer())
if accumulate_mode == "UPDATE":
sess.run(tf.scatter_update(tf_output, indices, updates))
if accumulate_mode == "ADD":
sess.run(tf.scatter_add(tf_output, indices, updates))
if accumulate_mode == "SUB":
sess.run(tf.scatter_sub(tf_output, indices, updates))
if accumulate_mode == "MUL":
sess.run(tf.scatter_mul(tf_output, indices, updates))
if accumulate_mode == "DIV":
sess.run(tf.scatter_div(tf_output, indices, updates))
if accumulate_mode == "MAX":
sess.run(tf.scatter_max(tf_output, indices, updates))
if accumulate_mode == "MIN":
sess.run(tf.scatter_min(tf_output, indices, updates))
expected = {'output': sess.run(tf_output)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_gather_scatter_multiple_axis_cpu(self):
for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for axis in range(-params_rank, params_rank):
for _ in range(5):
params_shape = np.random.randint(low=2, high=8, size=params_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
pos_axis = axis if axis >= 0 else axis + params_rank
output_shape = list(params_shape[:pos_axis]) + list(indices_shape) + list(
params_shape[pos_axis + 1:])
if len(output_shape) > 5:
continue
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_gather('gather', ['params', 'indices'], 'updates', axis=axis)
builder.add_scatter('scatter', ['params', 'indices', 'updates'], 'output', axis=axis, mode='UPDATE')
a = np.random.rand(*params_shape)
b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': a}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_scatter_along_axis_cpu(self):
for rank in range(1, 6):
for axis in range(-rank, rank):
for id in range(5):
ref_shape = np.random.randint(low=2, high=8, size=rank)
indices_shape = np.copy(ref_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
updates_shape = indices_shape
input_features = [('ref', datatypes.Array(*ref_shape)),
('indices', datatypes.Array(*indices_shape)),
('updates', datatypes.Array(*updates_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_scatter_along_axis('scatter_along_axis', ['ref', 'indices', 'updates'], 'output',
axis=axis, mode="UPDATE")
ref = np.random.rand(*ref_shape)
updates = np.random.rand(*updates_shape)
indices = np.random.randint(-ref_shape[axis], ref_shape[axis], size=indices_shape)
input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}
np_output = np.copy(ref)
np.put_along_axis(np_output, indices, updates, axis=axis)
expected = {'output': np_output}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_scatter_nd_cpu(self):
for ref_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(2, 6)]:
ref_shape = np.random.randint(low=2, high=8, size=ref_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
indices_shape[-1] = np.random.randint(low=1, high=ref_rank + 1)
for accumulate_mode in ["UPDATE", "ADD", "SUB"]:
for id in range(20):
updates_shape = list(indices_shape[:-1]) + list(ref_shape[indices_shape[-1]:])
if len(updates_shape) > 5: continue
input_features = [('ref', datatypes.Array(*ref_shape)),
('indices', datatypes.Array(*indices_shape)),
('updates', datatypes.Array(*updates_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_scatter_nd('scatter_nd', ['ref', 'indices', 'updates'], 'output', mode=accumulate_mode)
ref = np.random.rand(*ref_shape)
updates = np.random.rand(*updates_shape)
indices_list = []
for i in range(indices_shape[-1]):
indices_list.append(np.random.randint(0, ref_shape[i], size=indices_shape[:-1]))
indices = np.stack(indices_list, axis=-1)
input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}
with tf.Graph().as_default(), tf.Session() as sess:
tf_output = tf.Variable(ref)
sess.run(tf.global_variables_initializer())
if accumulate_mode == "UPDATE":
sess.run(tf.scatter_nd_update(tf_output, indices, updates))
if accumulate_mode == "ADD":
sess.run(tf.scatter_nd_add(tf_output, indices, updates))
if accumulate_mode == "SUB":
sess.run(tf.scatter_nd_sub(tf_output, indices, updates))
expected = {'output': sess.run(tf_output)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_layer_normalization_cpu(self):
def layer_norm_numpy(x, shapes, gamma_, beta_, eps=1e-5):
axes = [-i - 1 for i, _ in enumerate(shapes)]
num = x - np.mean(x, axis=tuple(axes), keepdims=True)
dem = np.sqrt(np.sum(np.square(num), axis=tuple(axes),
keepdims=True) / np.prod(shapes) + eps)
return num / dem * gamma_ + beta_
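        # Reference layer-norm formula over the trailing `shapes` dimensions:
        # (x - mean(x)) / sqrt(mean((x - mean(x))**2) + eps) * gamma + beta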
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=6, size=rank)
for axis in range(1, len(input_shape) + 1):
norm_shapes = input_shape[-axis:]
                data = np.random.rand(*input_shape)
import numpy as np
import sys
np.set_printoptions(linewidth=np.inf)
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
import math
n_x=30
n_y=30
dx=1.0/n_x
dy=1.0/n_y
Re=100
def momentum_link_coefficients(u_star,u_face,v_face,p,source_x,source_y,A_p,A_e,A_w,A_n,A_s):
D_e=dy/(dx*Re)
D_w=dy/(dx*Re)
D_n=dx/(dy*Re)
D_s=dx/(dy*Re)
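    # D_* are the diffusive conductances across the east/west/north/south faces;
    # the F_* terms computed below are the convective mass fluxes. The link
    # coefficients follow the first-order upwind scheme, e.g. A_e = D_e + max(0, -F_e).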
#interior cells
for i in range(2,n_y):
for j in range(2,n_x):
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j-1]
F_n=dx*v_face[i-1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e-F_w) + (F_n-F_s)
source_x[i,j]=0.5*(p[i,j-1]-p[i,j+1])*dx
source_y[i,j]=0.5*(p[i+1,j]-p[i-1,j])*dy
#left wall
j=1
for i in range(2,n_y):
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1] #left face velocity is initialised to zero
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j] - p[i,j + 1])*dx # P_o - 0.5(P_o+P_e)
source_y[i,j]=0.5*(p[i + 1,j] - p[i - 1,j])*dy
#bottom wall
i=n_y
for j in range(2,n_x):
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1]
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j] #bottom wall v-velocity is already initialised to zero
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=2*D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j - 1] - p[i,j + 1])*dx
source_y[i,j]=0.5*(p[i,j] - p[i - 1,j])*dy #P_o - 0.5(P_o+P_n)
#right wall
j=n_x
for i in range(2,n_y):
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1] #right face velocity is initialised to zero
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j-1] - p[i,j ])*dx #0.5(P_w+P_o)-P_o
source_y[i,j]=0.5*(p[i + 1,j] - p[i - 1,j])*dy
#top wall
i=1
for j in range(2,n_y):
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1]
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=D_w + max(0.0,F_w)
A_n[i,j]=2*D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j - 1] - p[i,j + 1])*dx
source_y[i,j]=0.5*(p[i + 1,j] - p[i,j])*dy #0.5(P_s+P_o) - P_o
#top left corner
i=1
j=1
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1]
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=2*D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j] - p[i,j + 1])*dx # P_o - 0.5(P_o+P_e)
source_y[i,j]=0.5*(p[i + 1,j] - p[i,j])*dy #0.5(P_s+P_o) - P_o
#top right corner
i=1
j=n_x
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1] #right face velocity is initialised to zero
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=2*D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j - 1] - p[i,j])*dx #0.5(P_w+P_o)-P_o
source_y[i,j]=0.5*(p[i + 1,j] - p[i,j])*dy #0.5(P_s+P_o) - P_o
#bottom left corner
i=n_y
j=1
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1] #left face velocity is initialised to zero
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=2*D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j] - p[i,j + 1])*dx # P_o - 0.5(P_o+P_e)
source_y[i,j]=0.5*(p[i,j] - p[i - 1,j])*dy #P_o - 0.5(P_o+P_n)
#bottom right corner
i=n_y
j=n_x
F_e=dy*u_face[i,j]
F_w=dy*u_face[i,j - 1] #right face velocity is initialised to zero
F_n=dx*v_face[i - 1,j]
F_s=dx*v_face[i,j]
A_e[i,j]=2*D_e + max(0.0,-F_e)
A_w[i,j]=2*D_w + max(0.0,F_w)
A_n[i,j]=D_n + max(0.0,-F_n)
A_s[i,j]=D_s + max(0.0,F_s)
A_p[i,j]=A_w[i,j] + A_e[i,j] + A_n[i,j] + A_s[i,j] + (F_e - F_w) + (F_n - F_s)
source_x[i,j]=0.5*(p[i,j - 1] - p[i,j])*dx #0.5(P_w+P_o)-P_o
source_y[i,j]=0.5*(p[i,j] - p[i - 1,j])*dy #P_o - 0.5(P_o+P_n)
return A_p,A_e,A_w,A_n,A_s,source_x,source_y
def solve(u,u_star,A_p,A_e,A_w,A_n,A_s,source_x,alpha,epsilon,max_inner_iteration,l2_norm):
for n in range(1,max_inner_iteration+1):
l2_norm=0
for i in range(1,n_y+1):
for j in range(1,n_x+1):
u[i,j]= alpha*(A_e[i,j]*u[i,j+1] + A_w[i,j]*u[i,j-1] + A_n[i,j]*u[i-1,j] + A_s[i,j]*u[i+1,j] + source_x[i,j])/A_p[i,j] + (1-alpha)*u_star[i,j]
l2_norm+=(u[i,j] - alpha*(A_e[i,j]*u[i,j+1] + A_w[i,j]*u[i,j-1] + A_n[i,j]*u[i - 1,j] + A_s[i,j]*u[i+1,j] +source_x[i,j])/A_p[i,j] - (1-alpha)*u_star[i,j])**2
for i in range(1,n_y+1):
for j in range(1,n_x+1):
l2_norm+=(u[i,j] - alpha*(A_e[i,j]*u[i,j+1] + A_w[i,j]*u[i,j-1] + A_n[i,j]*u[i - 1,j] + A_s[i,j]*u[i+1,j] +source_x[i,j])/A_p[i,j] - (1-alpha)*u_star[i,j])**2
if(n==1):
norm=math.sqrt(l2_norm)
l2_norm=math.sqrt(l2_norm)
if(l2_norm<epsilon):
#print("Converged in ",n, " iterations")
break
return u,norm
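# solve() performs under-relaxed Gauss-Seidel sweeps on the discretised momentum
# (or pressure-correction) equations; `norm`, the residual after the first sweep,
# can be used by the caller to monitor convergence of the outer SIMPLE iterations.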
def face_velocity(u,v,u_face,v_face,p,A_p,alpha_uv):
#uface velocity
for i in range(1,n_y+1):
for j in range(1,n_x):
u_face[i,j]=0.5*(u[i,j] + u[i,j + 1]) + 0.25*alpha_uv*(p[i,j + 1] - p[i,j - 1])*dy/A_p[
i,j] + 0.25*alpha_uv*(p[i,j + 2] - p[i,j])*dy/A_p[i,j + 1]\
- 0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(p[i,j + 1] - p[i,j])*dy
#v face velocity
for i in range(2,n_y+1):
for j in range(1,n_x+1):
v_face[i-1,j]=0.5*(v[i,j] + v[i - 1,j]) + 0.25*alpha_uv*(p[i - 1,j] - p[i + 1,j])*dy/A_p[i,j] + 0.25*alpha_uv*(
p[i - 2,j] - p[i,j])*dy/A_p[i - 1,j]\
- 0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(p[i - 1,j] - p[i,j])*dy
return u_face,v_face
def pressure_correction_link_coefficients(u,u_face,v_face,Ap_p,Ap_e,Ap_w,Ap_n,Ap_s,source_p,A_p,A_e,A_w,A_n,A_s,alpha_uv):
#interior cells
for i in range(2,n_y):
for j in range(2,n_x):
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#top
i=1
for j in range(2,n_x):
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#left
j=1
for i in range(2,n_y):
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#right
j=n_x
for i in range(2,n_y):
Ap_e[i,j]=0
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#bottom
i=n_y
for j in range(2,n_x):
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#top left corner
i=1
j=1
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0
Ap_n[i,j]=0
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#top right corner
i=1
j=n_x
Ap_e[i,j]=0
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0
Ap_s[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i + 1,j])*(dx**2)
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#bottom left corner
i=n_y
j=1
Ap_e[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j + 1])*(dy**2)
Ap_w[i,j]=0
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
#bottom right corner
i=n_y
j=n_x
Ap_e[i,j]=0
Ap_w[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i,j - 1])*(dy**2)
Ap_n[i,j]=0.5*alpha_uv*(1/A_p[i,j] + 1/A_p[i - 1,j])*(dx**2)
Ap_s[i,j]=0
Ap_p[i,j]=Ap_e[i,j] + Ap_w[i,j] + Ap_n[i,j] + Ap_s[i,j]
source_p[i,j]=-(u_face[i,j] - u_face[i,j - 1])*dy - (v_face[i - 1,j] - v_face[i,j])*dx
return Ap_p,Ap_e,Ap_w,Ap_n,Ap_s,source_p
def correct_pressure(p_star,p,p_prime,alpha_p):
p_star=p+alpha_p*p_prime
#BC
#top wall
p_star[0,1:n_x+1]=p_star[1,1:n_x+1]
#left wall
p_star[1:n_y+1,0]=p_star[1:n_y+1,1]
#right wall
p_star[1:n_y+1,n_x+1]=p_star[1:n_y+1,n_x]
#bottom wall
p_star[n_y+1,1:n_x+1]=p_star[n_y,1:n_x+1]
#top left corner
p_star[0,0]=(p_star[1,2]+p_star[0,1]+p_star[1,0])/3
#top right corner
p_star[0,n_x+1]=(p_star[0,n_x]+p_star[1,n_x]+p_star[1,n_x+1])/3
#bottom left corner
p_star[n_y+1,0]=(p_star[n_y,0]+p_star[n_y,1]+p_star[n_y+1,1])/3
#bottom right corner
p_star[n_y+1,n_x+1]=(p_star[n_y,n_x+1]+p_star[n_y+1,n_x]+p_star[n_y,n_x])/3
return p_star
def correct_cell_center_velocity(u,v,u_star,v_star,p_prime,A_p,alpha_uv):
#u velocity
#interior cells
for i in range(1,n_y+1):
for j in range(2,n_x ):
u_star[i,j]= u[i,j] + 0.5*alpha_uv*(p_prime[i,j-1]-p_prime[i,j+1])*dy/A_p[i,j]
#left
j=1
for i in range(1,n_y+1):
u_star[i,j]=u[i,j] + 0.5*alpha_uv*(p_prime[i,j] - p_prime[i,j+1])*dy/A_p[i,j]
#right
j=n_x
for i in range(1,n_y+1):
u_star[i,j]=u[i,j] + 0.5*alpha_uv*(p_prime[i,j-1] - p_prime[i,j])*dy/A_p[i,j]
#v velocity
for i in range(2,n_y):
for j in range(1,n_x+1):
v_star[i,j]=v[i,j] + 0.5*alpha_uv*(p_prime[i+1,j]-p_prime[i-1,j])*dx/A_p[i,j]
#top
i=1
for j in range(1,n_x + 1):
v_star[i,j]=v[i,j] + 0.5*alpha_uv*(p_prime[i + 1,j] - p_prime[i,j])*dx/A_p[i,j]
#bottom
i=n_y
for j in range(1,n_x + 1):
v_star[i,j]=v[i,j] + 0.5*alpha_uv*(p_prime[i,j] - p_prime[i - 1,j])*dx/A_p[i,j]
return u_star,v_star
def correct_face_velocity(u_face,v_face,p_prime,A_p,alpha_uv):
for i in range(1,n_y+1):
for j in range(1,n_x):
u_face[i,j]=u_face[i,j]+ 0.5*alpha_uv*(1/A_p[i,j]+1/A_p[i,j+1])*(p_prime[i,j]-p_prime[i,j+1])*dy
for i in range(2,n_y+1):
for j in range(1,n_x+1):
v_face[i-1,j]=v_face[i-1,j] + 0.5*alpha_uv*(1/A_p[i,j]+1/A_p[i-1,j])*(p_prime[i,j]-p_prime[i-1,j])*dx
return u_face,v_face
def post_processing(u_star,v_star,p_star,X,Y,x,y):
#u velocity contours
plt.figure(1)
plt.contourf(X,Y,np.flipud(u_star),levels=50,cmap='jet')
plt.colorbar()
plt.title('U contours')
plt.show()
#v velocity contours
plt.figure(2)
plt.contourf(X,Y,np.flipud(v_star),levels=50,cmap='jet')
plt.colorbar()
plt.title('V contours' )
plt.show()
#pressure contours
plt.figure(3)
plt.contourf(X,Y,np.flipud(p_star),levels=50,cmap='jet')
plt.colorbar()
plt.title('P contours')
plt.show()
#u centerline velocity
plt.figure(4)
plt.plot(1-y,u_star[:,round(n_x/2)])
plt.xlabel('y')
plt.ylabel('u')
plt.title('U centerline velocity')
plt.show()
#v centerline velocity
plt.figure(5)
plt.plot(x,v_star[round(n_y/2),:])
plt.xlabel('x')
plt.ylabel('v')
plt.title('V centerline velocity')
plt.show()
#Declaring primitive variables
u=np.zeros((n_y+2,n_x+2),dtype=np.float64)
u_star=np.zeros((n_y+2,n_x+2),dtype=np.float64)
v=np.zeros((n_y+2,n_x+2),dtype=np.float64)
v_star=np.zeros((n_y+2,n_x+2),dtype=np.float64)
p_star=np.zeros((n_y+2,n_x+2),dtype=np.float64)
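# Sketch of how the routines above are typically chained in one SIMPLE outer
# iteration (remaining field names such as p, p_prime and the face arrays are
# assumptions, since they are declared after this point in the original script):
#   1. momentum_link_coefficients(...)                  -> momentum coefficients and sources
#   2. solve(u, u_star, ...) and solve(v, v_star, ...)  -> provisional cell-centre velocities
#   3. face_velocity(...)                               -> Rhie-Chow interpolated face velocities
#   4. pressure_correction_link_coefficients(...)       -> pressure-correction system
#   5. solve(p_prime, ...)                              -> pressure correction
#   6. correct_pressure(...), correct_cell_center_velocity(...), correct_face_velocity(...)
#   repeat until the momentum and continuity residuals fall below a tolerance.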
import itk
import numpy as np
from FemurSegmentation.utils import image2array, array2image, cast_image
# TODO add healt check and error/exception handling
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
# TODO allows a customize InputType, not only retrive it from the current image
# TODO allows to not alwaise set the input when you define a filter. This
# will help with the application slice by slice
def erode(image, radius=1, frg_val=1, bkg_val=0):
'''
'''
ImageType = itk.Image[itk.SS, 3]
StructuringElementType = itk.FlatStructuringElement[3]
structuringElement = StructuringElementType.Ball(int(radius))
ErodeFilterType = itk.BinaryErodeImageFilter[ImageType, ImageType, StructuringElementType]
erodeFilter = ErodeFilterType.New()
_ = erodeFilter.SetInput(image)
_ = erodeFilter.SetKernel(structuringElement)
_ = erodeFilter.SetForegroundValue(frg_val)
_ = erodeFilter.SetBackgroundValue(bkg_val)
return erodeFilter
def dilate(image, radius=1, frg_val=1, bkg_val=0):
'''
'''
ImageType = itk.Image[itk.SS, 3]
StructuringElementType = itk.FlatStructuringElement[3]
structuringElement = StructuringElementType.Ball(int(radius))
DilateFilterType = itk.BinaryDilateImageFilter[ImageType, ImageType, StructuringElementType]
dilateFilter = DilateFilterType.New()
_ = dilateFilter.SetInput(image)
_ = dilateFilter.SetKernel(structuringElement)
_ = dilateFilter.SetForegroundValue(frg_val)
_ = dilateFilter.SetBackgroundValue(bkg_val)
return dilateFilter
def binary_threshold(image, upper_thr, lower_thr,
in_value=1, out_val=0, out_type=None) :
'''
    Return a binary image with in_value where the input image voxel value lies in
    ]lower_thr, upper_thr[, out_val otherwise.
    Parameters
    ----------
    image : itk.Image or numpy array
        input image in which to apply the thresholding
    upper_thr : int or float
        upper threshold value
    lower_thr : int or float
        lower threshold value
    in_value : int or float, default : 1
        value used to label the voxels inside ]lower_thr, upper_thr[
    out_val : int or float, default : 0
        value used to label the voxels outside [lower_thr, upper_thr]
    out_type : itk pixel type. Default None
        if specified cast the output voxel type to out_type
Return
------
thr: itk.Image
binary thresholded image
'''
    if isinstance(image, np.ndarray):
array = image.copy()
info = None
else :
array, info = image2array(image)
cond = (array > lower_thr) & (array < upper_thr)
array[cond] = in_value
array[~cond] = out_val
thr = array2image(array, info)
if out_type is not None :
thr = cast_image(thr, out_type)
return thr
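# Illustrative use (the variable name and threshold values below are assumptions,
# not taken from the original project):
#   bone_mask = binary_threshold(ct_image, upper_thr=3000, lower_thr=200,
#                                in_value=1, out_val=0, out_type=itk.SS)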
def itk_threshold_below(image, thr, outside_value=-1024):
'''
'''
PixelType, Dimension = itk.template(image)[1]
ImageType = itk.Image[PixelType, Dimension]
thr_filter = itk.ThresholdImageFilter[ImageType].New()
_ = thr_filter.SetInput(image)
_ = thr_filter.SetOutsideValue(outside_value)
_ = thr_filter.ThresholdBelow(thr)
return thr_filter
def threshold(image, upper_thr, lower_thr, outside_value=-1500, out_type=None):
'''
Assign to all the voxels outside [lower_thr, upper_thr] the value : outside_value
Parameters
----------
image : itk.Image
image to threshold
upper_thr : int, float
upper threshold value
lower_thr : int, float
lower threshold value
    outside_value : int, float
        value to assign to the voxels outside the interval [lower_thr, upper_thr]
    out_type : itk pixel type (e.g. itk.F), default None
        if provided, cast the output image to out_type
Return
------
thr : itk.Image
thresholded image
'''
arr, info = image2array(image)
    cond = (arr < lower_thr) | (arr > upper_thr)  # voxels outside [lower_thr, upper_thr]
arr[cond] = outside_value
thr = array2image(arr, info)
if out_type is not None :
thr = cast_image(thr, out_type)
return thr
def median_filter(image, radius=1) :
'''
'''
PixelType, Dim = itk.template(image)[1]
ImageType = itk.Image[PixelType, Dim]
median_filter = itk.MedianImageFilter[ImageType, ImageType].New()
_ = median_filter.SetInput(image)
_ = median_filter.SetRadius(int(radius))
return median_filter
def connected_components(image, voxel_type=itk.SS) :
'''
'''
ImageType = itk.Image[voxel_type, 3]
cc = itk.ConnectedComponentImageFilter[ImageType, ImageType].New()
_ = cc.SetInput(image)
return cc
def relabel_components(image, offset=1, out_type=None) :
array, info = image2array(image)
max_label = int(array.max())
labels, labels_counts = np.unique(array, return_counts=True)
labels = labels[np.argsort(labels_counts)[::-1]]
labels0 = labels[labels != 0]
new_max_label = offset - 1 + len(labels0)
new_labels0 = np.arange(offset, new_max_label + 1)
    required_type = np.min_scalar_type(new_max_label)
"""Functions that help with data processing for human3.6m"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import copy
import pyximport; pyximport.install()
import data_utils_cvt
import os
def rotmat2euler(R):
"""
Converts a rotation matrix to Euler angles
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/RotMat2Euler.m#L1
Args
R: a 3x3 rotation matrix
Returns
eul: a 3x1 Euler angle representation of R
"""
if R[0, 2] == 1 or R[0, 2] == -1:
# special case
E3 = 0 # set arbitrarily
dlta = np.arctan2(R[0, 1], R[0, 2]);
if R[0, 2] == -1:
E2 = np.pi / 2;
E1 = E3 + dlta;
else:
E2 = -np.pi / 2;
E1 = -E3 + dlta;
else:
E2 = -np.arcsin(R[0, 2])
E1 = np.arctan2(R[1, 2] / np.cos(E2), R[2, 2] / np.cos(E2))
E3 = np.arctan2(R[0, 1] / np.cos(E2), R[0, 0] / np.cos(E2))
eul = np.array([E1, E2, E3]);
return eul
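# Note: the first branch above handles the gimbal-lock case R[0, 2] = +/-1, where
# only the combination of the first and third angles is determined; E3 is then
# fixed to 0 and E1 absorbs the remaining rotation.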
def quat2expmap(q):
"""
Converts a quaternion to an exponential map
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1
Args
q: 1x4 quaternion
Returns
r: 1x3 exponential map
Raises
ValueError if the l2 norm of the quaternion is not close to 1
"""
if (np.abs(np.linalg.norm(q) - 1) > 1e-3):
print(np.linalg.norm(q))
        raise ValueError("quat2expmap: input quaternion is not norm 1")
sinhalftheta = np.linalg.norm(q[1:])
coshalftheta = q[0]
r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps));
theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
theta = np.mod(theta + 2 * np.pi, 2 * np.pi)
if theta > np.pi:
theta = 2 * np.pi - theta
r0 = -r0
r = r0 * theta
return r
def rotmat2quat(R):
"""
Converts a rotation matrix to a quaternion
Matlab port to python for evaluation purposes
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/rotmat2quat.m#L4
Args
R: 3x3 rotation matrix
Returns
q: 1x4 quaternion
"""
rotdiff = R - R.T;
r = np.zeros(3)
r[0] = -rotdiff[1, 2]
r[1] = rotdiff[0, 2]
r[2] = -rotdiff[0, 1]
sintheta = np.linalg.norm(r) / 2;
r0 = np.divide(r, np.linalg.norm(r) + np.finfo(np.float32).eps);
costheta = (np.trace(R) - 1) / 2;
theta = np.arctan2(sintheta, costheta);
q = np.zeros(4)
q[0] = np.cos(theta / 2)
q[1:] = r0 * np.sin(theta / 2)
return q
def rotmat2expmap(R):
return quat2expmap(rotmat2quat(R));
def expmap2rotmat(r):
"""
Converts an exponential map angle to a rotation matrix
Matlab port to python for evaluation purposes
I believe this is also called Rodrigues' formula
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/expmap2rotmat.m
Args
r: 1x3 exponential map
Returns
R: 3x3 rotation matrix
"""
theta = np.linalg.norm(r)
r0 = np.divide(r, max(theta, np.finfo(np.float32).eps))
r0x = np.array([0, -r0[2], r0[1], 0, 0, -r0[0], 0, 0, 0]).reshape(3, 3)
r0x = r0x - r0x.T
R = np.eye(3, 3) + np.sin(theta) * r0x + (1 - np.cos(theta)) * (r0x).dot(r0x);
return R
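# Sanity check (illustrative): for rotation vectors with norm below pi the two maps
# invert each other, e.g.
#   r = np.array([0.1, -0.2, 0.3])
#   np.allclose(rotmat2expmap(expmap2rotmat(r)), r)  # -> True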
def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore, actions, one_hot):
"""Borrowed from SRNN code. Reads a csv file and returns a float32 matrix.
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12
Args
normalizedData: nxd matrix with normalized data
data_mean: vector of mean used to normalize the data
data_std: vector of standard deviation used to normalize the data
dimensions_to_ignore: vector with dimensions not used by the model
actions: list of strings with the encoded actions
one_hot: whether the data comes with one-hot encoding
Returns
origData: data originally used to
"""
T = normalizedData.shape[0]
D = data_mean.shape[0]
origData = np.zeros((T, D), dtype=np.float32)
dimensions_to_use = []
for i in range(D):
if i in dimensions_to_ignore:
continue
dimensions_to_use.append(i)
dimensions_to_use = np.array(dimensions_to_use)
if one_hot:
origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]
else:
origData[:, dimensions_to_use] = normalizedData[:, :]
origData = origData * np.expand_dims(data_std, 0) + np.expand_dims(data_mean, 0)
return origData
def revert_output_format(poses, data_mean, data_std, dim_to_ignore, actions, one_hot):
"""
Converts the output of the neural network to a format that is more easy to
manipulate for, e.g. conversion to other format or visualization
Args
poses: The output from the TF model. A list with (seq_length) entries,
each with a (batch_size, dim) output
Returns
poses_out: A tensor of size (batch_size, seq_length, dim) output. Each
batch is an n-by-d sequence of poses.
"""
# seq_len = len(poses)
# if seq_len == 0:
# return []
#
# batch_size, dim = poses[0].shape
#
# poses_out = np.concatenate(poses)
# poses_out = np.reshape(poses_out, (seq_len, batch_size, dim))
# poses_out = np.transpose(poses_out, [1, 0, 2])
poses_out = poses
poses_out_list = []
for i in xrange(poses_out.shape[0]):
poses_out_list.append(
unNormalizeData(poses_out[i, :, :], data_mean, data_std, dim_to_ignore, actions, one_hot))
return poses_out_list
def readCSVasFloat(filename):
"""
Borrowed from SRNN code. Reads a csv and returns a float matrix.
https://github.com/asheshjain399/NeuralModels/blob/master/neuralmodels/utils.py#L34
Args
filename: string. Path to the csv file
Returns
returnArray: the read data in a float32 matrix
"""
returnArray = []
lines = open(filename).readlines()
for line in lines:
line = line.strip().split(',')
if len(line) > 0:
returnArray.append(np.array([np.float32(x) for x in line]))
returnArray = np.array(returnArray)
return returnArray
def load_data(path_to_dataset, actions):
nactions = len(actions)
trainData = {}
completeData = []
for action_index in np.arange(nactions):
action = actions[action_index]
path='{}/{}'.format(path_to_dataset,action)
count=0
for fn in os.listdir(path):
count=count+1
for examp_index in np.arange(count):
filename = '{}/{}/{}_{}.txt'.format(path_to_dataset, action, action, examp_index+1)
action_sequence = readCSVasFloat(filename)
n, d = action_sequence.shape
even_list = range(0, n, 2)
trainData[(action, examp_index+1, 'downsampling')] = action_sequence[even_list, :]
if len(completeData) == 0:
completeData = copy.deepcopy(action_sequence)
else:
completeData = np.append(completeData, action_sequence, axis=0)
return trainData, completeData
def normalize_data(data, data_mean, data_std, dim_to_use, actions, one_hot):
"""
Normalize input data by removing unused dimensions, subtracting the mean and
dividing by the standard deviation
Args
data: nx99 matrix with data to normalize
data_mean: vector of mean used to normalize the data
data_std: vector of standard deviation used to normalize the data
dim_to_use: vector with dimensions used by the model
actions: list of strings with the encoded actions
one_hot: whether the data comes with one-hot encoding
Returns
data_out: the passed data matrix, but normalized
"""
data_out = {}
nactions = len(actions)
if not one_hot:
# No one-hot encoding... no need to do anything special
for key in data.keys():
data_out[key] = np.divide((data[key] - data_mean), data_std)
data_out[key] = data_out[key][:, dim_to_use]
else:
# TODO hard-coding 99 dimensions for un-normalized human poses
for key in data.keys():
data_out[key] = np.divide((data[key][:, 0:117] - data_mean), data_std)
data_out[key] = data_out[key][:, dim_to_use]
data_out[key] = np.hstack((data_out[key], data[key][:, -nactions:]))
return data_out
def normalization_stats(completeData):
""""
    Also borrowed from SRNN code. Computes mean, stdev and dimensions to ignore.
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/processdata.py#L33
Args
completeData: nx99 matrix with data to normalize
Returns
data_mean: vector of mean used to normalize the data
data_std: vector of standard deviation used to normalize the data
dimensions_to_ignore: vector with dimensions not used by the model
dimensions_to_use: vector with dimensions used by the model
"""
data_mean = np.mean(completeData, axis=0)
data_std = np.std(completeData, axis=0)
dimensions_to_ignore = []
dimensions_to_use = []
dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))
dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))
data_std[dimensions_to_ignore] = 1.0
return data_mean, data_std, dimensions_to_ignore, dimensions_to_use
def revert_output_format_lstm(poses, data_mean, data_std, dim_to_ignore, actions, one_hot):
"""
Converts the output of the neural network to a format that is more easy to
manipulate for, e.g. conversion to other format or visualization
Args
poses: The output from the TF model. A list with (seq_length) entries,
each with a (batch_size, dim) output
Returns
poses_out: A tensor of size (batch_size, seq_length, dim) output. Each
batch is an n-by-d sequence of poses.
"""
seq_len = len(poses)
if seq_len == 0:
return []
batch_size, dim = poses[0].shape
    poses_out = np.concatenate(poses)
import numpy as np
np.random.seed(111)
'''
The data is generated adding noise to the values from y = 0.8x + 2 equation
Therefore the expectation of the auto encoder is to get the values w and b closer to 0.8 and 2 respectively
'''
'''generate random x values'''
X_train = np.random.random((1, 50))[0]
'''get the reference y value'''
y_reference = 0.8*X_train + 2
'''add noise to the reference y value'''
y_train = y_reference + np.sqrt(0.01)*np.random.random((1, 50))[0]
W = np.random.random()
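# Minimal batch gradient-descent sketch for fitting y = W*x + b to the noisy data
# above (the bias variable, learning rate and iteration count are assumptions):
#   b = np.random.random()
#   for _ in range(1000):
#       err = (W * X_train + b) - y_train
#       W -= 0.5 * np.mean(err * X_train)
#       b -= 0.5 * np.mean(err)
# With enough iterations W and b should approach the reference values 0.8 and 2.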
import os
import numpy as np
from rlkit.core.vistools import plot_histogram
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
# her_demos_path = '/u/kamyar/baselines/baselines/her/data_fetch_random_100.npz'
# her_demos_path = '/ais/gobi6/kamyar/oorl_rlkit/expert_demos/her_fetch_pick_and_place/fetch_reach_and_lift/data_for_fetch_reach_and_lift_random_100.npz'
# her_demos_path = '/ais/gobi6/kamyar/oorl_rlkit/expert_demos/her_fetch_pick_and_place/easy_0p01_range_1_goal_high/data_easy_0p01_range_goal_high_prob_1_fetch_pick_and_place_random_100.npz'
# her_demos_path = '/ais/gobi6/kamyar/oorl_rlkit/expert_demos/her_fetch_pick_and_place/easy_0p01_range_1_goal_high/1000_demos/data_easy_0p01_range_goal_high_prob_1_fetch_pick_and_place_random_1000.npz'
her_demos_path = '/ais/gobi6/kamyar/oorl_rlkit/expert_demos/her_fetch_pick_and_place/easy_0p01_range_1_goal_high/1000_clipped_demos/clipped_acts_data_easy_0p01_range_goal_high_prob_1_fetch_pick_and_place_random_1000.npz'
d = np.load(her_demos_path)
rews = []
path_lens = []
for path in d['obs']:
path_rew = 0
# for step in path:
for i in range(50):
step = path[i]
ag = step['achieved_goal']
dg = step['desired_goal']
dist = goal_distance(ag, dg)
if dist > 0.05:
path_rew += -1.0
else:
path_rew += -1.0*dist
rews.append(path_rew)
# path_lens.append(len(path))
path_lens.append(50)
zipped = list(zip(rews, path_lens))
print(zipped)
solved = [t[0] > -1.0*t[1] for t in zipped]
print(solved)
print('%.4f +/- %.4f' % (np.mean(rews), np.std(rews)))
"""
Train a recurrent neural network using minibatch stochastic gradient descent
with the modifications described in
On the difficulty of training recurrent neural networks.
<NAME>, <NAME>, & <NAME>, ICML 2013.
https://github.com/pascanur/trainingRNNs
"""
from __future__ import absolute_import
from __future__ import division
import cPickle as pickle
import datetime
import os
import sys
import numpy as np
import theano
import theano.tensor as T
from . import theanotools
from .rnn import RNN
from .utils import dump
THIS = 'pycog.sgd'
class SGD(object):
"""
Stochastic gradient descent training for RNNs.
"""
@staticmethod
def clip_norm(v, norm, maxnorm):
"""
Renormalize the vector `v` if `norm` exceeds `maxnorm`.
"""
return T.switch(norm > maxnorm, maxnorm*v/norm, v)
def __init__(self, trainables, inputs, costs, regs, x, z, params, save_values,
extras):
"""
Construct the necessary Theano functions.
Parameters
----------
trainables : list
List of Theano variables to optimize.
inputs : [inputs, targets]
Dataset used to train the RNN.
costs : [loss, ...]
`costs[0]` is the loss that is optimized. `costs[1:]` are used for
monitoring only.
regs : Theano variable
Regularization terms to add to costs[0].
x : Theano variable
Hidden unit activities.
z : Theano variable
Outputs.
params : dict
All parameters associated with the training of this network -- this
will be saved as part of the RNN savefile.
save_values : list
List of Theano variables to save.
extras : dict
            Additional information needed by the SGD training algorithm
(specifically, for computing the regularization term) that may not
be needed by other training algorithms (e.g., Hessian-free).
"""
self.trainables = trainables
self.p = params
self.save_values = save_values
# Trainable variable names
self.trainable_names = [tr.name for tr in trainables]
#---------------------------------------------------------------------------------
# Setup
#---------------------------------------------------------------------------------
lambda_Omega = T.scalar('lambda_Omega')
lr = T.scalar('lr')
maxnorm = T.scalar('maxnorm')
bound = T.scalar('bound')
#---------------------------------------------------------------------------------
# Compute gradient
#---------------------------------------------------------------------------------
# Pascanu's trick for getting dL/dxt
# scan_node.op.n_seqs is the number of sequences in the scan
# init_x is the initial value of x at all time points, including x0
scan_node = x.owner.inputs[0].owner
assert isinstance(scan_node.op, theano.scan_module.scan_op.Scan)
npos = scan_node.op.n_seqs + 1
init_x = scan_node.inputs[npos]
g_x, = theanotools.grad(costs[0], [init_x])
# Get into "standard" order, by filling `self.trainables` with
# `None`s if some of the parameters are not trained.
Win, Wrec, Wout, brec, bout, x0 = RNN.fill(self.trainables, self.trainable_names)
# Gradients
g = theanotools.grad(costs[0] + regs, self.trainables)
g_Win, g_Wrec, g_Wout, g_brec, g_bout, g_x0 = RNN.fill(g, self.trainable_names)
#---------------------------------------------------------------------------------
# For vanishing gradient regularizer
#---------------------------------------------------------------------------------
self.Wrec_ = extras['Wrec_'] # Actual recurrent weight
d_f_hidden = extras['d_f_hidden'] # derivative of hidden activation function
#---------------------------------------------------------------------------------
# Regularization for the vanishing gradient problem
#---------------------------------------------------------------------------------
if np.isscalar(self.p['tau']):
alpha = T.scalar('alpha')
else:
alpha = T.vector('alpha')
d_xt = T.tensor3('d_xt') # Later replaced by g_x, of size (time+1), batchsize, N
xt = T.tensor3('xt') # Later replaced by x, of size time, batchsize, N
# Using temporary variables instead of actual x variables
# allows for calculation of immediate derivatives
# Here construct the regularizer Omega for the vanishing gradient problem
# Numerator of Omega (d_xt[1:] returns time X batchsize X N)
# Notice Wrec_ is used in the network equation as: T.dot(r_tm1, Wrec_.T)
num = (1 - alpha)*d_xt[1:] + T.dot(alpha*d_xt[1:], self.Wrec_)*d_f_hidden(xt)
num = (num**2).sum(axis=2)
# Denominator of Omega, small denominators are not considered
# \partial E/\partial x_{t+1}, squared and summed over hidden units
denom = (d_xt[1:]**2).sum(axis=2)
Omega = (T.switch(T.ge(denom, bound), num/denom, 1) - 1)**2
# First averaged across batches (.mean(axis=1)),
# then averaged across all time steps where |\p E/\p x_t|^2 > bound
nelems = T.mean(T.ge(denom, bound), axis=1)
Omega = Omega.mean(axis=1).sum()/nelems.sum()
# tmp_g_Wrec: immediate derivative of Omega with respect to Wrec
# Notice grad is computed before the clone.
# This is critical for calculating the immediate derivative.
tmp_g_Wrec = theanotools.grad(Omega, Wrec)
Omega, tmp_g_Wrec, nelems = theano.clone([Omega, tmp_g_Wrec, nelems.mean()],
replace=[(d_xt, g_x), (xt, x)])
# Add the gradient to the original gradient
g_Wrec += lambda_Omega * tmp_g_Wrec
#---------------------------------------------------------------------------------
# Gradient clipping
#---------------------------------------------------------------------------------
g = []
if 'Win' in self.trainable_names:
g += [g_Win]
g += [g_Wrec, g_Wout]
if 'brec' in self.trainable_names:
g += [g_brec]
if 'bout' in self.trainable_names:
g += [g_bout]
if 'x0' in self.trainable_names:
g += [g_x0]
# Clip
gnorm = T.sqrt(sum([(i**2).sum() for i in g]))
g = [SGD.clip_norm(i, gnorm, maxnorm) for i in g]
g_Win, g_Wrec, g_Wout, g_brec, g_bout, g_x0 = RNN.fill(g, self.trainable_names)
# Pascanu's safeguard for numerical precision issues with float32
new_cond = T.or_(T.or_(T.isnan(gnorm), T.isinf(gnorm)),
T.or_(gnorm < 0, gnorm > 1e10))
if 'Win' in self.trainable_names:
g_Win = T.switch(new_cond, np.float32(0), g_Win)
g_Wrec = T.switch(new_cond, np.float32(0.02)*Wrec, g_Wrec)
g_Wout = T.switch(new_cond, np.float32(0), g_Wout)
if 'brec' in self.trainable_names:
            g_brec = T.switch(new_cond, np.float32(0), g_brec)
import numpy as np
from matplotlib.pylab import plt
import seaborn as sns
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import ot
import ot.plot
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
def find_nearest_index(array,value):
idx = (np.abs(array-value)).argmin()
return idx
sns.set(style='whitegrid', font_scale=1.2)
surfacecolor = 'dodgerblue'
firstcloudcolor = 'k'
secondcloudcolor = 'forestgreen'
#%%
xL = -30; yL = -30;
sigma = 9
sigma2 = 8
bias = 10
res = 3
con = 3
con2 = 32
n = 8
np.random.seed(1)
x1 = np.random.normal(xL+bias,sigma2,n) + 12*con
x2 = np.random.normal(xL,sigma,n)+14
y1 = np.random.normal(yL,sigma2+2,n) + 16
y2 = np.random.normal(yL+bias,sigma,n)+con2
#Define OT
M = ot.dist(np.concatenate((x1[:,np.newaxis],y1[:,np.newaxis]), axis=1), np.concatenate((x2[:,np.newaxis],y2[:,np.newaxis]), axis=1))
M /= M.max()
G0 = ot.emd(np.ones((n,)) / n, np.ones((n,)) / n, M)
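# G0 is the optimal transport plan returned by the exact EMD solver: with uniform
# weights 1/n on both clouds its nonzero entries define a one-to-one pairing between
# the two point sets, which np.where(G0 > 0) recovers for the plots below.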
sns.set_style("dark")
#%%
from matplotlib import cm
import matplotlib.colors as colors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
fig = plt.figure(figsize=(10,8))
#ax = plt.subplot(211)
ax = plt.subplot2grid((9,3), (0,0), colspan=3, rowspan=5, projection='3d')
ax.invert_zaxis()
x = np.linspace(-40, 25, 10)
y = np.linspace(-40, 25, 10)
X, Y = np.meshgrid(x, y)
Z = 1.5+np.random.rand(X.shape[0],X.shape[1])/3.
#ax.contour3D(X, Y, Z, color='k', zorder=-10)
ax.plot_surface(X, Y, Z, cmap=truncate_colormap(cm.Reds, 0.3, 1),
linewidth=0, antialiased=False, vmin=1.5, vmax=1.75, alpha=0.3, zorder=-100)
#ax.contourf(X, Y, Z, cmap=cm.coolwarm,
# antialiased=False,vmin=0.5, vmax=1.8, alpha=0.7, zorder=-100)
plt.xlim(-40,25)
plt.ylim(-40,25)
ax.set_xlabel('$^{\circ}$E', fontsize=18)
ax.set_ylabel('$^{\circ}$N', fontsize=18)
ax.set_zlabel('depth (km)', fontsize=18)
leng = 20
xs = np.linspace(xL+20, x1[1], leng) + 10*np.sin(np.linspace(0,4*np.pi,leng))
ys = np.linspace(yL, y1[1], leng) + 1*np.cos(np.linspace(0,4*np.pi,leng))
zs = np.linspace(0.9, 0, leng)+ 0.1*np.sin(np.linspace(0,2*np.pi,leng))
ax.plot(xs, ys, zs,':', color='k', linewidth = 2, zorder = 10)
#a = Arrow3D([xL+20, x1[0]], [yL+4, y1[0]],
# [1, 0], mutation_scale=20,
# lw=3, arrowstyle="->", color="k", zorder=10)
#ax.add_artist(a)
ax.scatter3D(x1,y1, color=firstcloudcolor,alpha=1, s=50, label='first distribution')
ax.scatter3D(xL+17,yL, [0.9], color='k', marker = 'P', s=255, label = 'release location', zorder=10)
ax.zaxis.set_ticks([1,0])
ax.zaxis.set_ticklabels([1,0])
#ax.tick_params(axis='x',labelbottom=False, labelleft=False, colors='red', width=0)
#ax.tick_params(axis='y',labelbottom=False, labelleft=False, colors='red', width=0)
ax.set_yticks([])
ax.set_xticks([])
plt.title('(a)', fontsize=18)
#%%
ax = plt.subplot2grid((9,3), (6,2), rowspan=3)
plt.plot(np.concatenate((x1[np.newaxis,:],x2[np.where(G0>0)[1]][np.newaxis,:]), axis=0),
         np.concatenate((y1[np.newaxis,:],y2[np.where(G0>0)[1]][np.newaxis,:]), axis=0))
import os
from PIL import Image
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.transforms as mtrans
from matplotlib.ticker import MultipleLocator
from algorithms.aaa_util import calc_overlap
from evaluations import ope_benchmark
from visualizes.draw_tables import get_mean_succ, get_mean_ratio, calc_rank, is_algorithm
plt.rcParams.update({"font.size": 12})
sns.set()
sns.set_style("whitegrid")
LINE_WIDTH = 2
BOX_WIDTH = 10
ANNO_SIZE = 20
def diffmean(x):
return x - np.mean(x)
def draw_pie(
datasets_name,
experts_name,
specific_names,
success_rets,
figsize,
save_dir,
legend=False,
file_name=None,
):
fig, axes = plt.subplots(
nrows=2,
ncols=(len(datasets_name) + 1) // 2,
figsize=figsize,
subplot_kw=dict(aspect="equal"),
)
fig.add_subplot(111, frameon=False)
def label(pct):
if pct > 10:
return f"{pct:.0f}%"
else:
return ""
for i, dataset_name in enumerate(datasets_name):
n_row = i % 2
n_col = i // 2
ax = axes[n_row, n_col]
seq_names = sorted(success_rets[dataset_name][experts_name[0]].keys())
ranks = calc_rank(dataset_name, seq_names, experts_name, success_rets)
lines, _, _ = ax.pie(
[np.sum(rank == 1) / len(seq_names) * 100 for rank in ranks.T],
autopct=label,
radius=1.2,
)
ax.set_title(dataset_name)
# hide tick and tick label of the big axes
plt.tick_params(
labelcolor="none",
which="both",
top=False,
bottom=False,
left=False,
right=False,
)
plt.grid(False)
if legend:
fig.legend(
lines,
specific_names,
frameon=False,
loc="center left",
bbox_to_anchor=(0, 0.5),
)
if file_name is None:
file_name = "rank_legend" if legend else "rank"
plt.subplots_adjust(wspace=-0.2, hspace=0.2)
plt.savefig(save_dir / f"{file_name}.pdf", bbox_inches="tight")
plt.close()
def draw_curves(
datasets,
algorithm_name,
experts_name,
color_map,
success_rets,
precision_rets,
figsize,
save_dir,
file_name=None,
):
trackers_name = experts_name + [algorithm_name]
fig, axes = plt.subplots(nrows=2, ncols=len(datasets), figsize=figsize)
fig.add_subplot(111, frameon=False)
lines = []
for i, dataset_name in enumerate(datasets):
# draw success plot
ax = axes[0, i]
# draw curve of trackers
        thresholds = np.arange(0, 1.05, 0.05)
import json
import os
import pickle
from operator import itemgetter
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.models import SBMEstimator
from graspy.simulations import p_from_latent, sample_edges, sbm
from graspy.utils import binarize
def hardy_weinberg(theta):
"""
Maps a value from [0, 1] to the hardy weinberg curve.
"""
hw = [theta ** 2, 2 * theta * (1 - theta), (1 - theta) ** 2]
return np.array(hw).T
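# Example (illustrative): hardy_weinberg(0.5) -> array([0.25, 0.5, 0.25]); for a
# vector of thetas the result has one row of genotype probabilities per entry,
# and each row sums to 1.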
def gen_hw_graph(n_verts):
thetas = np.random.uniform(0, 1, n_verts)
latent = hardy_weinberg(thetas)
p_mat = p_from_latent(latent, rescale=False, loops=False)
graph = sample_edges(p_mat, directed=True, loops=False)
return (graph, p_mat)
def compute_rss(estimator, graph):
"""Computes RSS, matters whether the estimator is directed
    Parameters
    ----------
    estimator : graspy estimator object
        Fitted estimator exposing ``p_mat_`` and ``directed`` attributes.
    graph : np.ndarray
        Observed adjacency matrix to compare against the predicted probabilities.
    Returns
    -------
    float
        Residual sum of squares over the compared entries (upper triangle only
        when the estimator is undirected).
    """
graph = graph.copy()
p_mat = estimator.p_mat_.copy()
if not estimator.directed:
inds = np.triu_indices_from(p_mat)
p_mat = p_mat[inds]
graph = graph[inds]
diff = (p_mat - graph) ** 2
rss = np.sum(diff)
return rss
def compute_mse(estimator, graph):
"""
Matters whether the estimator is directed
"""
rss = compute_rss(estimator, graph)
if not estimator.directed: # REVIEW double check that this is right
size = graph.shape[0] * (graph.shape[0] - 1) / 2
else:
size = graph.size - graph.shape[0]
return rss / size
def compute_log_lik(estimator, graph, c=0):
"""This is probably wrong right now"""
p_mat = estimator.p_mat_.copy()
graph = graph.copy()
inds = np.triu_indices(graph.shape[0])
p_mat = p_mat[inds]
graph = graph[inds]
p_mat[p_mat < c] = c
p_mat[p_mat > 1 - c] = 1 - c
successes = np.multiply(p_mat, graph)
failures = np.multiply((1 - p_mat), (1 - graph))
likelihood = successes + failures
return np.sum(np.log(likelihood))
def _n_to_labels(n):
n_cumsum = n.cumsum()
labels = np.zeros(n.sum(), dtype=np.int64)
for i in range(1, len(n)):
labels[n_cumsum[i - 1] : n_cumsum[i]] = i
return labels
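# Example (illustrative): _n_to_labels(np.array([2, 3])) -> array([0, 0, 1, 1, 1]),
# i.e. a vector of block sizes is expanded into a per-vertex block-label vector.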
def gen_B(n_blocks, a=0.1, b=0.2, assortivity=4):
B_mat = np.random.uniform(a, b, size=(n_blocks, n_blocks))
B_mat -= np.diag(np.diag(B_mat))
B_mat += np.diag(np.random.uniform(assortivity * a, assortivity * b, size=n_blocks))
return B_mat
def gen_sbm(n_verts, n_blocks, B_mat):
ps = np.array(n_blocks * [1 / n_blocks])
n_vec = np.random.multinomial(n_verts, ps)
graph = sbm(n_vec, B_mat, directed=False, loops=False)
labels = _n_to_labels(n_vec)
return graph, labels
def run_to_df(file_path):
out = get_json(file_path)
result = out["result"]
if "py/tuple" in result:
dfs = []
for elem in result["py/tuple"]:
df = pd.DataFrame.from_dict(elem["values"])
dfs.append(df)
return dfs
else:
print(result["values"][:100])
return pd.DataFrame.from_dict(result["values"])
def get_json(file_path):
f = open(str(file_path), mode="r")
out = json.load(f)
f.close()
return out
def compute_mse_from_assignments(assignments, graph, directed=True, loops=False):
estimator = SBMEstimator(loops=loops, directed=directed)
estimator.fit(graph, y=assignments)
return compute_mse(estimator, graph)
def get_best_df(input_df):
"""super hard coded right now (e.g. column names)
Parameters
----------
df : dataframe
[description]
Returns
-------
[type]
[description]
"""
param_df = input_df[input_df["sim_ind"] == 0]
labels = ["n_block_try", "n_components_try", "mse"]
param_df = param_df.loc[:, labels]
param_df["best_sim"] = 0
param_df["best_ind"] = 0
for i in range(50):
df = input_df[input_df["sim_ind"] == i]
for j, row in df.iterrows():
temp_df = param_df.loc[
(param_df[labels[0]] == row[labels[0]])
& (param_df[labels[1]] == row[labels[1]])
]
ind = temp_df.index
if row["mse"] <= param_df.loc[ind, "mse"].values[0]:
param_df.loc[ind, "mse"] = row["mse"]
param_df.loc[ind, "best_sim"] = row["sim_ind"]
param_df.loc[ind, "best_ind"] = j
best_df = input_df.loc[param_df["best_ind"].values, :]
return best_df
def get_best_df2(input_df):
"""super hard coded right now (e.g. column names)
Parameters
----------
df : dataframe
[description]
Returns
-------
[type]
[description]
"""
# param_df = input_df[input_df["sim_ind"] == 0]
labels = ["n_block_try", "mse"]
param_df = pd.DataFrame()
# param_df = param_df.loc[:, labels]
param_df["n_block_try"] = np.unique(input_df["n_block_try"].values)
param_df["best_sim"] = 0
param_df["best_ind"] = 0
param_df["mse"] = np.inf
for i in range(max(input_df["sim_ind"].values) + 1):
df = input_df[input_df["sim_ind"] == i]
for j, row in df.iterrows():
temp_df = param_df.loc[(param_df[labels[0]] == row[labels[0]])]
ind = temp_df.index
if row["mse"] <= param_df.loc[ind, "mse"].values[0]:
param_df.loc[ind, "mse"] = row["mse"]
param_df.loc[ind, "best_sim"] = row["sim_ind"]
param_df.loc[ind, "best_ind"] = j
best_df = input_df.loc[param_df["best_ind"].values, :]
return best_df
def get_best_df3(input_df):
"""super hard coded right now (e.g. column names)
Parameters
----------
df : dataframe
[description]
Returns
-------
[type]
[description]
"""
param_df = input_df[input_df["sim_ind"] == 0]
labels = ["n_block_try", "rank_try", "mse"]
param_df = param_df.loc[:, labels]
param_df["best_sim"] = 0
param_df["best_ind"] = 0
for i in range(50):
df = input_df[input_df["sim_ind"] == i]
for j, row in df.iterrows():
temp_df = param_df.loc[
(param_df[labels[0]] == row[labels[0]])
& (param_df[labels[1]] == row[labels[1]])
]
ind = temp_df.index
if row["mse"] <= param_df.loc[ind, "mse"].values[0]:
param_df.loc[ind, "mse"] = row["mse"]
param_df.loc[ind, "best_sim"] = row["sim_ind"]
param_df.loc[ind, "best_ind"] = j
best_df = input_df.loc[param_df["best_ind"].values, :]
return best_df
def load_config(path, experiment, run):
exp_path = Path(path)
exp_path = exp_path / experiment
exp_path = exp_path / str(run)
run_path = exp_path / "run.json"
config_path = exp_path / "config.json"
config = get_json(config_path)
print(f"Experiment: {experiment}")
print(f"Run: {run}")
print(f"Path: {run_path}")
print()
print("Experiment configuration:")
print()
for key, value in config.items():
print(key)
print(value)
return config
def load_run(path, experiment, run):
exp_path = Path(path)
exp_path = exp_path / experiment
exp_path = exp_path / str(run)
run_path = exp_path / "run.json"
try:
dfs = run_to_df(run_path)
return dfs
except:
print("Could not find df in run")
def load_pickle(path, experiment, run, name="master_out_df"):
exp_path = Path(path)
exp_path = exp_path / experiment
exp_path = exp_path / str(run)
exp_path = exp_path / str(name + ".pickle")
with open(exp_path, "rb") as f:
data = pickle.load(f)
return data
def save_obj(obj, fso, name):
path = fso.dir
path = Path(path)
path = path / str(name + ".pickle")
with open(path, "wb") as file:
pickle.dump(obj, file)
print(f"Saved to {path}")
def get_best(df, param_name="param_n_components", score_name="mse", small_better=True):
param_range = np.unique(df[param_name].values)
best_rows = []
for param_value in param_range:
temp_df = df[df[param_name] == param_value]
if small_better:
ind = temp_df[score_name].idxmin() # this is the metric we are choosing on
else:
ind = temp_df[score_name].idxmax()
best_rows.append(temp_df.loc[ind, :])
return pd.DataFrame(best_rows)
def get_subgraph(graph, feature, key):
"""return the subgraph of a networkx object
    based on the node attribute `feature` being equal to `key`.
    Parameters
    ----------
    graph : networkx.Graph
        Graph whose nodes carry metadata.
    feature : str
        Name of the node attribute to filter on.
    key : object
        Value the attribute must take for a node to be kept.
    Returns
    -------
    networkx.Graph
        Subgraph view induced on the matching nodes.
"""
sub_nodes = [node for node, meta in graph.nodes(data=True) if meta[feature] == key]
return graph.subgraph(sub_nodes)
def to_simple_class(classes):
if not isinstance(classes, (list, np.ndarray)):
classes = [classes]
name_map = {
"CN": "C/LH",
"DANs": "I",
"KCs": "KC",
"LHN": "C/LH",
"LHN; CN": "C/LH",
"MBINs": "I",
"MBON": "O",
"MBON; CN": "O",
"OANs": "I",
"ORN mPNs": "P",
"ORN uPNs": "P",
"tPNs": "P",
"vPNs": "P",
"Unidentified": "U",
"Other": "U",
}
simple_classes = np.array(itemgetter(*classes)(name_map))
return simple_classes
def meta_to_array(graph, key, nodelist=None):
# if nodelist is not None:
# nodelist_map = dict(zip(nodelist, range(len(nodelist))))
# data = np.zeros(len(graph), dtype=)
# for node, meta in graph.nodes(data=True):
# node_ind = nodelist_map[node]
# data[node_ind] = meta
data = [meta[key] for node, meta in graph.nodes(data=True)]
return np.array(data)
def get_simple(graph):
classes = meta_to_array(graph, "Class")
simple_classes = to_simple_class(classes)
return simple_classes
def savefig(
name,
fmt="png",
foldername=None,
subfoldername="figs",
pathname="./maggot_models/notebooks/outs",
bbox_inches="tight",
pad_inches=0.5,
**kws,
):
path = Path(pathname)
if foldername is not None:
path = path / foldername
if not os.path.isdir(path):
os.mkdir(path)
if subfoldername is not None:
path = path / subfoldername
if not os.path.isdir(path):
os.mkdir(path)
plt.savefig(
path / str(name + "." + fmt),
        format=fmt,
facecolor="w",
bbox_inches=bbox_inches,
pad_inches=pad_inches,
**kws,
)
def relabel(labels):
"""
Remaps integer labels based on who is most frequent
"""
uni_labels, uni_inv, uni_counts = np.unique(
labels, return_inverse=True, return_counts=True
)
sort_inds = np.argsort(uni_counts)[::-1]
new_labels = range(len(uni_labels))
uni_labels_sorted = uni_labels[sort_inds]
relabel_map = dict(zip(uni_labels_sorted, new_labels))
new_labels = np.array(itemgetter(*labels)(relabel_map))
return new_labels
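# Example (illustrative): relabel([5, 5, 5, 2, 2, 9]) -> array([0, 0, 0, 1, 1, 2]);
# the most frequent original label is mapped to 0, the next most frequent to 1, etc.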
def unique_by_size(data):
"""Equivalent to np.unique but returns data in order sorted by frequency of values
Parameters
----------
data : np.ndarray
array on which to find unique values
Returns
-------
np.ndarray
unique elements in `data` sorted by frequency, with the most observations first
np.ndarray
counts of the unique elements in `data`
"""
unique_data, counts = np.unique(data, return_counts=True)
sort_inds = np.argsort(counts)[::-1] # reverse order to get largest class first
unique_data = unique_data[sort_inds]
counts = counts[sort_inds]
return unique_data, counts
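# Example (illustrative): unique_by_size(np.array([1, 1, 2, 2, 2, 3])) returns
# (array([2, 1, 3]), array([3, 2, 1])), i.e. values ordered from most to least frequent.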
def export_skeleton_json(
name,
ids,
colors,
palette="tab10",
foldername=None,
subfoldername="jsons",
pathname="./maggot_models/notebooks/outs",
multiout=False,
):
""" Take a list of skeleton ids and output as json file for catmaid
Parameters
----------
name : str
filename to save output
ids : list or array
skeleton ids
colors : list or array
either a hexadecimal color for each skeleton or a label for each skeleton to be
colored by palette
palette : str or None, optional
if not None, this is a palette specification to use to color skeletons
"""
og_colors = colors.copy()
uni_labels = np.unique(colors)
n_labels = len(uni_labels)
if palette is not None:
pal = sns.color_palette(palette, n_colors=n_labels)
pal = pal.as_hex()
uni_labels = [int(i) for i in uni_labels]
colormap = dict(zip(uni_labels, pal))
colors = np.array(itemgetter(*colors)(colormap))
opacs = np.array(len(ids) * [1])
path = Path(pathname)
if foldername is not None:
path = path / foldername
if not os.path.isdir(path):
os.mkdir(path)
if subfoldername is not None:
path = path / subfoldername
if not os.path.isdir(path):
os.mkdir(path)
if multiout:
for l in uni_labels:
filename = path / str(name + "_" + str(l) + ".json")
inds = np.where(og_colors == l)[0]
spec_list = [
{"skeleton_id": int(i), "color": str(c), "opacity": float(o)}
for i, c, o in zip(ids[inds], colors[inds], opacs[inds])
]
with open(filename, "w") as fout:
json.dump(spec_list, fout)
else:
spec_list = [
{"skeleton_id": int(i), "color": str(c), "opacity": float(o)}
for i, c, o in zip(ids, colors, opacs)
]
filename = path / str(name + ".json")
with open(filename, "w") as fout:
json.dump(spec_list, fout)
if palette is not None:
return (spec_list, colormap, pal)
else:
return spec_list
def shuffle_edges(A):
n_verts = A.shape[0]
A_fake = A.copy().ravel()
np.random.shuffle(A_fake)
A_fake = A_fake.reshape((n_verts, n_verts))
return A_fake
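# Example (illustrative): shuffle_edges randomly permutes every entry of the
# adjacency matrix (including the diagonal), preserving the multiset of edge
# weights while destroying any block or degree structure -- useful as a simple
# null model for comparison.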
def get_sbm_prob(adj, labels):
uni_labels, counts = np.unique(labels, return_counts=True)
label_map = dict(zip(uni_labels, range(len(uni_labels))))
y = np.array(itemgetter(*labels)(label_map))
sbm = SBMEstimator(directed=True, loops=True)
sbm.fit(binarize(adj), y=y)
data = sbm.block_p_
sort_inds = np.argsort(counts)[::-1]
uni_labels = uni_labels[sort_inds]
data = data[np.ix_(sort_inds, sort_inds)]
prob_df = pd.DataFrame(columns=uni_labels, index=uni_labels, data=data)
return prob_df
from graspy.utils import cartprod
def _get_block_indices(y):
"""
y is a length n_verts vector of labels
returns a length n_verts vector in the same order as the input
indicates which block each node is
"""
block_labels, block_inv, block_sizes = np.unique(
y, return_inverse=True, return_counts=True
)
n_blocks = len(block_labels)
block_inds = range(n_blocks)
block_vert_inds = []
for i in block_inds:
# get the inds from the original graph
inds = np.where(block_inv == i)[0]
block_vert_inds.append(inds)
return block_vert_inds, block_inds, block_inv
def _calculate_block_p(
graph, block_inds, block_vert_inds, return_counts=False, use_weights=False
):
"""
graph : input n x n graph
block_inds : list of length n_communities
block_vert_inds : list of list, for each block index, gives every node in that block
return_counts : whether to calculate counts rather than proportions
"""
n_blocks = len(block_inds)
block_pairs = cartprod(block_inds, block_inds)
    block_p = np.zeros((n_blocks, n_blocks))
import RLlib
import numpy as np
from copy import deepcopy
def trainAgents(critic, actor, reward, x0Max, episodes, T, offline_training=False, breakFlag=False, disturber=None, trace_P=False):
""" """
Noise = RLlib.NoiseClass()
trainer = RLlib.Trainer(critic, actor, reward, Noise.LPFNNoise, breakFlag, disturber)
L = []
Plist = []
for ep in range(episodes):
x0 = [np.random.uniform(-x0Max, x0Max), 0]
if offline_training:
loss = trainer.offline_train(x0, T)
else:
(_, _, _, loss) = trainer.online_train(x0, T, return_E=True)
if trace_P:
Plist.append(deepcopy(critic.P.detach().numpy()))
trainer.sigma0 = trainer.get('sigma0')/(1 + 99*ep/episodes)
        L.append(float(np.mean(loss)))
import numpy as np
import sys
import time
from sklearn.model_selection import KFold
from scipy.spatial.distance import cdist
from sklearn.base import BaseEstimator
from scipy.linalg import cholesky, solve_triangular, svd
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from gaussian_process import GaussianProcess
from pygrid import PyGrid
def train_job(train_i, test_i, gamma, alpha, y_, dist):
K_train = -gamma * dist[np.ix_(train_i, train_i)]
np.exp(K_train, K_train)
K_test = -gamma * dist[np.ix_(test_i, train_i)]
np.exp(K_test, K_test)
K_train.flat[::K_train.shape[0] + 1] += alpha
try:
L_ = cholesky(K_train, lower=True)
x = solve_triangular(L_, y_[train_i], lower=True)
dual_coef_ = solve_triangular(L_.T, x)
pred_mean = np.dot(K_test, dual_coef_)
e = np.mean((pred_mean - y_[test_i]) ** 2, 0)
except np.linalg.LinAlgError:
e = np.inf
return e
def get_alpha_add(n_basis, n_grid, delta, v):
alpha_add = np.pi * ((np.arange(n_basis / 2) / (n_grid * delta))**2 + v**2)/v
alpha_add = np.repeat(alpha_add, 2)
return alpha_add
class MultivariateGaussianProcessCV(BaseEstimator):
def __init__(self, krr_param_grid=None, cv=5, n_components=None, single_combo=False,
verbose=0, copy_X=True, n_jobs=None, cluster_params=[],
v=None, n_basis=None, n_grid=None, delta=None):
self.krr_param_grid = krr_param_grid
self.verbose = verbose
self.cv = cv
self.n_components = n_components
self.single_combo = single_combo
self.copy_X = copy_X
self.n_jobs = n_jobs
self.cluster_params = cluster_params
self.n_grid = n_grid
self.delta = delta
self.n_basis = n_basis
        if self.krr_param_grid is not None and 'v' in self.krr_param_grid and not single_combo:
raise ValueError('Can only add to alpha if single_combo=True')
def score(self, y_true, y_pred):
        return np.mean((y_true - y_pred) ** 2)
# Run a classification experiment
# implements model fitting with equalized odds and demonstrates
# how to use equlized coverage for unbiased uncertainty estimation
# We rely on the nonconformist package and CQR package, available at
# https://github.com/donlnz/nonconformist
# https://github.com/yromano/cqr
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.getcwd() + '/others/third_party/fairness_aware_learning')))
sys.path.append(os.path.abspath(os.path.join(os.getcwd() + '/others/third_party/cqr')))
base_path = os.getcwd() + '/data/'
import torch
import random
import get_dataset
import numpy as np
import pandas as pd
from others import adv_debiasing
from others import continuous_fairness
from fair_dummies import utility_functions
from fair_dummies import fair_dummies_learning
from nonconformist.nc import ClassifierNc
from nonconformist.cp import IcpClassifier
from nonconformist.base import ClassifierAdapter
from nonconformist.nc import InverseProbabilityErrFunc
pd.set_option('precision', 4)
# Get length
def get_length(Y,predicted_C):
length = sum(predicted_C)
return length
# Get coverage
def get_coverage(Y,predicted_C):
coverage = int( predicted_C[int(Y)] == True )
return coverage
# Get coverage and length
def get_stat(Y,predicted_C):
coverage = int( predicted_C[int(Y)] == True )
length = sum(predicted_C)
return coverage, length
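# Example (illustrative): with a per-class inclusion indicator
# predicted_C = [True, False, True, False] and true label Y = 2, get_stat
# returns coverage = 1 (class 2 is inside the prediction set) and length = 2.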
def class_compute_coverage_len(y_test, y_set):
results = [get_stat(y_test[test],y_set[test]) for test in range(len(y_test))]
results = list(zip(*results))
coverage = pd.DataFrame([row for row in results[0]])
length = pd.DataFrame([r for r in results[1]])
return coverage.mean().values[0], length.mean().values[0]
def class_compute_coverage_per_sample(y_test,y_set,significance,x_test=None,condition=None):
if condition is not None:
category_map = np.array([condition((x_test[i, :], y_test[i])) for i in range(y_test.size)])
categories = np.unique(category_map)
        coverage = np.empty(len(categories), dtype=object)
        length = np.empty(len(categories), dtype=object)
cnt = 0
for cond in categories:
tmp = np.arange(len(y_test))
idx = tmp[category_map == cond]
coverage[cnt] = [get_coverage(y_test[idx[test]],y_set[idx[test],:]) for test in range(len(idx))]
coverage_avg = np.sum( coverage[cnt] ) / len(y_test[idx]) * 100
print("Group %d : Percentage in the range (expecting %.2f): %f" % (cond, 100 - significance*100, coverage_avg))
sys.stdout.flush()
length[cnt] = [get_length(y_test[idx[test]],y_set[idx[test]]) for test in range(len(idx))]
print("Group %d : Average length: %f" % (cond, np.mean(length[cnt])))
sys.stdout.flush()
cnt = cnt + 1
else:
        raise ValueError("a 'condition' function must be provided")
return coverage, length
def run_experiment(cur_test_method,
cur_dataset_name,
cur_batch_size,
cur_lr_loss,
cur_lr_dis,
cur_loss_steps,
cur_dis_steps,
cur_mu_val,
cur_epochs,
cur_model_type,
cur_random_state,
cur_second_scale,
num_experiments):
method = cur_test_method
seed = cur_random_state
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
dataset = cur_dataset_name
batch_size = cur_batch_size
# step size to minimize loss
lr_loss = cur_lr_loss
# step size used to fit bianry classifier (discriminator)
lr_dis = cur_lr_dis
# inner epochs to fit loss
loss_steps = cur_loss_steps
# inner epochs to fit binary classifier (discriminator)
dis_steps = cur_dis_steps
# total number of epochs
epochs = cur_epochs
# utility loss
cost_pred = torch.nn.CrossEntropyLoss()
model_type = cur_model_type
metric = "equalized_odds"
print(dataset)
print(method)
sys.stdout.flush()
avg_length_0 = np.zeros(num_experiments)
avg_length_1 = np.zeros(num_experiments)
avg_coverage_0 = np.zeros(num_experiments)
avg_coverage_1 = np.zeros(num_experiments)
avg_p_val = np.zeros(num_experiments)
pred_error = np.zeros(num_experiments)
for i in range(num_experiments):
# Split into train and test
X, A, Y, X_cal, A_cal, Y_cal, X_test, A_test, Y_test = get_dataset.get_train_test_data(base_path, dataset, seed+i)
in_shape = X.shape[1]
num_classes = len(np.unique(Y))
print("n train = " + str(X.shape[0]) + " p = " + str(X.shape[1]))
print("n calibration = " + str(X_cal.shape[0]))
print("n test = " + str(X_test.shape[0]))
sys.stdout.flush()
if method == "AdversarialDebiasing":
class ClassAdapter(ClassifierAdapter):
def __init__(self, model=None,fit_params=None, params=None):
super(ClassAdapter, self).__init__(model,fit_params)
# Instantiate model
self.learner = adv_debiasing.AdvDebiasingClassLearner(lr_loss,
loss_steps,
dis_steps,
epochs,
cost_pred,
in_shape,
batch_size,
model_type,
num_classes,
cur_mu_val)
def fit(self, x, y):
self.learner.fit(x, y)
def predict(self, x):
return self.learner.predict(x)
elif method == "FairDummies":
class ClassAdapter(ClassifierAdapter):
def __init__(self, model=None,fit_params=None, params=None):
super(ClassAdapter, self).__init__(model,fit_params)
# Instantiate model
self.learner = fair_dummies_learning.EquiClassLearner(lr=lr_loss,
pretrain_pred_epochs=loss_steps,
pretrain_dis_epochs=dis_steps,
epochs=epochs,
loss_steps=1,
dis_steps=1,
cost_pred=cost_pred,
in_shape=in_shape,
batch_size=batch_size,
model_type=model_type,
lambda_vec=cur_mu_val,
second_moment_scaling=cur_second_scale,
num_classes=num_classes)
def fit(self, x, y):
self.learner.fit(x, y)
def predict(self, x):
return self.learner.predict(x)
elif method == "HGR":
class ClassAdapter(ClassifierAdapter):
def __init__(self, model=None,fit_params=None, params=None):
super(ClassAdapter, self).__init__(model,fit_params)
# Instantiate model
self.learner = continuous_fairness.HGR_Class_Learner(lr = lr_loss,
epochs = epochs,
mu=cur_mu_val,
cost_pred=cost_pred,
in_shape=in_shape,
out_shape=num_classes,
batch_size=batch_size,
model_type=model_type)
def fit(self, x, y):
self.learner.fit(x, y)
def predict(self, x):
return self.learner.predict(x)
elif method == "Baseline":
class ClassAdapter(ClassifierAdapter):
def __init__(self, model=None,fit_params=None, params=None):
super(ClassAdapter, self).__init__(model,fit_params)
# Instantiate model
self.learner = fair_dummies_learning.EquiClassLearner(lr=lr_loss,
pretrain_pred_epochs=epochs,
pretrain_dis_epochs=0,
epochs=0,
loss_steps=0,
dis_steps=0,
cost_pred=cost_pred,
in_shape=in_shape,
batch_size=batch_size,
model_type=model_type,
lambda_vec=0,
second_moment_scaling=0,
num_classes=num_classes)
def fit(self, x, y):
self.learner.fit(x, y)
def predict(self, x):
return self.learner.predict(x)
fairness_class = ClassAdapter(model=None)
nc = ClassifierNc(fairness_class, InverseProbabilityErrFunc())
# function that extracts the group identifier
def condition(x, y=None):
return int(x[0][0]>0)
icp = IcpClassifier(nc,condition=condition)
input_data_train = np.concatenate((A[:,np.newaxis],X),1)
icp.fit(input_data_train, Y)
input_data_cal = np.concatenate((A_cal[:,np.newaxis],X_cal),1)
icp.calibrate(input_data_cal, Y_cal)
input_data_test = np.concatenate((A_test[:,np.newaxis],X_test),1)
Yhat_test = icp.predict(input_data_test, significance=0.1)
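        # The three calls above implement split conformal classification: the
        # underlying fairness-aware classifier is fit on the training split,
        # nonconformity scores are computed on the held-out calibration split,
        # and prediction sets at the 90% level (significance=0.1) are produced
        # for the test split, with group-wise (Mondrian) calibration driven by
        # the `condition` function.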
avg_coverage, avg_length = class_compute_coverage_len(Y_test, Yhat_test)
coverage_sample, length_sample = class_compute_coverage_per_sample(Y_test,
Yhat_test,
0.1,
input_data_test,
condition)
avg_length_0[i] = np.mean(length_sample[0])
avg_coverage_0[i] = np.mean(coverage_sample[0])
avg_length_1[i] = np.mean(length_sample[1])
avg_coverage_1[i] = np.mean(coverage_sample[1])
Yhat_out_cal = fairness_class.learner.predict(input_data_cal)
Yhat_out_test = fairness_class.learner.predict(input_data_test)
p_val = utility_functions.fair_dummies_test_classification(Yhat_out_cal,
A_cal,
Y_cal,
Yhat_out_test,
A_test,
Y_test,
num_reps=1,
num_p_val_rep=1000,
reg_func_name="Net")
avg_p_val[i] = p_val
pred_error[i] = 1.0-utility_functions.compute_acc_numpy(Yhat_out_test, Y_test)
print("experiment = " + str(i+1))
print("Coverage 0 = " + str(avg_coverage_0[i]))
print("Coverage 1 = " + str(avg_coverage_1[i]))
print("Length 0 = " + str(avg_length_0[i]))
print("Length 1 = " + str(avg_length_1[i]))
print("Prediction Error = " + str(pred_error[i]))
print("p_val = " + str(p_val))
sys.stdout.flush()
outdir = './results/'
if not os.path.exists(outdir):
os.mkdir(outdir)
out_name = outdir + 'results.csv'
full_name = cur_test_method + "_" + cur_model_type
df = pd.DataFrame({'method' : [cur_test_method],
'dataset' : [cur_dataset_name],
'batch_size': [cur_batch_size],
'lr_loss' : [cur_lr_loss],
'lr_dis' : [cur_lr_dis],
'loss_steps': [cur_loss_steps],
'dis_steps' : [cur_dis_steps],
'mu_val' : [cur_mu_val],
'epochs' : [cur_epochs],
'second_scale' : [cur_second_scale],
'random_state' : [seed+i],
'model_type' : [cur_model_type],
'metric' : [metric],
'avg_length' : [avg_length],
'avg_coverage' : [avg_coverage],
'avg_length_0' : [avg_length_0[i]],
'avg_length_1' : [avg_length_1[i]],
'avg_coverage_0' : [avg_coverage_0[i]],
'avg_coverage_1' : [avg_coverage_1[i]],
'pred_error' : [pred_error[i]],
'p_val' : [p_val],
'full_name' : [full_name]
})
if os.path.isfile(out_name):
df2 = pd.read_csv(out_name)
df = pd.concat([df2, df], ignore_index=True)
df.to_csv(out_name, index=False)
print(full_name)
print("Num experiments %02d | Avg. Pred Err = %.4f | Avg Length 0 = %.4f | Avg Length 1 = %.4f | Avg Coverage 0 = %.4f | Avg Coverage 1 = %.4f | Avg p_val = %.4f | min p_val = %.4f" %
           (i+1, np.mean(pred_error[:i+1]), np.mean(avg_length_0[:i+1]), np.mean(avg_length_1[:i+1]),
            np.mean(avg_coverage_0[:i+1]), np.mean(avg_coverage_1[:i+1]), np.mean(avg_p_val[:i+1]),
            np.min(avg_p_val[:i+1])))  # trailing arguments assumed from the format string above