prompt (stringlengths 15 – 655k) | completion (stringlengths 3 – 32.4k) | api (stringlengths 8 – 52)
---|---|---|
import numpy as np
import pytest
from multinomial import MultinomialDistribution
def test_init_without_rso():
"""Initialize without rso"""
p = np.array([0.1, 0.5, 0.3, 0.1])
dist = MultinomialDistribution(p)
assert (dist.p == p).all()
assert (dist.logp == np.log(p)).all()
assert dist.rso is np.random
def test_init_with_rso():
"""Initialize with rso"""
p = np.array([0.1, 0.5, 0.3, 0.1])
rso =
|
np.random.RandomState(29348)
|
numpy.random.RandomState
|
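# The two tests above pin down the constructor contract of MultinomialDistribution:
# it stores p, precomputes log(p), and falls back to the np.random module when no
# random-state object (rso) is given. A minimal sketch consistent with those
# assertions (not the actual multinomial module) could look like this:
import numpy as np

class MultinomialDistribution(object):
    def __init__(self, p, rso=None):
        self.p = p                                    # outcome probabilities
        self.logp = np.log(p)                         # cached log-probabilities
        self.rso = np.random if rso is None else rso  # random source: module or RandomState

    def sample(self, n):
        # delegate to whichever random source was configured
        return self.rso.multinomial(n, self.p)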
'''
Data reader for MU radar
=========================
This module contains functionality to read the binary MUI files generated by
the MU radar and convert them to h5 for swifter loading and better data
overview.
Conventions of the h5-format
-----------------------------
* dates and times are saved as numpy.datetime64[ns]
* the voltage data nd-array is saved in a dataset called "/data"
'''
import numpy as np
import h5py
import logging
import pathlib
import os
import matplotlib.pyplot as plt
from .. import tools
from . import raw_data
from . import converters
logger = logging.getLogger(__name__)
@tools.profiling.timeing(f'{__name__}')
def _get_header_data(file):
"""
Retrieves the meta/header data from a MUI file for later conversion and for
use in parsing information.
"""
header_data = dict()
# Copyright <NAME>, <NAME>, <NAME>
"""
%-- Row 1 of the header
%-- Reading the header of one record
%-- (Byte position: 1-24) The first 6 numbers are integers,
%-- four bytes long, each byte consisting of 8 bits -> each
%-- integer is represented by 4x8=32 bits.
"""
header_data["mu_head_1_to_24"] = np.fromfile(file, dtype='>i4', count=6)
"""
%-- (25-32) The data taking program name is 8 bytes
%-- -> 8x8=64 bits and is written in ASCII.
"""
header_data["program_name"] = _decode_utf(np.fromfile(file, dtype='S8', count=1))
"""
%-- (33-56) Parameter file load time: 24 bytes, of characters: dd-mmm-yyyy hh:mm:ss(.ss?)
"""
header_data["file_load_time"] = np.datetime64(_convert_date(
np.fromfile(file, dtype='S24', count=1)).strip() + "Z", 'ns')
"""
%-- (57-60) The data taking program number, 4 bytes -> 4*8=32 bits.
"""
header_data["program_number"] = np.fromfile(
file, dtype='>i4', count=1)
"""
%-- (61-84) Record start time: DD-MMM-YYYY hh:mm:ss.ss is 24 bytes long, each 8 bits
"""
header_data["record_start_time"] = np.datetime64(_convert_date(
|
np.fromfile(file, dtype='S24', count=1)
|
numpy.fromfile
|
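# The header reader above relies on NumPy dtype strings: '>i4' for big-endian
# 4-byte integers and 'S8'/'S24' for fixed-width ASCII fields. A small
# self-contained illustration of that convention, using np.frombuffer on an
# in-memory buffer instead of np.fromfile on a real MUI file:
import numpy as np

raw = np.array([1, 2, 3, 4, 5, 6], dtype='>i4').tobytes() + b'PROGNAME'
head = np.frombuffer(raw[:24], dtype='>i4')   # six big-endian int32 values (bytes 1-24)
name = raw[24:32].decode('ascii')             # 8-byte ASCII program name (bytes 25-32)
print(head, name)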
###################################################################################
# Script to interpolate native ocean model outputs to the standard MISOMIP2 grids.
#
# Notes:
# * interpolations are done on 1-dimensional vectors, so the script should be
# easily adaptable to non-structured grids.
# * needs some modifications to work with sigma or HAL coordinates.
#
# History:
# * 2021-03: initial code + tests for NEMO & MITgcm outputs [<NAME>, IGE-CNRS]
#
###################################################################################
import numpy as np
import xarray as xr
import sys
import misomip2.preproc as mp
import os
np.seterr(divide='ignore', invalid='ignore') # to avoid warning due to divide by zero
#--------------------------------------------------------------------------
# 0- General information:
# Official name in MISOMIP2:
#model='NEMO_test'
model='MITGCM_test'
reg='Amundsen' # 'Amundsen' or 'Weddell'
exp='A1' # MISOMIP2 experiment ('A1', 'W1', 'A2', ...)
data_dir='models/oce/'+model
missval=9.969209968386869e36
#--------------------------------------------------------------------------
# 1- Files and variables
# loading an xarray dataset containing all useful variables with (x,y) reshaped
# as 1 dimension in the case of an originally structured grid:
if ( model[0:4] == 'NEMO' ):
print('LOADING NEMO...')
f_mesh = data_dir+'/mesh_mask_test.nc'
f_bathy = data_dir+'/bathy_meter_test.nc'
fs_gridT = [data_dir+'/'+model+'_m0'+month.astype('str')+'_grid_T.nc' for month in np.arange(1,3)]
fs_gridU = [data_dir+'/'+model+'_m0'+month.astype('str')+'_grid_U.nc' for month in np.arange(1,3)]
fs_gridV = [data_dir+'/'+model+'_m0'+month.astype('str')+'_grid_V.nc' for month in np.arange(1,3)]
fs_SBC = [data_dir+'/'+model+'_m0'+month.astype('str')+'_SBC.nc' for month in np.arange(1,3)]
fs_ice = [data_dir+'/'+model+'_m0'+month.astype('str')+'_icemod.nc' for month in np.arange(1,3)]
# Barotropic Streamfunction calculated at U points using the cdfpsi function which is part of the cdftools (https://github.com/meom-group/CDFTOOLS):
fs_BSF = [data_dir+'/'+model+'_m0'+month.astype('str')+'_psi.nc' for month in np.arange(1,3)]
oce = mp.load_oce_mod_nemo( file_mesh_mask=f_mesh, file_bathy=f_bathy, files_gridT=fs_gridT,\
files_gridU=fs_gridU, files_gridV=fs_gridV, files_SBC=fs_SBC, files_ice=fs_ice,\
files_BSF=fs_BSF, rho0=1026.0, teos10=True )
elif ( model[0:6] == 'MITGCM' ):
print('LOADING MITGCM...')
fT = data_dir+'/'+model+'_THETA.nc'
fS = data_dir+'/'+model+'_SALT.nc'
fU = data_dir+'/'+model+'_UVEL.nc'
fV = data_dir+'/'+model+'_VVEL.nc'
oce = mp.load_oce_mod_mitgcm( files_T=fT, files_S=fS, files_U=fU, files_V=fV, rho0=1026.0, teos10=False )
else:
sys.exit("Unknown model ==> Write a function to load this model's outputs")
print(oce)
#--------------------------------------------------------------------------
# 2- Global attributes of output netcdf :
def put_global_attrs(ds,experiment='TBD',avg_hor_res_73S=0.0,original_sim_name='None',\
original_min_lat=-90.0,original_max_lat=90.0,original_min_lon=-180.0,original_max_lon=180.0):
""" Put global attributes to the ds xarray dataset
"""
ds.attrs['project'] = 'MISOMIP2'
ds.attrs['contact'] = '<NAME> <<EMAIL>>' # name <email>
ds.attrs['institute'] = 'CNRS-UGA-IGE'
ds.attrs['computing_facility'] = 'occigen-CINES' # Computing center where the simulation was run
ds.attrs['ocean_model'] = 'NEMO3.6' # Model name and version
ds.attrs['reference'] = 'Jourdain et al. 2019 (doi:10.1016/j.ocemod.2018.11.001)' # publication describing the simulation or a similar configuration
ds.attrs['original_sim_name'] = original_sim_name # original simulation name
ds.attrs['experiment'] = experiment # in: 'A1', 'W1', 'A2', ...
ds.attrs['bathymetry'] = 'BedMachine-v1.33' # Bathymetry dataset (specify exact version)
ds.attrs['ice_shelf_draft'] = 'BedMachine-v1.33' # Ice draft depth dataset (specify exact version)
ds.attrs['atmosphere'] = 'DFS5.2' # in: 'ERA5', 'ERAint', 'CORE', 'MERRA2', 'JRA55do', ...
ds.attrs['iceberg'] = 'Prescribed Freshwater' # in: 'Lagrangian Model', 'Prescribed Freshwater',
# 'Prescribed Freshwater and Heat', 'None'
ds.attrs['sea_ice'] = 'Dynamics-Thermodynamics Model' # in: 'Dynamics-Thermodynamics Model', 'Thermodynamics Model', 'Prescribed Freshwater and Heat'
ds.attrs['ocean_lateral_bdy'] = 'Simulation' # in: 'None', 'Observational Data', 'Ocean Reanalysis', 'Simulation', 'Simulation with corrections'
ds.attrs['tides'] = 'Resolved (prescribed at bdy)' # in: 'None', 'Resolved (prescribed at bdy)', 'Resolved (tidal potential)',
# 'Parameterized (uniform tidal velocity)', 'Parameterized (non-uniform tidal velocity),
# 'Parameterized (other)'
ds.attrs['vertical_coordinate'] = 'Stretched Geopotential (Zstar)' # in: 'Geopotential (Z)', 'Stretched Geopotential (Zstar)', 'Pressure (P)',
# 'Stretched Pressure (P*)', 'Isopycnal', 'Terrain-Following (Sigma)',
# 'Arbitrary Lagrangian-Eulerian (ALE)'
ds.attrs['is_melt_param'] = '3-equation (velocity-dependent gamma)' # in: '3-equation (constant gamma)', '3-equation (velocity-dependent gamma)',
# '3-equation (stability and velocity-dependent gamma)', ...
ds.attrs['avg_hor_res_73S'] = avg_hor_res_73S # Average horizontal resolution (m) at 73degS in the MISOMIP2 domain (average of x and y resolution)
ds.attrs['original_min_lat'] = original_min_lat # Minimum latitude of the original domain in [-90:90]
ds.attrs['original_max_lat'] = original_max_lat # Maximum latitude of the original domain in [-90:90]
ds.attrs['original_min_lon'] = original_min_lon # Minimum longitude of the original domain in [-180:180]
ds.attrs['original_max_lon'] = original_max_lon # Maximum longitude of the original domain in [-180:180]
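# Example call (sketch): once the interpolated dataset dsmiso3d is built further
# below, the attributes would typically be attached like this (the simulation
# name is a placeholder):
#   put_global_attrs(dsmiso3d, experiment=exp, avg_hor_res_73S=res_73S,
#                    original_sim_name='MY_SIM_NAME',
#                    original_min_lat=float(latT.min()), original_max_lat=float(latT.max()),
#                    original_min_lon=float(lonT.min()), original_max_lon=float(lonT.max()))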
#--------------------------------------------------------------------------
# 3a- Interpolate to common 3d grid :
# Characteristics of MISOMIP 3d grid:
[lon_miso,lat_miso,dep_miso] = mp.generate_3d_grid_oce(region=reg)
mlon = np.size(lon_miso)
mlat = np.size(lat_miso)
mdep = np.size(dep_miso)
lon2d_miso, lat2d_miso = np.meshgrid( lon_miso, lat_miso )
lon_miso1d = np.reshape( lon2d_miso, mlon*mlat )
lat_miso1d = np.reshape( lat2d_miso, mlon*mlat )
# model coordinates:
lonT=oce.lonT ; lonU=oce.lonU ; lonV=oce.lonV
latT=oce.latT ; latU=oce.latU ; latV=oce.latV
# Some quantities needed to define the global attributes:
res_73S = 0.5 * ( oce.dxT.where( (latT < -72.5) & (latT >= -73.5) & (lonT >= lon_miso.min()) & (lonT <= lon_miso.max()) \
& (oce.maskT.isel(z=0)==1) ).mean().values \
+ oce.dyT.where( (latT < -72.5) & (latT >= -73.5) & (lonT >= lon_miso.min()) & (lonT <= lon_miso.max()) \
& (oce.maskT.isel(z=0)==1) ).mean().values )
print('Average horizontal resolution at 73S :',res_73S)
# Useful masks with nans
maskTnan = oce.maskT.where( (oce.maskT==1) ) # 3d mask (=1 if ocean, =nan elsewhere)
maskUnan = oce.maskU.where( (oce.maskU==1) )
maskVnan = oce.maskV.where( (oce.maskV==1) )
maskTnan2d = oce.maskT.max(dim='z').where( oce.maskT.max(dim='z')==1 ) # 2d mask (=1 for open ocean and cavities, =nan elsewhere)
maskUnan2d = oce.maskU.max(dim='z').where( oce.maskU.max(dim='z')==1 )
maskVnan2d = oce.maskV.max(dim='z').where( oce.maskV.max(dim='z')==1 )
# Lower and upper indices for vertical interpolation:
[kinf,ksup] = mp.vertical_interp(oce.depTUV.values,dep_miso)
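# kinf/ksup hold, for every MISOMIP depth, the lower and upper model-level indices
# bracketing it; the loops below combine those two levels with the weights
# (depTUV[ksup]-dep) and (dep-depTUV[kinf]), i.e. a linear interpolation in depth,
# with kinf == ksup meaning the target depth maps onto a single model level.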
mtime = np.shape(oce.SO)[0]
# mask showing the original domain (nan where interpolation of any of T, U, V grid is nan):
DOMMSK_miso = mp.horizontal_interp( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.DOMMSKT )
DOMMSK_miso = DOMMSK_miso + mp.horizontal_interp( lonU, latU, mlat, mlon, lon_miso1d, lat_miso1d, oce.DOMMSKU )
DOMMSK_miso = DOMMSK_miso + mp.horizontal_interp( lonV, latV, mlat, mlon, lon_miso1d, lat_miso1d, oce.DOMMSKV )
# vertical then horizontal interpolation of constant 3d fields to common grid :
LEVOF_miso = np.zeros((mdep,mlat,mlon)) + missval
for kk in np.arange(mdep):
if ( kinf[kk] == ksup[kk] ):
tmpaT = 1.e0
tmpbT = 0.e0
else:
tmpaT = oce.depTUV.isel(z=ksup[kk]) - dep_miso[kk]
tmpbT = dep_miso[kk] - oce.depTUV.isel(z=kinf[kk])
tmpxT = tmpaT + tmpbT
tmp_OC = ( oce.LEVOF.isel(z=kinf[kk]) * tmpaT + oce.LEVOF.isel(z=ksup[kk]) * tmpbT ) / tmpxT
tzz = mp.horizontal_interp( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, tmp_OC )
tzz[ np.isnan(DOMMSK_miso) ] = missval
LEVOF_miso[kk,:,:] = tzz
# horizontal interpolation of constant 2d fields to common horizontal grid :
theT = mp.horizontal_interp( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.thetaT )
theU = mp.horizontal_interp( lonU, latU, mlat, mlon, lon_miso1d, lat_miso1d, oce.thetaU )
theV = mp.horizontal_interp( lonV, latV, mlat, mlon, lon_miso1d, lat_miso1d, oce.thetaV )
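# theT/theU/theV are the local grid-rotation angles interpolated onto the MISOMIP grid;
# model (x,y) vector components are turned into zonal/meridional components further down
# as u_lon = u_x*cos(theta) + v_y*sin(theta) and v_lat = v_y*cos(theta) - u_x*sin(theta).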
# Ice shelf fraction
SFTFLI_miso = mp.horizontal_interp( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.SFTFLI )
SFTFLI_miso[ np.isnan(DOMMSK_miso) ] = missval
# Depth of floating ice (ice-shelf draft)
DEPFLI = oce.DEPFLI.where( (oce.SFTFLI.values > 1.), np.nan ) # to avoid interpolation with 0 depth beyond the grounding line
DEPFLI_miso = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, DEPFLI )
DEPFLI_miso[ (SFTFLI_miso < 1.) | np.isnan(DEPFLI_miso) ] = 0.e0
DEPFLI_miso[ np.isnan(DOMMSK_miso) ] = missval
# Ocean depth
DEPTHO = oce.DEPTHO.where( (oce.SFTFLI.values > 1.) | (oce.LEVOF[0,:].values > 1.), np.nan )
DEPTHO_miso = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, DEPTHO )
DEPTHO_miso[ ((SFTFLI_miso < 1.) & (LEVOF_miso[0,:,:] < 1.)) | np.isnan(DEPTHO_miso) ] = 0.e0
DEPTHO_miso[ np.isnan(DOMMSK_miso) ] = missval
#----- interpolation of time-varying fields:
SO_miso = np.zeros((mtime,mdep,mlat,mlon)) + missval
THETAO_miso = np.zeros((mtime,mdep,mlat,mlon)) + missval
UO_miso = np.zeros((mtime,mdep,mlat,mlon)) + missval
VO_miso = np.zeros((mtime,mdep,mlat,mlon)) + missval
ZOS_miso = np.zeros((mtime,mlat,mlon)) + missval
TOB_miso = np.zeros((mtime,mlat,mlon)) + missval
SOB_miso = np.zeros((mtime,mlat,mlon)) + missval
FICESHELF_miso = np.zeros((mtime,mlat,mlon)) + missval
MSFTBAROT_miso = np.zeros((mtime,mlat,mlon)) + missval
HFDS_miso = np.zeros((mtime,mlat,mlon)) + missval
WFOATRLI_miso = np.zeros((mtime,mlat,mlon)) + missval
WFOSICOR_miso = np.zeros((mtime,mlat,mlon)) + missval
SICONC_miso = np.zeros((mtime,mlat,mlon)) + missval
SIVOL_miso = np.zeros((mtime,mlat,mlon)) + missval
SIU_miso = np.zeros((mtime,mlat,mlon)) + missval
SIV_miso = np.zeros((mtime,mlat,mlon)) + missval
TAUUO_miso = np.zeros((mtime,mlat,mlon)) + missval
TAUVO_miso = np.zeros((mtime,mlat,mlon)) + missval
LEVOF_maxdep = np.amax(LEVOF_miso,axis=0)
# masking with nan to not consider points in interpolation
# (instead of defining a mask for the bottom layer):
TOB=oce.TOB.where( (np.abs(oce.SOB.values) > 1.e-3) )
SOB=oce.SOB.where( (~np.isnan(TOB.values)) )
for ll in np.arange(mtime):
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.ZOS.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; ZOS_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, TOB.isel(time=ll) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; TOB_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, SOB.isel(time=ll) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; SOB_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.FICESHELF.isel(time=ll)*maskTnan.max('z',skipna=True) )
tzz[ (SFTFLI_miso < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; FICESHELF_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonU, latU, mlat, mlon, lon_miso1d, lat_miso1d, oce.MSFTBAROT.isel(time=ll)*maskUnan.isel(z=0) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; MSFTBAROT_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.HFDS.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; HFDS_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.WFOATRLI.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_maxdep < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; WFOATRLI_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.WFOSICOR.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_miso[0,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; WFOSICOR_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.SICONC.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_miso[0,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; SICONC_miso[ll,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.SIVOL.isel(time=ll)*maskTnan.isel(z=0) )
tzz[ (LEVOF_miso[0,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval ; SIVOL_miso[ll,:,:] = tzz
# sea-ice velocities: rotation and interpolation weighted by sea-ice concentration
SIUX_notrot = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.SIUX.isel(time=ll)*oce.SICONC.isel(time=ll) )
SIUX_notrot[ (LEVOF_miso[0,:,:] < 1.) ] = np.nan
SIVY_notrot = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, oce.SIVY.isel(time=ll)*oce.SICONC.isel(time=ll) )
SIVY_notrot[ (LEVOF_miso[0,:,:] < 1.) ] = np.nan
SIU_miso[ll,:,:] = ( SIUX_notrot * np.cos(theT) + SIVY_notrot * np.sin(theT) ) / SICONC_miso[ll,:,:] # rotated to zonal
SIV_miso[ll,:,:] = ( SIVY_notrot * np.cos(theT) - SIUX_notrot * np.sin(theT) ) / SICONC_miso[ll,:,:] # rotated to meridional
SIU_miso[ ( np.isnan(SIU_miso) ) | ( np.isinf(SIU_miso) ) | (SICONC_miso[ll,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval
SIV_miso[ ( np.isnan(SIV_miso) ) | ( np.isinf(SIV_miso) ) | (SICONC_miso[ll,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval
# wind stress: rotation and interpolation
TAUX_notrot = mp.horizontal_interp_nonan( lonU, latU, mlat, mlon, lon_miso1d, lat_miso1d, oce.TAUX.isel(time=ll)*maskUnan2d )
TAUX_notrot[ (LEVOF_miso[0,:,:] < 1.) & (SFTFLI_miso < 1.) ] = np.nan
TAUY_notrot = mp.horizontal_interp_nonan( lonV, latV, mlat, mlon, lon_miso1d, lat_miso1d, oce.TAUY.isel(time=ll)*maskVnan2d )
TAUY_notrot[ (LEVOF_miso[0,:,:] < 1.) & (SFTFLI_miso < 1.) ] = np.nan
TAUUO_miso[ll,:,:] = TAUX_notrot * np.cos(theU) + TAUY_notrot * np.sin(theV) # rotated to zonal
TAUVO_miso[ll,:,:] = TAUY_notrot * np.cos(theV) - TAUX_notrot * np.sin(theU) # rotated to meridional
TAUUO_miso[ ( np.isnan(TAUUO_miso) ) | ( np.isinf(TAUUO_miso) ) | (np.isnan(DOMMSK_miso)) ] = missval
TAUVO_miso[ ( np.isnan(TAUVO_miso) ) | ( np.isinf(TAUVO_miso) ) | (np.isnan(DOMMSK_miso)) ] = missval
for kk in np.arange(mdep):
# vertical interpolation to common vertical grid :
if ( kinf[kk] == ksup[kk] ):
tmpaT = oce.maskT.isel(z=kinf[kk])
tmpbT = tmpaT*0
else:
tmpaT = oce.maskT.isel(z=kinf[kk]) * (oce.depTUV.isel(z=ksup[kk])-dep_miso[kk])
tmpbT = oce.maskT.isel(z=ksup[kk]) * (dep_miso[kk]-oce.depTUV.isel(z=kinf[kk]))
tmpxT = tmpaT + tmpbT
tmp_SS = ( oce.SO.isel(time=ll,z=kinf[kk]) * tmpaT + oce.SO.isel(time=ll,z=ksup[kk]) * tmpbT ) / tmpxT # Inf if no interpolable value
tmp_TT = ( oce.THETAO.isel(time=ll,z=kinf[kk]) * tmpaT + oce.THETAO.isel(time=ll,z=ksup[kk]) * tmpbT ) / tmpxT
if ( kinf[kk] == ksup[kk] ):
tmpaU = oce.maskU.isel(z=kinf[kk])
tmpbU = tmpaU*0
else:
tmpaU = oce.maskU.isel(z=kinf[kk]) * (oce.depTUV.isel(z=ksup[kk])-dep_miso[kk])
tmpbU = oce.maskU.isel(z=ksup[kk]) * (dep_miso[kk]-oce.depTUV.isel(z=kinf[kk]))
tmpxU = tmpaU + tmpbU
tmp_UX = ( oce.UX.isel(time=ll,z=kinf[kk]) * tmpaU + oce.UX.isel(time=ll,z=ksup[kk]) * tmpbU ) / tmpxU # Inf if no interpolable value
if ( kinf[kk] == ksup[kk] ):
tmpaV = oce.maskV.isel(z=kinf[kk])
tmpbV = tmpaV*0
else:
tmpaV = oce.maskV.isel(z=kinf[kk]) * (oce.depTUV.isel(z=ksup[kk])-dep_miso[kk])
tmpbV = oce.maskV.isel(z=ksup[kk]) * (dep_miso[kk]-oce.depTUV.isel(z=kinf[kk]))
tmpxV = tmpaV + tmpbV
tmp_VY = ( oce.VY.isel(time=ll,z=kinf[kk]) * tmpaV + oce.VY.isel(time=ll,z=ksup[kk]) * tmpbV ) / tmpxV # Inf if no interpolable value
# horizontal interpolations of time-varying 3d fields to common horizontal grid :
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, tmp_SS )
tzz[ (LEVOF_miso[kk,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval
SO_miso[ll,kk,:,:] = tzz
tzz = mp.horizontal_interp_nonan( lonT, latT, mlat, mlon, lon_miso1d, lat_miso1d, tmp_TT )
tzz[ (LEVOF_miso[kk,:,:] < 1.) | (np.isnan(DOMMSK_miso)) ] = missval
THETAO_miso[ll,kk,:,:] = tzz
UX_notrot = mp.horizontal_interp_nonan( lonU, latU, mlat, mlon, lon_miso1d, lat_miso1d, tmp_UX )
VY_notrot = mp.horizontal_interp_nonan( lonV, latV, mlat, mlon, lon_miso1d, lat_miso1d, tmp_VY )
tzz = UX_notrot * np.cos(theU) + VY_notrot * np.sin(theV) # rotated to zonal
tzz[ (LEVOF_miso[kk,:,:] < 1.) | (np.isnan(tzz)) | (np.isnan(DOMMSK_miso)) ] = missval
UO_miso[ll,kk,:,:] = tzz
tzz = VY_notrot * np.cos(theV) - UX_notrot * np.sin(theU) # rotated to meridional
tzz[ (LEVOF_miso[kk,:,:] < 1.) | (np.isnan(tzz)) | (np.isnan(DOMMSK_miso)) ] = missval
VO_miso[ll,kk,:,:] = tzz
#--------------------------------------------------------------------------
# 3b- Create new xarray dataset and save to netcdf
dsmiso3d = xr.Dataset(
{
"so": (["time", "depth", "latitude", "longitude"], np.float32(SO_miso)),
"thetao": (["time", "depth", "latitude", "longitude"], np.float32(THETAO_miso)),
"uo": (["time", "depth", "latitude", "longitude"], np.float32(UO_miso)),
"vo": (["time", "depth", "latitude", "longitude"], np.float32(VO_miso)),
"tauuo": (["time", "latitude", "longitude"], np.float32(TAUUO_miso)),
"tauvo": (["time", "latitude", "longitude"],
|
np.float32(TAUVO_miso)
|
numpy.float32
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Basic/mesh_subdivision.py
import numpy as np
import open3d as o3d
import time
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(dir_path, "../Misc"))
import meshes
def problem0():
mesh = meshes.plane(height=1, width=1)
mesh = mesh.subdivide_midpoint(3)
vertices = np.asarray(mesh.vertices)
static_ids = [
1, 46, 47, 48, 16, 51, 49, 50, 6, 31, 33, 32, 11, 26, 27, 25, 0, 64, 65,
20, 66, 68, 67, 7, 69, 71, 70, 22, 72, 74, 73, 3, 15, 44, 43, 45, 5, 41,
40, 42, 13, 39, 37, 38, 2, 56, 55, 19, 61, 60, 59, 8, 76, 75, 77, 23
]
static_positions = []
for id in static_ids:
static_positions.append(vertices[id])
handle_ids = [4]
handle_positions = [vertices[4] + np.array((0, 0, 0.4))]
return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem1():
mesh = meshes.plane(height=1, width=1)
mesh = mesh.subdivide_midpoint(3)
vertices = np.asarray(mesh.vertices)
static_ids = [
1, 46, 15, 43, 5, 40, 13, 38, 2, 56, 37, 39, 42, 41, 45, 44, 48, 47
]
static_positions = []
for id in static_ids:
static_positions.append(vertices[id])
handle_ids = [21]
handle_positions = [vertices[21] + np.array((0, 0, 0.4))]
return mesh, static_ids + handle_ids, static_positions + handle_positions
def problem2():
mesh = meshes.armadillo()
vertices = np.asarray(mesh.vertices)
static_ids = [idx for idx in np.where(vertices[:, 1] < -30)[0]]
static_positions = []
for id in static_ids:
static_positions.append(vertices[id])
handle_ids = [2490]
handle_positions = [vertices[2490] +
|
np.array((-40, -40, -40))
|
numpy.array
|
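# The problemN helpers above each return a mesh plus constraint vertex ids and target
# positions. In the Open3D deformation examples this file mirrors, such a triple is
# typically fed to TriangleMesh.deform_as_rigid_as_possible (exact keyword names can
# vary between Open3D versions); a sketch, reusing problem0 from the snippet above:
mesh, constraint_ids, constraint_pos = problem0()
mesh_deformed = mesh.deform_as_rigid_as_possible(
    o3d.utility.IntVector(constraint_ids),
    o3d.utility.Vector3dVector(constraint_pos),
    max_iter=50)
o3d.visualization.draw_geometries([mesh_deformed])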
import numpy as np
def project_simplex(x, mask=None):
""" Take a vector x (with possible nonnegative entries and non-normalized)
and project it onto the unit simplex.
mask: do not project these entries
project remaining entries onto lower dimensional simplex
"""
if mask is not None:
mask =
|
np.asarray(mask)
|
numpy.asarray
|
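# project_simplex above is truncated. For reference, a standard sort-based Euclidean
# projection onto the probability simplex (not necessarily the author's implementation)
# looks like this:
import numpy as np

def _project_simplex_dense(v):
    """Project a 1-D vector v onto {w : w >= 0, sum(w) = 1}."""
    u = np.sort(v)[::-1]                       # sort descending
    css = np.cumsum(u)
    idx = np.arange(1, len(v) + 1)
    rho = np.nonzero(u * idx > (css - 1.0))[0][-1]
    theta = (css[rho] - 1.0) / (rho + 1.0)     # shift that normalises the positive part
    return np.maximum(v - theta, 0.0)

print(_project_simplex_dense(np.array([0.5, 2.0, -0.3])))   # -> [0., 1., 0.]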
from __future__ import division
import random
import numpy as np
import os
from fire_risk.utils import UniformDraw
from math import log
from math import ceil
class LowerBoundGreaterThanUpperBoundException(Exception):
pass
class NotEnoughRecords(Exception):
pass
class DIST(object):
"""
The Differential In Standard Time (DIST) model.
"""
room_area_draw = UniformDraw(72, 380)
building_area_draw = UniformDraw(1088, 9004)
alarm_time_draw = UniformDraw(90, 120)
dispatch_time_draw = UniformDraw(40, 80)
turnout_time_draw = UniformDraw(60, 100)
arrival_time_draw = UniformDraw(300, 420)
suppression_time_draw = UniformDraw(60, 180)
floor_area_draw = None
floor_extent = True
minimum_number_of_records = 75
@property
def params(self):
return ['room_area_draw', 'building_area_draw', 'alarm_time_draw', 'dispatch_time_draw', 'turnout_time_draw',
'arrival_time_draw', 'suppression_time_draw', 'floor_area_draw', 'floor_extent',
'minimum_number_of_records', 'object_of_origin', 'room_of_origin', 'floor_of_origin',
'building_of_origin', 'beyond']
def __init__(self, object_of_origin, room_of_origin, floor_of_origin, building_of_origin, beyond, **kwargs):
"""initialize attributes of the DISTOutput class.
Args:
extent_list(list): list of firespread extents, see DISTImport class.
firespread_count(dict): dictionary of firespread counts by extent.
see DISTImport class.
>>> test = DIST(object_of_origin=93, room_of_origin=190, floor_of_origin=39, building_of_origin=64,
... beyond=9, floor_extent=False)
>>> test.object_of_origin
93
>>> test.room_of_origin
283
>>> test = DIST(object_of_origin=74, room_of_origin=0, floor_of_origin=0, building_of_origin=0,
... beyond=0, floor_extent=False)
Traceback (most recent call last):
...
NotEnoughRecords
>>> test = DIST(object_of_origin=93, room_of_origin=190, floor_of_origin=39, building_of_origin=64,
... beyond=9, room_area_draw=UniformDraw(20, 30), building_area_draw=UniformDraw(20,30),
... alarm_time_draw=UniformDraw(20,30), dispatch_time_draw=UniformDraw(20,30),
... turnout_time_draw=UniformDraw(20,30), arrival_time_draw=UniformDraw(20,30),
... suppression_time_draw=UniformDraw(20,30), floor_extent=False)
>>> test.room_area_draw.minimum
20
>>> test.room_area_draw.maximum
30
"""
self.object_of_origin = object_of_origin
self.room_of_origin = room_of_origin + object_of_origin
self.floor_of_origin = floor_of_origin
self.building_of_origin = building_of_origin
self.beyond = beyond
for key, value in kwargs.items():
if key in self.params:
setattr(self, key, value)
# TODO: Should raise error if self.floor_extent=True and floor_area_draw is None?
if not self.floor_extent:
self.building_of_origin += self.floor_of_origin
self.floor_of_origin = 0
if self.minimum_number_of_records:
if (self.room_of_origin + self.floor_of_origin + self.building_of_origin +
self.beyond) < self.minimum_number_of_records:
raise NotEnoughRecords
@property
def total_fires(self):
"""
Returns the count of fires.
>>> d = DIST(object_of_origin=93, room_of_origin=190, floor_of_origin=39, building_of_origin=64,
... beyond=9, floor_extent=False)
>>> d.total_fires
395
"""
return self.room_of_origin + self.floor_of_origin + self.building_of_origin + self.beyond
@staticmethod
def _task_time(uniform_values):
"""
Returns the sum of time values.
>>> random.seed(1234)
>>> test = DIST(object_of_origin=93, room_of_origin=190, floor_of_origin=39, building_of_origin=64,
... beyond=9, room_area_draw=UniformDraw(20, 30), building_area_draw=UniformDraw(20,30),
... alarm_time_draw=UniformDraw(20,30), dispatch_time_draw=UniformDraw(20,30),
... turnout_time_draw=UniformDraw(20,30), arrival_time_draw=UniformDraw(20,30),
... suppression_time_draw=UniformDraw(20,30), floor_extent=False)
>>> values = test._draw_values()
>>> round(test._task_time(values), 2)
131.12
>>> round(values['alarm_time'] + values['dispatch_time'] + values['turnout_time'] + values['arrival_time'] \
+ values['suppression_time'], 2)
131.12
"""
times = 'alarm_time dispatch_time turnout_time arrival_time suppression_time'.split()
return sum(map(lambda value: uniform_values.get(value, 0), times))
@staticmethod
def draw_uniform(uniform_limits):
"""
Draw a new value of an attribute from its uniform distribution.
:param uniform_limits: a tuple of uniform limits.
"""
return random.uniform(*uniform_limits)
@staticmethod
def draw_custom(draw_file_name, filter=False):
"""
Draw a new value of an attribute from a custom distribution.
This can be used when there is actual data for a given parameter
"""
custom_values = []
custom_cdf = []
with open(os.path.join(os.path.dirname(__file__), draw_file_name), 'r') as f:
for line in f:
custom_values.append(float(line.split('\t')[0]))
custom_cdf.append(float(line.split('\t')[1]))
index_array = range(0, len(custom_values))
lowerbound = 0
if filter:
lowerbound_index = int(ceil(
|
np.interp(filter, custom_values, index_array)
|
numpy.interp
|
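# draw_custom above reads tab-separated (value, cdf) pairs and is truncated here.
# Sampling from such an empirical distribution is usually done by inverse-transform
# sampling; a sketch of that idea (not the project's exact code):
import random
import numpy as np

def draw_from_empirical_cdf(custom_values, custom_cdf):
    """Draw one value by mapping a uniform variate through the inverse CDF."""
    u = random.random()
    return float(np.interp(u, custom_cdf, custom_values))

values = [10.0, 20.0, 30.0, 40.0]
cdf = [0.1, 0.5, 0.9, 1.0]
print(draw_from_empirical_cdf(values, cdf))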
""" Classes for defining optimization problem objects."""
# Author: <NAME>
# License: BSD 3 clause
import numpy as np
from sklearn.metrics import mutual_info_score
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree, depth_first_tree
from .fitness import TravellingSales
class OptProb:
"""Base class for optimisation problems.
Parameters
----------
length: int
Number of elements in state vector.
fitness_fn: fitness function object
Object to implement fitness function for optimization.
maximize: bool, default: True
Whether to maximize the fitness function.
Set :code:`False` for minimization problem.
"""
def __init__(self, length, fitness_fn, maximize=True):
if length < 0:
raise Exception("""length must be a positive integer.""")
elif not isinstance(length, int):
if length.is_integer():
self.length = int(length)
else:
raise Exception("""length must be a positive integer.""")
else:
self.length = length
self.state = np.array([0]*self.length)
self.neighbors = []
self.fitness_fn = fitness_fn
self.fitness = 0
self.population = []
self.pop_fitness = []
self.mate_probs = []
if maximize:
self.maximize = 1.0
else:
self.maximize = -1.0
def best_child(self):
"""Return the best state in the current population.
Returns
-------
best: array
State vector defining best child.
"""
best = self.population[np.argmax(self.pop_fitness)]
return best
def best_neighbor(self):
"""Return the best neighbor of current state.
Returns
-------
best: array
State vector defining best neighbor.
"""
fitness_list = []
for neigh in self.neighbors:
fitness = self.eval_fitness(neigh)
fitness_list.append(fitness)
best = self.neighbors[np.argmax(fitness_list)]
return best
def eval_fitness(self, state):
"""Evaluate the fitness of a state vector.
Parameters
----------
state: array
State vector for evaluation.
Returns
-------
fitness: float
Value of fitness function.
"""
if len(state) != self.length:
raise Exception("state length must match problem length")
fitness = self.maximize*self.fitness_fn.evaluate(state)
return fitness
def eval_mate_probs(self):
"""
Calculate the probability of each member of the population reproducing.
"""
pop_fitness = np.copy(self.pop_fitness)
# Set -1*inf values to 0 to avoid dividing by sum of infinity.
# This forces mate_probs for these pop members to 0.
pop_fitness[pop_fitness == -1.0*np.inf] = 0
if np.sum(pop_fitness) == 0:
self.mate_probs = np.ones(len(pop_fitness)) \
/ len(pop_fitness)
else:
self.mate_probs = pop_fitness/np.sum(pop_fitness)
self.mate_probs *= (self.mate_probs>0) # clips negative probs
self.mate_probs = self.mate_probs / sum(self.mate_probs)
def get_fitness(self):
""" Return the fitness of the current state vector.
Returns
-------
self.fitness: float
Fitness value of current state vector.
"""
return self.fitness
def get_length(self):
""" Return the state vector length.
Returns
-------
self.length: int
Length of state vector.
"""
return self.length
def get_mate_probs(self):
""" Return the population mate probabilities.
Returns
-------
self.mate_probs: array.
Numpy array containing mate probabilities of the current
population.
"""
return self.mate_probs
def get_maximize(self):
""" Return the maximization multiplier.
Returns
-------
self.maximize: int
Maximization multiplier.
"""
return self.maximize
def get_pop_fitness(self):
""" Return the current population fitness array.
Returns
-------
self.pop_fitness: array
Numpy array containing the fitness values for the current
population.
"""
return self.pop_fitness
def get_population(self):
""" Return the current population.
Returns
-------
self.population: array
Numpy array containing current population.
"""
return self.population
def get_state(self):
""" Return the current state vector.
Returns
-------
self.state: array
Current state vector.
"""
return self.state
def set_population(self, new_population):
""" Change the current population to a specified new population and get
the fitness of all members.
Parameters
----------
new_population: array
Numpy array containing new population.
"""
self.population = new_population
# Calculate fitness
pop_fitness = []
for i in range(len(self.population)):
fitness = self.eval_fitness(self.population[i])
pop_fitness.append(fitness)
self.pop_fitness = np.array(pop_fitness)
def set_state(self, new_state):
"""
Change the current state vector to a specified value
and get its fitness.
Parameters
----------
new_state: array
New state vector value.
"""
if len(new_state) != self.length:
raise Exception("""new_state length must match problem length""")
self.state = new_state
self.fitness = self.eval_fitness(self.state)
class DiscreteOpt(OptProb):
"""Class for defining discrete-state optimization problems.
Parameters
----------
length: int
Number of elements in state vector.
fitness_fn: fitness function object
Object to implement fitness function for optimization.
maximize: bool, default: True
Whether to maximize the fitness function.
Set :code:`False` for minimization problem.
max_val: int, default: 2
Number of unique values that each element in the state vector
can take. Assumes values are integers in the range 0 to
(max_val - 1), inclusive.
"""
def __init__(self, length, fitness_fn, maximize=True, max_val=2):
OptProb.__init__(self, length, fitness_fn, maximize)
if self.fitness_fn.get_prob_type() == 'continuous':
raise Exception("""fitness_fn must have problem type 'discrete',"""
+ """ 'either' or 'tsp'. Define problem as"""
+ """ ContinuousOpt problem or use alternative"""
+ """ fitness function."""
)
if max_val < 0:
raise Exception("""max_val must be a positive integer.""")
elif not isinstance(max_val, int):
if max_val.is_integer():
self.max_val = int(max_val)
else:
raise Exception("""max_val must be a positive integer.""")
else:
self.max_val = max_val
self.keep_sample = []
self.node_probs = np.zeros([self.length, self.max_val, self.max_val])
self.parent_nodes = []
self.sample_order = []
self.prob_type = 'discrete'
def eval_node_probs(self):
"""Update probability density estimates.
"""
# Create mutual info matrix
mutual_info = np.zeros([self.length, self.length])
for i in range(self.length - 1):
for j in range(i + 1, self.length):
mutual_info[i, j] = -1 * mutual_info_score(
self.keep_sample[:, i],
self.keep_sample[:, j])
# Find minimum spanning tree of mutual info matrix
mst = minimum_spanning_tree(csr_matrix(mutual_info))
# Convert minimum spanning tree to depth first tree with node 0 as root
dft = depth_first_tree(csr_matrix(mst.toarray()), 0, directed=False)
dft = np.round(dft.toarray(), 10)
# Determine parent of each node
parent = np.argmin(dft[:, 1:], axis=0)
# Get probs
probs = np.zeros([self.length, self.max_val, self.max_val])
probs[0, :] = np.histogram(self.keep_sample[:, 0],
np.arange(self.max_val + 1),
density=True)[0]
for i in range(1, self.length):
for j in range(self.max_val):
subset = self.keep_sample[np.where(
self.keep_sample[:, parent[i - 1]] == j)[0]]
if not len(subset):
probs[i, j] = 1/self.max_val
else:
probs[i, j] = np.histogram(subset[:, i],
np.arange(self.max_val + 1),
density=True)[0]
# Update probs and parent
self.node_probs = probs
self.parent_nodes = parent
def find_neighbors(self):
"""Find all neighbors of the current state.
"""
self.neighbors = []
if self.max_val == 2:
for i in range(self.length):
neighbor = np.copy(self.state)
neighbor[i] = np.abs(neighbor[i] - 1)
self.neighbors.append(neighbor)
else:
for i in range(self.length):
vals = list(np.arange(self.max_val))
vals.remove(self.state[i])
for j in vals:
neighbor = np.copy(self.state)
neighbor[i] = j
self.neighbors.append(neighbor)
def find_sample_order(self):
"""Determine order in which to generate sample vector elements.
"""
sample_order = []
last = [0]
parent = np.array(self.parent_nodes)
while len(sample_order) < self.length:
inds = []
# If the last-nodes list is empty, select a random node that has not
# previously been selected
if len(last) == 0:
inds = [np.random.choice(list(set(np.arange(self.length)) -
set(sample_order)))]
else:
for i in last:
inds += list(np.where(parent == i)[0] + 1)
sample_order += last
last = inds
self.sample_order = sample_order
def find_top_pct(self, keep_pct):
"""Select samples with fitness in the top keep_pct percentile.
Parameters
----------
keep_pct: float
Proportion of samples to keep.
"""
if (keep_pct < 0) or (keep_pct > 1):
raise Exception("""keep_pct must be between 0 and 1.""")
# Determine threshold
theta = np.percentile(self.pop_fitness, 100*(1 - keep_pct))
# Determine samples for keeping
keep_inds = np.where(self.pop_fitness >= theta)[0]
# Determine sample for keeping
self.keep_sample = self.population[keep_inds]
def get_keep_sample(self):
""" Return the keep sample.
Returns
-------
self.keep_sample: array
Numpy array containing samples with fitness in the top keep_pct
percentile.
"""
return self.keep_sample
def get_prob_type(self):
""" Return the problem type.
Returns
-------
self.prob_type: string
Returns problem type.
"""
return self.prob_type
def random(self):
"""Return a random state vector.
Returns
-------
state: array
Randomly generated state vector.
"""
state = np.random.randint(0, self.max_val, self.length)
return state
def random_neighbor(self):
"""Return random neighbor of current state vector.
Returns
-------
neighbor: array
State vector of random neighbor.
"""
neighbor = np.copy(self.state)
i = np.random.randint(0, self.length)
if self.max_val == 2:
neighbor[i] = np.abs(neighbor[i] - 1)
else:
vals = list(
|
np.arange(self.max_val)
|
numpy.arange
|
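# A minimal way to exercise OptProb/DiscreteOpt: the fitness object only needs the
# evaluate() and get_prob_type() methods used above. _OneMax below is a made-up
# stand-in for the package's own fitness classes:
import numpy as np

class _OneMax:
    def evaluate(self, state):
        return float(np.sum(state))      # count of ones in the bit string
    def get_prob_type(self):
        return 'discrete'

problem = DiscreteOpt(length=8, fitness_fn=_OneMax(), maximize=True, max_val=2)
problem.set_state(problem.random())      # random bit string and its fitness
print(problem.get_state(), problem.get_fitness())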
#####################################################################################
# MIT License #
# #
# Copyright (C) 2019 <NAME>, <NAME>, <NAME> #
# #
# This file is part of Joint-Text-Image-Representation. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
import numpy as np
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import os
from tensorflow.python.keras.preprocessing import image
class TSNEImage2DScatter(object):
"""
Build the embedded t-SNE space of an image representation,
and output the representation in a 2D scatter image.
References
----------
https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
https://stackoverflow.com/a/22570069
"""
def __init__(self, input_directory, activations, output_dimension=15,
out_resolution=96, output_name='tsne_image_2d_scatter.jpg',
output_directory='./', perplexity=50, iterations=5000,
output_size=(70, 70), quality=100):
"""
Parameters
---------
input_directory : str
Source directory for images
activations : numpy.ndarray
Activations of a trained image model
output_dimension : int (default: 15)
Number of small images in output image
out_resolution : int, optional (default: 96)
Width/height of output square image
output_name : str, optional (default: tsne_image_2d_scatter.jpg)
Name of output image file
output_directory : str, optional (default: ./)
Destination directory for output image
perplexity : int, optional (default: 50)
t-SNE perplexity
iterations : int, optional (default: 5000)
Number of iterations in tsne algorithm
output_size : (int, int), optional (default: (70, 70))
The size (width, height) of the output image
quality : int, optional (default: 100)
Quality of the output image
"""
self.input_directory = input_directory
self.activations = activations
self.output_dimension = output_dimension
self.out_resolution = out_resolution
self.output_name = output_name
self.output_directory = output_directory
self.perplexity = perplexity
self.iterations = iterations
self.output_size = output_size
self.quality = quality
self.to_plot =
|
np.square(self.output_dimension)
|
numpy.square
|
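# The class stores activations and t-SNE hyper-parameters; the embedding step it is
# built around is sketched below with the sklearn TSNE imported above (the random
# activations are dummies, and newer sklearn versions rename n_iter to max_iter):
acts = np.random.rand(200, 64)                     # stand-in for model activations
tsne = TSNE(n_components=2, perplexity=50, n_iter=5000)
embedding = tsne.fit_transform(acts)               # shape (200, 2), used for the 2D scatter
print(embedding.shape)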
# -*- coding: utf-8 -*-
import numpy as np
def Qx(w):
if type(w) != np.ndarray:
if type(w) != list:
w = np.array([w])
else:
w = np.array(w)
d = w.shape
if len(w) > 1:
out = np.array([[np.ones(d), np.zeros(d), np.zeros(d)],
[np.zeros(d), np.cos(w), np.sin(w)],
[np.zeros(d), -np.sin(w), np.cos(w)]])
else:
out = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(w[0]), np.sin(w[0])],
[0.0, -np.sin(w[0]), np.cos(w[0])]])
return out
def Qz(w):
if type(w) != np.ndarray:
if type(w) != list:
w = np.array([w])
else:
w = np.array(w)
d = w.shape
if len(w) > 1:
out = np.array([[np.cos(w), np.sin(w), np.zeros(d)],
[-np.sin(w), np.cos(w), np.zeros(d)],
[np.zeros(d), np.zeros(d), np.ones(d)]])
else:
out = np.array([[np.cos(w[0]), np.sin(w[0]), 0.0],
[-np.sin(w[0]), np.cos(w[0]), 0.0],
[0.0, 0.0, 1.0]])
return out
def Q(theta1, psi, theta2):
Q1 = Qz(theta1)
Q2 = Qx(psi)
Q3 = Qz(theta2)
Q12 = np.tensordot(Q1, Q2, axes=([0], [1]))
if len(Q12.shape) > 2:
Q12 = np.swapaxes(Q12, 1, 2)
Q123 = np.tensordot(Q12, Q3, axes=([0], [1]))
if len(Q123.shape) > 2:
Q123 = np.rollaxis(Q123, 3, 1)
return Q123
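# Qx/Qz return 3x3 rotation matrices about the x- and z-axes (stacked along extra
# trailing dimensions when the angle argument is an array), and Q chains them into a
# Z-X-Z Euler-angle rotation, using tensordot so that the composition also broadcasts
# over whole grids of angles.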
class UpdatesBank(object):
def __init__(self, error_rate=0.1, kappa=0.1, grains=100):
self.error_rate = error_rate
self.kappa = kappa
theta1 = np.linspace(0, np.pi, grains)
psi = np.linspace(0, 2*np.pi, grains)
theta2 = np.linspace(0, np.pi, grains)
Q123 = Q(theta1, psi, theta2)
self.Q = Q123
self.QQ = np.einsum('ijpqr,klpqr->ijklpqr', Q123, Q123)
self.probs = self.get_probs()
def update(self, Qhat):
Q123 = np.tensordot(Qhat, self.Q, axes=([0], [1]))
self.Q = Q123
self.QQ = np.einsum('ijpqr,klpqr->ijklpqr', Q123, Q123)
self.probs = self.get_probs()
def get_probs(self):
# Prepare some matrices for going between axis eccentricities
# and probabilities
T = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
Tinv = np.linalg.inv(T)
# Convert probabilities to axes
vecp = self.error_rate * np.array([1, 0, self.kappa])
veca = 1.0 - 2.0 * np.dot(T, vecp)
# Create channel matrix
M0 = np.diag(veca)
# Get all rotations of channel matrix
M1 = np.tensordot(self.QQ, M0, axes=([1, 2], [0, 1]))
# Retrieve projected axis eccentricities
veca = np.diagonal(M1)
veca = np.rollaxis(veca, 3, 0)
# Retrieve projected probabilities
vecp = np.tensordot(Tinv, veca - 1.0, axes=([0], [0]))
px = vecp[0, :, :, :]
py = vecp[1, :, :, :]
pz = vecp[2, :, :, :]
return {"x": px, "y": py, "z": pz, None: 1.0-px-py-pz}
class SurfboardEstimator(object):
def __init__(self, error_rate=0.1, kappa=0.1, grains=100):
self.kappa = kappa
self.error_rate = error_rate
self.grains = grains
self.p_angles = np.ones((grains,)*3) / float(grains ** 3)
theta1 = np.linspace(0, np.pi, grains)
psi = np.linspace(0, 2*np.pi, grains)
theta2 = np.linspace(0, np.pi, grains)
Q123 = Q(theta1, psi, theta2)
self.Q = Q123
self.Qhat = Q(0.0, 0.0, 0.0)
self.idx = [0, 0, 0]
self.bank = UpdatesBank(error_rate, kappa, grains)
def update(self, error):
p_update = self.bank.probs.get(error, None)
if p_update is not None:
self.p_angles = self.p_angles * p_update
self.p_angles = self.p_angles / np.sum(self.p_angles)
idx = np.argmax(self.p_angles)
idx = np.unravel_index(idx, self.p_angles.shape)
self.idx = idx
self.Qhat = self.Q[:, :, idx[0], idx[1], idx[2]]
self.bank.update(self.Qhat)
class SurfboardChannel(object):
def __init__(self, error_rate=0.1, kappa=0.1, grains=100):
self.error_rate = error_rate
self.kappa = kappa
theta1 = np.linspace(0, np.pi, grains)
psi = np.linspace(0, 2*np.pi, grains)
theta2 = np.linspace(0, np.pi, grains)
self.idx = np.random.randint(0, grains, 3)
self.theta1val = theta1[self.idx[0]]
self.psival = psi[self.idx[1]]
self.theta2val = theta2[self.idx[2]]
self.Qval = Q(self.theta1val, self.psival, self.theta2val)
self.Qeff = self.Qval
self.probs = self.get_probs()
def get_probs(self):
# Prepare some matrices for going between axis eccentricities
# and probabilities
T = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
Tinv = np.linalg.inv(T)
# Convert probabilities to axes
vecp = self.error_rate * np.array([1, 0, self.kappa])
veca = 1.0 - 2.0 *
|
np.dot(T, vecp)
|
numpy.dot
|
"""
concavity_automator comprises multiple scripts automating the concavity-constraining method for landscapes
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used for vectorised normalisation of disorder by the max of each row, using the apply_along_axis function
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used for vectorised normalisation of disorder by the range of each row, using the apply_along_axis function
B.G
"""
return (A - A.min())/(A.max() - A.min())
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes output from the concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of minimum median disorder to get the best fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the actual best fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am just interested in the first half for the first error and the second half for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# Returning a tuple with [0] being the best fit and [1] another tuple of errors
return BF,err
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix before the basin number used to read the input file
Also takes optional kwargs arguments:
ignore_numbering: just use the prefix as the name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use the function from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Quick single-basin version of the concavity-constraining routine, working on an
already-loaded LSDDEM object (with the flow routines already processed) rather
than on files.
Takes:
MD -> the LSDDEM object
X_coordinate_outlet, Y_coordinate_outlet -> coordinates of the basin outlet
area_threshold -> area threshold used to extract the river network
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraining** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al, submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basins, several $\theta$ values actually fit different areas, and the best fit is just an attempt to make everyone happy where that is not necessarily possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
def get_median_first_quartile_Dstar(ls):
"""
Function which post-processes results from one analysis to return the median and first quartile curves of all best fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-processes results from one analysis to return the median and first quartile curves of all best fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin colored by D^*.
Needs the post-processing functions to pre-analyse the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these plots take time to produce.
param
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading the rows and columns corresponding to the main raster and plotting them with the requested value
for name in df["raster_name"]:
row =
|
np.load(name + "_row.npy")
|
numpy.load
|
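# process_basin above is written for multiprocessing: each job is a list
# [basin_number, X_outlet, Y_outlet, area_threshold, prefix]. A sketch of how it is
# typically dispatched with multiprocessing.Pool (outlet arrays, basin count and prefix
# are placeholders; precipitation_raster is passed explicitly because process_basin
# reads it from kwargs without a default):
from functools import partial
from multiprocessing import Pool

if __name__ == "__main__":
    jobs = [[i, X_outlets[i], Y_outlets[i], 1500, "basin_"] for i in range(n_basins)]
    run = partial(process_basin, precipitation_raster="")
    with Pool(4) as p:
        p.map(run, jobs)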
# source: https://github.com/l5shi/Multi-DDPG-with-parameter-noise
import numpy as np
import torch
from math import sqrt
class AdaptiveParamNoiseSpec(object):
def __init__(self, initial_stddev=0.1, desired_action_stddev=0.2, adaptation_coefficient=1.01):
"""
Note that initial_stddev and current_stddev refer to std of parameter noise,
        but desired_action_stddev refers to (as the name suggests) the desired std in action space
"""
self.initial_stddev = initial_stddev
self.desired_action_stddev = desired_action_stddev
self.adaptation_coefficient = adaptation_coefficient
self.current_stddev = initial_stddev
def adapt(self, distance):
if distance > self.desired_action_stddev:
# Decrease stddev.
self.current_stddev /= self.adaptation_coefficient
else:
# Increase stddev.
self.current_stddev *= self.adaptation_coefficient
def get_stats(self):
stats = {
'param_noise_stddev': self.current_stddev,
}
return stats
def __repr__(self):
fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adaptation_coefficient={})'
return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adaptation_coefficient)
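# Minimal usage sketch (assumption: `distance` is measured each cycle with ddpg_distance_metric
# below, between the actions of the perturbed and unperturbed policies):
def _demo_adaptive_param_noise():
    spec = AdaptiveParamNoiseSpec(initial_stddev=0.1, desired_action_stddev=0.2)
    for distance in (0.05, 0.15, 0.30):  # pretend these were measured in successive cycles
        spec.adapt(distance)  # stddev shrinks when distance exceeds the target, grows otherwise
    return spec.get_stats()  # e.g. {'param_noise_stddev': ...}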
def ddpg_distance_metric(actions1, actions2):
"""
Compute "distance" between actions taken by two policies at the same states
Expects numpy arrays
"""
diff = actions1-actions2
mean_diff = np.mean(
np.square(diff)
numpy.square
"""Basic cross validation methods"""
from abc import ABC
from typing import Dict, List, Optional
import numpy
import numpy.random
import pandas
class CrossValidationPlan(ABC):
"""Data splitting plan"""
def __init__(self):
pass
def split_plan(
self,
*,
n_rows: Optional[int] = None,
k_folds: Optional[int] = None,
data=None,
y=None,
) -> List[Dict[str, List[int]]]:
"""
Build a cross validation plan for the given parameters.
:param n_rows: (optional) number of input rows
:param k_folds: (optional) number of folds we want
:param data: (optional) explanatory variables
:param y: (optional) dependent variable
:return: cross validation plan (list of dictionaries)
"""
raise NotImplementedError("base class called")
def __repr__(self):
return str(type(self))
def __str__(self):
return self.__repr__()
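# Hypothetical sketch (not part of this module): a concrete plan only has to return the
# documented list of {"train": [...], "app": [...]} index dictionaries.
class _ExampleContiguousPlan(CrossValidationPlan):
    """Toy plan: k contiguous, unshuffled folds (illustration only)."""

    def split_plan(self, *, n_rows=None, k_folds=None, data=None, y=None):
        bounds = numpy.linspace(0, n_rows, k_folds + 1).astype(int)
        return [
            {
                "app": list(range(bounds[i], bounds[i + 1])),
                "train": [j for j in range(n_rows) if j < bounds[i] or j >= bounds[i + 1]],
            }
            for i in range(k_folds)
        ]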
def _k_way_cross_plan_y_stratified(
*, n_rows: int, k_folds: int, y
) -> List[Dict[str, List[int]]]:
"""
:param n_rows: number of input rows
:param k_folds: number of cross folds desired
:param y: values to stratify on
:return: cross validation plan (list of dictionaries)
"""
"""randomly split range(n_rows) into k_folds disjoint groups, attempting an even y-distribution"""
if k_folds < 2:
k_folds = 2
n2 = int(numpy.floor(n_rows / 2))
if k_folds > n2:
k_folds = n2
if n_rows <= 2 or k_folds <= 1:
# degenerate overlap cases
plan = [
{"train": [i for i in range(n_rows)], "app": [i for i in range(n_rows)]}
]
return plan
# first sort by y plus a random key
if y is None:
y =
numpy.zeros(n_rows)
numpy.zeros
import math
import numpy as np
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist
from blaze.expr.expressions import shape
from scipy import special
from statsmodels.sandbox.distributions.quantize import prob_bv_rectangle
from scipy.spatial.distance import cdist
class CrispIndices:
def __init__(self, X, labels):
if len(X) != len(labels):
raise ValueError('data list and labels must have same length')
        self.labels = labels - 1  # cluster labels run from 1 to n_clusters; subtract 1 so they start at 0
self.X = np.array(X)
self.n_dims = self.X.shape[1]
self.N = len(self.X)
self.n_clusters = np.max(labels)
self.M = np.zeros((self.n_clusters, self.n_dims))
        self.clusters_len = np.zeros((self.n_clusters,), dtype=int)  # plain int: np.int is removed in recent NumPy
for i in range(self.n_clusters):
self.clusters_len[i] = int(np.sum(self.labels == i))
self.M[i] = np.mean(X[self.labels == i], axis=0)
self.G = np.mean(X, axis=0)
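    # Usage sketch (illustrative): labels are expected to follow the 1..n_clusters convention
    # handled above, e.g.
    #   X = np.random.rand(100, 2)
    #   labels = np.random.randint(1, 4, size=100)  # three clusters labelled 1, 2, 3
    #   ci = CrispIndices(X, labels)
    #   ci.Int_Calinski_Harabasz(), ci.Int_Davies_Bouldin()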
    # spread within cluster k
# returns float
def _delta_k(self, k):
result = 0.0
for i in range(self.N):
if self.labels[i] == k:
result += self.distance(self.X[i], self.M[k])
result /= self.clusters_len[k]
return result
# T
# returns np.array
def _total_dispersion_matrix(self):
return np.cov(self.X.T) * (self.N - 1)
# WG {k}
# returns np.array
def _within_group_k_matrix(self, k):
if self.clusters_len[k] > 1:
return np.cov((self.X[self.labels == k]).T) * (np.sum(self.labels == k) - 1)
else:
return np.zeros(shape=(self.X.shape[1], self.X.shape[1]))
# WG
# returns np.array
def _within_group_matrix(self):
WG = np.zeros((self.n_dims, self.n_dims))
for k in range(self.n_clusters):
WG += self._within_group_k_matrix(k)
return WG
# WGSS{k}
# returns float
def _within_cluster_dispersion_k(self, k):
if self.clusters_len[k] > 1:
return np.trace(self._within_group_k_matrix(k))
else:
return 0
# WGSS
# returns float
def _within_cluster_dispersion(self):
within = 0;
for k in range(self.n_clusters):
within += self._within_cluster_dispersion_k(k)
return within
# BG
# returns np.array
def _between_group_matrix(self):
return self._total_dispersion_matrix() - self._within_group_matrix()
# BGSS
# returns float
def _between_group_dispersion(self):
return np.trace(self._between_group_matrix())
def _s_b(self): # 1621161.
s_b = 0.0
ndist = 0
for k1 in range(self.n_clusters):
ind1 = np.where(self.labels == k1)[0]
for k2 in range(k1 + 1, self.n_clusters):
ind2 = np.where(self.labels == k2)[0]
for i in range(len(ind1)):
s_b = s_b + np.sum(np.linalg.norm(self.X[ind1[i]] - self.X[ind2], axis=1))
ndist = ndist + self.X[ind2].shape[0]
return s_b
def _s_w(self):
"""
:return: float, sum of distances between points in same cluster
"""
SW = 0
for k in range(self.n_clusters):
ind = np.where(self.labels == k)[0]
for j in range(len(ind) - 1):
SW = SW + np.sum(np.linalg.norm(self.X[ind[j]] - self.X[ind[j + 1:]], axis=1))
return SW
def Int_Ball_Hall(self):
W = 0
for i in range(int(self.n_clusters)):
ind = np.where(self.labels == i)[0]
W = W + np.sum((self.X[ind, :] - self.M[i]) ** 2) / len(ind)
return W / int(self.n_clusters)
def Int_Banfeld_Raftery(self):
index = 0.0
for k in range(self.n_clusters):
if self.clusters_len[k] > 1:
tr = np.trace(self._within_group_k_matrix(k))
index += self.clusters_len[k] * np.log(tr / self.clusters_len[k])
return index
def Int_C_index(self):
SW = 0
NW = 0
Total_distances = list()
for j in range(len(self.X) - 1):
dists = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1)
Total_distances.extend(dists)
for k in range(self.n_clusters):
ind = self.labels == k
Xtemp = self.X[ind]
for j in range(len(Xtemp) - 1):
SW = SW + np.sum(np.linalg.norm(Xtemp[j, :] - Xtemp[j + 1:, :], axis=1));
NW = NW + int(np.sum(ind) * (np.sum(ind) - 1) / 2)
sorted_dists = sorted(Total_distances)
S_min = sum(sorted_dists[:NW])
S_max = sum(sorted_dists[-NW:])
return float((SW - S_min) / (S_max - S_min))
def Int_Calinski_Harabasz(self):
traceW = self._within_cluster_dispersion()
traceB = self._between_group_dispersion()
return ((self.N - self.n_clusters) * traceB) / ((self.n_clusters - 1) * traceW)
def Int_Davies_Bouldin(self):
index = 0.0
delta_k = np.zeros((self.n_clusters,))
for k in range(int(self.n_clusters)):
ind = np.where(self.labels == k)[0]
delta_k[k] = np.mean(np.linalg.norm(self.X[ind] - self.M[k], axis=1))
C = 0
for k in range(int(self.n_clusters)):
delta_kk = np.linalg.norm(self.M[k] - np.concatenate((self.M[:k], self.M[k + 1:]), axis=0), axis=1)
C = C + np.max((delta_k[k] + np.concatenate((delta_k[:k], delta_k[k + 1:]), axis=0)) / delta_kk)
# np.concatenate((a[:k],a[k+1:]))
return C / self.n_clusters
def Int_Det_Ratio(self):
T = self._total_dispersion_matrix()
WG = self._within_group_matrix()
detT = np.linalg.det(T)
detWG = np.linalg.det(WG)
return detT / detWG
def Int_Dunn11(self):
return self.Dunn(within=1, between=1)
def Int_Dunn12(self):
return self.Dunn(within=1, between=2)
def Int_Dunn13(self):
return self.Dunn(within=1, between=3)
def Int_Dunn14(self):
return self.Dunn(within=1, between=4)
def Int_Dunn15(self):
return self.Dunn(within=1, between=5)
def Int_Dunn16(self):
return self.Dunn(within=1, between=6)
def Int_Dunn21(self):
return self.Dunn(within=2, between=1)
def Int_Dunn22(self):
return self.Dunn(within=2, between=2)
def Int_Dunn23(self):
return self.Dunn(within=2, between=3)
def Int_Dunn24(self):
return self.Dunn(within=2, between=4)
def Int_Dunn25(self):
return self.Dunn(within=2, between=5)
def Int_Dunn26(self):
return self.Dunn(within=2, between=6)
def Int_Dunn31(self):
return self.Dunn(within=3, between=1)
def Int_Dunn32(self):
return self.Dunn(within=3, between=2)
def Int_Dunn33(self):
return self.Dunn(within=3, between=3)
def Int_Dunn34(self):
return self.Dunn(within=3, between=4)
def Int_Dunn35(self):
return self.Dunn(within=3, between=5)
def Int_Dunn36(self):
return self.Dunn(within=3, between=6)
def Dunn(self, within, between):
delta_pq = np.empty(shape=(0,))
delta_CkCk = np.empty(shape=(0,))
if within == 1:
for i in range(self.n_clusters):
ind1 = self.labels == i
for j in np.where(ind1)[0]:
delta_CkCk = np.append(delta_CkCk, np.max(np.linalg.norm(self.X[j] - self.X[ind1], axis=1)))
if within == 2:
for i in range(self.n_clusters):
ind1 = self.labels == i
intrasum = 0
if len(np.where(ind1)[0]) > 1:
for j in np.where(ind1)[0]:
intrasum = intrasum + np.sum(np.linalg.norm(self.X[j] - self.X[ind1], axis=1))
delta_CkCk = np.append(delta_CkCk, 1 / (self.clusters_len[i] * (self.clusters_len[i] - 1)) * intrasum)
if within == 3:
for i in range(self.n_clusters):
ind1 = self.labels == i
delta_CkCk = np.append(delta_CkCk, 2 / self.clusters_len[i] * np.sum(np.linalg.norm(self.X[ind1] - self.M[i], axis=1)))
if between == 1:
for i in range(self.n_clusters):
ind1 = self.labels == i
for j in np.where(ind1)[0]:
delta_pq = np.append(delta_pq, np.min(np.linalg.norm(self.X[j] - self.X[~ind1], axis=1))) # distance j to the rest of clusters.
if between == 2:
for ci in range(self.n_clusters):
ind1 = self.labels == ci
for cj in range(ci + 1, self.n_clusters):
ind2 = self.labels == cj
max_p_q = -float('inf')
for j in np.where(ind1)[0]:
max_p_q = np.max(np.append(max_p_q, np.max(np.linalg.norm(self.X[j] - self.X[ind2], axis=1))))
delta_pq = np.append(delta_pq, max_p_q)
if between == 3:
for ci in range(self.n_clusters):
ind1 = self.labels == ci
for cj in range(ci + 1, self.n_clusters):
ind2 = self.labels == cj
sum_p_q = 0
for j in np.where(ind1)[0]:
sum_p_q = sum_p_q + np.sum(np.linalg.norm(self.X[j] - self.X[ind2], axis=1))
delta_pq = np.append(delta_pq, 1 / (self.clusters_len[ci] * self.clusters_len[cj]) * sum_p_q)
if between == 4:
for ci in range(self.n_clusters):
for cj in range(ci + 1, self.n_clusters):
delta_pq = np.append(delta_pq, np.linalg.norm(self.M[ci] - self.M[cj]))
if between == 5:
for ci in range(self.n_clusters):
ind1 = self.labels == ci
for cj in range(ci + 1, self.n_clusters):
ind2 = self.labels == cj
sum_pq = np.sum(np.linalg.norm(self.X[ind1] - self.M[cj], axis=1)) + np.sum(np.linalg.norm(self.X[ind2] - self.M[ci], axis=1))
delta_pq = np.append(delta_pq, 1 / (self.clusters_len[ci] + self.clusters_len[cj]) * sum_pq)
if between == 6:
for ci in range(self.n_clusters):
ind1 = self.labels == ci
for cj in range(ci + 1, self.n_clusters):
ind2 = self.labels == cj
max_p = 0
for i in np.where(ind1)[0]:
max_p = np.max(np.append(max_p, np.min(np.linalg.norm(self.X[i] - self.X[ind2], axis=1))))
max_q = 0
for j in np.where(ind2)[0]:
max_q = np.max(np.append(max_q, np.min(np.linalg.norm(self.X[j] - self.X[ind1], axis=1))))
delta_pq = np.append(delta_pq, np.max(np.append(max_p, max_q)))
min_delta_pq = np.min(delta_pq)
max_delta_CkCk = np.max(delta_CkCk)
return min_delta_pq / max_delta_CkCk
def _s_plus_minus(self):
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
parity_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
for j in range(len(self.X)):
parity_matrix[j, :] = self.labels[j] == self.labels
dist_matrix = dist_matrix + dist_matrix.T
np.fill_diagonal(parity_matrix, 5) # Fill diagonal with any other number
S_minus = 0
S_plus = 0
for j in range(len(self.X)):
# print(j)
ind = (parity_matrix[j, :] == 1) # Elements within the same cluster
for i in np.where(ind)[0]:
indnoparity = parity_matrix == 0
S_minus = S_minus + np.sum(dist_matrix[i, j] > dist_matrix[indnoparity])
S_plus = S_plus + np.sum(dist_matrix[i, j] < dist_matrix[indnoparity])
return S_plus / 4, S_minus / 4
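    # Usage sketch: the two concordance counts are computed once and shared by the
    # indices below that need them (illustration only):
    #   s_plus, s_minus = ci._s_plus_minus()
    #   ci.Int_Baker_Hubert_Gamma(s_plus, s_minus), ci.Int_G_Plus(s_minus), ci.Int_Tau(s_plus, s_minus)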
def Int_Baker_Hubert_Gamma(self, s_plus, s_minus):
return float((s_plus - s_minus) / (s_plus + s_minus))
def Int_G_Plus(self, s_minus):
total_points_pairs = self.N * (self.N - 1) / 2
return float(2 * s_minus / (total_points_pairs * (total_points_pairs - 1)))
def Int_Ksq_Det_W(self):
matrix = self._within_group_matrix()
index = self.n_clusters ** 2
return index * np.linalg.det(matrix)
def Int_Log_Det_Ratio(self):
return self.N * np.log(self.Int_Det_Ratio())
def Int_Log_SS_Ratio(self):
bgss = self._between_group_dispersion()
wgss = self._within_cluster_dispersion()
return np.log(bgss / wgss)
def Int_Mcclain_Rao(self):
SW = self._s_w()
SB = self._s_b()
NW = 0
for k in range(self.n_clusters):
NW = NW + int(self.clusters_len[k] * (self.clusters_len[k] - 1) / 2)
NT = len(self.X) * (len(self.X) - 1) / 2
NB = NT - NW
return (NB * SW) / (NW * SB)
def Int_PBM(self): # Or I-Index
E1 = np.sum(np.linalg.norm(self.X - self.G, axis=1))
Ek = 0
Dk = np.empty(shape=(0,))
for k in range(self.n_clusters):
ind = self.labels == k
Ek = Ek + np.sum(np.linalg.norm(self.X[ind] - self.M[k], axis=1))
for k in range(self.n_clusters - 1):
Dk = np.append(Dk, np.max(np.linalg.norm(self.M[k] - self.M[k + 1:], axis=1)))
return ((E1 * np.max(Dk)) / (self.n_clusters * Ek)) ** 2
def Int_Point_Biserial(self):
SW = self._s_w()
SB = self._s_b()
NW = 0
for k in range(self.n_clusters):
NW = NW + int(self.clusters_len[k] * (self.clusters_len[k] - 1) / 2)
NT = len(self.X) * (len(self.X) - 1) / 2
NB = NT - NW
return float((SW / NW - SB / NB) * math.sqrt(NW * NB) / NT)
def Int_Ratkowsky_Lance(self):
bg = self._between_group_matrix()
ts = self._total_dispersion_matrix()
p = len(bg)
r = np.sum(np.diag(bg) / np.diag(ts)) / p
return math.sqrt(r / self.n_clusters)
def Int_Ray_Turi(self):
wgss = self._within_cluster_dispersion()
deltamin = np.empty(shape=(0,))
for k in range(self.n_clusters - 1):
ind = self.labels == k
deltamin = np.append(deltamin, np.min(np.linalg.norm(self.M[k] - self.M[k + 1:], axis=1) ** 2))
return wgss / (self.N * np.min(deltamin))
def Int_Scott_Symons(self):
dets_wg = []
for k in range(self.n_clusters):
det = np.linalg.det(self._within_group_k_matrix(k) / self.clusters_len[k])
if abs(det) < 1e-5:
return np.nan
dets_wg.append(det)
return sum([self.clusters_len[k] * np.log(dets_wg[k]) for k in range(self.n_clusters)])
def Int_SD(self):
return self.D_for_sdindex(), self.S_for_sdindex()
def D_for_sdindex(self):
Daux = 0
Dmax = 0
Dmin = float('Inf')
for k in range(self.n_clusters):
mediadiff = np.linalg.norm(np.concatenate((self.M[k] - self.M[:k], self.M[k] - self.M[k + 1:]), axis=0), axis=1)
Dmax = np.max(np.append(mediadiff, Dmax))
Dmin = np.min(np.append(mediadiff, Dmin))
Daux = Daux + 1 / np.sum(mediadiff)
return Dmax / Dmin * Daux
def S_for_sdindex(self):
Var = list()
for k in range(self.n_clusters):
ind = self.labels == k
Var.append(np.var(self.X[ind], axis=0))
S = 1 / self.n_clusters * np.sum(np.linalg.norm(Var, axis=1)) / np.linalg.norm(np.var(self.X, axis=0))
return S
def Int_Sdbw(self):
Var = list()
for k in range(self.n_clusters):
ind = self.labels == k
if self.clusters_len[k] > 1:
Var.append(np.var(self.X[ind], axis=0))
S = 1 / self.n_clusters * np.sum(np.linalg.norm(Var, axis=1)) / np.linalg.norm(np.var(self.X, axis=0))
Sigma = 1 / self.n_clusters * np.sqrt(np.sum(np.linalg.norm(Var, axis=1)))
Rkj = 0
for k in range(self.n_clusters):
indk = self.labels == k
for j in range(k + 1, self.n_clusters):
Hkj = (self.M[k] + self.M[j]) / 2
indj = self.labels == j
Y_Hkj = np.sum(np.linalg.norm(self.X[np.logical_or(indk, indj)] - Hkj, axis=1) < Sigma)
Y_Gk = np.sum(np.linalg.norm(self.X[np.logical_or(indk, indj)] - self.M[k], axis=1) < Sigma)
Y_Gj = np.sum(np.linalg.norm(self.X[np.logical_or(indk, indj)] - self.M[j], axis=1) < Sigma)
try:
Rkj = Rkj + Y_Hkj / max(Y_Gk, Y_Gj)
except ZeroDivisionError:
return np.nan
G = 2 / (self.n_clusters * (self.n_clusters - 1)) * Rkj
return S + G
def Int_Silhouette(self):
return silhouette_score(self.X, self.labels, metric='euclidean')
def Int_Tau(self, s_plus, s_minus):
NW = 0
for k in range(self.n_clusters):
ind = self.labels == k
NW = NW + int(np.sum(ind) * (np.sum(ind) - 1) / 2)
NT = len(self.X) * (len(self.X) - 1) / 2
NB = NT - NW
return (s_plus + s_minus) / math.sqrt(NB * NW * NT * (NT - 1) / 2)
def Int_Trace_W(self):
return self._within_cluster_dispersion()
def Int_Trace_WIB(self):
wg = self._within_group_matrix()
bg = self._between_group_matrix()
return np.matrix.trace(wg.transpose().dot(bg))
def Int_Wemmert_Gancarski(self):
Jk = 0
for k in range(self.n_clusters):
ind = np.where(self.labels == k)[0]
sumR = 0
for i in ind:
diff = np.linalg.norm(self.X[i] - self.M, axis=1)
ind_cluster = np.zeros(diff.shape[0], dtype=bool)
ind_cluster[self.labels[i]] = True
sumR = sumR + diff[ind_cluster] / np.min(diff[~ind_cluster])
Jk = Jk + float(max(0, self.clusters_len[k] - sumR))
return Jk / self.N
def Int_Xie_Beni(self):
diff = 0
sumdiff = 0
for k in range(self.n_clusters):
ind = np.where(self.labels == k)[0]
diff = np.linalg.norm(self.X[ind] - self.M[k], axis=1) ** 2
sumdiff = sumdiff + np.sum(diff)
diff = float('Inf')
for k in range(self.n_clusters):
diff = np.min(np.append(diff, np.linalg.norm(self.M[k] - self.M[k + 1:], axis=1) ** 2))
return sumdiff / (diff * self.N)
def AddedInt_CNN_005(self):
return self.CNN(5)
def AddedInt_CNN_010(self):
return self.CNN(10)
def AddedInt_CNN_020(self):
return self.CNN(20)
def AddedInt_CNN_050(self):
return self.CNN(50)
def AddedInt_CNN_100(self):
return self.CNN(100)
def CNN(self, k):
self.weight = np.zeros(shape=(len(self.X),))
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
parity_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
dist_matrix = dist_matrix + dist_matrix.T
        np.fill_diagonal(dist_matrix, float('inf'))  # fill the diagonal with inf so a point is never its own nearest neighbour
for j in range(len(self.X)):
parity_matrix[j, :] = self.labels[j] == self.labels
indicesclosed = np.argsort(dist_matrix[j, :])[0:k]
self.weight[j] = np.sum(parity_matrix[j, indicesclosed] == 0) / k
# plt.plot(self.X[:,0],self.X[:,1],'.')
# plt.plot(self.X[j,0],self.X[j,1],'k.')
# plt.plot(self.X[indicesclosed,0],self.X[indicesclosed,1],'.')
# plt.title(str(self.weight[j]))
# plt.show()
separation = np.zeros(shape=(self.n_clusters,))
compactness = np.zeros(shape=(self.n_clusters,))
np.fill_diagonal(dist_matrix, 0)
for c in range(self.n_clusters):
ind = np.where(self.labels == c)[0]
separation[c] = np.mean(self.weight[ind])
for j in ind:
compactness[c] = compactness[c] + np.sum(dist_matrix[j][ind])
compactness[c] = (2 / self.clusters_len[c]) * (self.clusters_len[c] - 1) * compactness[c]
Sep = np.max(separation)
Comp = np.sum(compactness)
return Sep, Comp
def Int_WB_Index(self):
traceW = self._within_cluster_dispersion()
traceB = self._between_group_dispersion()
return self.n_clusters * traceW / traceB
def Int_WB_Index_zhao(self):
maxpercluster = np.empty(shape=(0,))
SSB = 0
for ci in range(self.n_clusters):
indi = np.where(self.labels == ci)[0]
Xaux = self.X[indi]
if len(indi) > 1:
maxpercluster = np.append(maxpercluster, np.max(pdist(Xaux, 'euclidean')))
for cj in range(ci + 1, self.n_clusters):
indj = np.where(self.labels == cj)[0]
Xaux2 = self.X[np.append(indi, indj)]
SSB = SSB + np.min(pdist(Xaux2, 'euclidean'))
SSW = np.max(maxpercluster) + np.sum(self.clusters_len == 1)
return self.n_clusters * SSW / SSB
def Int_Xu_Index(self):
traceW = self._within_cluster_dispersion()
D = self.X.shape[1]
return np.log10(self.n_clusters * (traceW / (D * self.N ** 2)) ** (D / 2))
def Int_ARsd(self, alpha=0.1):
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
parity_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
dist_matrix = dist_matrix + dist_matrix.T
for j in range(len(self.X)):
parity_matrix[j, :] = self.labels[j] == self.labels
CMsd = 0
md = 0
for ci in range(self.n_clusters):
indi = np.where(self.labels == ci)[0]
sm = 0
if len(indi) > 1: # If cluster only have 1 element that element does not have radius nor sd
for i in indi:
md = np.max(np.append(sm, np.max(dist_matrix[i, np.where(parity_matrix[i, :])])))
SD = np.sqrt(np.trace(np.cov(self.X[indi].T)))
CMsd = CMsd + (md - SD) / (md)
Fn = 0
k = self.n_clusters
ci = 0
labels = self.labels
X = self.X
while k > 2:
indwithin = np.where(labels == ci)[0]
indbetween = np.where(labels != ci)[0]
dintra = 0
diff = float('inf')
for i in indwithin:
if self.clusters_len[ci] > 1: # Else dintra 0
dintra = dintra + np.min(dist_matrix[i, indwithin])
diff = np.min(np.append(diff, np.min(dist_matrix[i, indbetween])))
dintra = dintra / self.clusters_len[ci]
dinter = diff
if dinter > 2 * dintra:
fn = 0
if dinter <= dintra:
fn = alpha
if dintra < dinter and dinter < 2 * dintra:
fn = alpha * (dinter - dintra) / dintra
Fn = Fn + fn
labels = np.delete(labels, np.where(labels == ci)[0])
X = np.delete(X, np.where(labels == ci)[0], axis=0)
ci = ci + 1
k = k - 1
DM = -Fn
return CMsd + DM
def ComputationsforEC_PC(self):
den_FR = np.sum(self.X, axis=0)
FR = np.zeros(shape=(self.n_clusters, self.X.shape[1]))
FP = np.zeros(shape=(self.n_clusters, self.X.shape[1]))
for ci in range(self.n_clusters):
indi = np.where(self.labels == ci)[0]
num = np.sum(self.X[indi], axis=0)
FP[ci, :] = num / np.sum(self.X[indi])
FR[ci, :] = num / den_FR
F_measure = 2 * (FR * FP) / (FP + FR)
F_measure[np.isnan(F_measure)] = 0
F_prom = np.mean(F_measure, axis=0)
Fall = np.mean(F_prom)
Indtoremove = np.sum(F_measure >= Fall, axis=0) == 0
F_measureREM = np.delete(F_measure, np.where(Indtoremove), axis=1)
F_promREM = np.delete(F_prom, np.where(Indtoremove))
firstcondition = F_measureREM > np.tile(F_promREM, (self.n_clusters, 1))
secondcondition = F_measureREM > Fall
self.selectedfeatures = firstcondition * secondcondition
self.Gi = F_measureREM / np.tile(F_promREM, (F_measureREM.shape[0], 1))
def Int_EC(self):
self.ComputationsforEC_PC()
EC = 0
for ci in range(self.n_clusters):
activefeat = self.selectedfeatures[ci, :]
EC = EC + 1 / self.clusters_len[ci] * (np.sum(activefeat == 1) * np.sum(self.Gi[ci, activefeat]) + np.sum(activefeat == 0) * np.sum(1 / self.Gi[ci, activefeat])) / (np.sum(activefeat == 1) + np.sum(activefeat == 0))
return EC / self.n_clusters
def Int_PC(self):
self.ComputationsforEC_PC()
PC = 0
for ci in range(self.n_clusters):
activefeat = self.selectedfeatures[ci, :]
PC = PC + np.sum(self.Gi[ci, activefeat]) / self.clusters_len[ci]
return PC / self.n_clusters
def Int_BIC(self):
K = self.n_clusters
N, M = self.X.shape
all_variance = 0
for ci in range(self.n_clusters):
ind = self.labels == ci
all_variance = all_variance + np.sum(np.linalg.norm(self.X[ind] - self.M[ci], axis=1) ** 2)
all_variance = (1.0 / (N - K) / M) * all_variance
const_term = 0.5 * K * np.log(N) * (M + 1)
BIC = self.clusters_len * np.log(self.clusters_len) - self.clusters_len * np.log(N) - ((self.clusters_len * M) / 2) * np.log(2 * np.pi * all_variance) - ((self.clusters_len - 1) * M / 2)
return np.sum(BIC) - const_term
def Int_AIC(self):
K = self.n_clusters
N, M = self.X.shape
all_variance = 0
for ci in range(self.n_clusters):
ind = self.labels == ci
all_variance = all_variance + np.sum(np.linalg.norm(self.X[ind] - self.M[ci], axis=1) ** 2)
all_variance = (1.0 / (N - K) / M) * all_variance
const_term = 0.5 * K * (M + 1)
AIC = self.clusters_len * np.log(self.clusters_len) - self.clusters_len * np.log(N) - ((self.clusters_len * M) / 2) * np.log(2 * np.pi * all_variance) - ((self.clusters_len - 1) * M / 2)
return np.sum(AIC) - const_term
def Int_STR(self):
return self.Dk_for_STR(), self.Ek_for_STR()
def Dk_for_STR(self): #
Dkmax = np.empty(shape=(0,))
Dkmin = np.empty(shape=(0,))
Dk = np.empty(shape=(1, 0))
for k in range(self.n_clusters - 1):
Dk = np.append(Dk, np.linalg.norm(self.M[k] - self.M[k + 1:], axis=1))
return np.max(Dk) / np.min(Dk)
def Ek_for_STR(self): #
E1 = np.sum(np.linalg.norm(self.X - self.G, axis=1))
Ek = 0
for k in range(self.n_clusters):
ind = self.labels == k
Ek = Ek + np.sum(np.linalg.norm(self.X[ind] - self.M[k], axis=1))
return E1 / Ek
def Int_Bhargavi_Gowda(self): # Cut_of f _ratio
traceW = self._within_cluster_dispersion()
traceB = self._between_group_dispersion()
traceT = traceB + traceW
Intra_dist = 0
Inter_dist = 0
for k in range(self.n_clusters):
ind = self.labels == k
Intra_dist = Intra_dist + np.sqrt(np.sum((self.X[ind] - self.M[k]) ** 2))
Inter_dist = Inter_dist + np.sum(np.sqrt(np.sum((self.M[k] - self.M) ** 2, axis=1)))
Inter_dist = Inter_dist / (self.n_clusters) ** 2
return np.abs(traceW * traceT / traceB - Intra_dist / Inter_dist - (self.N - self.n_clusters))
def Int_CS_Measure(self):
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
dist_matrix = dist_matrix + dist_matrix.T
Num = 0
Den = 0
for k in range(self.n_clusters):
ind = self.labels == k
num_aux = 0
for j in np.where(ind)[0]:
num_aux = num_aux + np.max(dist_matrix[j, ind])
Num = Num + num_aux / self.clusters_len[k]
Den = Den + np.min(np.linalg.norm(self.M[k] - np.delete(self.M, k, axis=0), axis=1))
return Num / Den
def Int_Score_function(self):
BCD = np.sum(np.linalg.norm(self.M - np.mean(self.X, axis=0), axis=1) * self.clusters_len) / (self.N * self.n_clusters)
WCB = 0
for k in range(self.n_clusters):
ind = self.labels == k
WCB = WCB + np.sum(np.linalg.norm(self.X[ind] - self.M[k], axis=1)) / self.clusters_len[k]
return 1 - 1 / np.exp(np.exp(BCD - WCB))
def Int_Sym(self):
num = np.empty(shape=(0,))
den = 0
for k in range(self.n_clusters):
num = np.append(num, np.linalg.norm(self.M[k] - self.M, axis=1))
ind = np.where(self.labels == k)[0]
if self.clusters_len[k] > 1:
den = den + np.sum(self.Sym_distance(self.X[ind], self.M[k]))
num = np.max(num)
return num / (self.n_clusters * den)
def Sym_distance(self, X, M):
num_minpts = 2
distances = cdist(2 * M - X, X, 'euclidean')
indices = np.argsort(distances, axis=1) # The first 2 distances
Mindistances = np.zeros(shape=(X.shape[0], num_minpts))
for j in range(indices.shape[0]):
Mindistances[j, :] = distances[j, indices[j, 0:num_minpts]]
return np.sum(Mindistances, axis=1) / 2
def Int_SymDB(self):
index = 0.0
delta_k = np.zeros((self.n_clusters,))
for k in range(int(self.n_clusters)):
ind = np.where(self.labels == k)[0]
delta_k[k] = np.mean(self.Sym_distance(self.X[ind], self.M[k]))
C = 0
for k in range(int(self.n_clusters)):
delta_kk = np.linalg.norm(self.M[k] - np.concatenate((self.M[:k], self.M[k + 1:]), axis=0), axis=1)
C = C + np.max((delta_k[k] + np.concatenate((delta_k[:k], delta_k[k + 1:]), axis=0)) / delta_kk)
return C / self.n_clusters
def Int_SymD(self):
delta_pq = np.empty(shape=(0,))
delta_CkCk = np.empty(shape=(0,))
for i in range(self.n_clusters):
ind1 = self.labels == i
delta_CkCk = np.append(delta_CkCk, np.max(self.Sym_distance(self.X[ind1], self.M[i])))
for j in np.where(ind1)[0]:
delta_pq = np.append(delta_pq, np.min(np.linalg.norm(self.X[j] - self.X[~ind1], axis=1))) # distance j to the rest of clusters.
min_delta_pq = np.min(delta_pq)
max_delta_CkCk = np.max(delta_CkCk)
return min_delta_pq / max_delta_CkCk
def Int_Sym33(self):
delta_pq = np.empty(shape=(0,))
delta_CkCk = np.empty(shape=(0,))
for i in range(self.n_clusters):
ind1 = self.labels == i
delta_CkCk = np.append(delta_CkCk, 2 / self.clusters_len[i] * np.sum(self.Sym_distance(self.X[ind1], self.M[i])))
for ci in range(self.n_clusters):
ind1 = self.labels == ci
for cj in range(ci + 1, self.n_clusters):
ind2 = self.labels == cj
sum_p_q = 0
for j in np.where(ind1)[0]:
sum_p_q = sum_p_q + np.sum(np.linalg.norm(self.X[j] - self.X[ind2], axis=1))
delta_pq = np.append(delta_pq, 1 / (self.clusters_len[ci] * self.clusters_len[cj]) * sum_p_q)
min_delta_pq = np.min(delta_pq)
max_delta_CkCk = np.max(delta_CkCk)
return min_delta_pq / max_delta_CkCk
def Int_COP(self):
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
dist_matrix = dist_matrix + dist_matrix.T
COP = 0
for k in range(self.n_clusters):
ind = self.labels == k
intraCOP = np.sum(np.linalg.norm(self.X[ind] - self.M[k], axis=1))
den = np.empty(shape=(0,))
indicesn = np.where(~ind)[0]
if len(indicesn) > 0 and np.sum(ind) > 0:
for j in range(len(indicesn)):
den = np.append(den, np.max(dist_matrix[indicesn[j], ind]))
COP = COP + intraCOP / np.min(den)
return COP / self.N
def Int_SV(self): # poor formulation
Num = 0
Den = 0
for k in range(self.n_clusters):
Num = Num + np.min(np.linalg.norm(self.M[k] - np.delete(self.M, k, axis=0), axis=1))
ind = self.labels == k
distances = np.linalg.norm(self.X[ind] - self.M[k], axis=1)
if self.clusters_len[k] > 9:
Den = Den + 10 / (self.clusters_len[k]) * np.sum(np.sort(distances)[-int(0.1 * self.clusters_len[k]):]) # 10% of the elements in clusters
try:
return Num / Den
except:
return np.nan
def Int_OS(self):
Den = 0
OV = 0
for k in range(self.n_clusters):
indl = self.labels == k
ind = np.where(indl)[0]
for j in ind:
a = np.sum(np.linalg.norm(self.X[j] - self.X[indl], axis=1)) / self.clusters_len[k]
b = np.sum(np.sort(np.linalg.norm(self.X[j] - self.X[~indl], axis=1))[:self.clusters_len[k] + 1]) / self.clusters_len[k] # sum of ni values closer
if (b - a) / (b + a) < 0.4:
ov = a / b
else:
ov = 0
OV = OV + ov
distances = np.linalg.norm(self.X[ind] - self.M[k], axis=1)
if self.clusters_len[k] > 9:
Den = Den + 10 / (self.clusters_len[k]) * np.sum(np.sort(distances)[-int(0.1 * self.clusters_len[k]):]) # 10% of the elements in clusters
try:
return OV / Den
except:
return np.nan
def Int_CVM(self):
dist_matrix = np.zeros(shape=(len(self.X), len(self.X)))
for j in range(len(self.X) - 1):
dist_matrix[j, j + 1:] = np.linalg.norm(self.X[j, :] - self.X[j + 1:, :], axis=1);
dist_matrix = dist_matrix + dist_matrix.T
CVM_first = 0
CVM_second = 0
for k in range(self.n_clusters):
ind = self.labels == k
dIn = np.empty(shape=(0,))
Rin_set = np.empty(shape=(0,))
indicescluster = np.where(ind)[0]
Dinter = np.empty(shape=(0,))
for j in indicescluster:
dIn = np.append(dIn, np.max(dist_matrix[j, ind]))
indcluster = np.delete(indicescluster, np.where(indicescluster == j))
if self.clusters_len[k] > 1:
Rin_set = np.append(Rin_set, np.min(dist_matrix[j, indcluster]))
else:
Rin_set = np.append(Rin_set, 1) # Homogeneus core
Dinter = np.append(Dinter, np.min(dist_matrix[j, ~ind]))
dIn = np.max(dIn) ** 2
RIn = np.max(Rin_set) ** 2 /
np.mean(Rin_set ** 2)
numpy.mean
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Hindsight Instruction Relabeling."""
# pylint: disable=unused-variable
# pylint: disable=unused-argument
# pylint: disable=g-explicit-length-test
from __future__ import absolute_import
from __future__ import division
import random
import time
import numpy as np
from hal.learner.language_utils import get_vocab_path
from hal.learner.language_utils import instruction_type
from hal.learner.language_utils import negate_unary_sentence
from hal.learner.language_utils import pad_to_max_length
from hal.learner.language_utils import paraphrase_sentence
from hal.utils.video_utils import add_text
from hal.utils.video_utils import pad_image
from hal.utils.video_utils import save_json
from hal.utils.video_utils import save_video
import hal.utils.word_vectorization as wv
class HIR:
"""Learner that executes Hindsight Instruction Relabeling.
Attributes:
cfg: configuration of this learner
step: current training step
epsilon: value of the epsilon for sampling random action
vocab_list: vocabulary list used for the instruction labeler
    encode_fn: function that encodes an instruction
decode_fn: function that converts encoded instruction back to text
labeler: object that generates labels for transitions
"""
def __init__(self, cfg):
# making session
self.cfg = cfg
self.step = 0
self.epsilon = 1.0
# Vocab loading
vocab_path = get_vocab_path(cfg)
self.vocab_list = wv.load_vocab_list(vocab_path)
v2i, i2v = wv.create_look_up_table(self.vocab_list)
self.encode_fn = wv.encode_text_with_lookup_table(
v2i, max_sequence_length=self.cfg.max_sequence_length)
self.decode_fn = wv.decode_with_lookup_table(i2v)
def reset(self, env, agent, sample_new_scene=False, **kwargs):
"""Reset at the episode boundary.
Args:
env: the RL environment
agent: the RL agent
sample_new_scene: sample a brand new set of objects for the scene
**kwargs: other potential arguments
Returns:
the reset state of the environment
"""
if self.cfg.reset_mode == 'random_action':
for _ in range(20):
s, _, _, _ = env.step(env.sample_random_action())
elif self.cfg.reset_mode == 'none':
s = env.get_obs()
else:
s = env.reset(sample_new_scene)
return s
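  # Illustrative outer loop (hypothetical; env, agent and replay_buffer come from the rest of
  # the HAL codebase and are not constructed here):
  #   learner = HIR(cfg)
  #   for _ in range(num_cycles):
  #     stats = learner.learn(env, agent, replay_buffer)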
def learn(self, env, agent, replay_buffer, **kwargs):
"""Run learning for 1 cycle with consists of num_episode of episodes.
Args:
env: the RL environment
agent: the RL agent
replay_buffer: the experience replay buffer
**kwargs: other potential arguments
Returns:
statistics of the training episode
"""
average_per_ep_reward = []
average_per_ep_achieved_n = []
average_per_ep_relabel_n = []
average_batch_loss = []
curiosity_loss = 0
curr_step = agent.get_global_step()
self.update_epsilon(curr_step)
tic = time.time()
time_rolling_out, time_training = 0.0, 0.0
for _ in range(self.cfg.num_episode):
curr_step = agent.increase_global_step()
sample_new_scene = random.uniform(0, 1) < self.cfg.sample_new_scene_prob
s = self.reset(env, agent, sample_new_scene)
episode_experience = []
episode_reward = 0
episode_achieved_n = 0
episode_relabel_n = 0
# rollout
rollout_tic = time.time()
g_text, p = env.sample_goal()
if env.all_goals_satisfied:
s = self.reset(env, agent, True)
g_text, p = env.sample_goal()
g = np.squeeze(self.encode_fn(g_text))
for t in range(self.cfg.max_episode_length):
a = agent.step(s, g, env, self.epsilon)
s_tp1, r, _, _ = env.step(
a,
record_achieved_goal=True,
goal=p,
atomic_goal=self.cfg.record_atomic_instruction)
ag = env.get_achieved_goals()
ag_text = env.get_achieved_goal_programs()
ag_total = ag # TODO(ydjiang): more can be stored in ag
episode_experience.append((s, a, r, s_tp1, g, ag_total))
episode_reward += r
s = s_tp1
if r > env.shape_val:
episode_achieved_n += 1
g_text, p = env.sample_goal()
if env.all_goals_satisfied:
break
g = np.squeeze(self.encode_fn(g_text))
time_rolling_out += time.time() - rollout_tic
average_per_ep_reward.append(episode_reward)
average_per_ep_achieved_n.append(episode_achieved_n)
# processing trajectory
train_tic = time.time()
episode_length = len(episode_experience)
for t in range(episode_length):
s, a, r, s_tp1, g, ag = episode_experience[t]
episode_relabel_n += float(len(ag) > 0)
g_text = self.decode_fn(g)
if self.cfg.paraphrase:
g_text = paraphrase_sentence(
g_text, delete_color=self.cfg.diverse_scene_content)
g = self.encode_fn(g_text)
replay_buffer.add((s, a, r, s_tp1, g))
if self.cfg.relabeling:
self.hir_relabel(episode_experience, t, replay_buffer, env)
average_per_ep_relabel_n.append(episode_relabel_n / float(episode_length))
# training
if not self.is_warming_up(curr_step):
batch_loss = 0
for _ in range(self.cfg.optimization_steps):
experience = replay_buffer.sample(self.cfg.batchsize)
s, a, r, s_tp1, g = [
np.squeeze(elem, axis=1) for elem in np.split(experience, 5, 1)
]
s = np.stack(s)
s_tp1 = np.stack(s_tp1)
g = np.array(list(g))
if self.cfg.instruction_repr == 'language':
g = np.array(pad_to_max_length(g, self.cfg.max_sequence_length))
batch = {
'obs': np.asarray(s),
'action': np.asarray(a),
'reward': np.asarray(r),
'obs_next': np.asarray(s_tp1),
'g': np.asarray(g)
}
loss_dict = agent.train(batch)
batch_loss += loss_dict['loss']
if 'prediction_loss' in loss_dict:
curiosity_loss += loss_dict['prediction_loss']
average_batch_loss.append(batch_loss / self.cfg.optimization_steps)
time_training += time.time()-train_tic
time_per_episode = (time.time() - tic) / self.cfg.num_episode
time_training_per_episode = time_training / self.cfg.num_episode
time_rolling_out_per_episode = time_rolling_out / self.cfg.num_episode
# Update the target network
agent.update_target_network()
################## Debug ##################
sample = replay_buffer.sample(min(10000, len(replay_buffer.buffer)))
_, _, sample_r, _, _ = [
np.squeeze(elem, axis=1) for elem in np.split(sample, 5, 1)
]
print('n one:', np.sum(np.float32(sample_r == 1.0)), 'n zero',
np.sum(np.float32(sample_r == 0.0)), 'n buff',
len(replay_buffer.buffer))
################## Debug ##################
stats = {
'loss': np.mean(average_batch_loss) if average_batch_loss else 0,
'reward': np.mean(average_per_ep_reward),
'achieved_goal': np.mean(average_per_ep_achieved_n),
'average_relabel_goal': np.mean(average_per_ep_relabel_n),
'epsilon': self.epsilon,
'global_step': curr_step,
'time_per_episode': time_per_episode,
'time_training_per_episode': time_training_per_episode,
'time_rolling_out_per_episode': time_rolling_out_per_episode,
'replay_buffer_reward_avg': np.mean(sample_r),
'replay_buffer_reward_var':
np.var(sample_r)
numpy.var
import numpy as np
import os
import torch
from torch.utils.data import Dataset
import cv2
from data.imgaug_wo_shape import ImgAugWithoutShape
from data.imgaug_w_shape import ImgAugWithShape
from data.resize_uniform import resizeUniform
"""
One annotation .txt file per image:
imageName.txt
    xmin, ymin, w, h, cls0, cls1
    xmin, ymin, w, h, cls0, cls1
    xmin, ymin, w, h, cls0, cls1
The path arguments below are all root directories.
output
    img scaled to 0-1
"""
class ListDataset(Dataset):
def __init__(self,
trainAnnoPath, # txt files root /
trainImgPath, # images root /
netInputSizehw,
imgChannelNumber,
augFlag=False,
clsname = {0: "person"}
):
self.trainAnnoPath = trainAnnoPath
self.trainImgPath = trainImgPath
self.netInputSizehw = tuple(netInputSizehw)
        self.annNames = os.listdir(self.trainAnnoPath)  # e.g. ["2008_000176.txt"]
self.imgChannelNumber = imgChannelNumber
self.augFlag = augFlag
self.clsname = clsname
self.showFlag = 0
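    # Example annotation file (illustrative values, whitespace-separated since np.loadtxt is
    # called with its default delimiter below); the 5th column is the class id, and a pure
    # background image is marked with a single row of -1s:
    #   imageName.txt:
    #     12 34 50 80 0
    #     60 20 30 40 0
    #   background.txt:
    #     -1 -1 -1 -1 -1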
def __getitem__(self, index):
"""bbox img org"""
txtPath = self.trainAnnoPath + self.annNames[index]
"""load infos"""
infos = np.loadtxt(txtPath)
if infos.ndim == 1:
rows = infos.shape[0]
infos = infos.reshape(-1, rows) #one row to 2dim
"""change int to float"""
infos = np.array(infos, dtype=np.float32)
"""判断是不是背景图片"""
if (infos ==np.array([[-1,-1,-1,-1,-1]])).all():
bgFlag = True
else:
bgFlag = False
bboxes = infos[:, :4]
classes = infos[:, 4]
"""input img rgb or gray"""
if self.imgChannelNumber == 3:
img = cv2.imread(self.trainImgPath + self.annNames[index].split('.')[0] + '.jpg')# cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.imgChannelNumber == 1:
img = cv2.imread(self.trainImgPath + self.annNames[index].split('.')[0] + '.jpg', cv2.IMREAD_GRAYSCALE)
img = img.astype(np.float32)
winName = ""#self.annNames[index]
# if self.showFlag:
# self.__show(np.copy(img).astype(np.uint8), bboxes, classes, winName, color = (0, 0, 255))
"""unifor resize 放在最后,输入网络的图片会有很多的0, 经过imgaug这些将会变为非0有利于学习"""
img, infos, bboxes = resizeUniform(img, self.netInputSizehw, bboxes)
if self.showFlag:
self.__show(
np.copy(img)
numpy.copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
DEBUG = False
class COVID_dataprocess():
def __init__(self,
path,
source_type,
train_state_list,
test_state_list,
if_onlynewcases,
if_moving_avg_data,
if_log_scale,
input_variable_list = []):
self.path = path
self.source_type = source_type
self.train_state_list = train_state_list
self.test_state_list = test_state_list
self.if_onlynewcases = if_onlynewcases
self.if_moving_avg_data = if_moving_avg_data
self.if_log_scale = if_log_scale
self.input_variable_list = input_variable_list
self.result_dict = {}
if self.source_type == 'COVID_ACT_NOW':
print('LOADING.. COVID_ACT_NOW')
self.input_variable_list = ['input_hospitalBedsRequired',
'input_ICUBedsInUse',
'input_ventilatorsInUse',
'input_Deaths',
'input_Infected']
self.df = self.read_data_COVIDactnow_NYT()
self.df = self.truncate_NAN_DataFrame()
self.df = self.moving_avg_log_scale()
self.data_test = self.extract_ndarray_from_DataFrame()
self.result_dict.update({'Data source': 'COVID_ACT_NOW'})
self.result_dict.update({'Full DataFrame': self.df})
self.result_dict.update({'Data array': self.data_test})
self.result_dict.update({'List_states': self.train_state_list})
self.result_dict.update({'List_variables': self.input_variable_list})
elif self.source_type == 'COVID_TRACKING_PROJECT':
print('LOADING.. COVID_TRACKING_PROJECT')
self.input_variable_list = ['input_hospitalized_Currently',
'input_inICU_Currently',
'input_daily_test_positive_rate',
'input_daily_cases',
'input_daily_deaths']
# 'input_daily_cases_pct_change']
self.df = self.read_data_COVIDtrackingProject()
self.df = self.truncate_NAN_DataFrame()
self.df = self.moving_avg_log_scale()
self.extract_ndarray_from_DataFrame()
self.result_dict.update({'Full DataFrame': self.df})
self.result_dict.update({'Data array (test)': self.data_test}) # state x days x variables
self.result_dict.update({'List_states (test)': self.test_state_list})
self.result_dict.update({'List_variables': self.input_variable_list})
self.result_dict.update({'Data array (train)': self.data_train}) # state x days x variables
self.result_dict.update({'List_states (train)': self.train_state_list})
else: ### JHU data
print('LOADING.. JHU Data')
self.data_test = self.combine_data(self.source)
def moving_avg_log_scale(self):
df = self.df
state_list_combined = self.train_state_list + self.test_state_list
state_list_combined = list(set(state_list_combined))
if self.if_moving_avg_data:
for state in state_list_combined:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = df2.rolling(window=5, win_type=None).sum() / 5 ### moving average with backward window size 5
df2 = df2.fillna(0)
df1[self.input_variable_list] = df2
df.update({state: df1})
if self.if_log_scale:
for state in state_list_combined:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = np.log(df2 + 1)
df1[self.input_variable_list] = df2
df.update({state: df1})
return df
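    # Sketch of the per-state transform applied above (illustrative):
    #   df2.rolling(window=5).sum() / 5   -> backward 5-day moving average of each input variable
    #   np.log(df2 + 1)                   -> log(1 + x) scaling when if_log_scale is set
    # e.g. a column [0, 0, 0, 0, 5] becomes [0, 0, 0, 0, 1] after the moving average, and its
    # last entry becomes log(2) ~ 0.69 if the log scale is applied as well.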
def truncate_NAN_DataFrame(self):
df = self.df.copy()
### Take the maximal sub-dataframe that does not contain NAN
### If some state has all NANs for some variable, that variable is dropped from input_list_variable
start_dates = []
end_dates = []
state_list_combined = self.train_state_list + self.test_state_list
state_list_combined = list(set(state_list_combined))
print('!!! self.train_state_list', self.train_state_list)
print('!!! state_list_combined', state_list_combined)
input_variable_list_noNAN = self.input_variable_list.copy()
for column in input_variable_list_noNAN:
for state in state_list_combined:
df1 = df.get(state)
if df1[column].isnull().all():
input_variable_list_noNAN.remove(column)
self.input_variable_list = input_variable_list_noNAN
print('!!! New input_variable_list', self.input_variable_list)
for state in state_list_combined:
df1 = df.get(state)
for column in self.input_variable_list:
l_min = df1[column][df1[column].notnull()].index[0]
l_max = df1[column][df1[column].notnull()].index[-1]
start_dates.append(l_min)
end_dates.append(l_max)
max_min_date = max(start_dates)
min_max_date = min(end_dates)
for state in state_list_combined:
df1 = df.get(state)
df1 = df1[max_min_date:min_max_date]
print('!!! If any value is NAN:', df1.isnull())
df.update({state: df1})
return df
def extract_ndarray_from_DataFrame(self):
## Make numpy array of shape States x Days x variables for each test and train sets
data_combined = []
df = self.df
data_test = []
data_train = []
print('!!! self.state_list', self.test_state_list)
for state in self.test_state_list:
df1 = df.get(state)
data_combined = df1[self.input_variable_list].values ## shape Days x variables
data_test.append(data_combined)
for state in self.train_state_list:
df2 = df.get(state)
data_combined = df2[self.input_variable_list].values ## shape Days x variables
data_train.append(data_combined)
data_test = np.asarray(data_test)
self.data_test = np.nan_to_num(data_test, copy=True, nan=0, posinf=1, neginf=0)
print('!!!! data_test.shape', data_test.shape)
data_train = np.asarray(data_train)
self.data_train = np.nan_to_num(data_train, copy=True, nan=0, posinf=1, neginf=0)
print('!!!! data_train.shape', data_train.shape)
"""
def read_data_COVIDtrackingProject(self):
'''
Read input time series data as a dictionary of pandas dataframe
'''
print('??? Loading.. read_data_COVIDtrackingProject')
data = pd.read_csv(self.path, delimiter=',').sort_values(by="date")
### Convert the format of dates from string to datetime
data['date'] = pd.to_datetime(data['date'], format='%Y%m%d', utc=False)
df = {}
state_list_full = sorted([i for i in set([i for i in data['state']])])
print('!!! state_list_full', state_list_full)
### Find earliest starting date of the data
start_dates = []
for state in state_list_full:
df1 = data.loc[data['state'] == state]
start_dates.append(min(df1['date']).strftime("%Y-%m-%d"))
max_min_date = max(start_dates)
print('!!! min_dates', max_min_date)
for state in state_list_full:
df1 = data.loc[data['state'] == state].set_index('date')
# lastUpdatedDate = df1['lastUpdateEt'].iloc[0]
df1 = df1[max_min_date:]
### making new columns to process columns of interest and preserve the original data
df1['input_onVentilator_Increase'] = df1['onVentilatorCumulative']
df1['input_inICU_Increase'] = df1['inIcuCumulative']
df1['input_test_positive_rate'] = df1['positiveTestsViral'] / df1['totalTestsViral']
df1['input_case_Increase'] = df1['positiveIncrease']
df1['input_death_Increase'] = df1['deathIncrease']
df.update({state: df1})
if self.if_moving_avg_data:
for state in state_list_full:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = df2.rolling(window=5, win_type=None).sum() / 5 ### moving average with backward window size 5
df2 = df2.fillna(0)
df1[self.input_variable_list] = df2
df.update({state: df1})
if self.if_log_scale:
for state in state_list_full:
df1 = df.get(state)
df2 = df1[self.input_variable_list]
df2 = np.log(df2 + 1)
df1[self.input_variable_list] = df2
df.update({state: df1})
self.df = df
## Make numpy array of shape States x Days x variables
data_combined = []
for state in state_list_full:
df1 = df.get(state)
if state == state_list_full[0]:
data_combined = df1[self.input_variable_list].values ## shape Days x variables
data_combined = np.expand_dims(data_combined, axis=0)
print('!!!Data_combined.shape', data_combined.shape)
else:
data_new = df1[self.input_variable_list].values ## shape Days x variables
data_new = np.expand_dims(data_new, axis=0)
print('!!! Data_new.shape', data_new.shape)
data_combined = np.append(data_combined, data_new, axis=0)
self.data_test = data_combined
"""
def combine_data(self, source):
if len(source) == 1:
for path in source:
data, self.country_list = self.read_data_as_array_countrywise(path)
data_combined = np.expand_dims(data, axis=2)
else:
path = source[0]
data, self.country_list = self.read_data_as_array_countrywise(path)
data_combined = np.empty(shape=[data.shape[0], data.shape[1], 1])
for path in source:
data_new = self.read_data_as_array_countrywise(path)[0]
data_new = np.expand_dims(data_new, axis=2)
# print('data_new.shape', data_new.shape)
min_length = np.minimum(data_combined.shape[1], data_new.shape[1])
data_combined = np.append(data_combined[:, 0:min_length, :], data_new[:, 0:min_length, :], axis=2)
data_combined = data_combined[:, :, 1:]
print('data_combined.shape', data_combined.shape)
# data_full.replace(np.nan, 0) ### replace all NANs with 0
### Replace all NANs in data_combined with 0
where_are_NaNs = np.isnan(data_combined)
data_combined[where_are_NaNs] = 0
return data_combined
def read_data_COVIDtrackingProject(self):
'''
Read input time series data as a dictionary of pandas dataframe
'''
# path = "Data/us_states_COVID_tracking_project.csv"
data = pd.read_csv(self.path, delimiter=',').sort_values(by="date")
### Convert the format of dates from string to datetime
data['date'] = pd.to_datetime(data['date'], format='%Y%m%d', utc=False)
df = {}
### Use full state names
state_list = sorted([i for i in set([i for i in data['state']])])
print('!!! state_list', state_list)
### Find maximum earliest and the minimum latest date of both data
start_dates = []
end_dates = []
for state in state_list:
df1 = data.loc[data['state'] == state]
start_dates.append(min(df1['date']).strftime("%Y-%m-%d"))
end_dates.append(max(df1['date']).strftime("%Y-%m-%d"))
# print('State %s and end_date %s' % (state, max(df1['date']).strftime("%Y-%m-%d")))
max_min_date = max(start_dates)
min_max_date = min(end_dates)
print('!!! max_min_date', max_min_date)
print('!!! min_max_date', min_max_date)
original_list_variables = data.keys().tolist()
original_list_variables.remove('date')
for state in state_list:
df1 = data.loc[data['state'] == state].set_index('date')
# lastUpdatedDate = df1['lastUpdateEt'].iloc[0]
df1 = df1[max_min_date:min_max_date]
### making new columns to process columns of interest and preserve the original data
df1['input_hospitalized_Currently'] = df1['hospitalizedCurrently']
df1['input_inICU_Currently'] = df1['inIcuCurrently']
df1['input_daily_test_positive_rate'] = df1['positive'].diff() / df1['totalTestResults'].diff()
df1['input_daily_cases'] = df1['positive'].diff()
df1['input_daily_deaths'] = df1['death'].diff()
df1['input_daily_cases_pct_change'] = df1['positive'].pct_change()
# print('!!! If any value is NAN: %r for state %s:' % (df1.isnull().values.any(), state))
df.update({abbrev_us_state[state]: df1})
"""
for variable in original_list_variables:
for state in state_list:
df1 = data.loc[data['state'] == state].set_index('date')
if not df1[variable].isnull().values.any():
df.update({'list_states_observed_' + variable: abbrev_us_state[state]})
"""
return df
def read_data_as_array_countrywise(self, path):
'''
        Read the input time series as an ndarray
'''
data_full = pd.read_csv(path, delimiter=',').T
data = data_full.values[1:, :]
        data = np.delete(data, [1, 2], 0)  # delete the latitude & longitude rows
if self.country_list == None:
country_list = [i for i in set(data[0, :])]
country_list = sorted(country_list) # whole countries in alphabetical order
else:
country_list = self.country_list
### merge data according to country
data_new = np.zeros(shape=(data.shape[0] - 1, len(country_list)))
for i in np.arange(len(country_list)):
idx = np.where(data[0, :] == country_list[i])
data_sub = data[1:, idx]
data_sub = data_sub[:, 0, :]
data_sub = np.sum(data_sub, axis=1)
data_new[:, i] = data_sub
data_new = data_new.astype(int)
if self.country_list == None:
idx = np.where(data_new[-1, :] > 1000)
data_new = data_new[:, idx]
data_new = data_new[:, 0, :]
# data_new[:,1] = np.zeros(data_new.shape[0])
print('data_new', data_new)
country_list = [country_list[idx[0][i]] for i in range(len(idx[0]))]
print('country_list', country_list)
if self.if_onlynewcases:
data_new = np.diff(data_new, axis=0)
if self.if_moving_avg_data:
for i in np.arange(5, data_new.T.shape[1]):
data_new.T[:, i] = (data_new.T[:, i] + data_new.T[:, i - 1] + data_new.T[:, i - 2] + data_new.T[:,
                                     i - 3] + data_new.T[:, i - 4]) / 5
# A_recons[:, i] = (A_recons[:, i] + A_recons[:, i-1]) / 2
if self.if_log_scale:
data_new = np.log(data_new + 1)
return data_new.T, country_list
def read_data_JHU_countrywise(self):
'''
        Read the input time series as an ndarray
'''
data_full = pd.read_csv(self.path, delimiter=',').T
data = data_full.values[1:, :]
        data = np.delete(data, [1, 2], 0)  # delete the latitude & longitude rows
country_list = [i for i in set(data[0, :])]
country_list = sorted(country_list) # whole countries in alphabetical order
### merge data according to country
data_new = np.zeros(shape=(data.shape[0] - 1, len(country_list)))
for i in np.arange(len(country_list)):
idx = np.where(data[0, :] == country_list[i])
data_sub = data[1:, idx]
data_sub = data_sub[:, 0, :]
data_sub =
np.sum(data_sub, axis=1)
numpy.sum
import random
import itertools as itr
import numpy as np
import subprocess as sp
from PIL import Image, ImageDraw
import time
import os
this_dir = os.path.abspath(os.path.dirname(__file__))
tmp_dir = os.path.join(this_dir, 'tmp')
def prod (xs, ys, margin=0):
return itr.product(range(xs - margin), range(ys - margin))
def generate_control_points (w, h, w_cnt, h_cnt):
# generates a grid of control points
# these are points of the form (x, y), and the x and y
    # initially correspond to the x and y indices for the point.
    # the idea is that the point's indices encode its "initial position"
# and the point itself encodes its resulting position after displacement.
# this seems a little crappy, maybe there is a more canonical way
# of doing this.
cps = np.zeros((h_cnt, w_cnt, 2))
xs = np.linspace(0, w, w_cnt)
ys = np.linspace(0, h, h_cnt)
for x, y in prod(w_cnt, h_cnt):
cps[y,x] = np.array([xs[x], ys[y]])
return cps
def cp_rect (x, y, cps):
# get a rect from a specific control point
# uses the control point down and to the right of it
# PIL uses this form of rectangle for the initial sections when
# doing mesh transforms
top_left = cps[y,x]
bottom_right = cps[y+1,x+1]
return (
int(top_left[0]),
int(top_left[1]),
int(bottom_right[0]),
int(bottom_right[1]),
)
def cp_quad (x, y, cps):
# a quadrilateral built from the south, south east, and east neighbors
# this is the format PIL expects for the target quads that initial rects
# get mapped to.
points = (
cps[y,x],
cps[y+1,x],
cps[y+1,x+1],
cps[y,x+1],
)
return tuple([int(i) for point in points for i in point])
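# Illustrative sketch (assumption about downstream use): PIL's Image.transform with Image.MESH
# takes a list of (source_rect, destination_quad) pairs, which is what cp_rect / cp_quad build:
#   mesh = [(cp_rect(x, y, src_cps), cp_quad(x, y, displaced_cps))
#           for x, y in prod(w_cnt, h_cnt, margin=1)]
#   warped = im.transform(im.size, Image.MESH, mesh, resample=Image.BILINEAR)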
def displace_point (x, y, vec, mesh):
# this is useful for showing how a point has shifted
mesh[y,x] += vec
return mesh[y,x]
def show_displacements ():
# TODO this is not working
# add displacement
d = ImageDraw.Draw(im)
xs = np.linspace(0, w, int(w / 2))
ys = np.linspace(0, h, int(h / 2))
for p in itr.product(xs, ys):
r_val = int(255 * (p[0] / 498))
g_val = int(255 * (p[1] / 498))
b_val = 255 - int(255 * (p[1] / 498))
d.point(p, fill=(r_val,g_val,b_val))
d.line([(100,300), (100,400), (200,400), (200,300), (100,300)], fill=(0,255,0))
def cartogram (img, z, clean=False):
c_start = time.perf_counter()
w, h, d =
np.asarray(img)
numpy.asarray
import torch
import numpy as np
import cv2
import os
from models.bua.layers.nms import nms
from models.bua.box_regression import BUABoxes
PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
TEST_SCALES = (600,)
TEST_MAX_SIZE = 1000
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def get_image_blob(im, pixel_means):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image
Returns:
        dataset_dict (dict): {"image": CHW float tensor of the resized image,
        "im_scale": resize scale applied relative to im}
"""
pixel_means = np.array([[pixel_means]])
dataset_dict = {}
im_orig = im.astype(np.float32, copy=True)
im_orig -= pixel_means
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
for target_size in TEST_SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > TEST_MAX_SIZE:
im_scale = float(TEST_MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
dataset_dict["image"] = torch.from_numpy(im).permute(2, 0, 1)
dataset_dict["im_scale"] = im_scale
return dataset_dict
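# Illustrative usage (hypothetical file name; the list holds the same BGR means as PIXEL_MEANS above):
#   im = cv2.imread('demo.jpg')  # BGR uint8, as produced by cv2
#   dataset_dict = get_image_blob(im, [102.9801, 115.9465, 122.7717])
#   dataset_dict['image'].shape  # (3, H', W') float tensor, resized by dataset_dict['im_scale']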
def save_roi_features(args, cfg, im_file, im, dataset_dict, boxes, scores, features_pooled, attr_scores=None):
MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH
dets = boxes[0].tensor.cpu() / dataset_dict['im_scale']
scores = scores[0].cpu()
feats = features_pooled[0].cpu()
max_conf = torch.zeros((scores.shape[0])).to(scores.device)
for cls_ind in range(1, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.3)
max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep],
cls_scores[keep],
max_conf[keep])
keep_boxes = torch.nonzero(max_conf >= CONF_THRESH).flatten()
if len(keep_boxes) < MIN_BOXES:
keep_boxes = torch.argsort(max_conf, descending=True)[:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = torch.argsort(max_conf, descending=True)[:MAX_BOXES]
image_feat = feats[keep_boxes]
image_bboxes = dets[keep_boxes]
image_objects_conf = np.max(scores[keep_boxes].numpy(), axis=1)
image_objects = np.argmax(scores[keep_boxes].numpy(), axis=1)
if attr_scores is not None:
attr_scores = attr_scores[0].cpu()
image_attrs_conf = np.max(attr_scores[keep_boxes].numpy(), axis=1)
image_attrs = np.argmax(attr_scores[keep_boxes].numpy(), axis=1)
info = {
'image_id': im_file.split('.')[0],
'image_h': np.size(im, 0),
'image_w': np.size(im, 1),
'num_boxes': len(keep_boxes),
'objects_id': image_objects,
'objects_conf': image_objects_conf,
'attrs_id': image_attrs,
'attrs_conf': image_attrs_conf,
}
else:
info = {
'image_id': im_file.split('.')[0],
'image_h': np.size(im, 0),
'image_w': np.size(im, 1),
'num_boxes': len(keep_boxes),
'objects_id': image_objects,
'objects_conf': image_objects_conf
}
output_file = os.path.join(args.output_dir, im_file.split('.')[0])
np.savez_compressed(output_file, feat=image_feat, bbox=image_bboxes, num_bbox=len(keep_boxes), image_h=np.size(im, 0), image_w=np.size(im, 1), info=info)
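# Hedged note on the output: the .npz written above bundles the pooled ROI features,
# their boxes (already rescaled to the original image via im_scale), the kept-box
# count, the image size and the info dict; because info is a Python dict, reading it
# back typically needs np.load(path + '.npz', allow_pickle=True)['info'].item().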
def save_bbox(args, cfg, im_file, im, dataset_dict, boxes, scores):
MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH
scores = scores[0].cpu()
boxes = boxes[0]
num_classes = scores.shape[1]
boxes = BUABoxes(boxes.reshape(-1, 4))
boxes.clip((dataset_dict['image'].shape[1]/dataset_dict['im_scale'], dataset_dict['image'].shape[2]/dataset_dict['im_scale']))
boxes = boxes.tensor.view(-1, num_classes*4).cpu() # R x C x 4
cls_boxes = torch.zeros((boxes.shape[0], 4))
for idx in range(boxes.shape[0]):
cls_idx = torch.argmax(scores[idx, 1:]) + 1
cls_boxes[idx, :] = boxes[idx, cls_idx * 4:(cls_idx + 1) * 4]
max_conf = torch.zeros((scores.shape[0])).to(scores.device)
for cls_ind in range(1, num_classes):
cls_scores = scores[:, cls_ind]
keep = nms(cls_boxes, cls_scores, 0.3)
max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep],
cls_scores[keep],
max_conf[keep])
keep_boxes = torch.argsort(max_conf, descending=True)[:MAX_BOXES]
image_bboxes = cls_boxes[keep_boxes]
output_file = os.path.join(args.output_dir, im_file.split('.')[0])
np.savez_compressed(output_file, bbox=image_bboxes, num_bbox=len(keep_boxes), image_h=np.size(im, 0), image_w=np.size(im, 1))
def save_roi_features_by_gt_bbox(args, cfg, im_file, im, dataset_dict, boxes, scores, features_pooled, attr_scores=None):
MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH
dets = boxes[0].tensor.cpu() / dataset_dict['im_scale']
scores = scores[0].cpu()
feats = features_pooled[0].cpu()
keep_boxes = [i for i in range(scores.shape[0])]
image_feat = feats[keep_boxes]
image_bboxes = dets[keep_boxes]
image_objects_conf = np.max(scores[keep_boxes].numpy(), axis=1)
image_objects = np.argmax(scores[keep_boxes].numpy(), axis=1)
if attr_scores is not None:
attr_scores = attr_scores[0].data.cpu()
image_attrs_conf = np.max(attr_scores[keep_boxes].numpy(), axis=1)
image_attrs = np.argmax(attr_scores[keep_boxes].numpy(), axis=1)
info = {
'image_id': im_file.split('.')[0],
'image_h': np.size(im, 0),
'image_w':
|
np.size(im, 1)
|
numpy.size
|
#!/usr/bin/env python
#
# changes
# 20190208 Cicone+ still use their CO(2-1)/CO(1-0)=1.0 assumption.
#
from __future__ import print_function
import os, sys, re, copy, json, time, datetime, shutil, astropy
import numpy as np
from astropy.table import Table, Column, MaskedColumn, hstack
import pandas as pd
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
sys.path.append(os.path.dirname(__file__))
from calc_alpha_CO import ( calc_alphaCO_from_metalZ_following_Wilson1995,
calc_alphaCO_from_metalZ_following_Genzel2015a,
calc_alphaCO_from_metalZ_following_Genzel2015b,
calc_alphaCO_from_metalZ_following_Bolatto2013,
calc_alphaCO_from_metalZ_following_Accurso2017,
calc_alphaCO_from_metalZ_following_Bertemes2018,
calc_alphaCO_from_metalZ_following_Tacconi2018,
)
from calc_delta_GD import ( calc_deltaGD_from_metalZ_following_Leroy2011,
calc_deltaGD_from_metalZ_following_Magdis2012,
calc_deltaGD_from_metalZ_following_RemyRuyer2014a,
calc_deltaGD_from_metalZ_following_RemyRuyer2014b,
)
from calc_fmol import ( calc_fmol_from_metalZ_following_Krumholz2009,
calc_fmol_from_metalZ_following_Dave2016,
calc_fmol_from_metalZ_following_Popping2014,
)
from calc_metal_Z import ( calc_metalZ_from_FMR_following_Genzel2015_Eq12a,
calc_metalZ_from_FMR_following_Mannucci2010_Eq4,
convert_metalZ_M08_to_metalZ_PP04_N2_polynomial,
convert_metalZ_KK04_to_metalZ_PP04,
calc_metalZ_from_FMR_with_dzliu_selection,
calc_metalZ_from_FMR_following_Kewley2008_PP04_O3N2,
)
#
# def
#
def calc_Sargent2014_sSFR(z, lgMstar=10.5, DeltaMS=0.0):
return 0.095 * 10**(-0.21*(lgMstar-np.log10(5e10))) * np.exp(2.05*z/(1.0+0.16*z**1.54)) * 10**(DeltaMS)
def calc_Speagle2014_sSFR(cosmoAge, lgMstar=10.5, DeltaMS=0.0):
return 10**((0.84 - 0.026*cosmoAge) * lgMstar - (6.51 - 0.11*cosmoAge)) / 10**(lgMstar) * 1e9 * 10**(DeltaMS)
def calc_Scoville2017_sSFR(z, lgMstar=10.5, DeltaMS=0.0):
lgMstar_ref = 10.5
SFR_MS_ref = 10**(0.59*lgMstar_ref-5.77)* np.power((1.0+z),(0.22*lgMstar_ref+0.59))
SFR_MS = SFR_MS_ref * 10**(1.72-np.log10(1+np.power(10**(lgMstar-10.31),-1.07))) / 10**(1.72-np.log10(1+np.power(10**(lgMstar_ref-10.31),-1.07)))
sSFR_MS = SFR_MS / 10**(lgMstar) * 1e9 # Gyr
return sSFR_MS
def calc_sSFR_MS(lgMstar, z, cosmoAge=None):
if cosmoAge is None:
cosmoAge = cosmo.age(z).value
sSFR_MS = calc_Speagle2014_sSFR(cosmoAge, lgMstar)
#
#sSFR_MS = calc_Sargent2014_sSFR(z, lgMstar)
#
return sSFR_MS
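# Hedged worked example (round numbers, not results from the paper): for
# lgMstar = 10.5 at z = 1 (cosmic age roughly 5.9 Gyr), calc_sSFR_MS() returns the
# Speagle+2014 main-sequence sSFR in Gyr^-1, and the main-sequence offset used
# further down is DeltaMS = np.log10(sSFR_observed / sSFR_MS).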
#
# mask_data
#
def mask_dataset(input_data, mask_CPA = True, mask_SED = True, mask_IMG = True, mask_known_zspec = False):
# the input_data should be a dict with 'ID', ''
#
# mask valid sources
#
mask_valid_sources = (input_data['z']>0)
if mask_SED:
if os.path.isfile('datatable_discarded_sources_by_SED.txt'):
list_SED = Table.read('datatable_discarded_sources_by_SED.txt', format='ascii.commented_header') # must use 'ascii.commented_header' otherwise got a bug
mask_SED = np.isin(input_data['ID'], list_SED.columns[0].data) # SED ALMA band chisq identified spurious sources
mask_valid_sources = np.logical_and( mask_valid_sources, ~mask_SED )
if mask_CPA:
if os.path.isfile('datatable_discarded_sources_by_CPA.txt'):
list_CPA = Table.read('datatable_discarded_sources_by_CPA.txt', format='ascii.commented_header') # must use 'ascii.commented_header' otherwise got a bug
mask_CPA = np.isin(input_data['ID'], list_CPA.columns[0].data) # counterpart association identified spurious sources
mask_valid_sources = np.logical_and( mask_valid_sources, ~mask_CPA )
if mask_IMG:
if os.path.isfile('datatable_discarded_sources_by_IMG.txt'):
list_IMG = Table.read('datatable_discarded_sources_by_IMG.txt', format='ascii.commented_header') # must use 'ascii.commented_header' otherwise got a bug
mask_IMG = np.isin(input_data['ID'], list_IMG.columns[0].data) # bad image
mask_valid_sources = np.logical_and( mask_valid_sources, ~mask_IMG )
if mask_known_zspec:
if os.path.isfile('datatable_known_zspec.txt'):
list_known_zspec = Table.read('datatable_known_zspec.txt', format='ascii.commented_header') # must use 'ascii.commented_header' otherwise got a bug
mask_known_zspec = np.isin(input_data['ID'], list_known_zspec.columns[0]) # known_zspec
mask_valid_sources = np.logical_and( mask_valid_sources, mask_known_zspec )
print('selecting %d data after masking' % (np.sum(mask_valid_sources)))
output_data = copy.copy(input_data)
for keyname in output_data:
if not np.isscalar(input_data[keyname]):
output_data[keyname] = np.array(input_data[keyname])[mask_valid_sources]
return output_data
#
# def
#
def calc_metal_Z_high_z_method(M_star, SFR, z):
#return calc_metalZ_from_FMR_with_dzliu_selection(M_star, SFR, z)
return calc_metalZ_from_FMR_following_Genzel2015_Eq12a(M_star, z)
def calc_metal_Z_local_galaxy_method(M_star, SFR, z):
#return calc_metalZ_from_FMR_with_dzliu_selection(M_star, SFR, z)
return convert_metalZ_M08_to_metalZ_PP04_N2_polynomial(calc_metalZ_from_FMR_following_Mannucci2010_Eq4(M_star, SFR))
#return calc_metalZ_from_FMR_following_Kewley2008_PP04_O3N2(M_star)
#
# read data
#
def read_datasets():
#
# read tables
#
datasets = []
#which_gas_mass_calibration = 'dzliu 850'
#which_gas_mass_calibration = 'H17' # Hughes+2017
if True:
ds = {}
ds['label'] = 'This work (A3COSMOS)'
ds['color'] = 'gold'
ds['facecolor'] = 'gold'
ds['edgecolor'] = 'k'
ds['edgelinewidth'] = 0.5
ds['alpha'] = 1.0
ds['marker'] = 'o'
ds['markersize'] = 15
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20180720/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits'
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits'
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas_with_RemyRuyer2014_GDR_with_Genzel2015_Eq12a_MZR/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits'
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits' # with_RemyRuyer2014_GDR_with_Genzel2015_Eq12a_MZR
#print('Reading \'/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits\', updated on 2019-03-01 with RemyRuyer2014 GDR, KMT09 fmol, Genzel2015Eq12a MZR(z). ')
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits' # updated on 2019-03-01 with RemyRuyer2014 GDR, KMT09 fmol, Genzel2015Eq12a MZR(z).
#ds['datatable'] = '/Users/dzliu/Work/AlmaCosmos/Samples/20181203/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas_with_specz.fits' # 20190307 selected spec-z subsample
tbfile = os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables'+os.sep+'datatables_z_deltaGas'+os.sep+'datatable_a3cosmos'+os.sep+'dataset_v20180801'+os.sep+'datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.fits'
tbinfo = open(os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables'+os.sep+'datatables_z_deltaGas'+os.sep+'datatable_a3cosmos'+os.sep+'dataset_v20180801'+os.sep+'datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas.info.txt').readline().rstrip()
print('Reading \'%s\' %s'%(tbfile, tbinfo))
ds['datatable'] = tbfile
tb = Table.read(ds['datatable'])
mask = (tb['ID']!=850535) #<20190715># SED fitting Qz=99 not get prioritized bug
tb = tb[mask]
ds['ID'] = tb['ID']
ds['z'] = tb['z']
ds['SFR'] = tb['SFR']
ds['Mstar'] = tb['Mstar']
#ds['Mmol'] = tb['M_mol_gas_Method4_500_this_work'] # involves f_mol
#ds['Mmol'] = tb['M_mol_gas_Method4_850_this_work'] # involves f_mol
#ds['Mmol'] = tb['M_mol_gas_Method4_850_this_work'] #<20190122_with_Leroy_GDR_with_Genzel_Eq12a_MZR_with_KMT09_fmol>#
#ds['Mmol'] = tb['M_mol_gas_Method4_850_this_work'] #<20190301_with_RemyRuyer_GDR_with_Genzel_Eq12a_with_dzliu_limit_MZR_with_KMT09_fmol>#
#ds['Mmol'] = tb['M_mol_gas_Method2_850_Scoville2017']
ds['Mmol'] = tb['M_mol_gas_Method2_850_Hughes2017'] # FINAL GOOD TO USE
#ds['Mmol'] = tb['M_total_gas_Method1_GDR']
#ds['Mmol'] = tb['M_mol_gas_Method3_500_Groves2015']
#ds['Mmol'] = tb['deltaGas4'][mask] * tb['Mstar']
ds['sSFR'] = ds['SFR'] / ds['Mstar'] * 1e9
ds['sSFR_MS'] = calc_Speagle2014_sSFR(cosmo.age(ds['z']).value, np.log10(ds['Mstar']))
ds['DeltaMS'] = np.log10(ds['sSFR'] / ds['sSFR_MS'])
ds['deltaGas'] = ds['Mmol'] / ds['Mstar']
ds['tauDepl'] = ds['Mmol'] / ds['SFR'] / 1e9
ds['deltaGasErr'] = np.sqrt( ((1.0/tb['SNRObs']))**2 + (0.2)**2 ) * ds['deltaGas'] # dust continuum SNRObs, gas conversion 0 dex scatter (Hughes2017), stellar mass 0.2 dex uncertainty
ds['tauDeplErr'] = np.sqrt( ((1.0/tb['SNRObs']))**2 + (0.1)**2 ) * ds['tauDepl'] # dust continuum SNRObs, gas conversion 0 dex scatter (Hughes2017), SFR 0.1 dex uncertainty
#if False:
# mask = (tb['SNRObs']>=2.5)
# for tcolname in ds:
# ds[tcolname] = ds[tcolname][mask]
# ds = mask_dataset(ds) # for A3COSMOS dataset, we need to mask more sources
datasets.append(ds)
print('Read "%s" (%d data)'%(ds['datatable'], len(ds['deltaGas'])))
if True:
ds = {}
ds['label'] = 'Tacconi+2018' # only extracted PHBISS1 & 2
ds['color'] = 'seagreen'
ds['facecolor'] = 'seagreen'
ds['edgecolor'] = 'none'
ds['alpha'] = 0.7
ds['marker'] = 'D'
ds['markersize'] = 15
ds['datatable'] = os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables'+os.sep+'datatables_z_deltaGas'+os.sep+'datatable_Tacconi2018_PHIBSS2/datatable_ID_RA_Dec_z_Mstar_SFR_sSFR_with_deltaGas_with_Survey_Number_GE_1.txt'
tb = Table.read(ds['datatable'], format='ascii.commented_header')
mask = (tb['Mstar']>0)
tb = tb[mask]
ds['ID'] = tb['ID']
ds['z'] = tb['z']
ds['SFR'] = tb['SFR']
ds['Mstar'] = tb['Mstar'] #
#ds['Mmol'] = tb['deltaGas'] * tb['Mstar'] # they used metallicity-dependent alphaCO. For metallicity they used Genzel2015_Eq12a.
ds['MetalZ'] = calc_metal_Z_high_z_method(ds['Mstar'], ds['SFR'], ds['z'])
ds['Mmol'] = (tb['deltaGas'] * tb['Mstar']) / calc_alphaCO_from_metalZ_following_Tacconi2018(calc_metalZ_from_FMR_following_Genzel2015_Eq12a(ds['Mstar'], ds['z'])) * calc_alphaCO_from_metalZ_following_Tacconi2018(ds['MetalZ'])
# using calc_alphaCO_from_metalZ_following_Genzel2015b() will have much higher alphaCO than using calc_alphaCO_from_metalZ_following_Genzel2015b().
# comparing to their computed gas mass, my gas mass is slightly higher
ds['sSFR'] = ds['SFR'] / ds['Mstar'] * 1e9
ds['sSFR_MS'] = calc_Speagle2014_sSFR(cosmo.age(ds['z']).value, np.log10(ds['Mstar']))
ds['DeltaMS'] = np.log10(ds['sSFR'] / ds['sSFR_MS'])
ds['deltaGas'] = ds['Mmol'] / ds['Mstar']
ds['tauDepl'] = ds['Mmol'] / ds['SFR'] / 1e9
ds['deltaGasErr'] = np.sqrt( (0.3)**2 + (0.2)**2 ) * ds['deltaGas'] # CO 0.3 dex uncertainty, gas conversion 0 dex uncertainty (alphaCO), stellar mass 0.2 dex uncertainty
ds['tauDeplErr'] = np.sqrt( (0.3)**2 + (0.1)**2 ) * ds['tauDepl'] # CO 0.3 dex uncertainty, gas conversion 0 dex uncertainty (alphaCO), SFR 0.1 dex uncertainty
datasets.append(ds)
print('Read "%s" (%d data)'%(ds['datatable'], len(ds['deltaGas'])))
if True:
ds = {}
ds['label'] = 'Saintonge+2017'
ds['color'] = 'blue'
ds['facecolor'] = 'blue'
ds['edgecolor'] = 'none'
ds['alpha'] = 0.7
ds['marker'] = '+'
ds['markersize'] = 15
ds['datatable'] = os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables'+os.sep+'datatables_z_deltaGas'+os.sep+'datatable_Saintonge2017_xCOLDGASS/xCOLDGASS_PubCat.fits'
tb = Table.read(ds['datatable'], format='fits')
#mask = np.logical_and(np.logical_and(np.logical_and(np.logical_and(tb['SN_CO']>=3, tb['LOGMSTAR']>0), tb['LCO_COR']>0), tb['LOGSFR_BEST']>-99), tb['Z_PP04_O3N2']>0)
mask = np.logical_and(np.logical_and(np.logical_and(tb['SN_CO']>=3, tb['LOGMSTAR']>0), tb['LCO_COR']>0), tb['LOGSFR_BEST']>-99)
tb = tb[mask]
ds['ID'] = tb['ID']
ds['z'] = tb['Z_SDSS']
ds['SFR'] = np.power(10.0, tb['LOGSFR_BEST'])
ds['Mstar'] = np.power(10.0, tb['LOGMSTAR'])
#ds['Mmol'] = tb['LCO_COR'] * tb['XCO_A17']
ds['Mmol_Saintonge2017'] = tb['LCO_COR'] * tb['XCO_A17']
ds['alphaCO_Saintonge2017'] = tb['XCO_A17']
#ds['LPrmCO10'] = tb['LCO_COR']
ds['LPrmCO10'] = 23.5 * tb['ICO_COR'] * np.pi/(4*np.log(2)) * (22.0)**2 * (tb['LUMDIST'])**2 * np.power((1.+tb['Z_SDSS']),-3)
ds['LPrmCO10_err'] = 23.5 * tb['ICO_COR_ERR'] * np.pi/(4*np.log(2)) * (22.0)**2 * (tb['LUMDIST'])**2 * np.power((1.+tb['Z_SDSS']),-3)
mask2 = (tb['Z_PP04_O3N2']>0) # we use their metalZ if valid, otherwise compute from MZR or FMR with calc_metal_Z_local_galaxy_method()
ds['MetalZ'] = calc_metal_Z_local_galaxy_method(ds['Mstar'], ds['SFR'], ds['z'])
ds['MetalZ'][mask2] = tb['Z_PP04_O3N2'][mask2] # their metalZ are derived from optical nebular lines (SDSS) with PP04 O3N2 calibration
ds['MetalZ_Mannucci2010_Eq4_Method'] = calc_metalZ_from_FMR_following_Mannucci2010_Eq4(ds['Mstar'], ds['SFR'])
ds['MetalZ_Kewley2008_Method'] = calc_metalZ_from_FMR_following_Kewley2008_PP04_O3N2(ds['Mstar'])
ds['alphaCO'] = calc_alphaCO_from_metalZ_following_Tacconi2018(ds['MetalZ'])
ds['Mmol'] = (ds['LPrmCO10']) * ds['alphaCO']
ds['sSFR'] = ds['SFR'] / ds['Mstar'] * 1e9
ds['sSFR_MS'] = calc_Speagle2014_sSFR(cosmo.age(ds['z']).value, np.log10(ds['Mstar']))
ds['DeltaMS'] = np.log10(ds['sSFR'] / ds['sSFR_MS'])
ds['deltaGas'] = ds['Mmol'] / ds['Mstar']
ds['tauDepl'] = ds['Mmol'] / ds['SFR'] / 1e9
ds['deltaGasErr'] = np.sqrt( (tb['ICO_COR_ERR']/tb['ICO_COR'])**2 + (0.2)**2 ) * ds['deltaGas'] # CO obs uncertainty, gas conversion 0 dex scatter (alphaCO), stellar mass 0.2 dex uncertainty
ds['tauDeplErr'] = np.sqrt( (tb['ICO_COR_ERR']/tb['ICO_COR'])**2 + (tb['LOGSFR_ERR'])**2 ) * ds['tauDepl'] # CO obs uncertainty, gas conversion 0 dex scatter (alphaCO), SFR obs uncertainty
datasets.append(ds)
print('Read "%s" (%d data)'%(ds['datatable'], len(ds['deltaGas'])))
#
#dsout = {}
#for key in ['z','SFR','Mstar','Mmol','alphaCO','Mmol_Saintonge2017','alphaCO_Saintonge2017','MetalZ','MetalZ_Kewley2008_Method']:
# dsout[key] = copy.copy(ds[key])
#for key in ['<KEY>','Z_PP04_O3N2','Z_MZR']:
# dsout[key] = copy.copy(tb[key][mask].data)
#tbout = Table(dsout)
#tbout.write('check_Saintonge2017_sample.fits', format='fits', overwrite=True)
if True:
ds = {}
ds['label'] = 'Saintonge+2017 uplims'
ds['color'] = 'blue'
ds['facecolor'] = 'blue'
ds['edgecolor'] = 'none'
ds['alpha'] = 0.7
ds['marker'] = 'uplims'
ds['markersize'] = 15
ds['datatable'] = os.path.dirname(os.path.dirname(__file__))+os.sep+'Data_Tables'+os.sep+'datatables_z_deltaGas'+os.sep+'datatable_Saintonge2017_xCOLDGASS/xCOLDGASS_PubCat.fits'
tb = Table.read(ds['datatable'], format='fits')
#mask = np.logical_and(np.logical_and(np.logical_and(np.logical_and(tb['SN_CO']<3, tb['LOGMSTAR']>0), tb['RMS_CO']>0), tb['LOGSFR_BEST']>-99), tb['Z_PP04_O3N2']>0)
#mask = np.logical_and(np.logical_and(np.logical_and(tb['SN_CO']>=3, tb['LOGMSTAR']>0), tb['LCO_COR']>0), tb['LOGSFR_BEST']>-99)
mask = np.logical_and(np.logical_and(np.logical_and(tb['SN_CO']<3, tb['LOGMSTAR']>0), tb['LCO_COR']>0), tb['LOGSFR_BEST']>-99)
tb = tb[mask]
ds['ID'] = tb['ID']
ds['z'] = tb['Z_SDSS']
ds['SFR'] =
|
np.power(10.0, tb['LOGSFR_BEST'])
|
numpy.power
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import glob
import json
import os
# external packages
from PyQt5.QtWidgets import QMessageBox, QLineEdit, QInputDialog
import numpy as np
import matplotlib.pyplot
# local import
from logic.modeldata.modelHandling import writeRetrofitData
class ManageModel(object):
"""
"""
def __init__(self):
self.runningOptimize = False
self.fittedModelPoints = []
self.fittedModelPath = ''
self.plane = None
ms = self.app.mount.signals
ms.alignDone.connect(self.showModelPosition)
ms.alignDone.connect(self.showErrorAscending)
ms.alignDone.connect(self.showErrorDistribution)
ms.namesDone.connect(self.setNameList)
self.ui.showErrorValues.clicked.connect(self.showModelPosition)
self.ui.showNumbers.clicked.connect(self.showModelPosition)
self.ui.showNoAnnotation.clicked.connect(self.showModelPosition)
self.ui.refreshName.clicked.connect(self.refreshName)
self.ui.refreshModel.clicked.connect(self.refreshModel)
self.ui.clearModel.clicked.connect(self.clearModel)
self.ui.loadName.clicked.connect(self.loadName)
self.ui.saveName.clicked.connect(self.saveName)
self.ui.deleteName.clicked.connect(self.deleteName)
self.ui.runOptimize.clicked.connect(self.runOptimize)
self.ui.cancelOptimize.clicked.connect(self.cancelOptimize)
self.ui.deleteWorstPoint.clicked.connect(self.deleteWorstPoint)
self.ui.showActualModelAnalyse.clicked.connect(self.showActualModelAnalyse)
self.ui.showOriginalModelAnalyse.clicked.connect(self.showOriginalModelAnalyse)
self.ui.targetRMS.valueChanged.connect(self.showModelPosition)
self.ui.targetRMS.valueChanged.connect(self.showErrorAscending)
self.ui.targetRMS.valueChanged.connect(self.showErrorDistribution)
def initConfig(self):
"""
:return: True for test purpose
"""
config = self.app.config['mainW']
self.ui.showErrorValues.setChecked(config.get('showErrorValues', False))
self.ui.showNumbers.setChecked(config.get('showNumbers', False))
self.ui.showNoAnnotation.setChecked(config.get('showNoAnnotation', True))
self.ui.targetRMS.setValue(config.get('targetRMS', 10))
self.ui.optimizeOverall.setChecked(config.get('optimizeOverall', True))
self.ui.optimizeSingle.setChecked(config.get('optimizeSingle', True))
self.ui.autoUpdateActualAnalyse.setChecked(config.get('autoUpdateActualAnalyse', False))
self.showModelPosition()
self.showErrorAscending()
self.showErrorDistribution()
return True
def storeConfig(self):
"""
:return: True for test purpose
"""
config = self.app.config['mainW']
config['showErrorValues'] = self.ui.showErrorValues.isChecked()
config['showNumbers'] = self.ui.showNumbers.isChecked()
config['showNoAnnotation'] = self.ui.showNoAnnotation.isChecked()
config['targetRMS'] = self.ui.targetRMS.value()
config['optimizeOverall'] = self.ui.optimizeOverall.isChecked()
config['optimizeSingle'] = self.ui.optimizeSingle.isChecked()
config['autoUpdateActualAnalyse'] = self.ui.autoUpdateActualAnalyse.isChecked()
return True
def setNameList(self, model):
"""
setNameList populates the list of model names in the main window. Before
adding the data, the existing list is cleared.
:return: True if ok for testing
"""
self.ui.nameList.clear()
for name in model.nameList:
self.ui.nameList.addItem(name)
self.ui.nameList.sortItems()
self.ui.nameList.update()
return True
@staticmethod
def findKeysFromSourceInDest(buildModel, mountModel):
"""
:param buildModel:
:param mountModel:
:return: success
"""
pointsIn = []
pointsOut = []
for buildPoint in buildModel:
for mountPoint in mountModel:
dHA = mountModel[mountPoint]['ha'] - buildModel[buildPoint]['ha']
dHA = dHA / mountModel[mountPoint]['ha']
dDEC = mountModel[mountPoint]['dec'] - buildModel[buildPoint]['dec']
dDEC = dDEC / mountModel[mountPoint]['dec']
fitHA = abs(dHA) < 1e-4
fitDEC = abs(dDEC) < 1e-4
if fitHA and fitDEC:
pointsIn.append(buildPoint)
break
else:
pointsOut.append(buildPoint)
return pointsIn, pointsOut
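# Hedged toy example (made-up coordinates): two points count as the same star when
# ha and dec agree to within 1e-4 relative error, e.g.
#   build = {1: {'ha': 2.50000, 'dec': 45.0000}}
#   mount = {7: {'ha': 2.50001, 'dec': 45.0001}}
#   self.findKeysFromSourceInDest(build, mount)   # -> ([1], [])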
def compareModel(self, buildModelData, mountModel):
"""
:param buildModelData:
:param mountModel:
:return:
"""
buildModel = {}
for star in buildModelData:
index = star.get('errorIndex', 0)
mount = {'ha': star.get('haMountModel', 0),
'dec': star.get('decMountModel', 0)}
buildModel[index] = mount
pointsIn, pointsOut = self.findKeysFromSourceInDest(buildModel, mountModel)
return pointsIn, pointsOut
def findFittingModel(self):
"""
findFittingModel takes the model currently loaded in the mount and tries
to find the matching model run data. To do so it compares up to 5 points
between the models. All optimized model files (containing 'opt' in the
filename) are ignored.
:return: success
"""
mountModel = {}
for star in self.app.mount.model.starList:
mountModel[star.number] = {'ha': star.coord.ra.hours,
'dec': star.coord.dec.degrees}
modelFileList = glob.glob(self.app.mwGlob['modelDir'] + '/*.model')
for modelFilePath in modelFileList:
if 'opt' in modelFilePath:
continue
with open(modelFilePath, 'r') as inFile:
try:
buildModelData = json.load(inFile)
except Exception as e:
self.log.warning(f'Cannot load model file: {[inFile]}, error: {e}')
continue
pointsIn, pointsOut = self.compareModel(buildModelData, mountModel)
if len(pointsIn) > 2:
self.fittedModelPoints = pointsIn
self.fittedModelPath = modelFilePath
break
else:
self.fittedModelPoints = []
self.fittedModelPath = ''
pointsIn = []
pointsOut = []
name = os.path.splitext(os.path.basename(self.fittedModelPath))[0]
return name, pointsIn, pointsOut
def showModelPosition(self):
"""
showModelPosition draws a polar plot of the align model stars and their
errors in color. The basic setup of the plot takes place in the central
widget class, from which this widget is instantiated. Important: the
coordinates in the model are HA and DEC, not RA and DEC; skyfield is a
little misleading here, because the hour angle is addressed as
.ra.hours
:return: True if ok for testing
"""
model = self.app.mount.model
if model is None:
hasNoStars = True
else:
hasNoStars = model.starList is None or not model.starList
axe, fig = self.generatePolar(widget=self.modelPositionPlot)
axe.set_yticks(range(0, 90, 10))
axe.set_ylim(0, 90)
yLabel = ['', '', '', '', '', '', '', '', '']
axe.set_yticklabels(yLabel)
if hasNoStars:
axe.figure.canvas.draw()
return False
altitude = np.asarray([x.alt.degrees for x in model.starList])
azimuth = np.asarray([x.az.degrees for x in model.starList])
error = np.asarray([x.errorRMS for x in model.starList])
self.plane = [(alt, az) for alt, az in zip(altitude, azimuth)]
# and plot it
cm = matplotlib.pyplot.cm.get_cmap('RdYlGn_r')
colors =
|
np.asarray(error)
|
numpy.asarray
|
"""
Use Approximate Bayesian Computation (ABC) to parametrize the rate function
given a hypothetical experiment timeline.
"""
import csv
from timeit import default_timer as timer
import click
import numpy as np
from scipy.interpolate import PchipInterpolator as pchip
import toml
import simtools
class Rate:
def __init__(self, s, c, w, u, m):
"""
:param s float: shape parameter in R
:param c float: center parameter in (0, 1)
:param w float: width between function ends in R
:param u float: mode of function in R
:param m float: function maximum in R > 0
"""
self.u = u
self.w = w
self.m = m
self.c = c
self.a = s*c
self.b = s - self.a
self.factor = self.a**self.a * self.b**self.b * (self.a + self.b)**(-self.a - self.b)
def __call__(self, x):
y = (x/self.w - self.u/self.w + self.c)**self.a * (1 - (x/self.w - self.u/self.w + self.c))**self.b
y = self.m * y / self.factor
y[x <= self.u - self.c*self.w] = 0
y[x >= self.u - (self.c - 1)*self.w] = 0
return y
class Noise:
def __init__(self, s):
"""
:param s float: standard deviation of normal distribution
"""
self.s = s
def __call__(self, x):
return 1/np.sqrt(2*np.pi*self.s**2) * \
np.exp(-x**2/(2*self.s**2))
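# Hedged sketch (made-up parameter values): both classes above are plain callables
# on numpy arrays, e.g.
#   rate = Rate(s=4.0, c=0.5, w=2.0, u=0.0, m=1.0)  # peaks at x = u with value m,
#                                                   # zero outside (u - c*w, u + (1 - c)*w)
#   rate(np.array([-2.0, 0.0, 0.5]))                # -> array of per-particle birth rates
#   noise = Noise(s=0.1)                            # normal pdf, evaluated pointwise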
class SBP:
# Synchronises a death with every birth, thus keeping the
# number of particles constant
def __init__(self, particles, rate_function, noise_function):
# list of floating point numbers specifying
# internal parameter x for each extant particle
self.particles = np.array(particles)
# function that takes self.particles and returns
# the growth rate of each particle
self.rate_function = rate_function
# function that returns a value that is added
# to x for a newly born particle
self.noise_function = noise_function
# Initial setup
self.t = 0
self.rates = rate_function(self.particles)
self.birth_rate = np.sum(self.rates)
def simulate(self, increment_time):
end_time = self.t + increment_time
while self.t < end_time:
# because of synchronization birth rate is the only important rate
total_rate = self.birth_rate
print(total_rate/self.particles.size)
# increment time dependent on total rate
self.t += np.random.exponential(1/total_rate)
# replicate random particle
# normalize rates so that we can use it as probabilities to
# select the dividing particle
selection_probabilities = self.rates / self.birth_rate
select_particle = np.random.choice(self.particles.size, 1, p=selection_probabilities)
new_particle = self.particles[select_particle] + self.noise_function()
new_rate = self.rate_function(np.array(new_particle))[0]
self.particles = np.append(self.particles, new_particle)
self.rates = np.append(self.rates, new_rate)
self.birth_rate += new_rate
# kill random particle
select_particle = np.random.choice(self.particles.size)
self.particles = np.delete(self.particles, select_particle)
self.birth_rate -= self.rates[select_particle]
self.rates =
|
np.delete(self.rates, select_particle)
|
numpy.delete
|
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg') # for saving figures
import matplotlib.pyplot as plt
benign_traffic = pd.read_csv('benign_traffic.csv.zip')
gafgyt_traffic = pd.read_csv('gafgyt_traffic.csv.zip', nrows=2000)
print("Benign traffic:")
print(benign_traffic.head())
print("Gafgyt traffic:")
print(gafgyt_traffic.head())
# Compute distances from every benign data point to every other
from sklearn.metrics.pairwise import cosine_distances
benign_dists = cosine_distances(benign_traffic)
print("Benign self-distances:")
print(benign_dists)
print(np.shape(benign_dists))
print("Min, max:", np.min(benign_dists), np.max(benign_dists))
print("Mean:", np.mean(benign_dists))
print("Median:", np.median(benign_dists))
# Mix in gafgyt attack records, then plot again
# (we'll append them at the end so we can keep track
# of which rows are gafgyt and which are benign)
mixed_traffic = np.vstack((benign_traffic, gafgyt_traffic))
mixed_dists = cosine_distances(mixed_traffic)
print("Mixed self-distances:")
print(mixed_dists)
print(np.shape(mixed_dists))
print("Min, max:", np.min(mixed_dists), np.max(mixed_dists))
print("Mean:", np.mean(mixed_dists))
print("Median:", np.median(mixed_dists))
from sklearn.decomposition import PCA
clf = PCA(n_components=2)
pos = clf.fit_transform(mixed_dists)
print(pos)
print(np.shape(pos))
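# Hedged note (shapes only): pos holds one 2-D embedding row per traffic record;
# because the 2000 gafgyt rows were stacked last, the slices below rely on
# pos[:-gafgyt_cnt] being benign and pos[-gafgyt_cnt:] being attack traffic.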
gafgyt_cnt = len(gafgyt_traffic)
plt.scatter(pos[:-gafgyt_cnt, 0], pos[:-gafgyt_cnt, 1], s=1, color='silver')
plt.scatter(pos[-gafgyt_cnt:, 0], pos[-gafgyt_cnt:, 1], s=20, color='blue')
plt.axis('off')
plt.savefig('mixed-similarity.png', dpi=300, bbox_inches='tight', pad_inches=0)
plt.close()
# Find average cosine distance between benign and gafgyt attack records
benign_gafgyt_dists = cosine_distances(benign_traffic, gafgyt_traffic)
print("Benign vs. Gafgyt distances:")
print(np.shape(benign_gafgyt_dists))
print(benign_gafgyt_dists)
print("Min, max:", np.min(benign_gafgyt_dists), np.max(benign_gafgyt_dists))
print("Mean:", np.mean(benign_gafgyt_dists))
print("Median:", np.median(benign_gafgyt_dists))
# Find average cosine distance between gafgyt attack records and themselves
gafgyt_dists = cosine_distances(gafgyt_traffic)
print("Gafgyt self-distances:")
print(
|
np.shape(gafgyt_dists)
|
numpy.shape
|
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: <NAME>
# Created: 8th October 2015
# Last update: 24th July 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/ # this will probably change
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# Reads in monthly mean anomaly regional average time series for q, T and RH from HadISDH
# Can plot monthly or annual data
# Can plot one region or all four
# For a one region plot it can be annual, monthly or seasonal (DJF, MAM, JJA, SON)
# Plots a T q scatter with each year as the point (or MONYY for monthly)
# Colours the points by simultaneous RH value
# Plots RH colour bar to the right
# Adds Tq and TRH correlation to plot
# Adds Tq and TRH slope to plot
#
# NO MISSING DATA IN TIME SERIES!!!!
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt:
# import matplotlib.pyplot as plt
# import numpy as np
# import numpy.ma as ma
# import sys, os
# import scipy.stats as ss # for pearsonr
# import struct
# import datetime as dt
# from matplotlib.dates import date2num,num2date
# from scipy.io import netcdf
# import matplotlib.colors as mc
# import matplotlib.cm as mpl_cm
# import pdb
#
# Other:
# ReadNetCDFTS - infile function to read in netCDF timeseries, written by <NAME>
# PlotScatter - infile function to plot, written by <NAME>
#
# -----------------------
# DATA
# -----------------------
# directory for regional timeseries:
# /data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/
# files currently worked on:
# Specific humidity:
# HadISDH.landq.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Relative humidity:
# HadISDH.landRH.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
# Temperature:
# HadISDH.landT.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Select 'TimeRes' to be 'M' or 'Y' for month or year
# Ensure correct file paths and files
# Ensure start year (styr) and end year (edyr) are correct
# Select 'Region' to be 'A', 'G','N','T' or 'S' for All, Globe, NHemi, Tropics, SHemi
#
# run:
# python2.7 PlotTqRhScatter_OCT2015.py
# python3
# > module load scitools/default-current
# > python PlotTqRHScatter_PCT2015.py
#
# -----------------------
# OUTPUT
# -----------------------
# directory for output images:
# /data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/ANALYSIS/
# Output image file: (nowmon+nowyear= e.g., OCT2015):
# ScatterTqRH_HadISDH.landq.2.0.1.2014p_'+nowmon+nowyear+
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# python 3
# netCDF4
# masked arrays to deal with missing data
# Can now do seasonal for individual regions
#
# Changes
#
# Bug fixes
#
# Version 3 16 April 2018
# ---------
#
# Enhancements
# Updated editable info so fewer edits are required to run for the most recent version/year
#
# Changes
#
# Bug fixes
#
# Version 2 9 August 2016
# ---------
#
# Enhancements
# Can also plot T vs RH coloured by q anomaly
#
# Changes
#
# Bug fixes
#
# Version 1 8 October 2015
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import sys, os
import scipy.stats as ss # for pearsonr
import struct
import datetime as dt
from matplotlib.dates import date2num,num2date
#from scipy.io import netcdf
import netCDF4 as nc4
import matplotlib.colors as mc
import matplotlib.cm as mpl_cm
import pdb #stop: pdb.set_trace(), start: c
import numpy.ma as ma
# Set up initial run choices
TimeRes='Y' # M=month, Y=year
Region='S' # A=All, G=Globe, N=NHemi, T=Tropics, S=SHemi
Seasons=True # If Region is G, N, T, or S and Seasons == True then plot seasonally (or False for not) M and Y still works
homogtype='IDPHA' # 'IDPHA','PHA','PHADPD'
thenmon='JAN'
thenyear='2020'
version='4.2.0.2019f'
styr=1973
edyr=2019
nyrs=(edyr-styr)+1
nmons=(nyrs)*12
if (TimeRes == 'Y'):
ntims=nyrs
else:
ntims=nmons
YrStr=np.array(range(styr,edyr+1),dtype=str)
YrStr=np.array(([i[2:5] for i in YrStr])) # now a string array of the last two digits
# Set up directories and files
INDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/STATISTICS/TIMESERIES/'
OUTDIR='/data/users/hadkw/WORKING_HADISDH/UPDATE'+str(edyr)+'/IMAGES/ANALYSIS/'
In_q='HadISDH.landq.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_RH='HadISDH.landRH.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
In_T='HadISDH.landT.'+version+'_FLATgridIDPHA5by5_anoms8110_'+thenmon+thenyear+'_areaTS_1973'+str(edyr)+'.nc'
OutPlotTq='ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region
OutPlotTRH='ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region
if (Seasons):
OutPlotTq = 'ScatterTqbyRH_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
OutPlotTRH = 'ScatterTRHbyq_HadISDH.'+version+'_'+TimeRes+'_'+Region+'_SEASONS'
# Set up variables
q_arr=0 #set once file read in
T_arr=0 #set once file read in
RH_arr=0 #set once file read in
#************************************************************************
# Subroutines
#************************************************************************
# READNETCDFTS
def ReadNetCDFTS(FileName,ReadInfo,TheData):
''' Open the NetCDF File
Get the data
FileName: string containing filepath/name
TheData: an empty 2D array big enough for 1 or 4 regions worth of data
ReadInfo: list of 1 or 4 strings of variable name/s for the globe, N Hemi, Tropics and S.Hemi '''
ncf=nc4.Dataset(FileName,'r')
# ncf.variables this lists the variable names
for loo in range(len(ReadInfo)):
print(loo)
var=ncf.variables[ReadInfo[loo]]
#pdb.set_trace()
TheData[loo,:]=np.copy(var[:])
# # Maybe I've done something wrong but its reading it transposed
# TheData=np.transpose(TheData)
ncf.close()
return TheData # ReadNetCDFTS
#************************************************************************
# MakeUpSteps
def MakeUpSteps(TheArray,stepsies=9):
''' Given a max and min, make up NICE step sizes for a 9 element colourbar '''
''' Currently works with a minimum range of 0.2 and a maximum of 3.0 '''
''' Can only deal with symmetric ranges '''
''' READS: TheArray - an array of data '''
''' stepsies (OPTIONAL) - number of colours in colourbar - default 9 is NICE '''
''' RETURNS: vmin - minimum threshold of range '''
''' vmax - maximum threshold of range '''
''' bounds - stepsies linear increments through the range from vmin to vmax '''
''' strbounds - strings of the bounds for labelling the colourbar '''
vmax=np.int(np.ceil(np.max(abs(TheArray))*10))/10.
vmin=-vmax
nsteps = stepsies
if (vmax <= 0.2):
vmax = 0.2
vmin = -0.2
elif (vmax <= 0.3):
vmax = 0.32
vmin = -0.32
elif (vmax <= 0.4):
vmax = 0.4
vmin = -0.4
elif (vmax <= 0.6):
vmax = 0.6
vmin = -0.6
elif (vmax <= 0.8):
vmax = 0.8
vmin = -0.8
elif (vmax <= 1.0):
vmax = 1.0
vmin = -1.0
elif (vmax <= 1.2):
vmax = 1.2
vmin = -1.2
elif (vmax <= 1.6):
vmax = 1.6
vmin = -1.6
elif (vmax <= 2.0):
vmax = 2.0
vmin = -2.0
elif (vmax <= 3.0):
vmax = 3.0
vmin = -3.0
# pdb.set_trace() # stop here and play
bounds=np.linspace(vmin,vmax,nsteps)
strbounds=["%4.1f" % i for i in bounds]
return vmin,vmax,strbounds,bounds
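# Hedged worked example (illustrative numbers only): for an anomaly array whose
# largest magnitude is 0.93, the ceiling above gives 1.0, the threshold chain snaps
# the range to -1.0..1.0, and bounds holds nine evenly spaced values from -1.0 to
# 1.0 that feed the BoundaryNorm and colourbar ticks in PlotScatter below.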
#************************************************************************
# PlotScatter
def PlotScatter(TheFileTq,TheFileTRH,TheYrStr,Thentims,Theq_arr,TheRH_arr,TheT_arr,TheReg,TheSeasons,ThePointees):
''' Plot Tq scatter with colours related to RH'''
''' Plot TRH scatter with colours related to q'''
''' Points are labelled with either the last two digits of the year (YY) or MONYY '''
''' Save as png and eps '''
''' TheFileTq / TheFileTRH - the filepath and filename for the Tq and TRH images '''
''' TheYrStr - a string array of the last two digits for years NYrs long '''
''' Thentims - an integer for the number of points to be plotted '''
''' Theq_arr - the specific humidity data (can be monthly or yearly) '''
''' TheRH_arr - the relative humidity data (can be monthly or yearly) '''
''' TheT_arr - the temperature data (can be monthly or yearly) '''
# Load colours and set up bounds
cmap=plt.get_cmap('BrBG') # BrownBlueGreen
cmaplist=[cmap(i) for i in range(cmap.N)]
for loo in range(np.int(cmap.N/2)-30,np.int(cmap.N/2)+30):
cmaplist.remove(cmaplist[np.int(cmap.N/2)-30]) # remove the very pale colours in the middle
# #cmaplist.remove(cmaplist[(cmap.N/2)-10:(cmap.N/2)+10]) # remove the very pale colours in the middle
#
## remove the darkest and lightest (white and black) - and reverse
# for loo in range(40):
# cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
## for loo in range(10):
## cmaplist.remove(cmaplist[0])
## cmaplist.reverse()
cmap=cmap.from_list('this_cmap',cmaplist,cmap.N)
# FIRST MAKE UP THE TqbyRH plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmin,vmax,strbounds,bounds=MakeUpSteps(TheRH_arr)
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Specific Humidity Anomalies (g kg$^{-1}$)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titleesR=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
titleesS=['December-February','March-May','June-August','September-November']
# set up max and min of q and T for axes - keep same for all regions
qmax=np.ceil(np.max(abs(Theq_arr))/0.1)*0.1
qmin=-qmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
# Is it to be a seasonal (four plot) scenario?
if (TheSeasons):
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
#pdb.set_trace()
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],Theq_arr[0,vv],c=TheRH_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titleesR[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],Theq_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],Theq_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([qmin,qmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(qmin,qmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],Theq_arr[pp,vv],c=TheRH_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesR[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],Theq_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],Theq_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
#pdb.set_trace()
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'RH Anomalies (%rh)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTq+".eps")
plt.savefig(TheFileTq+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
#***********************************
# SECOND MAKE UP THE TRHbyq plot
# Call MakeUpSteps routine to get a NICE set of colourbar indices
vmin,vmax,strbounds,bounds=MakeUpSteps(Theq_arr)
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
ytitlee='Relative Humidity Anomalies (%rh)'
xtitlee='Temperature Anomalies ($^{o}$C)'
titlees=['Globe 70$^{o}$S to 70$^{o}$N','N. Hemisphere 20$^{o}$N to 70$^{o}$N','Tropics 20$^{o}$S to 20$^{o}$N','S. Hemisphere 70$^{o}$S to 20$^{o}$S']
# set up max and min of RH and T for axes - keep same for all regions
rhmax=np.ceil(np.max(abs(TheRH_arr))/0.1)*0.1
rhmin=-rhmax
tmax=np.ceil(np.max(abs(TheT_arr))/0.1)*0.1
tmin=-tmax
# set up plot - are we working with one region or four?
if (TheReg != 'A'):
if (Seasons):
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[pp,vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titleesS[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
else:
# Single plot scenario
fig = plt.figure(1,figsize=(8,8))
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
ax1=plt.axes([0.1,0.1,0.75,0.8]) # left, bottom, width, height
ax1.set_xlim([tmin,tmax])
ax1.set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax1.plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax1.plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax1.plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot YEAR LABELS for the goods
for vv in range(Thentims):
#print(vv,TheT_arr[0,vv],Theq_arr[0,vv],TheRH_arr[0,vv],r"$ {} $".format(Pointees[vv]))
scats=ax1.scatter(TheT_arr[0,vv],TheRH_arr[0,vv],c=Theq_arr[0,vv],marker=r"$ {} $".format(ThePointees[vv]),s=250,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
ax1.set_xlabel(xtitlee,size=14)
ax1.set_ylabel(ytitlee,size=14)
ax1.tick_params(axis='both', which='major', labelsize=14)
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=14)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=14,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
# plt.figtext(0.02,0.96,TheLetter,size=18)
if (TheReg == 'G'):
PointTitle=0
if (TheReg == 'N'):
PointTitle=1
if (TheReg == 'T'):
PointTitle=2
if (TheReg == 'S'):
PointTitle=3
ax1.set_title(titlees[PointTitle],size=18)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[0,:],TheRH_arr[0,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[0,:],TheRH_arr[0,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext(0.05,0.96,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext(0.05,0.9,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext(0.05,0.84,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax1.plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
else:
# Four plot scenario
fig,ax=plt.subplots(4,figsize=(8,8)) #6,18
plt.clf() # needs to be called after figure!!! (create the figure, then clear the plot space)
TheLetter=['a)','b)','c)','d)']
xstart=[0.1,0.48,0.1,0.48]
xwide=0.36
ystart=[0.54,0.54,0.08,0.08]
ytall=0.36
for pp in range(4):
ax[pp]=plt.axes([xstart[pp],ystart[pp],xwide,ytall]) # left, bottom, width, height
ax[pp].set_xlim([tmin,tmax])
ax[pp].set_ylim([rhmin,rhmax])
# make blank plot with zero lines on
ax[pp].plot(np.zeros(100),np.linspace(rhmin,rhmax,100),color='black',linewidth=2)
ax[pp].plot(np.linspace(tmin,tmax,100),np.zeros(100),color='black',linewidth=2)
# # plot 1:1 line dashed
# ax[pp].plot(np.linspace(-5,5,100),np.linspace(-5,5,100),color='black',linewidth=2,linestyle='dashed')
# plot black dots for the goods
for vv in range(Thentims):
scats=ax[pp].scatter(TheT_arr[pp,vv],TheRH_arr[pp,vv],c=Theq_arr[pp,vv],marker=r"$ {} $".format(ThePointees[vv]),s=200,cmap=cmap,norm=norm, edgecolors='none' ) # s=1
if (pp == 2) | (pp == 3):
ax[pp].set_xlabel(xtitlee,size=12)
if (pp == 0) | (pp == 2):
ax[pp].set_ylabel(ytitlee,size=12)
if (pp == 0) | (pp == 1):
ax[pp].xaxis.set_ticklabels([])
if (pp == 1) | (pp == 3):
ax[pp].yaxis.set_ticklabels([])
ax[pp].tick_params(axis='both', which='major', labelsize=12)
plt.figtext((xstart[pp]+0.02),ystart[pp]+ytall-0.05,TheLetter[pp],size=14)
ax[pp].set_title(titlees[pp],size=14)
# Get correlation and slope of scatter and add to plot
#pcorr = ss.pearsonr(TheT_arr[pp,:],TheRH_arr[pp,:]) # element 0 = pearson correlation coefficient, element 1 = two-tailed p-value
linvals = ss.linregress(TheT_arr[pp,:],TheRH_arr[pp,:]) # 0 = slope, 1 = intercept, 2 = r-value, 3 = two-tailed p-value, 4 = sterr
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.05,'r = '+"{:3.2f}".format(linvals[2]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.07,'m = '+"{:3.2f}".format(linvals[0]),size=12)
plt.figtext((xstart[pp]+0.05),ystart[pp]+ytall-0.09,'p = '+"{:1.2f}".format(linvals[3]),size=12)
# plot regression line dashed
ax[pp].plot(np.linspace(tmin,tmax,100),np.linspace(linvals[0]*tmin,linvals[0]*tmax,100),color='black',linewidth=2,linestyle='dashed')
cbax=fig.add_axes([0.85,0.1,0.025,0.8])
cb=plt.colorbar(scats,cax=cbax,orientation='vertical',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
plt.figtext(0.97,0.5,'q Anomalies (g kg$^{-1}$)',size=12,ha='center',rotation='vertical',va='center')
# add watermark and plot labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFileTRH+".eps")
plt.savefig(TheFileTRH+".png")
# raw_input("stop") # REALLY USEFUL TO INTERACT WITHIN SUBROUTINE ctrl C
# plt.ion()
# plt.show() can then zoom and save
return #PlotNiceDotsMap
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# Read in region data for each variable
if (Region == 'A'):
nReg=4
else:
nReg=1
tmpq_arr=np.empty((nReg,nmons))
tmpRH_arr=np.empty((nReg,nmons))
tmpT_arr=np.empty((nReg,nmons))
q_arr=np.empty((nReg,ntims))
RH_arr=np.empty((nReg,ntims))
T_arr=np.empty((nReg,ntims))
MyFile=INDIR+In_q
if (Region == 'A'):
ReadInfo=['glob_q_anoms','nhem_q_anoms','trop_q_anoms','shem_q_anoms']
elif (Region == 'G'):
ReadInfo=['glob_q_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_q_anoms']
elif (Region == 'T'):
ReadInfo=['trop_q_anoms']
elif (Region == 'S'):
ReadInfo=['shem_q_anoms']
tmpq_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpq_arr)
MyFile=INDIR+In_RH
if (Region == 'A'):
ReadInfo=['glob_RH_anoms','nhem_RH_anoms','trop_RH_anoms','shem_RH_anoms']
elif (Region == 'G'):
ReadInfo=['glob_RH_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_RH_anoms']
elif (Region == 'T'):
ReadInfo=['trop_RH_anoms']
elif (Region == 'S'):
ReadInfo=['shem_RH_anoms']
tmpRH_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpRH_arr)
MyFile=INDIR+In_T
if (Region == 'A'):
ReadInfo=['glob_T_anoms','nhem_T_anoms','trop_T_anoms','shem_T_anoms']
elif (Region == 'G'):
ReadInfo=['glob_T_anoms']
elif (Region == 'N'):
ReadInfo=['nhem_T_anoms']
elif (Region == 'T'):
ReadInfo=['trop_T_anoms']
elif (Region == 'S'):
ReadInfo=['shem_T_anoms']
tmpT_arr=ReadNetCDFTS(MyFile,ReadInfo,tmpT_arr)
#pdb.set_trace()
# If annual - convert monthly mean anomalies to annual mean anomalies
# THERE SHOULD BE NO MISSING DATA IN THESE!!!!
# However, there are missing values (e.g. April 2015), so we need to set the data up as masked arrays.
tmpq_arr = ma.masked_where(tmpq_arr < -1000,tmpq_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpT_arr = ma.masked_where(tmpT_arr < -1000,tmpT_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
tmpRH_arr = ma.masked_where(tmpRH_arr < -1000,tmpRH_arr) # mdi is -1e30 but floating point inaccuracy means it may not match?
if (Seasons):
SeasonPointer = np.reshape(np.arange(nmons),(nyrs,12))
DJF = np.reshape(SeasonPointer[:,(0,1,11,)],nyrs*3)
MAM = np.reshape(SeasonPointer[:,(2,3,4,)],nyrs*3)
JJA = np.reshape(SeasonPointer[:,(5,6,7,)],nyrs*3)
SON = np.reshape(SeasonPointer[:,(8,9,10,)],nyrs*3)
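# Each row of SeasonPointer holds the 12 month indices of one calendar year, so
# DJF/MAM/JJA/SON above are flat lists of month positions grouped by year. Note
# that DJF picks Jan, Feb and Dec of the *same* calendar year; the seasonal
# averaging below re-aligns this so that each December is grouped with the
# following January/February (the first winter only has Jan/Feb available).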
for rr in range(nReg):
if (TimeRes == 'Y'):
Pointees=YrStr
if (Seasons):
# Need to sort the arrays out into seasonal groups of either annual or months
T_arrS = ma.empty((4,nyrs),dtype=float)
q_arrS = ma.empty((4,nyrs),dtype=float)
RH_arrS = ma.empty((4,nyrs),dtype=float)
#PointeesS = np.empty((4,nyrs),dtype=str)
#pdb.set_trace()
for yy in range(nyrs):
if (yy == 0):
TmpT = tmpT_arr[0,DJF]
T_arrS[0,0] = ma.mean(TmpT[0:2])
TmpTN = np.reshape(TmpT[2:-1],(nyrs-1,3))
Tmpq = tmpq_arr[0,DJF]
q_arrS[0,0] = ma.mean(Tmpq[0:2])
TmpqN = np.reshape(Tmpq[2:-1],(nyrs-1,3))
TmpRH = tmpRH_arr[0,DJF]
RH_arrS[0,0] =
|
ma.mean(TmpRH[0:2])
|
numpy.ma.mean
|
"""
A full working spin-orbital CCSD(T) code generated with pdaggerq
If you want to run the example here, you should install pyscf, openfermion,
and openfermion-pyscf. The actual CCSD(T) code (ccsd_energy, singles_residual,
doubles_residual, triples_residual, t3_energy, kernel) does not depend on those
packages, but you must obtain integrals from somewhere.
The total energy for this example has been checked against that produced
by the CCSD(T) implementation in psi4.
(T) energy = -0.003382913092468
* CCSD(T) total energy = -100.009057558929399
the main() function is fairly straightforward.
"""
# set allow numpy built with MKL to consume more threads for tensordot
import os
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
import numpy as np
from numpy import einsum
def ccsd_energy(t1, t2, f, g, o, v):
"""
< 0 | e(-T) H e(T) | 0> :
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
:param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 1.0000 f(i,i)
energy = 1.0 * einsum('ii', f[o, o])
# 1.0000 f(i,a)*t1(a,i)
energy += 1.0 * einsum('ia,ai', f[o, v], t1)
# -0.5000 <j,i||j,i>
energy += -0.5 * einsum('jiji', g[o, o, o, o])
# 0.2500 <j,i||a,b>*t2(a,b,j,i)
energy += 0.25 * einsum('jiab,abji', g[o, o, v, v], t2)
# -0.5000 <j,i||a,b>*t1(a,i)*t1(b,j)
energy += -0.5 * einsum('jiab,ai,bj', g[o, o, v, v], t1, t1,
optimize=['einsum_path', (0, 1), (0, 1)])
return energy
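# Illustrative sketch (not from the generated code): the docstrings above define the
# Fock operator as f = soei + np.einsum('piiq->pq', astei[:, o, o, :]). A hypothetical
# helper assembling it from spin-orbital integrals could look like this; the argument
# names and shapes are assumptions made for illustration only.
def build_spin_orbital_fock(soei, astei, nocc):
    """Assemble the spin-orbital Fock matrix from soei (1-electron integrals, spinorb)
    and astei (antisymmetric 2-electron integrals, openfermion <12|21> ordering)."""
    o = slice(None, nocc)
    # mean-field contribution summed over the occupied spin-orbitals
    return soei + np.einsum('piiq->pq', astei[:, o, o, :])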
def t_energy(l1, l2, t3, f, g, o, v):
"""
E(t)
:param l1: transpose of spin-orbital t1 amplitudes (nocc x nvirt)
:param l2: transpose of spin-orbital t2 amplitudes (nocc x nocc x nvirt x nvirt)
:param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
:param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 0.2500 <k,j||b,c>*l1(i,a)*t3(b,c,a,i,k,j)
energy = 0.25 * einsum('kjbc,ia,bcaikj', g[o, o, v, v], l1, t3, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.2500 <l,k||c,j>*l2(i,j,b,a)*t3(c,b,a,i,l,k)
energy += 0.25 * einsum('lkcj,ijba,cbailk', g[o, o, v, o], l2, t3, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.2500 <k,b||c,d>*l2(i,j,b,a)*t3(c,d,a,i,j,k)
energy += 0.25 * einsum('kbcd,ijba,cdaijk', g[o, v, v, v], l2, t3, optimize=['einsum_path', (0, 2), (0, 1)])
return energy
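# Per the docstrings above, l1 and l2 are simply the transposes of the converged t1
# (nvirt x nocc) and t2 (nvirt x nvirt x nocc x nocc) amplitudes. A minimal sketch of
# how they could be formed (shown as comments; an assumption for illustration only):
# l1 = t1.transpose(1, 0)        # -> (nocc x nvirt)
# l2 = t2.transpose(2, 3, 0, 1)  # -> (nocc x nocc x nvirt x nvirt)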
def singles_residual(t1, t2, t3, f, g, o, v):
"""
< 0 | m* e e(-T) H e(T) | 0>
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
:param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# 1.0000 f(a,i)
singles_res = 1.0 * einsum('ai->ai', f[v, o])
# -1.0000 f(j,i)*t1(a,j)
singles_res += -1.0 * einsum('ji,aj->ai', f[o, o], t1)
# 1.0000 f(a,b)*t1(b,i)
singles_res += 1.0 * einsum('ab,bi->ai', f[v, v], t1)
# -1.0000 f(j,b)*t2(b,a,i,j)
singles_res += -1.0 * einsum('jb,baij->ai', f[o, v], t2)
# -1.0000 f(j,b)*t1(b,i)*t1(a,j)
singles_res += -1.0 * einsum('jb,bi,aj->ai', f[o, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,a||b,i>*t1(b,j)
singles_res += 1.0 * einsum('jabi,bj->ai', g[o, v, v, o], t1)
# -0.5000 <k,j||b,i>*t2(b,a,k,j)
singles_res += -0.5 * einsum('kjbi,bakj->ai', g[o, o, v, o], t2)
# -0.5000 <j,a||b,c>*t2(b,c,i,j)
singles_res += -0.5 * einsum('jabc,bcij->ai', g[o, v, v, v], t2)
# 1.0000 <k,j||b,i>*t1(b,j)*t1(a,k)
singles_res += 1.0 * einsum('kjbi,bj,ak->ai', g[o, o, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,a||b,c>*t1(b,j)*t1(c,i)
singles_res += 1.0 * einsum('jabc,bj,ci->ai', g[o, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <k,j||b,c>*t1(b,j)*t2(c,a,i,k)
singles_res += 1.0 * einsum('kjbc,bj,caik->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 0.5000 <k,j||b,c>*t1(b,i)*t2(c,a,k,j)
singles_res += 0.5 * einsum('kjbc,bi,cakj->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.5000 <k,j||b,c>*t1(a,j)*t2(b,c,i,k)
singles_res += 0.5 * einsum('kjbc,aj,bcik->ai', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 <k,j||b,c>*t1(b,j)*t1(c,i)*t1(a,k)
singles_res += 1.0 * einsum('kjbc,bj,ci,ak->ai', g[o, o, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return singles_res
def doubles_residual(t1, t2, t3, f, g, o, v):
"""
< 0 | m* n* f e e(-T) H e(T) | 0>
:param t1: spin-orbital t1 amplitudes (nvirt x nocc)
:param t2: spin-orbital t2 amplitudes (nvirt x nvirt x nocc x nocc)
:param t3: spin-orbital t3 amplitudes (nvirt x nvirt x nvirt x nocc x nocc x nocc)
:param f: fock operator defined as soei + np.einsum('piiq->pq', astei[:, o, o, :])
where soei is 1 electron integrals (spinorb) and astei is
antisymmetric 2 electron integrals in openfermion format
<12|21>. <ij|kl> - <ij|lk>
:param g: antisymmetric 2 electron integrals. See fock input.
:param o: slice(None, occ) where occ is number of occupied spin-orbitals
:param v: slice(occ, None) where occ is number of occupied spin-orbitals
"""
# -1.0000 P(i,j)f(k,j)*t2(a,b,i,k)
contracted_intermediate = -1.0 * einsum('kj,abik->abij', f[o, o], t2)
doubles_res = 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 1.0000 P(a,b)f(a,c)*t2(c,b,i,j)
contracted_intermediate = 1.0 * einsum('ac,cbij->abij', f[v, v], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# -1.0000 P(i,j)f(k,c)*t1(c,j)*t2(a,b,i,k)
contracted_intermediate = -1.0 * einsum('kc,cj,abik->abij', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# -1.0000 P(a,b)f(k,c)*t1(a,k)*t2(c,b,i,j)
contracted_intermediate = -1.0 * einsum('kc,ak,cbij->abij', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# 1.0000 <a,b||i,j>
doubles_res += 1.0 * einsum('abij->abij', g[v, v, o, o])
# 1.0000 P(a,b)<k,a||i,j>*t1(b,k)
contracted_intermediate = 1.0 * einsum('kaij,bk->abij', g[o, v, o, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# 1.0000 P(i,j)<a,b||c,j>*t1(c,i)
contracted_intermediate = 1.0 * einsum('abcj,ci->abij', g[v, v, v, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 0.5000 <l,k||i,j>*t2(a,b,l,k)
doubles_res += 0.5 * einsum('lkij,ablk->abij', g[o, o, o, o], t2)
# 1.0000 P(i,j)*P(a,b)<k,a||c,j>*t2(c,b,i,k)
contracted_intermediate = 1.0 * einsum('kacj,cbik->abij', g[o, v, v, o], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# 0.5000 <a,b||c,d>*t2(c,d,i,j)
doubles_res += 0.5 * einsum('abcd,cdij->abij', g[v, v, v, v], t2)
# -1.0000 <l,k||i,j>*t1(a,k)*t1(b,l)
doubles_res += -1.0 * einsum('lkij,ak,bl->abij', g[o, o, o, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(i,j)*P(a,b)<k,a||c,j>*t1(c,i)*t1(b,k)
contracted_intermediate = 1.0 * einsum('kacj,ci,bk->abij', g[o, v, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# -1.0000 <a,b||c,d>*t1(c,j)*t1(d,i)
doubles_res += -1.0 * einsum('abcd,cj,di->abij', g[v, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(i,j)<l,k||c,j>*t1(c,k)*t2(a,b,i,l)
contracted_intermediate = 1.0 * einsum('lkcj,ck,abil->abij', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 0.5000 P(i,j)<l,k||c,j>*t1(c,i)*t2(a,b,l,k)
contracted_intermediate = 0.5 * einsum('lkcj,ci,ablk->abij', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# -1.0000 P(i,j)*P(a,b)<l,k||c,j>*t1(a,k)*t2(c,b,i,l)
contracted_intermediate = -1.0 * einsum('lkcj,ak,cbil->abij', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# 1.0000 P(a,b)<k,a||c,d>*t1(c,k)*t2(d,b,i,j)
contracted_intermediate = 1.0 * einsum('kacd,ck,dbij->abij', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# -1.0000 P(i,j)*P(a,b)<k,a||c,d>*t1(c,j)*t2(d,b,i,k)
contracted_intermediate = -1.0 * einsum('kacd,cj,dbik->abij', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate) + -1.00000 * einsum('abij->baij', contracted_intermediate) + 1.00000 * einsum('abij->baji', contracted_intermediate)
# 0.5000 P(a,b)<k,a||c,d>*t1(b,k)*t2(c,d,i,j)
contracted_intermediate = 0.5 * einsum('kacd,bk,cdij->abij', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# -0.5000 P(i,j)<l,k||c,d>*t2(c,d,j,k)*t2(a,b,i,l)
contracted_intermediate = -0.5 * einsum('lkcd,cdjk,abil->abij', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 0.2500 <l,k||c,d>*t2(c,d,i,j)*t2(a,b,l,k)
doubles_res += 0.25 * einsum('lkcd,cdij,ablk->abij', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# -0.5000 <l,k||c,d>*t2(c,a,l,k)*t2(d,b,i,j)
doubles_res += -0.5 * einsum('lkcd,calk,dbij->abij', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(i,j)<l,k||c,d>*t2(c,a,j,k)*t2(d,b,i,l)
contracted_intermediate = 1.0 * einsum('lkcd,cajk,dbil->abij', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# -0.5000 <l,k||c,d>*t2(c,a,i,j)*t2(d,b,l,k)
doubles_res += -0.5 * einsum('lkcd,caij,dblk->abij', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# -1.0000 P(i,j)<l,k||c,j>*t1(c,i)*t1(a,k)*t1(b,l)
contracted_intermediate = -1.0 * einsum('lkcj,ci,ak,bl->abij', g[o, o, v, o], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# -1.0000 P(a,b)<k,a||c,d>*t1(c,j)*t1(d,i)*t1(b,k)
contracted_intermediate = -1.0 * einsum('kacd,cj,di,bk->abij', g[o, v, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# 1.0000 P(i,j)<l,k||c,d>*t1(c,k)*t1(d,j)*t2(a,b,i,l)
contracted_intermediate = 1.0 * einsum('lkcd,ck,dj,abil->abij', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->abji', contracted_intermediate)
# 1.0000 P(a,b)<l,k||c,d>*t1(c,k)*t1(a,l)*t2(d,b,i,j)
contracted_intermediate = 1.0 * einsum('lkcd,ck,al,dbij->abij', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('abij->baij', contracted_intermediate)
# -0.5000 <l,k||c,d>*t1(c,j)*t1(d,i)*t2(a,b,l,k)
doubles_res += -0.5 * einsum('lkcd,cj,di,ablk->abij', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
# 1.0000 P(i,j)*P(a,b)<l,k||c,d>*t1(c,j)*t1(a,k)*t2(d,b,i,l)
contracted_intermediate = 1.0 * einsum('lkcd,cj,ak,dbil->abij', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 *
|
einsum('abij->abji', contracted_intermediate)
|
numpy.einsum
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the disparity map filtering.
"""
import unittest
import logging
import logging.config
import os
import json
import numpy as np
import xarray as xr
import pandora.filter as flt
import pandora
from pandora.constants import *
class TestDisparity(unittest.TestCase):
"""
TestDisparity class allows to test the disparity module
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
pass
def test_median_filter(self):
"""
Test the median method
"""
disp = np.array([[5, 6, 7, 8, 9],
[6, 85, 1, 36, 5],
[5, 9, 23, 12, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
valid = np.array([[0, 0, 0, 0, 0],
[0, PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE, 0, 0, 0],
[0, PANDORA_MSK_PIXEL_FILLED_OCCLUSION, 0, 0, 0],
[0, 0, 0, 0, PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION]], dtype=np.uint16)
disp_dataset = xr.Dataset({'disparity_map': (['row', 'col'], disp),
'validity_mask': (['row', 'col'], valid)},
coords={'row': np.arange(4), 'col': np.arange(5)})
filter_median = flt.AbstractFilter(**{'filter_method': 'median', 'filter_size': 3})
# Apply median filter to the disparity map. Median filter is only applied on valid pixels.
disp_filter = filter_median.filter_disparity(disp_dataset)
# Filtered disparity map ground truth
gt_disp = np.array([[5, 6, 7, 8, 9],
[6, 6, 9, 8, 5],
[5, 6, 9, 5, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(disp_filter['disparity_map'].data, gt_disp)
disp = np.array([[7, 8, 4, 5, 5],
[5, 9, 4, 3, 8],
[5, 2, 7, 2, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
valid = np.array([[PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE, 0,
PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE,
PANDORA_MSK_PIXEL_FILLED_OCCLUSION + PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER, 0],
[PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_SEC, PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER,
PANDORA_MSK_PIXEL_OCCLUSION, 0, 0],
[PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_REF, PANDORA_MSK_PIXEL_MISMATCH,
PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING,
PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE + PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION,
0],
[PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING, PANDORA_MSK_PIXEL_OCCLUSION,
PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_REF, 0,
PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING]],
dtype=np.uint16)
disp_dataset = xr.Dataset({'disparity_map': (['row', 'col'], disp),
'validity_mask': (['row', 'col'], valid)},
coords={'row': np.arange(4), 'col': np.arange(5)})
filter_median = flt.AbstractFilter(**{'filter_method': 'median', 'filter_size': 3})
# Apply median filter to the disparity map. Median filter is only applied on valid pixels.
disp_filter = filter_median.filter_disparity(disp_dataset)
# Filtered disparity map ground truth
gt_disp = np.array([[7, 8, 4, 5, 5],
[5, 9, 4, 3.5, 8],
[5, 2, 7, 2, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equal)
np.testing.assert_array_equal(disp_filter['disparity_map'].data, gt_disp)
disp = np.array([[7, 8, 4, 5, 5],
[5, 9, 4, 3, 8],
[5, 2, 7, 2, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
valid = np.array([[PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE, 0,
PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE,
PANDORA_MSK_PIXEL_FILLED_OCCLUSION + PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER, 0],
[0, 0, PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION, 0, 0],
[0, 0, 0, PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE +
PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION, 0],
[PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_SEC, 0, 0, 0, 0]], dtype=np.uint16)
disp_dataset = xr.Dataset({'disparity_map': (['row', 'col'], disp),
'validity_mask': (['row', 'col'], valid)},
coords={'row': np.arange(4), 'col': np.arange(5)})
filter_median = flt.AbstractFilter(**{'filter_method': 'median', 'filter_size': 3})
# Apply median filter to the disparity map. Median filter is only applied on valid pixels.
disp_filter = filter_median.filter_disparity(disp_dataset)
# Filtered disparity map ground truth
gt_disp = np.array([[7, 8, 4, 5, 5],
[5, 5, 4, 4, 8],
[5, 5, 3, 4, 2],
[6, 1, 9, 2, 4]], dtype=np.float32)
# Check if the calculated disparity map is equal to the ground truth (same shape and all elements equal)
|
np.testing.assert_array_equal(disp_filter['disparity_map'].data, gt_disp)
|
numpy.testing.assert_array_equal
|
import mne
import glob
import natsort
import numpy as np
import xarray as xr
from scipy.io import loadmat
from sklearn.model_selection import StratifiedKFold
from imblearn.under_sampling import RandomUnderSampler
mne.set_log_level('error')
def mne_apply(func, raw, verbose="WARNING"):
"""
Apply function to data of `mne.io.RawArray`.
Parameters
----------
func: function
Should accept 2d-array (channels x time) and return modified 2d-array
raw: `mne.io.RawArray`
verbose: bool
Whether to log creation of new `mne.io.RawArray`.
Returns
-------
transformed_set: Copy of `raw` with data transformed by given function.
"""
new_data = func(raw.get_data())
return mne.io.RawArray(new_data, raw.info, verbose=verbose)
def balance_classes(X, y, random_state=0):
'''Balances classes'''
rus = RandomUnderSampler(random_state=random_state)
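# RandomUnderSampler expects a 2-D (n_samples, n_features) array, so the epochs are
# flattened across (channels, time) below and reshaped back to 3-D afterwards.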
n_eps, n_ecog2, n_ts = X.shape
X_rsh = X.reshape((n_eps,-1))
X_rs, y_rs = rus.fit_resample(X_rsh, y)
n_eps = X_rs.shape[0]
X_rs = X_rs.reshape((n_eps,n_ecog2,n_ts))
return X_rs, y_rs
def compute_xr_eeg_mr(sbj_id,lp,sp,tlims,sfreq_new,filt_freqs,n_chans,event_dict,
tlims_handpos,sfreq_new_pose,n_splits=4):
eeg_ch_names = ['F3','F1','Fz','F2','F4',
'FFC5h','FFC3h','FFC1h','FFC2h','FFC4h','FFC6h',
'FC5','FC3','FC1','FCz','FC2','FC4','FC6','FTT7h',
'FCC5h','FCC3h','FCC1h','FCC2h','FCC4h','FCC6h','FTT8h',
'C5','C3','C1','Cz','C2','C4','C6','TTP7h',
'CCP5h','CCP3h','CCP1h','CCP2h','CCP4h','CCP6h','TTP8h',
'CP5','CP3','CP1','CPz','CP2','CP4','CP6',
'CPP5h','CPP3h','CPP1h','CPP2h','CPP4h','CPP6h',
'P3','P1','Pz','P2','P4','PPO1h','PPO2h']
fnames_all = natsort.natsorted(glob.glob(lp+sbj_id+'_ME/*.gdf'))
for s,fname_curr in enumerate(fnames_all):
# print(fname_curr)
# Load datafile
dat_load = mne.io.read_raw_edf(fname_curr,preload=True)
dat_hand_pos = dat_load.copy()
dat_hand_pos_ev = dat_load.copy()
ch_labels = dat_load.info['ch_names']
dat = dat_load.drop_channels(ch_labels[n_chans:])
assert len(dat.ch_names) == n_chans
# Convert to millivolt for numerical stability of the next operations
dat = mne_apply(lambda a: a * 1e3, dat)
# High-pass filter
dat.filter(filt_freqs[0], filt_freqs[1])
# Common average reference
dat.set_eeg_reference(ref_channels='average')
# Find events (769, 770, 771, 772)
events,ev_dic_orig = mne.events_from_annotations(dat_load)
# ev_dic_orig[str(int(event_dict['move']))]
# Epoch data around events
key_lst = list(event_dict.keys())
event_id = {str(int(event_dict[key])):ev_dic_orig[str(int(event_dict[key]))] for key in key_lst}
# Drop EEG/EOG electrodes for pose data
drop_chan_pos_pose = [val for val in ch_labels if (val[:3] == 'eeg') or (val[:3] == 'eog')]
drop_chan_pos_pose += [val for val in ch_labels if val in eeg_ch_names]
drop_chan_pos_pose += ['wrist_bend', 'roll', 'pitch', 'gesture', 'GripPressure']
dat_hand_pos.drop_channels(drop_chan_pos_pose)
# Remove other pose data as well (except wrist/elbow XYZ)
ch_names = dat_hand_pos.info['ch_names']
diff_chs = ['handPosX', 'handPosY', 'handPosZ', 'elbowPosX', 'elbowPosY', 'elbowPosZ']
non_diff_chs = ['thumb_near', 'thumb_far', 'thumb_index', 'index_near', 'index_far',
'index_middle', 'middle_near', 'middle_far', 'middle_ring', 'ring_near',
'ring_far', 'ring_little', 'litte_near', 'litte_far', 'thumb_palm', 'ShoulderAdducti',
'ShoulderFlexion', 'ShoulderRotatio', 'Elbow', 'ProSupination', 'Wrist']
dat_hand_pos.drop_channels(non_diff_chs)
# Find movement event onset based on hand radial position
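# (The radial hand position |[x, y, z]| is computed for each movement epoch, and the
# cue-based event time is shifted to the first sample where the displacement from the
# epoch start exceeds min(75% of its maximum, 1).)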
ch_labels_pos = dat_hand_pos_ev.info['ch_names']
drop_chan_pos = [val for val in ch_labels_pos if val not in ['handPosX', 'handPosY', 'handPosZ']]
dat_hand_pos_ev.drop_channels(drop_chan_pos)
radial_dist = np.sqrt(np.square(dat_hand_pos_ev._data).sum(axis=0))
dat_hand_pos_ev.drop_channels(['handPosY', 'handPosZ'])
dat_hand_pos_ev._data[0,:] = radial_dist
ep_hand_pos = mne.Epochs(dat_hand_pos_ev, events, event_id, tlims_handpos[0],
tlims_handpos[1], baseline=None, preload=True)
ev_lab = list(event_id.keys())[0]
move_ev_inds = np.nonzero(events[:,2]==event_id[ev_lab])[0]
for i in range(ep_hand_pos[ev_lab]._data.shape[0]):
curr_trace = ep_hand_pos[ev_lab]._data[i,...].squeeze()
curr_trace = np.abs(curr_trace-curr_trace[0])
thresh=min(curr_trace.max()*.75,1)
events[move_ev_inds[i],0] += np.nonzero(curr_trace>thresh)[0][0]
if s==0:
ep_eeg = mne.Epochs(dat, events, event_id, tlims[0], tlims[1], baseline=None, preload=True)
ep_pose = mne.Epochs(dat_hand_pos, events, event_id, tlims[0],
tlims[1], baseline=None, preload=True)
else:
ep_eeg_tmp = mne.Epochs(dat, events, event_id, tlims[0], tlims[1], baseline=None,
preload=True)
ep_eeg = mne.concatenate_epochs([ep_eeg,ep_eeg_tmp])
ep_pose_tmp = mne.Epochs(dat_hand_pos, events, event_id, tlims[0], tlims[1],
baseline=None, preload=True)
ep_pose = mne.concatenate_epochs([ep_pose,ep_pose_tmp])
# Resample epochs to match ECoG inputs
ep_eeg.resample(sfreq_new)
ep_pose.resample(sfreq_new_pose)
# Compute diff for pose data
ch_names = ep_pose.info['ch_names']
ep_pose_np = ep_pose.get_data().copy()
ep_pose_np = np.append(np.diff(ep_pose_np,axis=-1), np.diff(ep_pose_np,axis=-1)[...,-1:], axis=-1)
# ep_pose.pick([0,1])
ep_pose._data = ep_pose_np
# Remove NaN events
nan_evs = np.sum(np.isnan(ep_eeg.get_data()).reshape(ep_eeg.get_data().shape[0],-1),axis=1)
ep_eeg = ep_eeg.drop(np.nonzero(nan_evs)[0])
ep_pose = ep_pose.drop(np.nonzero(nan_evs)[0])
# Labels in consecutive integer order
y = ep_eeg.events[:,-1]
for i, uni_label in enumerate(np.unique(y).tolist()):
y[y==uni_label] = i
# Balance classes
X_ecog = ep_eeg.get_data().copy()
X_pose = ep_pose.get_data().copy()
X_ecog_rs, y_ecog_rs = balance_classes(X_ecog, y)
X_pose_rs, y_pose_rs = balance_classes(X_pose, y)
assert all(y_ecog_rs == y_pose_rs)
# Create recording day variable
skf = StratifiedKFold(n_splits=n_splits)
recording_day = np.zeros_like(y_ecog_rs)
for i, (_, test_index) in enumerate(skf.split(X_ecog_rs, y_ecog_rs)):
recording_day[test_index] = i
print(np.unique(recording_day, return_counts=True))
# Add labels to EEG data
labels_arr = np.tile(y_ecog_rs[:, np.newaxis], (1,X_ecog_rs.shape[2]))
ecog_dat_sbj = np.concatenate((X_ecog_rs,labels_arr[:, np.newaxis, :]), axis=1)
# Add labels to pose data
labels_arr_pose = np.tile(y_pose_rs[:, np.newaxis], (1,X_pose_rs.shape[2]))
pose_dat_sbj = np.concatenate((X_pose_rs,labels_arr_pose[:, np.newaxis, :]),axis=1)
# Randomize epoch order
order_inds = np.arange(ecog_dat_sbj.shape[0])
np.random.shuffle(order_inds)
ecog_dat_sbj = ecog_dat_sbj[order_inds,...]
pose_dat_sbj = pose_dat_sbj[order_inds,...]
recording_day = (recording_day[order_inds]).tolist()
# Convert EEG to xarray and save
da_ecog = xr.DataArray(ecog_dat_sbj,
[('events', recording_day),
('channels', np.arange(ecog_dat_sbj.shape[1])),
('time', ep_eeg.times)])
da_ecog.to_netcdf(sp+sbj_id+'_eeg_data.nc')
# Convert pose to xarray and save
da_pose = xr.DataArray(pose_dat_sbj,
[('events', recording_day),
('channels', np.arange(pose_dat_sbj.shape[1])),
('time', ep_pose.times)])
da_pose.to_netcdf(sp+'pose/'+sbj_id+'_pose_data.nc')
print('Finished ' + sbj_id +'!')
def flatx_with_labels(evs_in):
X, y = [], []
for i in range(len(evs_in)):
X.extend(evs_in[i])
y.extend([i]*len(evs_in[i]))
return np.array(X), np.array(y)
def align_evs_ff(move_evs, cue_evs):
ind_move, ind_cue = 0, 0
move_evs_out, cue_evs_out = [], []
while (ind_move < len(move_evs)) and (ind_cue < len(cue_evs)):
diff_val = move_evs[ind_move]-cue_evs[ind_cue]
if abs(diff_val) < 3000:
move_evs_out.append(move_evs[ind_move])
cue_evs_out.append(cue_evs[ind_cue])
ind_move += 1
ind_cue += 1
elif diff_val < 0:
# No move event for given cue
ind_move += 1
elif diff_val > 0:
# No cue event for given move
ind_cue += 1
return move_evs_out, cue_evs_out
def ev_ts_ff(in_dat, evs_good):
evs = [[] for i in range(len(evs_good))]
prev_val = 1
for i, val in enumerate(in_dat.flatten().tolist()):
if prev_val <= 0:
if val in evs_good:
evs[val-1].append(i)
prev_val = val
return evs
def compute_xr_ecog_ff(sbj_id, lp, sp, tlims, tlims_handpos,
filt_freqs, sfreq_new, out_sbj_d,
raw_sfreq=1000, n_splits=4):
# Load data
ff_dat = loadmat(lp + sbj_id + '/' + sbj_id + '_fingerflex.mat')
pose = ff_dat['flex'].T
ecog = ff_dat['data'].T
cue = ff_dat['cue'].T
stim_dat = loadmat(lp + sbj_id + '/' + sbj_id + '_stim.mat')
move = stim_dat['stim'].T
# Normalize pose (as done in "Decoding flexion of individual fingers using electrocorticographic signals in humans" section 2.2)
ave_pose = np.tile(np.mean(pose,axis=-1,keepdims=True),[1,pose.shape[1]])
std_pose = np.tile(np.std(pose,axis=-1,keepdims=True),[1,pose.shape[1]])
pose = np.divide((pose - ave_pose), std_pose)
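# (i.e. each pose channel is z-scored across time: subtract its mean, divide by its std)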
# Identify event times (transition from 0 or negative value to positive value)
evs_good = [1,2,3,4,5]
evs_cue = ev_ts_ff(cue, evs_good)
evs_move = ev_ts_ff(move, evs_good)
# Identify good events (remove non-overlapping events between cue and behavior)
move_evs_final, cue_evs_final = [],[]
for curr_ev in range(len(evs_good)):
move_evs_out, cue_evs_out = align_evs_ff(evs_move[curr_ev],
evs_cue[curr_ev])
move_evs_final.append(move_evs_out)
cue_evs_final.append(cue_evs_out)
# Shuffle events and balance classes
X_move, y_move = flatx_with_labels(move_evs_final)
X_cue, y_cue = flatx_with_labels(cue_evs_final)
assert (y_move == y_cue).all()
rus = RandomUnderSampler(random_state=0)
X_all = np.concatenate((X_move[:, np.newaxis], X_cue[:, np.newaxis]), axis=1)
X_all_res, y_all_res = rus.fit_resample(X_all, y_move)
# Check that events are balanced
print(np.unique(y_all_res, return_counts=True))
# Create raw for ECoG data
ch_names = ['ECoG'+str(val) for val in range(ecog.shape[0])]
ecog_info = mne.create_info(ch_names, raw_sfreq, ch_types='ecog')
raw_ecog = mne.io.RawArray(ecog, ecog_info)
# High-pass filter
raw_ecog.filter(filt_freqs[0], filt_freqs[1])
# Notch filter
raw_ecog.notch_filter(np.arange(60, 301, 60), picks='all')
# Common average reference
raw_ecog.set_eeg_reference(ref_channels='average')
# Create event struct
events = [[], [], []]
event_id = dict(zip(np.unique(y_all_res).astype('str').tolist(),
np.unique(y_all_res).tolist()))
n_evs = X_all_res.shape[0]
events = np.zeros((n_evs, 3))
events[:, 0] = X_all_res[:, 0]
events[:, 2] = y_all_res
events = events.astype('int')
# Create raw for pose data
ch_names = ['Pose'+str(val) for val in range(pose.shape[0])]
pose_info = mne.create_info(ch_names, raw_sfreq, ch_types='misc')
raw_pose = mne.io.RawArray(pose, pose_info)
# Epoch data
ep_ecog = mne.Epochs(raw_ecog, events, event_id, tlims[0], tlims[1], baseline=None, preload=True)
ep_pose = mne.Epochs(raw_pose, events, event_id, tlims[0], tlims[1], baseline=None, preload=True)
# Resample epochs to match ECoG inputs
ep_ecog.resample(sfreq_new)
ep_pose.resample(sfreq_new)
# Add labels to data
event_id_labs = list(event_id.keys())
days_start = (np.arange(n_splits)+1).tolist()
recording_day,labels = [],[]
for i,lab_curr in enumerate(event_id_labs):
ep_tmp = ep_ecog[lab_curr]
ep_tmp_pose = ep_pose[lab_curr]
n_tmp = int(ep_tmp._data.shape[0])//n_splits + 1
days_curr = np.asarray(days_start * n_tmp)[:ep_tmp._data.shape[0]]
np.random.shuffle(days_curr)
recording_day.extend(days_curr.tolist())
if i==0:
ecog_dat_sbj = ep_tmp.get_data().copy()
pose_dat_sbj = ep_tmp_pose.get_data().copy()
else:
ecog_dat_sbj = np.concatenate((ecog_dat_sbj,ep_tmp.get_data().copy()),axis=0)
pose_dat_sbj = np.concatenate((pose_dat_sbj,ep_tmp_pose.get_data().copy()),axis=0)
labels.extend([i+1]*ep_tmp.get_data().shape[0])
# Add labels to ECoG data
labels_arr = np.tile(np.asarray(labels)[:, np.newaxis],(1,ecog_dat_sbj.shape[2]))
ecog_dat_sbj = np.concatenate((ecog_dat_sbj,labels_arr[:, np.newaxis]),axis=1)
# Add labels to pose data
labels_arr_pose = np.tile(np.asarray(labels)[:, np.newaxis],(1,pose_dat_sbj.shape[2]))
pose_dat_sbj =
|
np.concatenate((pose_dat_sbj,labels_arr_pose[:, np.newaxis]),axis=1)
|
numpy.concatenate
|
from matrix_utils import hat, vee, expm_SO3
import datetime
import numpy as np
class Estimator:
"""Estimates the states of the UAV.
This uses the estimator defined in "Real-time Kinematics GPS Based
Telemetry System for Airborne Measurements of Ship Air Wake", but without
the bias estimation terms.
DOI: 10.2514/6.2019-2377
x: (3x1 numpy array) current position of the UAV [m]
v: (3x1 numpy array) current velocity of the UAV [m/s]
a: (3x1 numpy array) current acceleration of the UAV [m/s^2]
b_a: (float) accelerometer bias in e3 direction [m/s^2]
R: (3x3 numpy array) current attitude of the UAV in SO(3)
W: (3x1 numpy array) current angular velocity of the UAV [rad/s]
Q: (7x7 numpy array) variances of w_k
P: (10x10 numpy array) covariances of the states
t0: (datetime object) time at epoch
t: (float) current time since epoch [s]
t_pre: (float) time since epoch in the previous loop [s]
W_pre: (3x1 numpy array) angular velocity of the previous loop [rad/s]
a_imu_pre: (3x1 numpy array) acceleration of the previous loop [m/s^2]
R_pre: (3x3 numpy array) attitude in the previous loop in SO(3)
b_a_pre: (3x1 numpy array) accelerometer bias in the previous loop [m/s^2]
g: (float) gravitational acceleration [m/s^2]
ge3: (3x1 numpy array) gravitational acceleration direction [m/s^2]
R_bi: (3x3 numpy array) transformation from IMU frame to the body frame
R_bi_T: (3x3 numpy array) transpose of R_bi (transformation from the body frame to the IMU frame)
e3 : (3x1 numpy array) direction of the e3 axis
eye3: (3x3 numpy array) 3x3 identity matrix
eye10: (10x10 numpy array) 10x10 identity matrix
zero3: (3x3 numpy array) 3x3 zero matrix
"""
def __init__(self):
self.x = np.zeros(3)
self.v = np.zeros(3)
self.a = np.zeros(3)
self.b_a = 0.0
self.R = np.eye(3)
self.W = np.zeros(3)
# Variances of w_k
self.Q = np.diag([
0.001, 0.001, 0.001, # acceleration
0.025, 0.025, 0.025, # angular velocity
0.0001 # accelerometer z bias
])
# Initial covariances of x
self.P = np.diag([
1.0, 1.0, 1.0, # position
1.0, 1.0, 1.0, # velocity
0.01, 0.01, 0.01, # attitude
1.0 # accelerometer z bias
])
self.t0 = datetime.datetime.now()
self.t = 0.0
self.t_pre = 0.0
self.W_pre = np.zeros(3)
self.a_imu_pre = np.zeros(3)
self.R_pre = np.eye(3)
self.b_a_pre = 0.0
self.g = 9.81
self.ge3 = np.array([0.0, 0.0, self.g])
# Transformation from IMU frame to the body frame.
self.R_bi = np.array([
[1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0]
])
self.R_bi_T = self.R_bi.T
self.e3 = np.array([0.0, 0.0, 1.0])
self.eye3 = np.eye(3)
self.eye10 = np.eye(10)
self.zero3 = np.zeros((3, 3))
def prediction(self, a_imu, W_imu):
"""Prediction step of the estimator.
Args:
a_imu: (3x1 numpy array) acceleration measured by the IMU [m/s^2]
W_imu: (3x1 numpy array) angular rate measured by the IMU [rad/s]
"""
h = self.get_dt()
self.R_pre = np.copy(self.R)
self.W_pre = np.copy(self.W)
self.b_a_pre = self.b_a * 1.0
self.W = self.R_bi.dot(W_imu)
self.R = self.R.dot(expm_SO3(h / 2 * (self.W + self.W_pre)))
# This assumes the IMU provides acceleration without g
self.a = self.R.dot(self.R_bi).dot(a_imu) + self.b_a * self.e3
a_pre = self.R_pre.dot(self.R_bi).dot(self.a_imu_pre) \
+ self.b_a_pre * self.e3
self.x = self.x + h * self.v + h**2 / 2 * a_pre
self.v = self.v + h / 2 * (self.a + a_pre)
# Calculate A(t_{k-1})
A = np.zeros((10, 10))
A[0:3, 3:6] = self.eye3
A[3:6, 6:9] = - self.R_pre.dot(hat(self.R_bi.dot(self.a_imu_pre)))
A[3:6, 9] = self.e3
A[6:9, 6:9] = -hat(self.R_bi.dot(W_imu))
# Calculate F(t_{k-1})
F = np.zeros((10, 7))
F[3:6, 0:3] = self.R_pre.dot(self.R_bi)
F[6:9, 3:6] = self.R_bi
F[9, 6] = 1.0
# Calculate \Psi using A(t)
psi = self.eye10 + h / 2 * A
A = self.eye10 + h * A.dot(psi)
F = h * psi.dot(F)
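# The three lines above are a second-order discretization of the continuous-time
# model: with Psi = I + (h/2)*A, the discrete transition matrix I + h*A*Psi equals
# I + h*A + (h^2/2)*A^2, i.e. expm(h*A) truncated at second order, while h*Psi*F
# approximates the integral of expm(s*A)*F over one time step.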
self.P = A.dot(self.P).dot(A.T) + F.dot(self.Q).dot(F.T)
self.a_imu_pre = a_imu
def imu_correction(self, R_imu, V_R_imu):
"""IMU correction step of the estimator.
Args:
R_imu: (3x3 numpy array) attitude measured by the IMU in SO(3)
V_R_imu: (3x3 numpy array) attitude measurement covariance
"""
imu_R = self.R.T.dot(R_imu).dot(self.R_bi_T)
del_z = 0.5 * vee(imu_R - imu_R.T)
H = np.block([self.zero3, self.zero3, self.eye3, np.zeros((3, 1))])
H_T = H.T
G = self.R_bi
G_T = G.T
V = V_R_imu
S = H.dot(self.P).dot(H_T) + G.dot(V).dot(G_T)
K = self.P.dot(H_T).dot(np.linalg.inv(S))
X = K.dot(del_z)
eta = X[6:9]
self.R = self.R.dot(expm_SO3(eta))
I_KH = self.eye10 - K.dot(H)
self.P = I_KH.dot(self.P).dot(I_KH.T) \
+ K.dot(G).dot(V).dot(G_T).dot(K.T)
def gps_correction(self, x_gps, v_gps, V_x_gps, V_v_gps):
"""GPS correction step of the estimator.
Args:
x_gps: (3x1 numpy array) position measured by the GPS [m]
v_gps: (3x1 numpy array) velocity measured by the GPS [m]
V_x_gps: (3x1 numpy array) position measurement covariance
V_v_gps: (3x1 numpy array) velocity measurement covariance
"""
del_z = np.hstack((x_gps - self.x, v_gps - self.v))
H = np.block([
[self.eye3, self.zero3, self.zero3,
|
np.zeros((3, 1))
|
numpy.zeros
|
#! / usr / bin / env python3
# -*- coding: utf-8 -*-
"""
Three classes are defined in here.
* a MRSData2 class with a bunch of methods based on the suspect module to deal with SIEMENS 7T MRS data
* a pipeline class to run the reconstruction process on a bunch of acquired data
@author: <NAME>
"""
import suspect
import numpy as np
import pandas as pd
from scipy import signal
from scipy import interpolate
import matplotlib.pylab as plt
import matplotlib._pylab_helpers
from datetime import datetime
import pickle
import os
import re
from enum import Enum
from pastis import io
from pastis import log
import pdb
# max number of datasets in a pipeline
MAX_NUM_DATASETS = 1000
# constants used during data rejection
# minimum step while setting amplitude threshold (% of signal relative change)
DATA_REJECTION_AMPLITUDE_STEP = 2.0
# minimum step while setting linewidth threshold (Hz)
DATA_REJECTION_LINEWIDTH_STEP = 1.0
# minimum step while setting chemical shift threshold (ppm)
DATA_REJECTION_FREQUENCY_STEP = 0.001
# minimum step while setting phase threshold (rad)
DATA_REJECTION_PHASE_STEP = 0.1
# spectral resolution for peak realignment using inter-correlation mode (experimental) (ppm)
RECO_CORRECT_REALIGN_INTER_CORR_MODE_DF = 0.1
class suspect_phasing_method(Enum):
"""The enum suspect_phasing_method describes the type of phasing method to use (phasing method from the suspect package."""
MATCH_MAGNITUDE_REAL = 1
MIN_IMAG_INTEGRAL = 2
ACME = 3
class data_rejection_method(Enum):
"""The enum data_rejection_method describes the type of data rejection method used."""
AUTO_AMPLITUDE = 0
AUTO_LINEWIDTH = 1
AUTO_FREQUENCY = 2
AUTO_PHASE = 3
class MRSData2(suspect.mrsobjects.MRSData):
"""A class based on suspect's MRSData to store MRS data."""
def __new__(cls, data_filepath, physio_log_file=None, anatomy_folderpath=None, obj=None, dt=None, f0=None, te=None, tr=None, ppm0=None, voxel_dimensions=None, transform=None, metadata=None, data_ref=None, label="", offset_display=0.0, patient={}, sequence_obj=None, noise_level=None, data_rejection=None, data_file_hash=None, is_concatenated=None, is_rawdata=None):
"""
Construct a MRSData2 object that inherits from Suspect's MRSData class. In short, the MRSData2 class is a copy of MRSData + my custom methods for post-processing. To create a MRSData2 object, you need to give a path that points to a SIEMENS DICOM or a SIEMENS TWIX file.
Parameters
----------
data_filepath : string
Full absolute file path pointing to the stored signal (DCM or TWIX file) or the folder assuming that a dcm file named "original-primary_e09_0001.dcm" is stored inside.
physio_log_file : string
Full absolute file path pointing to a IDEA VB17 respiratory log file
anatomy_folderpath : string
Full absolute file path pointing to a folder containing DICOM DCM files containing the anatomical images on which you want to display the VOI
obj,dt,f0,te,tr,ppm0,voxel_dimensions,transform,metadata
Please check suspect's MRSData class for those arguments
data_ref : MRSData2 object
Reference data acquired for this signal
label : string
Label for this signal
offset_display : float
Y-axis offset display
patient : dict
Patient meta data
sequence_obj : sim.mrs_sequence object
Sequence object
noise_level : float
Noise level measured on real FID
data_rejection : list of dicts
Data rejection results (NA, SNR, LW, etc.)
data_file_hash : string
MD5 hash code of data file content
is_concatenated : boolean
Was this signal the result of a concatenation?
is_rawdata : boolean
Was this signal read from a DICOM file?
Returns
-------
obj : MRSData2 numpy array [averages,channels,timepoints]
Resulting constructed MRSData2 object
"""
if(data_filepath == []):
# calling the parent class' constructor
obj = super(suspect.mrsobjects.MRSData, cls).__new__(cls, obj, dt, f0, te, tr, ppm0, voxel_dimensions, transform, metadata)
# adding attributes
obj.data_ref = data_ref
obj._display_label = label
obj._display_offset = offset_display
obj._tr = tr
obj._patient = patient
obj._sequence = sequence_obj
obj._noise_level = noise_level
obj._data_rejection = data_rejection
obj._data_file_hash = data_file_hash
obj._is_concatenated = is_concatenated
obj._is_rawdata = is_rawdata
# bye
return(obj)
# hello
log.debug("creating object...")
# open data file
log.info("reading data file...")
log.info(data_filepath)
# read data and header
mfr = io.get_data_file_reader(data_filepath)
# read data and get a suspect MRSData object
MRSData_obj = mfr.data
# get hash code
hc = mfr.get_md5_hash()
# --- reshape data ---
# add dimensions if needed
if(MRSData_obj.ndim == 1):
# this could be a single-shot single-rx signal in twix
# or an averaged single-rx signal in dicom...
# add 2 dimensions
MRSData_obj = MRSData_obj.reshape((1, 1,) + MRSData_obj.shape)
if(MRSData_obj.ndim == 2):
# this could be a single-shot multi-rx signal
# or a averaged single-rx signal...
coil_nChannels = mfr.get_number_rx_channels()
if(coil_nChannels == MRSData_obj.shape[0]):
# beware, same number of averages / coil elements, which is which?
log.warning("ambiguous data dimensions: " + str(MRSData_obj.shape) + "-> Assuming (" + str(MRSData_obj.shape[0]) + ") to be the coil channels!")
MRSData_obj = MRSData_obj.reshape((1,) + MRSData_obj.shape)
elif(coil_nChannels == 1):
# ok the user said it is a single channel coil, so the 2nd dimension here is the number of averages!
# adding 1 dimension for number of channels
MRSData_obj = MRSData_obj.reshape((1,) + MRSData_obj.shape)
MRSData_obj = np.transpose(MRSData_obj, (1, 0, 2))
else:
# ok that is a single-shot multi-channel signal
# adding 1 dimension for averages
MRSData_obj = MRSData_obj.reshape((1,) + MRSData_obj.shape)
log.info("read a " + str(MRSData_obj.shape) + " vector")
# --- build MRSData object ---
# calling the parent class' constructor
obj = super(suspect.mrsobjects.MRSData, cls).__new__(cls, MRSData_obj, MRSData_obj.dt, MRSData_obj.f0, MRSData_obj.te, MRSData_obj.tr, MRSData_obj.ppm0, MRSData_obj.voxel_dimensions, MRSData_obj.transform, MRSData_obj.metadata)
# --- get extra parameters ---
# patient birthyear
patient_birthday_datetime = mfr.get_patient_birthday()
log.debug("extracted patient birthyear (" + str(patient_birthday_datetime) + ")")
# patient sex
patient_sex_str = mfr.get_patient_sex()
log.debug("extracted patient sex (%s)" % patient_sex_str)
# patient name
patient_name_str = mfr.get_patient_name()
log.debug("extracted patient name (%s)" % patient_name_str)
# patient weight
patient_weight_kgs = mfr.get_patient_weight()
log.debug("extracted patient weight (%.2fkg)" % patient_weight_kgs)
# patient height
patient_height_m = mfr.get_patient_height()
log.debug("extracted patient height (%.2fm)" % patient_height_m)
# extract all the info to build the sequence object
sequence_obj = mfr.get_sequence()
# --- build MRSData2 object ---
# adding MRSData2 attributes
obj.data_ref = data_ref
obj._patient = {"name": patient_name_str,
"birthday": patient_birthday_datetime,
"sex": patient_sex_str,
"weight": patient_weight_kgs,
"height": patient_height_m}
obj._sequence = sequence_obj
obj._noise_level = 0.0
obj._data_rejection = None
obj._data_file_hash = hc
obj._is_concatenated = False
obj._is_rawdata = mfr.is_rawdata()
# those need to be called now, because they use the attributes above
obj.set_display_label()
obj.set_display_offset()
# respiratory trace if any
if(physio_log_file is None):
obj._physio_file = None
else:
# save this
obj._physio_file = physio_log_file
# anatomical images if any
if(anatomy_folderpath is None):
obj._anatomy_folderpath = None
else:
# save this
obj._anatomy_folderpath = anatomy_folderpath
return(obj)
def __array_finalize__(self, obj):
"""
Overload of the special numpy array method called whenever numpy creates a new instance from this object (copy, view, slice, etc.).
Parameters
----------
obj : MRSData2 numpy array [1,channels,timepoints]
"""
super().__array_finalize__(obj)
self.data_ref = getattr(obj, 'data_ref', None)
# replace by a copy
if(self.data_ref is not None):
self.data_ref = obj.data_ref.copy()
else:
self.data_ref = None
self._display_label = getattr(obj, 'display_label', None)
self._display_offset = getattr(obj, 'display_offset', 0.0)
self._patient = getattr(obj, 'patient', None)
self._physio_file = getattr(obj, 'physio_file', None)
self._anatomy_folderpath = getattr(obj, 'anatomy_folderpath', None)
self._sequence = getattr(obj, 'sequence', None)
# replace by a copy
if(self.sequence is not None):
self._sequence = obj.sequence.copy()
self._noise_level = getattr(obj, 'noise_level', None)
self._data_rejection = getattr(obj, 'data_rejection', None)
self._data_file_hash = getattr(obj, 'data_file_hash', None)
self._is_concatenated = getattr(obj, 'is_concatenated', None)
self._is_rawdata = getattr(obj, 'is_rawdata', None)
def inherit(self, obj):
"""
Overload of suspect's special MRSData method to import a numpy array into this object.
Parameters
----------
obj : MRSData2 numpy array [1,channels,timepoints]
Multi-channel reference signal
"""
obj2 = super().inherit(obj)
obj2.data_ref = getattr(self, 'data_ref', None)
# replace by a copy
if(obj2.data_ref is not None):
obj2.data_ref = self.data_ref.copy()
else:
self.data_ref = None
obj2._display_label = getattr(self, 'display_label', None)
obj2._display_offset = getattr(self, 'display_offset', 0.0)
obj2._patient = getattr(self, 'patient', None)
obj2._physio_file = getattr(self, 'physio_file', None)
obj2._anatomy_folderpath = getattr(self, 'anatomy_folderpath', None)
obj2._sequence = getattr(self, 'sequence', None)
# replace by a copy
if(obj2.sequence is not None):
obj2._sequence = self.sequence.copy()
obj2._noise_level = getattr(self, 'noise_level', 0.0)
obj2._data_rejection = getattr(self, 'data_rejection', None)
obj2._data_file_hash = getattr(self, 'data_file_hash', None)
obj2._is_concatenated = getattr(self, 'is_concatenated', 0.0)
obj2._is_rawdata = getattr(self, 'is_rawdata', 0.0)
return(obj2)
@property
def display_label(self):
"""
Property get function for display_label.
Returns
-------
self._display_label : string
Display label used in display_spectrum method
"""
return(self._display_label)
def set_display_label(self, lbl=""):
"""
Set the display label.
Parameters
----------
lbl: string
New display label for this signal
"""
if((lbl == "" or lbl == []) and self.patient is not None):
# create a useful label based on patient name, sequence and object id
new_lbl = ""
if(self.patient["name"] is not None):
new_lbl = new_lbl + self.patient["name"] + " | "
if(self.sequence is not None):
new_lbl = new_lbl + self.sequence.name + " | "
new_lbl = new_lbl + str(id(self))
self._display_label = new_lbl
else:
self._display_label = lbl
@property
def display_offset(self):
"""
Property get function for display_offset.
Returns
-------
self._display_offset : float
Display offset used in display_spectrum method
"""
return(self._display_offset)
def set_display_offset(self, ofs=0.0):
"""
Set the display offset.
Parameters
----------
ofs: float
New display offset for this signal
"""
self._display_offset = ofs
@property
def patient(self):
"""
Property get function for patient.
Returns
-------
self._patient : dict
Patient meta data
"""
return(self._patient)
@property
def physio_file(self):
"""
Property get function for physio_file.
Returns
-------
self._physio_file : string
Path to physio recording file
"""
return(self._physio_file)
@property
def anatomy_folderpath(self):
"""
Property get function for anatomy_folderpath.
Returns
-------
self._anatomy_folderpath : string
Full absolute file path pointing to a folder containing DICOM DCM files containing the anatomical images on which you want to display the VOI
"""
return(self._anatomy_folderpath)
@property
def sequence(self):
"""
Property get function for sequence.
Returns
-------
self._sequence : sim.mrs_sequence object
Sequence
"""
return(self._sequence)
@property
def noise_level(self):
"""
Property get function for noise_level.
Returns
-------
self._noise_level : float
Noise level in time-domain
"""
return(self._noise_level)
@property
def data_rejection(self):
"""
Property get function for data_rejection.
Returns
-------
self._data_rejection : list of dict
Data rejection results (NA, SNR, LW, etc.)
"""
return(self._data_rejection)
@property
def data_file_hash(self):
"""
Property get function for data_file_hash.
Returns
-------
self._data_file_hash : string
MD5 hash code of data file content
"""
return(self._data_file_hash)
@property
def is_concatenated(self):
"""
Property get function for is_concatenated.
Returns
-------
self._is_concatenated : boolean
True if this current signal is a result of a concatenation
"""
return(self._is_concatenated)
@property
def is_rawdata(self):
"""
Property get function for is_rawdata.
Returns
-------
self._is_rawdata : boolean
True if this current signal was originally read from a raw data file
"""
return(self._is_rawdata)
def _analyze_peak_1d(self, ppm_range, allowed_apodization=1.0):
"""
Find peak in specific ppm range using magnitude mode and return stuff.
* Works only with a 1D [timepoints] signal.
Parameters
----------
ppm_range : list [2]
Range in ppm used for peak searching
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during peak analysis
Returns
-------
peak_ppm : float
Position in PPM of the peak
peak_val : np.complex128
Peak value
peak_lw : float
Peak linewidth in Hz
peak_seg_ppm : numpy array
Peak segment ppm axis
peak_seg_val : numpy complex array
Peak segment value
"""
# silently zero-fill and apodize if needed
log.pause()
s = self.correct_zerofill_nd().correct_apodization_nd(allowed_apodization)
log.resume()
# init
ppm = s.frequency_axis_ppm()
sf = s.spectrum()
sf_abs = np.abs(sf)
# mask outside range
ippm_peak_outside_range = (ppm_range[0] > ppm) | (ppm > ppm_range[1])
sf_abs[ippm_peak_outside_range] = 0
# max
peak_index = np.argmax(sf_abs)
# check
if(peak_index == 0):
log.error("no peak found in specified ppm range or badly phased data!")
# ppm
peak_ppm = ppm[peak_index]
# complex value
peak_val = sf[peak_index]
# lw
sf_real = np.real(sf)
amp_peak = np.real(peak_val)
ippm_max = peak_index + np.argmax(sf_real[peak_index:] < amp_peak / 2)
ippm_min = peak_index - np.argmax(sf_real[peak_index::-1] < amp_peak / 2)
dppm = np.abs(ppm[ippm_max] - ppm[ippm_min])
peak_lw = dppm * s.f0
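# (f0 is the spectrometer frequency in MHz, so a width of 1 ppm corresponds to f0 Hz;
# multiplying the FWHM measured in ppm by f0 therefore gives the linewidth in Hz)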
# peak segment
ippm_half_peak = np.arange(ippm_min, ippm_max)
ppm_seg = ppm[ippm_half_peak]
peak_seg = sf[ippm_half_peak]
return(peak_ppm, peak_val, peak_lw, ppm_seg, peak_seg)
def _analyze_peak_2d(self, peak_range=[4.5, 5], allowed_apodization=1.0):
"""
Analyze a peak in the spectrum by estimating its amplitude, linewidth, frequency shift and phase for each average.
* Works only with a 2D [averages,timepoints] signal.
Parameters
----------
peak_range : array [2]
Range in ppm used to analyze peak phase when no reference signal is specified
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during peak analysis
Returns
-------
peak_trace : numpy array [averages,4]
Peak changes (amplitude, linewidth, frequency and phase) for each average in raw data
peak_trace_rel2mean : numpy array [averages,4]
Peak changes (amplitude, linewidth, frequency and phase) for each average in raw data relative to mean value
peak_trace_rel2firstpt : numpy array [averages,4]
Peak relative changes (amplitude, linewidth, frequency and phase) for each average in raw data relative to 1st point
"""
log.debug("analyzing peak for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# apodize if needed
s = self.copy()
# first, find peak of interest in range, just to check
s_avg = np.mean(s, axis=0)
peak_ppm, _, _, _, _ = s_avg._analyze_peak_1d(peak_range, allowed_apodization)
log.debug("found peak of interest at %0.2fppm!" % peak_ppm)
# for each average in moving averaged data
peak_trace = np.zeros([s.shape[0], 4])
pbar = log.progressbar("analyzing", s.shape[0])
for a in range(0, s.shape[0]):
# call 1D peak analysis
peak_ppm, peak_val, peak_lw, _, _ = s[a, :]._analyze_peak_1d(peak_range, allowed_apodization)
# shift in ppm
peak_trace[a, 2] = peak_ppm
# amplitude
peak_trace[a, 0] = np.real(peak_val)
# linewidth in Hz
peak_trace[a, 1] = peak_lw
# phase in rad
peak_trace[a, 3] = np.angle(peak_val)
pbar.update(a)
# normalize stuff
# relative to mean
peak_trace_rel2mean = np.zeros([s.shape[0], 4])
peak_trace_rel2mean[:, 0] = peak_trace[:, 0] / peak_trace[:, 0].mean() * 100 - 100
peak_trace_rel2mean[:, 1] = peak_trace[:, 1] - peak_trace[:, 1].mean()
peak_trace_rel2mean[:, 2] = peak_trace[:, 2] - peak_trace[:, 2].mean()
peak_trace_rel2mean[:, 3] = peak_trace[:, 3] - peak_trace[:, 3].mean()
# relative to 1st pt
peak_trace_rel2firstpt = np.zeros([s.shape[0], 4])
peak_trace_rel2firstpt[:, 0] = peak_trace[:, 0] / peak_trace[0, 0] * 100 - 100
peak_trace_rel2firstpt[:, 1] = peak_trace[:, 1] - peak_trace[0, 1]
peak_trace_rel2firstpt[:, 2] = peak_trace[:, 2] - peak_trace[0, 2]
peak_trace_rel2firstpt[:, 3] = peak_trace[:, 3] - peak_trace[0, 3]
pbar.finish("done")
return(peak_trace, peak_trace_rel2mean, peak_trace_rel2firstpt)
def analyze_noise_nd(self, n_pts=100):
"""
Measure noise level in time domain and store it in the "noise_level" attribute. This is useful to keep track of the original noise level for later use, for example CRB normalization during quantification.
* Works with multi-dimensional signals.
* Returns a multi-dimensional signal.
Parameters
----------
n_pts : int
Number of points at the end of the FID signal which we should use to estimate the noise STD
Returns
-------
noise_lev : float
Time-domain noise level
"""
log.debug("estimating noise level for [%s]..." % self.display_label)
s = self.copy()
s_real = np.real(s)
# average if needed
while(s_real.ndim > 1):
s_real = np.mean(s_real, axis=0)
# init
log.debug("estimating noise level in FID using last %d points..." % n_pts)
# noise is the std of the last real points, but that is not so simple
# we really want real noise, not zeros from zero_filling
s_nonzero_mask = (s_real != 0.0)
s_analyze = s_real[s_nonzero_mask]
# now take the last n_pts points
noise_lev = np.std(s_analyze[-n_pts:-1])
log.info("noise level = %.2E" % noise_lev)
# changing noise level attribute
log.debug("updating noise level...")
s._noise_level = noise_lev
# if any ref data available, analyze noise there too
if(s.data_ref is not None):
s.data_ref = s.data_ref.analyze_noise_nd(n_pts)
return(s)
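# Minimal numpy sketch of the noise estimate above (illustration only, hypothetical
# `fid` variable): the noise level is the std of the real part of the last non-zero
# FID points, zero-filled samples being excluded first.
#
#   fid_real = np.real(fid)
#   fid_real = fid_real[fid_real != 0.0]      # drop zero-filled samples
#   noise_std = np.std(fid_real[-100:-1])     # std of the FID tail (here ~100 pts)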
def analyze_physio_2d(self, peak_range=[4.5, 5], delta_time_range=1000.0, allowed_apodization=1.0, display=False):
"""
Analyze the physiological signal and try to correlate it to a peak amplitude, linewidth, frequency shift and phase variations.
* Works only with a 2D [averages,timepoints] signal.
Parameters
----------
peak_range : array [2]
Range in ppm used to analyze peak phase when no reference signal is specified
delta_time_range : float
Range in ms used to correlate / match the NMR and the physiological signal. Since we are not completely sure of the start timestamp found in the TWIX header, we try to match the two signals as well as possible.
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during correction process. However, the final corrected signal will not be apodized.
display : boolean
Display correction process (True) or not (False)
"""
log.debug("analyzing physiological signals for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
if(self._physio_file is None):
# no physio signal here, exiting
log.error("no error physiological recording file provided!")
return()
# read data
[rt, rup, rd, rr] = io.read_physio_file(self._physio_file)
resp_trace = [rt, rup, rd, rr]
# physio signal
resp_t = resp_trace[0]
resp_s = resp_trace[3]
# perform peak analysis
peak_prop_abs, _, _ = self._analyze_peak_2d(peak_range, allowed_apodization)
# init
mri_t = np.linspace(self.sequence.timestamp, self.sequence.timestamp + self.sequence.tr * peak_prop_abs.shape[0], peak_prop_abs.shape[0])
dt_array = np.arange(-delta_time_range / 2.0, delta_time_range / 2.0, 1.0)
cc_2d = np.zeros([dt_array.shape[0], 4])
# shift signal and calculate corr coeff
pbar = log.progressbar("correlating signals", dt_array.shape[0])
for idt, dt in enumerate(dt_array):
# build time scale
this_resp_t_interp = mri_t.copy() + dt
this_resp_s_interp = np.interp(this_resp_t_interp, resp_t, resp_s)
# now crop the signals to have the same length
final_length = min(this_resp_s_interp.shape[0], peak_prop_abs.shape[0])
this_mri_t = mri_t[0:final_length]
this_params_trace = peak_prop_abs[0:final_length, :]
this_resp_s_interp = this_resp_s_interp[0:final_length]
# now remove points where resp trace is at 0 or 1
mm = np.logical_and(this_resp_s_interp > 0, this_resp_s_interp < 1)
this_resp_s_interp = this_resp_s_interp[mm]
this_params_trace = this_params_trace[mm, :]
# now, for each parameter
for p in range(4):
# estimate some R coeff
cc = np.corrcoef(this_resp_s_interp, this_params_trace[:, p])
cc_2d[idt, p] = cc[0, 1]
pbar.update(idt)
# find time shift that gives best correlation for each parameter
best_dt_per_par = np.zeros(4)
for p in range(4):
i_maxcorr = np.argmax(np.abs(cc_2d[:, p]))
best_dt = dt_array[i_maxcorr]
best_dt_per_par[p] = best_dt
# find time shift that gives best correlation for all 4 parameters
cc_2d_all = np.sum(np.abs(cc_2d), axis=1)
i_maxcorr = np.argmax(cc_2d_all)
best_dt_all = dt_array[i_maxcorr]
pbar.finish("done")
# some info in the term
st_ms = self.sequence.timestamp
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("data timestamp=\t" + str(st_ms) + "ms\t" + st_str)
log.info("best start time for...")
st_ms = self.sequence.timestamp + best_dt_per_par[0]
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("amplitude=\t\t" + str(st_ms) + "ms\t" + st_str)
st_ms = self.sequence.timestamp + best_dt_per_par[1]
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("linewidth=\t\t" + str(st_ms) + "ms\t" + st_str)
st_ms = self.sequence.timestamp + best_dt_per_par[2]
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("frequency=\t\t" + str(st_ms) + "ms\t" + st_str)
st_ms = self.sequence.timestamp + best_dt_per_par[3]
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("phase=\t\t" + str(st_ms) + "ms\t" + st_str)
st_ms = self.sequence.timestamp + best_dt_all
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("total=\t\t" + str(st_ms) + "ms\t" + st_str)
imaxR = np.argmax(best_dt_per_par)
best_dt = best_dt_per_par[imaxR]
st_ms = self.sequence.timestamp + best_dt
st_str = datetime.fromtimestamp(st_ms / 1000 - 3600).strftime('%H:%M:%S')
log.info("max R for=\t\t" + str(st_ms) + "ms\t" + st_str)
# time shift the signals with optimal shift
# build time scale
this_resp_t_interp = mri_t.copy() + best_dt
this_resp_s_interp = np.interp(this_resp_t_interp, resp_t, resp_s)
# now crop the signals to have the same length
final_length = min(this_resp_s_interp.shape[0], peak_prop_abs.shape[0])
this_mri_t = mri_t[0:final_length]
this_params_trace = peak_prop_abs[0:final_length, :]
this_resp_s_interp = this_resp_s_interp[0:final_length]
# evaluate correlation coeff.
# for each parameter
cc_final = np.zeros(4)
for p in range(4):
# estimate some R coeff
cc = np.corrcoef(this_resp_s_interp, this_params_trace[:, p])
cc_final[p] = cc[0, 1]
# now let's talk about FFT
log.debug("FFT analysis...")
nFFT = 2048
# freq axis for resp trace
resp_f_axis = np.fft.fftshift(np.fft.fftfreq(nFFT, d=(resp_t[1] - resp_t[0]) / 1000.0)) # Hz or 1/s
resp_f_axis_bpm = resp_f_axis * 60.0 # / min
# freq axis for params traces
this_params_trace_f_axis = np.fft.fftshift(np.fft.fftfreq(nFFT, d=self.tr / 1000.0)) # Hz or 1/s
this_params_trace_f_axis_bpm = this_params_trace_f_axis * 60.0 # / min
# FFT of resp. trace
resp_fft = np.abs(np.fft.fftshift(np.fft.fft((resp_s - np.mean(resp_s)) * signal.windows.hann(resp_s.shape[0]), nFFT, norm='ortho')))
# FFT of params traces
this_params_trace_fft = np.zeros([nFFT, 4])
for p in range(4):
this_params_trace_fft[:, p] = np.abs(np.fft.fftshift(np.fft.fft((this_params_trace[:, p] - np.mean(this_params_trace[:, p])) * signal.windows.hann(this_params_trace.shape[0]), nFFT, axis=0, norm='ortho'), axes=0))
if(display):
# display the cross-correlation plots
fig_title = "Analyzing physiological signal [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='all')
p = 0
for ix in range(2):
for iy in range(2):
axs[ix, iy].plot(dt_array, cc_2d[:, p], '-', linewidth=1)
axs[ix, iy].axvline(x=best_dt_per_par[p], color='r', linestyle='-')
axs[ix, iy].axvline(x=best_dt, color='r', linestyle='--')
axs[ix, iy].set_xlabel('time shift (ms)')
axs[ix, iy].grid('on')
p = p + 1
axs[0, 0].set_ylabel('R amplitude vs. resp.')
axs[0, 1].set_ylabel('R linewidth. vs. resp.')
axs[1, 0].set_ylabel('R frequency vs. resp.')
axs[1, 1].set_ylabel('R phase vs. resp.')
fig.subplots_adjust()
fig.show()
# display time signals
fig_title = "Physio. signal analysis (I) [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='all')
p = 0
for ix in range(2):
for iy in range(2):
if(cc_final[p] > 0):
axs[ix, iy].plot(resp_t - best_dt, resp_s, 'k-', label='resp. original')
axs[ix, iy].plot(this_mri_t, this_resp_s_interp, 'b-', label='resp. resampled')
else:
axs[ix, iy].plot(resp_t - best_dt, 1.0 - resp_s, 'k-', label='resp. original')
axs[ix, iy].plot(this_mri_t, 1.0 - this_resp_s_interp, 'b-', label='resp. resampled')
ax2 = axs[ix, iy].twinx()
ax2.plot(this_mri_t, this_params_trace[:, p], 'rx-', label='MR peak property')
axs[ix, iy].set_xlabel('time (ms)')
axs[ix, iy].grid('on')
axs[ix, iy].legend(bbox_to_anchor=(1.05, 1), loc=2, mode='expand', borderaxespad=0)
ax2.legend(bbox_to_anchor=(1.05, 1), loc=3, mode='expand', borderaxespad=0)
p = p + 1
axs[0, 0].set_ylabel('Rel. amplitude change (%)')
axs[0, 1].set_ylabel('Abs. linewidth (Hz)')
axs[1, 0].set_ylabel('Abs. frequency (Hz)')
axs[1, 1].set_ylabel('Abs. phase shift (rd)')
fig.subplots_adjust()
fig.show()
# display correlation plots
fig_title = "Physio. signal analysis (II) [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='all')
p = 0
for ix in range(2):
for iy in range(2):
axs[ix, iy].scatter(this_resp_s_interp, this_params_trace[:, p])
axs[ix, iy].set_xlabel('resp. (u.a)')
axs[ix, iy].grid('on')
axs[ix, iy].set_title("R=" + str(cc_final[p]))
p = p + 1
axs[0, 0].set_ylabel('Rel. amplitude change (%)')
axs[0, 1].set_ylabel('Abs. linewidth (Hz)')
axs[1, 0].set_ylabel('Abs. frequency (Hz)')
axs[1, 1].set_ylabel('Abs. phase shift (rd)')
fig.subplots_adjust()
fig.show()
# display FFT plots
fig_title = "Physio. signal analysis (III) [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='all')
p = 0
for ix in range(2):
for iy in range(2):
axs[ix, iy].plot(resp_f_axis_bpm, resp_fft, 'k-', label='resp.')
ax2 = axs[ix, iy].twinx()
ax2.plot(this_params_trace_f_axis_bpm, this_params_trace_fft[:, p], 'r-', label='MR peak property')
axs[ix, iy].set_xlabel('frequency (BPM or 1 / min)')
axs[ix, iy].grid('on')
axs[ix, iy].set_xlim(0, 60.0)
axs[ix, iy].legend(bbox_to_anchor=(1.05, 1), loc=2, mode='expand', borderaxespad=0)
ax2.legend(bbox_to_anchor=(1.05, 1), loc=3, mode='expand', borderaxespad=0)
p = p + 1
axs[0, 0].set_ylabel('Rel. amplitude change (FFT)')
axs[0, 1].set_ylabel('Abs. linewidth (FFT)')
axs[1, 0].set_ylabel('Abs. frequency (FFT)')
axs[1, 1].set_ylabel('Abs. phase shift (FFT)')
fig.subplots_adjust()
fig.show()
# done
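# Sketch of the correlation test above (simplified, hypothetical variable names):
# for one candidate time shift `dt`, the respiratory trace is resampled on the MRS
# time grid and compared to one peak property with a Pearson R (both vectors being
# cropped to the same length beforehand).
#
#   resp_on_mri_grid = np.interp(mri_t + dt, resp_t, resp_s)
#   r = np.corrcoef(resp_on_mri_grid, peak_amplitude_trace)[0, 1]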
def analyze_snr_1d(self, peak_range, noise_range=[-2, -1], half_factor=False, magnitude_mode=False, display=False, display_range=[1, 6]):
"""
Estimate the SNR of a peak in the spectrum; the chemical shift ranges for the peak and the noise region are specified by the user. Can also look at time-domain SNR. Works only for 1D MRSData2 objects.
* Works only with a 1D [timepoints] signal.
Parameters
----------
peak_range : list [2]
Range in ppm used to find a peak of interest
noise_range : list [2]
Range in ppm used to estimate noise
half_factor : boolean
If True, divide the SNR by 2, following an old definition of SNR in which the noise std is multiplied by two. Btw, this outdated definition is used by LCModel.
magnitude_mode : boolean
analyze signal in magnitude mode (True) or the real part (False)
display : boolean
Display process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
snr : float
Resulting SNR value
s : float
Resulting signal value
n : float
Resulting noise value
"""
log.debug("analyzing SNR for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
# display
if(display):
fig_title = "SNR estimation [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
# find maximum peak in range
sf = s.spectrum()
# analyze peak WITHOUT ANY apodization (important)
ppm_peak, peak_val, _, _, _ = s._analyze_peak_1d(peak_range, 0.0)
if(magnitude_mode):
log.debug("measuring the MAGNITUDE intensity at %0.2fppm!" % ppm_peak)
snr_signal = np.abs(peak_val)
sf_noise = np.abs(sf)
else:
log.debug("measuring the REAL intensity at %0.2fppm!" % ppm_peak)
snr_signal = np.real(peak_val)
sf_noise = np.real(sf)
# estimate noise in user specified spectral region
ppm = s.frequency_axis_ppm()
log.debug("estimating noise from %0.2f to %0.2fppm region!" % (noise_range[0], noise_range[1]))
ippm_noise_range = (noise_range[0] < ppm) & (ppm < noise_range[1])
snr_noise = np.std(sf_noise[ippm_noise_range])
if(half_factor):
snr_noise = 2 * snr_noise
if(display):
axs[0].plot(ppm, np.real(sf), 'k-', linewidth=1)
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('real part')
axs[0].grid('on')
axs[1].plot(ppm, np.abs(sf), 'k-', linewidth=1)
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('magnitude mode')
axs[1].grid('on')
if(magnitude_mode):
ax = axs[1]
else:
ax = axs[0]
# show peak of interest
ax.plot(ppm_peak, snr_signal, 'ro')
ax.axvline(x=ppm_peak, color='r', linestyle='--')
# show noise region
ax.plot(ppm[ippm_noise_range], sf_noise[ippm_noise_range], 'bo')
# finish display
if(display):
fig.subplots_adjust()
fig.show()
# that's it
snr = snr_signal / snr_noise
log.info("results for [" + s.display_label + "] coming...")
log.info("S = %.2E, N = %.2E, SNR = %0.2f!" % (snr_signal, snr_noise, snr))
return(snr, snr_signal, snr_noise)
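# The SNR definition used above, as a stand-alone sketch (hypothetical variables):
# signal = real (or magnitude) intensity of the peak of interest, noise = std of the
# spectrum in a signal-free ppm range, optionally doubled (LCModel-style definition).
#
#   noise_mask = (ppm > -2.0) & (ppm < -1.0)
#   snr = np.real(peak_val) / np.std(np.real(sf[noise_mask]))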
def analyze_linewidth_1d(self, peak_range, magnitude_mode=False, display=False, display_range=[1, 6]):
"""
Estimate the linewidth of a peak in the spectrum; the chemical shift range for the peak is specified by the user.
* Works only with a 1D [timepoints] signal.
Parameters
----------
peak_range : list [2]
Range in ppm used to find a peak of interest
magnitude_mode : boolean
analyze signal in magnitude mode (True) or the real part (False)
display : boolean
Display process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
lw : float
Linewidth in Hz
"""
log.debug("analyzing peak linewidth for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
# call 1D peak analysis WITHOUT ANY apodization (important)
ppm_peak, _, lw, peak_seg_ppm, peak_seg_val = s._analyze_peak_1d(peak_range, False)
if(magnitude_mode):
log.debug("estimating the MAGNITUDE peak linewidth at %0.2fppm!" % ppm_peak)
else:
log.debug("estimating the REAL peak linewidth at %0.2fppm!" % ppm_peak)
log.info("results for [" + s.display_label + "] coming...")
log.info("LW = %0.2f Hz!" % lw)
if(display):
ppm = s.frequency_axis_ppm()
sf = s.spectrum()
fig_title = "FWHM estimation [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
axs[0].plot(ppm, np.real(sf), 'k-', linewidth=1)
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('real part')
axs[0].grid('on')
axs[1].plot(ppm, np.abs(sf), 'k-', linewidth=1)
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('magnitude mode')
axs[1].grid('on')
if(magnitude_mode):
axs[1].plot(peak_seg_ppm, np.abs(peak_seg_val), 'r-')
else:
axs[0].plot(peak_seg_ppm, np.real(peak_seg_val), 'r-')
fig.subplots_adjust()
fig.show()
return(lw)
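# Note on units: _analyze_peak_1d returns the linewidth directly in Hz. If only a
# width in ppm were available, the conversion would be (sketch, assuming f0 is the
# spectrometer frequency expressed in MHz, as in suspect):
#
#   lw_hz = lw_ppm * s.f0      # 1 ppm corresponds to f0 Hz when f0 is in MHz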
def correct_intensity_scaling_nd(self, scaling_factor_rawdata=1e8, scaling_factor_dcm=1.0):
"""
Amplify the FID signals. Sounds useless but can actually help during quantification! It is not a good idea to fit signals with intensities around 1e-6 or lower because of fit tolerances and floating-point precision (epsilon).
* Works with multi-dimensional signals.
Parameters
----------
scaling_factor_rawdata : float
Amplification factor if data is raw
scaling_factor_dcm : float
Amplification factor if data is from a dcm file (already amplified)
Returns
-------
s : MRSData2 numpy array [whatever dimensions]
Resulting amplified MRSData2 object
"""
log.debug("intensity scaling [%s]..." % self.display_label)
if(self.is_rawdata):
scaling_factor = scaling_factor_rawdata
else:
scaling_factor = scaling_factor_dcm
# scale signal
log.debug("multiplying time-domain signals by %E..." % scaling_factor)
s_sc = self.copy() * scaling_factor
# convert back to MRSData2
s_sc = self.inherit(s_sc)
# if any ref data available, we scale it too (silently)
if(s_sc.data_ref is not None):
s_sc.data_ref = s_sc.data_ref.correct_intensity_scaling_nd(scaling_factor)
return(s_sc)
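# Hedged usage sketch (`s` being a hypothetical MRSData2 object): scaling is
# transparent for relative quantification since the metabolite and reference data
# are amplified by the same factor.
#
#   s_scaled = s.correct_intensity_scaling_nd()   # raw data: multiplied by 1e8 by default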
def correct_fidmodulus_nd(self):
"""
Calculate the modulus (magnitude) of the FID signals. I am not sure I am doing this correctly but it is a first attempt.
* Works with multi-dimensional signals.
Returns
-------
s : MRSData2 numpy array [whatever dimensions]
Resulting FID modulus data stored in a MRSData2 object
"""
# init
log.debug("fid modulus [%s]..." % self.display_label)
log.debug("calculation magnitude of signal...")
s = self.copy()
# return magnitude
s = self.inherit(np.abs(s))
return(s)
def correct_zerofill_nd(self, nPoints_final=16384, display=False, display_range=[1, 6]):
"""
Zero-fill MRS data signals along the time axis.
* Works with multi-dimensional signals.
* Returns a multi-dimensional signal.
Parameters
----------
nPoints_final : int
Final number of points
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_zf : MRSData2 numpy array [whatever,...,timepoints]
Resulting zero-filled data stored in a MRSData2 object
"""
# init
log.debug("zero_filling [%s]..." % self.display_label)
s = self.copy()
# check
nZeros = nPoints_final - s.np
if(nZeros <= 0):
log.warning("no zero-filling performed. The number of zeros to add was negative (%d)!" % nZeros)
return(s)
s_new_shape = list(s.shape)
s_new_shape[-1] = nZeros
log.debug("%d-pts signal + %d zeros = %d-pts zero-filled signal..." % (s.np, nZeros, nPoints_final))
s_zf = self.inherit(np.concatenate((s, np.zeros(s_new_shape)), axis=s.ndim - 1))
# sequence npts
if(s_zf.sequence is not None):
log.debug("updating sequence.npts...")
s_zf.sequence.npts = nPoints_final
s_zf.sequence._ready = False
if(display):
s_disp = s.copy()
s_zf_disp = s_zf.copy()
while(s_disp.ndim > 1):
s_disp = np.mean(s_disp, axis=0)
s_zf_disp = np.mean(s_zf_disp, axis=0)
fig_title = "Zero-filling [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='row', sharey='row')
# no time axis, we want to see the number of points
axs[0, 0].plot(np.real(s_disp), 'k-', linewidth=1)
axs[0, 0].set_xlabel('number of points')
axs[0, 0].set_ylabel('original')
axs[0, 0].grid('on')
axs[0, 1].plot(np.real(s_zf_disp), 'b-', linewidth=1)
axs[0, 1].set_xlabel('number of points')
axs[0, 1].set_ylabel('zero-filled')
axs[0, 1].grid('on')
axs[1, 0].plot(s_disp.frequency_axis_ppm(), s_disp.spectrum().real, 'k-', linewidth=1)
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('original')
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].grid('on')
axs[1, 1].plot(s_zf_disp.frequency_axis_ppm(), s_zf_disp.spectrum().real, 'b-', linewidth=1)
axs[1, 1].set_xlabel("chemical shift (ppm)")
axs[1, 1].set_ylabel('zero-filled')
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].grid('on')
fig.subplots_adjust()
fig.show()
# if any ref data available, we zero-fill it too (silently)
if(s_zf.data_ref is not None):
s_zf.data_ref = s_zf.data_ref.correct_zerofill_nd(nPoints_final, False)
return(s_zf)
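# Why zero-fill? It adds no information but refines the frequency grid of the FFT:
# the spectral spacing is df = 1 / (N * dt). A rough sketch with assumed values:
#
#   dt = 1.0 / 5000.0                 # hypothetical dwell time for a 5 kHz bandwidth
#   df_before = 1.0 / (4096 * dt)     # ~1.22 Hz per point
#   df_after = 1.0 / (16384 * dt)     # ~0.31 Hz per point after zero-filling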
def correct_time_shift_nd(self, time_shift_us=-375, display=False, display_range=[1, 6]):
"""
Shift MRS data time signals along the time axis (circular shift of the FIDs).
* Works with multi-dimensional signals.
* Returns a multi-dimensional signal.
Parameters
----------
time_shift_us : float
Time shift (us) to apply to the time signals. A negative value means the beginning of the FIDs is cut off and circularly shifted to the end.
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_shifted : MRSData2 numpy array [averages,channels,timepoints]
Resulting shifted data stored in a MRSData2 object
"""
log.debug("time_shifting [%s]..." % self.display_label)
# init
s = self.copy()
# prepare frequency vector
f = s.frequency_axis()
if(s.ndim > 1):
f_tiles = list(s.shape)
f_tiles[-1] = 1
f = np.tile(f, f_tiles)
# fft
sf = np.fft.fftshift(np.fft.fft(s, axis=-1), axes=-1)
# apply a 1st order phase (linear phase ramp) in the frequency domain (which is equivalent to a time shift in the time domain)
sf_shifted = sf * np.exp(1j * 2.0 * np.pi * -time_shift_us / 1000000.0 * f)
# fft back and pray
s_shifted = np.fft.ifft(np.fft.ifftshift(sf_shifted, axes=-1), axis=-1)
# convert back to MRSData2
s_shifted = self.inherit(s_shifted)
if(display):
t = s.time_axis() * 1000000.0 # us
ppm = s.frequency_axis_ppm()
s_disp = s.copy()
s_shifted_disp = s_shifted.copy()
while(s_disp.ndim > 1):
s_disp = np.mean(s_disp, axis=0)
s_shifted_disp = np.mean(s_shifted_disp, axis=0)
fig_title = "Time-shifting [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='row', sharey='row')
axs[0, 0].plot(t, np.real(s_disp), 'k-', linewidth=1)
axs[0, 0].set_xlabel('time (us)')
axs[0, 0].set_ylabel('original')
axs[0, 0].grid('on')
axs[0, 1].plot(t, np.real(s_shifted_disp), 'b-', linewidth=1)
axs[0, 1].set_xlabel('time (us)')
axs[0, 1].set_ylabel('time-shifted')
axs[0, 1].grid('on')
axs[1, 0].plot(ppm, s_disp.spectrum().real, 'k-', linewidth=1)
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('original')
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].grid('on')
axs[1, 1].plot(ppm, s_shifted_disp.spectrum().real, 'b-', linewidth=1)
axs[1, 1].set_xlabel("chemical shift (ppm)")
axs[1, 1].set_ylabel('time-shifted')
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].grid('on')
fig.subplots_adjust()
fig.show()
# if any ref data available, we time-shift it too (silently)
if(s_shifted.data_ref is not None):
s_shifted.data_ref = s_shifted.data_ref.correct_time_shift_nd(time_shift_us)
return(s_shifted)
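# Sketch of the Fourier shift theorem used above (illustrative, hypothetical `fid`
# and frequency axis `f`): delaying a signal by tau seconds is equivalent to
# multiplying its spectrum by exp(-2j * pi * f * tau).
#
#   tau = time_shift_us * 1e-6
#   sf = np.fft.fftshift(np.fft.fft(fid))
#   fid_shifted = np.fft.ifft(np.fft.ifftshift(sf * np.exp(-2j * np.pi * f * tau)))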
def correct_phase_3d(self, use_ref_data=True, peak_range=[4.5, 5], average_per_channel_mode=False, first_point_fid_mode=False, phase_order=0, phase_offset=0.0, display=False, display_range=[1, 6]):
"""
Well, that's a big one but basically it rephases the signal of interest.
>In the case of multi-channel acquisition, the phase will be estimated for each channel, for each average using phase time evolution estimated on reference signal.
>If the reference signal is not specified and weak water suppression was performed, 0th order phase correction will be done using the first point in the fid.
>If strong water suppression was performed, 0th order phase correction will be done using the phase of a peak in the spectrum; the chemical shift range to find this peak is specified by the user.
>>Note that those last two approaches can be performed for each average in the case of high SNR (rare) or by averaging all the scans for each channel in the case of lower SNR.
* Works only with a 3D [averages,channels,timepoints] signal.
* Returns a 3D [averages,channels,timepoints] signal.
Parameters
----------
use_ref_data : boolean
Use reference data (usually non water suppressed) for phasing
peak_range : list [2]
Range in ppm used to peak-pick and estimate a phase
average_per_channel_mode : boolean
Average all the averages for each channel when doing peak analysis
first_point_fid_mode : boolean
Estimate phase from 1st point of FID
phase_order : int
Order of phasing: 0(th) or 1(st) order phasing
phase_offset : float
Phase added to signal (rad)
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_phased : MRSData2 numpy array [averages,channels,timepoints]
Resulting phased data stored in a MRSData2 object
"""
log.debug("phasing [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 3):
log.error("this method only works for 3D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# check if any ref data
if(self.data_ref is None and use_ref_data):
log.warning("you want to phase data based on ref. data but no such data is available!")
use_ref_data = False
# dimensions check for reference data
if(use_ref_data and self.data_ref.ndim != 3):
log.error("this method only works for 3D signals! You are feeding it with %d-dimensional reference data. :s" % self.ndim)
# init
s = self.copy()
s_phased = self.copy()
# list of phasing methods
list_phase_method = {}
list_phase_method[0] = "0th & 1st order phase from ref. scan FID (method #0)"
list_phase_method[1] = "0th order only phase from ref. scan FID (method #1)"
list_phase_method[2] = "0th order phase from 1st pt in FID (method #2)"
list_phase_method[3] = "0th order phase from 1st pt in averaged FID (method #3)"
list_phase_method[4] = "0th order phase from peak in spectrum (method #4)"
list_phase_method[5] = "0th order phase from peak in averaged spectrum (method #5)"
t = s.time_axis()
ppm = s.frequency_axis_ppm()
# choose which method is adequate
only_0th_order = (phase_order == 0)
if(use_ref_data):
# we have a ref scan, so method #0 or #1
phase_method = 0 + only_0th_order
t_ref = self.data_ref.time_axis()
else:
# we do not have a ref scan, so method #2, #3, #4, #5
# that depends on water intensity and water suppression
if(first_point_fid_mode):
phase_method = 2
else:
phase_method = 4
# and on SNR
if(average_per_channel_mode):
phase_method += 1
if(display):
# prepare subplot
fig_title = "Phasing [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 3, sharex='col')
# display chosen method
log.debug("phasing using method: " + list_phase_method[phase_method])
# for each channel
for c in range(0, s.shape[1]):
if(phase_method == 0 or phase_method == 1):
# time-domain phase of reference signal for this channel
sp_ref = np.angle(self.data_ref[0, c, :])
if(display):
# display reference FID
axs[0, 0].cla()
axs[0, 0].plot(t_ref, np.real(self.data_ref[0, c, :]), linewidth=1, label='real part')
axs[0, 0].plot(t_ref, np.imag(self.data_ref[0, c, :]), linewidth=1, label='imag part')
axs[0, 0].set_xlabel('time (s)')
axs[0, 0].set_ylabel('intensity (u.a)')
axs[0, 0].grid('on')
axs[0, 0].legend()
axs[0, 0].set_title("Ref - channel #" + str(c + 1))
# display reference time-domain phase
axs[1, 0].cla()
axs[1, 0].plot(t_ref, sp_ref, linewidth=1, label='phase')
axs[1, 0].set_xlabel('time (s)')
axs[1, 0].set_ylabel("phase (rd)")
axs[1, 0].grid('on')
axs[1, 0].legend()
axs[1, 0].set_title("Ref - channel #" + str(c + 1))
elif(phase_method == 3):
# phase of first point in averaged fid
s_avg = s[:, c, 0].mean(axis=0)
phase_fid_avg = np.angle(s_avg)
elif(phase_method == 5):
# phase of peak in averaged spectrum
s_avg = s[:, c, :].mean(axis=0)
# find maximum peak in range and its phase
peak_ppm, peak_val, _, _, _ = s_avg._analyze_peak_1d(peak_range)
phase_peak_avg = np.angle(peak_val)
if(c == 0):
log.debug("measuring phase at %0.2fppm on 1st channel!" % peak_ppm)
# late init progress bar
if(c == 0):
pbar = log.progressbar("phasing", s.shape[1] * s.shape[0])
# now, for each average in meta signal acquired with this channel
for a in range(0, s.shape[0]):
# this spectrum
this_s = s[a, c, :]
this_sf = this_s.spectrum()
if(phase_method == 0):
# correct phase using reference time-domain phase estimation
this_s_phased = this_s * np.exp(-1j * (sp_ref + phase_offset))
elif(phase_method == 1):
# correct phase using first point of reference time-domain phase estimation
this_s_phased = this_s * np.exp(-1j * (sp_ref[0] + phase_offset))
elif(phase_method == 2):
# phase of first point of this fid
phase_fid = np.angle(this_s[0])
# and apply it to correct the spectrum
this_s_phased = this_s * np.exp(-1j * (phase_fid + phase_offset))
elif(phase_method == 3):
# apply to correct the spectrum
this_s_phased = this_s * np.exp(-1j * (phase_fid_avg + phase_offset))
elif(phase_method == 4):
# find maximum peak in range and its phase
peak_ppm, peak_val, _, _, _ = this_s._analyze_peak_1d(peak_range)
phase_peak = np.angle(peak_val)
# apply phase to spectrum
this_sf_phased = this_sf * np.exp(-1j * (phase_peak + phase_offset))
# ifft back
this_s_phased = np.fft.ifft(np.fft.ifftshift(this_sf_phased))
elif(phase_method == 5):
# apply phase to spectrum
this_sf_phased = this_sf * np.exp(-1j * (phase_peak_avg + phase_offset))
# ifft back
this_s_phased = np.fft.ifft(np.fft.ifftshift(this_sf_phased))
# store
s_phased[a, c, :] = this_s_phased
if(display):
# convert back to MRSData2
this_s_phased = self.inherit(this_s_phased)
# display original meta FID
axs[0, 1].cla()
axs[0, 1].plot(t, np.real(this_s), linewidth=1, label='real part')
axs[0, 1].plot(t, np.imag(this_s), linewidth=1, label='imag part')
axs[0, 1].set_xlabel('time (s)')
axs[0, 1].set_ylabel('original')
axs[0, 1].grid('on')
axs[0, 1].legend()
axs[0, 1].set_title("Meta channel #" + str(c + 1) + " average #" + str(a + 1))
# display original meta spectrum
axs[0, 2].cla()
axs[0, 2].plot(ppm, np.real(this_sf), linewidth=1, label='real part')
if(phase_method == 4 or phase_method == 5):
axs[0, 2].plot(peak_ppm, np.real(peak_val), 'ro')
axs[0, 2].axvline(x=peak_ppm, color='r', linestyle='--')
axs[0, 2].set_xlim(display_range[1], display_range[0])
axs[0, 2].set_xlabel('chemical shift (ppm)')
axs[0, 2].set_ylabel('original')
axs[0, 2].grid('on')
axs[0, 2].legend()
axs[0, 2].set_title("Meta channel #" + str(c + 1) + " average #" + str(a + 1))
# display corrected meta FID
axs[1, 1].cla()
axs[1, 1].plot(t, np.real(this_s_phased), 'b-', linewidth=1, label='real part')
axs[1, 1].set_xlabel('time (s)')
axs[1, 1].set_ylabel('corrected')
axs[1, 1].grid('on')
axs[1, 1].legend()
# display corrected meta spectrum
axs[1, 2].cla()
axs[1, 2].plot(ppm, this_s_phased.spectrum().real, 'b-', linewidth=1, label='real part')
axs[1, 2].set_xlim(display_range[1], display_range[0])
axs[1, 2].set_xlabel('frequency (ppm)')
axs[1, 2].set_ylabel('corrected')
axs[1, 2].grid('on')
axs[1, 2].legend()
fig.subplots_adjust()
fig.show()
plt.pause(0.1)
pbar.update(c * s.shape[0] + a + 1)
pbar.finish("done")
# convert back to MRSData2
s_phased = self.inherit(s_phased)
# if any ref data available, we phase it too (silently)
if(s_phased.data_ref is not None):
s_phased.data_ref = s_phased.data_ref.correct_phase_3d(True)
return(s_phased)
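# Minimal sketch of method #2 above (0th order phase taken from the 1st FID point),
# with a hypothetical 1D complex `fid`:
#
#   phi0 = np.angle(fid[0])               # phase of the very first time-domain sample
#   fid_phased = fid * np.exp(-1j * phi0)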
def correct_combine_channels_3d(self, use_ref_data=True, phasing=False, channels_onoff=[True]):
"""
Recombine Rx channels using an SVD-based weighting. If no reference signal is specified, the recombination weights will be calculated from the signal of interest (not optimal).
* Works only with a 3D [averages,channels,timepoints] signal.
* Returns a 2D [averages,timepoints] signal.
Parameters
----------
use_ref_data : boolean
Use reference data (usually non water suppressed) for channel combining
phasing : boolean
Allow 0th order phasing during channel combine or not
channels_onoff : boolean list [nChannels]
Binary weights to apply to each channel for example to turn off some of them
Returns
-------
s_combined : MRSData2 numpy array [averages,timepoints]
Resulting channel combined data stored in a MRSData2 object
"""
log.debug("channel combining [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 3):
log.error("this method only works for 3D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# check if any ref data
if(self.data_ref is None and use_ref_data):
log.warning("you want to channel-combine data based on ref. data but no such data is available!")
use_ref_data = False
# dimensions check for reference data
if(use_ref_data and self.data_ref.ndim != 3):
log.error("this method only works for 3D signals! You are feeding it with %d-dimensional reference data. :s" % self.ndim)
# init
s = self.copy()
if(s.shape[1] == 1):
log.warning("this is a single-channel signal, no need to recombine this!")
s_combined = np.mean(s, axis=1)
log.warning("reshaped to " + str(s_combined.shape))
else:
if(phasing):
if(use_ref_data):
log.debug("channel recombine WITH reference scan AND phasing (original suspect code)...")
weights = suspect.processing.channel_combination.svd_weighting(self.data_ref.mean(axis=0))
else:
log.debug("channel recombine WITHOUT reference scan AND phasing (original suspect code)...")
s_dirty_mean = np.mean(s, axis=0)
weights = suspect.processing.channel_combination.svd_weighting(s_dirty_mean)
else:
if(use_ref_data):
log.debug("channel recombine WITH reference scan & NO phasing...")
p, _, v = np.linalg.svd(self.data_ref.mean(axis=0), full_matrices=False)
channel_weights = p[:, 0].conjugate()
weights = -channel_weights / np.sum(np.abs(channel_weights))
else:
log.debug("channel recombine reference scan & NO phasing...")
s_dirty_mean = np.mean(s, axis=0)
p, _, v = np.linalg.svd(s_dirty_mean, full_matrices=False)
channel_weights = p[:, 0].conjugate()
weights = -channel_weights / np.sum(np.abs(channel_weights))
# turn off some channels ?
channels_onoff_np = np.array(channels_onoff)
if((channels_onoff_np == False).any()):
channels_onoff_float = channels_onoff_np.astype(float)
log.debug("playing with channel weights...")
log.debug(str(channels_onoff_float))
weights = weights * channels_onoff_float
s_combined = suspect.processing.channel_combination.combine_channels(s, weights)
# convert back to MRSData2
s_combined = self.inherit(s_combined)
# if any ref data available, we combine it too (silently)
if(s_combined.data_ref is not None):
s_combined.data_ref = s_combined.data_ref.correct_combine_channels_3d(True, True, channels_onoff)
return(s_combined)
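# Sketch of the SVD weighting used above when phasing is off (illustration only,
# hypothetical `ref` array of shape [channels, timepoints]; the real code delegates
# the final weighted sum to suspect's combine_channels):
#
#   u, _, _ = np.linalg.svd(ref, full_matrices=False)
#   w = u[:, 0].conjugate()
#   w = -w / np.sum(np.abs(w))                      # same normalization as above
#   combined = np.tensordot(w, ref, axes=(0, 0))    # weighted sum over channels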
def concatenate_2d(self, data):
"""
Concatenate current signal with another one along the averages axis.
* Works only with a 2D [averages,timepoints] signal.
* Returns a 2D [averages,timepoints] signal.
Parameters
----------
data : MRSData2 numpy array [averages,timepoints]
MRS data to concatenate to the current data
Returns
-------
s_concatenated : MRSData2 numpy array [averages,timepoints]
Resulting concatenated signal
"""
log.debug("concatenating [%s] to [%s]..." % (self.display_label, data.display_label))
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
if(data.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % data.ndim)
# init
log.debug("concatenating dataset shapes " + str(self.shape) + " and " + str(data.shape) + " ...")
s_concatenated = np.concatenate((self, data))
# convert back to MRSData2
s_concatenated = self.inherit(s_concatenated)
log.debug("obtained a dataset shape " + str(s_concatenated.shape))
# update some attributes
s_concatenated._is_concatenated = True
return(s_concatenated)
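# Hedged usage sketch: concatenating two runs of the same acquisition before
# averaging, e.g. when a scan was split in two (hypothetical variable names).
#
#   s_all = s_run1.concatenate_2d(s_run2)   # shape becomes [na1 + na2, timepoints]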
def _build_moving_average_data_2d(self, nAvgWindow=5):
"""
Build moving average data in the average dimension. Useful for the analyze_peak and correct_realign_2d functions.
* Works only with a 2D [averages,timepoints] signal.
* Returns a 2D [averages,timepoints] signal.
Parameters
----------
nAvgWindow : int
Size of the moving average window
Returns
-------
s_ma : MRSData2 numpy array [averages,timepoints]
Resulting moving average data stored in a MRSData2 object. The number of averages is the same as in the original data BUT each of those averages is actually an average of nAvgWindow spectra.
"""
log.debug("calculating moving average for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
log.debug("moving averaging with window of %d samples!" % nAvgWindow)
s = self.copy()
# make sure the moving average window size is odd
if(np.mod(nAvgWindow, 2) == 0):
nAvgWindow += 1
# build moving averaged data
s_ma = s.copy()
moving_averages_half = int((nAvgWindow - 1) / 2)
for a in range(0, s.shape[0]):
ia = max(0, a - moving_averages_half)
ib = min(s.shape[0], a + moving_averages_half + 1)
s_ma[a, :] = np.mean(s[ia:ib, :], axis=0)
return(s_ma)
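# Equivalent numpy-only sketch of the moving average above (edges simply truncated,
# window assumed odd), with a hypothetical 2D array `x` of shape [averages, timepoints]:
#
#   half = (win - 1) // 2
#   x_ma = np.array([x[max(0, a - half):min(len(x), a + half + 1)].mean(axis=0)
#                    for a in range(len(x))])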
def correct_analyze_and_reject_2d(self, peak_analyze_range=[4.5, 5], peak_snr_range=[1.8, 2.2], peak_lw_range=[4.5, 5], moving_averages=1, reject_when_linewidth_fails=True, peak_properties_ranges={"amplitude (%)": None, "linewidth (Hz)": [5.0, 30.0], "chemical shift (ppm)": 0.5, "phase std. factor (%)": 60.0}, peak_properties_rel2mean=True, auto_method_list=None, auto_adjust_allowed_snr_change=1.0, allowed_apodization=0.0, display=False, display_range=[1, 6]):
"""
Analyze peak in each average in terms of intensity, linewidth, chemical shift and phase and reject data if one of these parameters goes out of the min / max bounds. Useful to understand what the hell went wrong during your acquisition when you have the raw data and to try to improve things a little. You can choose to set the bounds manually or automatically based on a peak property (amplitude, linewidth, frequency, phase). And you can run several automatic adjustment methods; the one giving the highest SNR and/or the lowest peak linewidth will be selected. All this is very experimental and the code is long and not optimized, sorry ;).
Special note about the optimization: when does it stop? First, the algorithm tries to optimize the data rejection to get a SNR higher than the (initial SNR * auto_adjust_allowed_snr_change). If the latter is not possible, then the algorithm tries to reduce the linewidth without reducing SNR compared to the initial SNR. If nothing works out, no data rejection is performed except maybe based on peak detection (see reject_when_linewidth_fails).
* Works only with a 2D [averages,timepoints] signal.
* Returns a 2D [averages,timepoints] signal.
Parameters
----------
peak_analyze_range : list
Range in ppm used to analyze peak properties (amplitude, linewidth, chemical shift, phase)
peak_snr_range : list
Range in ppm used to estimate SNR
peak_lw_range : list
Range in ppm used to estimate linewidth
moving_averages : int
Number of averages to use for the moving average; needs to be an odd number (it is incremented if even)
reject_when_linewidth_fails : boolean
Reject data if the linewidth estimation fails (= 0 Hz). Usually happens when the peak looks so bad that its width cannot be measured...
peak_properties_ranges : dict
Dictionary that contains 4 entries, the 4 rejection criteria:
"amplitude (%)": relative amplitude changes: keep data if within the +/- val % range
"linewidth (Hz)": linewidth changes: keep data if within the range given in Hz
"chemical shift (ppm)": chemical shift changes: keep data if within +/- val ppm
"phase std. factor (%)": phase changes: keep data if within +/- val/100 * std(phase) rad
peak_properties_rel2mean : boolean
Relative peak properties (amplitude, chemical shift and phase) should be calculated based on the mean value over the whole acquisition (True) or only the first acquired point (False)
auto_method_list : list of data_rejection_method
Automatic rejection bounds adjustment methods
auto_adjust_allowed_snr_change : float
Allowed change in SNR (%), positive or negative, relative to the initial SNR without data rejection
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during correction process. However, the final corrected signal will not be apodized.
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_cor : MRSData2 numpy array [averages,timepoints]
Data remaining after data rejection stored in a MRSData2 object.
"""
log.debug("analyzing data and rejecting some for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
if(s.shape[0] == 1):
log.warning("single-shot signal, nothing to analyze!")
return(s)
ppm = s.frequency_axis_ppm()
# check if we did data rejection before
if(self.data_rejection is None):
iround_data_rej = 1
else:
iround_data_rej = len(self.data_rejection) + 1
log.info("%dth round of data rejection!" % iround_data_rej)
# estimate initial SNR and linewidth
log.pause()
s_avg = s.correct_average_2d()
initial_snr, _, _ = s_avg.analyze_snr_1d(peak_snr_range)
initial_lw = s_avg.analyze_linewidth_1d(peak_lw_range)
log.resume()
log.info("* Pre-data-rejection SNR = %.2f" % initial_snr)
log.info("* Pre-data-rejection linewidth = %.2f Hz" % initial_lw)
# build moving averaged data
s_ma = self._build_moving_average_data_2d(moving_averages)
# perform peak analysis (possibly with apodization to stabilize things)
peak_prop_abs, peak_prop_rel2mean, peak_prop_rel2firstpt = s_ma._analyze_peak_2d(peak_analyze_range, allowed_apodization)
# first set the data according to relative option: this is a user option
if(peak_properties_rel2mean):
peak_prop_rel = peak_prop_rel2mean
else:
peak_prop_rel = peak_prop_rel2firstpt
# choose if absolute or relative will be analyzed: this is hard-coded
peak_prop_analyze = peak_prop_abs * 0.0
# amplitude: relative in %
peak_prop_analyze[:, 0] = peak_prop_rel[:, 0]
# linewidth: absolute in Hz
peak_prop_analyze[:, 1] = peak_prop_abs[:, 1]
# frequency: relative in ppm
peak_prop_analyze[:, 2] = peak_prop_rel[:, 2]
# phase: absolute in rad
peak_prop_analyze[:, 3] = peak_prop_rel[:, 3]
# choose if absolute or relative will be displayed
peak_prop_disp = peak_prop_rel * 0.0
# amplitude: relative in %
peak_prop_disp[:, 0] = peak_prop_rel[:, 0]
# linewidth: absolute in Hz
peak_prop_disp[:, 1] = peak_prop_abs[:, 1]
# frequency: absolute in ppm
peak_prop_disp[:, 2] = peak_prop_abs[:, 2]
# phase: absolute in rad
peak_prop_disp[:, 3] = peak_prop_abs[:, 3]
# our time scale
t_ma = np.linspace(0, self.tr * s.shape[0], s_ma.shape[0]) / 1000.0 # s
# stats
log.info("peak analysis: means ± std. deviations")
log.info("rel. peak amplitude = %.2f ± %.2f %%" % (peak_prop_disp[:, 0].mean(), peak_prop_disp[:, 0].std()))
log.info("abs. linewidth = %.1f ± %.1f Hz (%.3f ± %.3f ppm)" % (peak_prop_disp[:, 1].mean(), peak_prop_disp[:, 1].std(), (peak_prop_disp[:, 1] / s_ma.f0).mean(), (peak_prop_disp[:, 1] / s_ma.f0).std()))
log.info("abs. frequency = %.2f ± %.2f ppm (± %.1f Hz)" % (peak_prop_disp[:, 2].mean(), peak_prop_disp[:, 2].std(), (peak_prop_disp[:, 2] * s_ma.f0).std()))
log.info("abs. phase = %.2f ± %.2f rad" % (peak_prop_disp[:, 3].mean(), peak_prop_disp[:, 3].std()))
# check for Nones
peak_properties_ranges_list = list(peak_properties_ranges.values())
peak_properties_ranges_list = [np.inf if p is None else p for p in peak_properties_ranges_list]
# special for linewidth: can be a max linewidth or a list
if(type(peak_properties_ranges_list[1]) != list):
peak_properties_ranges_list[1] = [1.0, peak_properties_ranges_list[1]]
# reject when peak linewidth fails
if(reject_when_linewidth_fails):
peak_properties_ranges_list[1][0] = 1.0
else:
peak_properties_ranges_list[1][0] = -1.0
# special for phase: rejection range is a factor of std
phase_std = peak_prop_analyze[:, 3].std()
phase_std_reject_range = peak_properties_ranges_list[3] / 100.0 * phase_std
# prepare rejection min/max vectors
peak_prop_min = [-peak_properties_ranges_list[0],
peak_properties_ranges_list[1][0],
-peak_properties_ranges_list[2],
-phase_std_reject_range]
peak_prop_max = [+peak_properties_ranges_list[0],
peak_properties_ranges_list[1][1],
+peak_properties_ranges_list[2],
+phase_std_reject_range]
# automatic rejection ?
if(auto_method_list is not None):
# init
display_axes_ready = [False, False]
properties_names = list(peak_properties_ranges.keys())
auto_method_final_snr_list = np.array([0.0] * 4)
auto_method_final_lw_list = np.array([np.inf] * 4)
peak_prop_min_auto_res = peak_prop_min.copy()
peak_prop_max_auto_res = peak_prop_max.copy()
# for each auto method
for this_auto_method in auto_method_list:
# prepare min/max peak property range
this_prop_min = np.abs(peak_prop_analyze[:, this_auto_method.value]).min()
this_prop_max = np.abs(peak_prop_analyze[:, this_auto_method.value]).max()
# get range resolution from constants
if(this_auto_method == data_rejection_method.AUTO_AMPLITUDE):
this_prop_step = DATA_REJECTION_AMPLITUDE_STEP
elif(this_auto_method == data_rejection_method.AUTO_LINEWIDTH):
this_prop_step = DATA_REJECTION_LINEWIDTH_STEP
elif(this_auto_method == data_rejection_method.AUTO_FREQUENCY):
this_prop_step = DATA_REJECTION_FREQUENCY_STEP
elif(this_auto_method == data_rejection_method.AUTO_PHASE):
this_prop_step = DATA_REJECTION_PHASE_STEP
else:
log.error("upsyy! I am not aware of this automatic data rejection method: " + str(this_auto_method))
# generate a range to test, using the resolution
this_prop_range = np.arange(this_prop_min - this_prop_step, this_prop_max + this_prop_step, this_prop_step)
# checking that there is actually a variation and a range to test
if(this_prop_range.size == 0):
# no? let's skip this method
this_prop_range = np.array([this_prop_min])
else:
# now let's be smart and reduce the number of values to test according to the peak property measurements
# regrid to nearest in previously computed range
this_prop_analyze_set = np.abs(peak_prop_analyze[:, this_auto_method.value])
this_prop_analyze_set = interpolate.interp1d(this_prop_range, this_prop_range, kind='nearest')(this_prop_analyze_set)
# remove one step resolution, remove duplicates and sort
this_prop_analyze_set = np.sort(np.array(list(set(this_prop_analyze_set - this_prop_step))))
# remove negative values
this_prop_analyze_set = this_prop_analyze_set[this_prop_analyze_set > 0]
# we should now have an optimized set of thresholds to test
this_prop_range = this_prop_analyze_set
# checking that there is actually a variation and a range to test (again, sorry)
if(this_prop_range.size == 0):
# no? let's skip this method
this_prop_range = np.array([this_prop_min])
# iterate and test the resulting data
pbar = log.progressbar("adjusting rejection threshold for [" + properties_names[this_auto_method.value] + "] in range [%.3f;%.3f] (n=%d)" % (this_prop_range.min(), this_prop_range.max(), this_prop_range.size), this_prop_range.shape[0])
# add inf to be sure that we try without any thresholds
this_prop_range = np.hstack((this_prop_range, +np.inf))
test_snr_list = np.zeros(this_prop_range.shape)
test_lw_list = np.zeros(this_prop_range.shape)
test_nrej_list = np.zeros(this_prop_range.shape)
# test each criteria bound
for (i_prop_val, this_prop_val) in enumerate(this_prop_range):
# rebuild min/max rejection bounds including user values
peak_prop_min_auto = peak_prop_min.copy()
peak_prop_max_auto = peak_prop_max.copy()
peak_prop_max_auto[this_auto_method.value] = this_prop_val
# now see what we can reject
this_mask_reject_data = np.full([s_ma.shape[0], 4], False)
for a in range(0, s_ma.shape[0]):
for p in range(4):
if(peak_prop_analyze[a, p] < peak_prop_min_auto[p]):
this_mask_reject_data[a, p] = True
if(peak_prop_analyze[a, p] > peak_prop_max_auto[p]):
this_mask_reject_data[a, p] = True
# reject data now
this_mask_reject_data_sumup = (this_mask_reject_data.sum(axis=1) > 0)
this_s_cor = s[(this_mask_reject_data_sumup == False), :]
# analyze snr / lw and number of rejections
if(this_mask_reject_data_sumup.sum() < s_ma.shape[0]):
log.pause()
this_s_cor_avg = this_s_cor.correct_average_2d()
test_snr_list[i_prop_val], _, _ = this_s_cor_avg.analyze_snr_1d(peak_snr_range)
test_lw_list[i_prop_val] = this_s_cor_avg.analyze_linewidth_1d(peak_lw_range)
log.resume()
test_nrej_list[i_prop_val] = this_mask_reject_data_sumup.sum()
# progression
pbar.update(i_prop_val)
pbar.finish("done")
# relative SNR and minimum acceptable relative SNR change
test_snr_threshold = initial_snr + initial_snr * auto_adjust_allowed_snr_change / 100.0
test_snr_list_rel = test_snr_list / initial_snr * 100.0 - 100.0
# relative LW change
test_lw_list_rel = test_lw_list - initial_lw
# first, try and find a higher SNR than the initial one (best case: we reject crappy data and improve the final SNR)
if(test_snr_list_rel.max() > auto_adjust_allowed_snr_change):
log.info("SNR change above threshold: %.2f%% > %.2f%% threshold! :)" % (test_snr_list_rel.max(), auto_adjust_allowed_snr_change))
ind_max_snr = np.argmax(test_snr_list)
optim_prop = this_prop_range[ind_max_snr]
optim_res_snr = test_snr_list[ind_max_snr]
optim_res_lw = test_lw_list[ind_max_snr]
log.info("optimal [" + properties_names[this_auto_method.value] + "] = %.1f" % optim_prop)
else:
# no SNR above threshold found, so let's try to find at least a lower linewidth (intermediate case), for the same SNR or more
# check that we have a segment of the curve above the initial SNR
test_snr_list_mask = (test_snr_list_rel >= 0.0)
if(not test_snr_list_mask.any()):
# that was a bit ambitious, there was absolutely no SNR enhancement
log.info("sorry, this is only making your SNR worse...")
log.debug("the best SNR change we found was %.2f%% compared to initial :(" % test_snr_list_rel.max())
# set optimal LW to max (inf)
optim_prop = this_prop_max
optim_res_snr = test_snr_list[-1]
optim_res_lw = test_lw_list[-1]
else:
# we found relative SNR changes equal (no change) or above 0 (little SNR enhancement, still below expectation)
# let's choose the one with the lowest LW
min_lw_snr_masked = np.min(test_lw_list_rel[test_snr_list_mask])
ind_min_lw_snr_masked = np.argmin(test_lw_list_rel[test_snr_list_mask])
# if LW was actually reduced
if(min_lw_snr_masked < 0):
log.info("could not improve SNR above threshold but reduced peak linewidth! :)")
optim_prop = this_prop_range[test_snr_list_mask][ind_min_lw_snr_masked]
optim_res_snr = test_snr_list[test_snr_list_mask][ind_min_lw_snr_masked]
optim_res_lw = test_lw_list[test_snr_list_mask][ind_min_lw_snr_masked]
log.info("optimal [" + properties_names[this_auto_method.value] + "] = %.1f" % optim_prop)
else:
# that was a bit ambitious, there was absolutely no SNR enhancement and no LW enhancement either! :(
log.info("sorry, this is only making your SNR and LW worse...")
# set optimal LW to max (inf)
optim_prop = this_prop_max
optim_res_snr = test_snr_list[-1]
optim_res_lw = test_lw_list[-1]
# display and save the final snr and lw
log.info("* Post-data-rejection based on [" + properties_names[this_auto_method.value] + "] SNR = %.2f" % optim_res_snr)
log.info("* Post-data-rejection based on [" + properties_names[this_auto_method.value] + "] linewidth = %.2f Hz" % optim_res_lw)
auto_method_final_snr_list[this_auto_method.value] = optim_res_snr
auto_method_final_lw_list[this_auto_method.value] = optim_res_lw
# plot SNR / LW combinations and optimal choice
if(display):
# plot the SNRs versus LWs
fig_title = "Data discarding [%s]: adjusting criteria" % self.display_label
if(iround_data_rej > 1):
fig_title += " (round #%d)" % iround_data_rej
fig = plt.figure(fig_title)
if(not display_axes_ready[0]):
# create the figure, let's create the axes
fig.clf()
fig.suptitle(fig_title)
fig.subplots(2, 2)
for a in fig.axes:
a.twinx()
display_axes_ready[0] = True
fig.axes[this_auto_method.value].plot(this_prop_range, test_snr_list, 'rx-', label='SNR')
fig.axes[this_auto_method.value].axvline(optim_prop, color='m', linestyle='--', label='Optimal')
fig.axes[this_auto_method.value].axhline(test_snr_threshold, color='g', linestyle='--', label='SNR threshold')
fig.axes[this_auto_method.value].set_xlabel(properties_names[this_auto_method.value][0].upper() + properties_names[this_auto_method.value][1:])
fig.axes[this_auto_method.value].set_ylabel('Estimated SNR (u.a)')
fig.axes[this_auto_method.value].grid('on')
fig.axes[this_auto_method.value].legend(loc='lower left')
fig.axes[this_auto_method.value + 4].plot(this_prop_range, test_lw_list, 'bx-', label='Linewidth')
fig.axes[this_auto_method.value + 4].set_ylabel('Estimated linewidth (Hz)')
fig.axes[this_auto_method.value + 4].legend(loc='lower right')
fig.subplots_adjust()
fig.show()
# save the found optimal criteria to rejection critera vector
if(this_auto_method == data_rejection_method.AUTO_LINEWIDTH):
peak_prop_max_auto_res[this_auto_method.value] = optim_prop
else:
peak_prop_min_auto_res[this_auto_method.value] = -optim_prop
peak_prop_max_auto_res[this_auto_method.value] = optim_prop
# tried all auto methods, now apply best one
# the final snr and lw obtained are in auto_method_final_snr_list and auto_method_final_lw_list
# the final bounds are in peak_prop_min_auto_res and peak_prop_max_auto_res
# relative SNR change
auto_method_final_snr_list_rel = auto_method_final_snr_list / initial_snr * 100.0 - 100.0
auto_method_final_lw_list_rel = auto_method_final_lw_list - initial_lw
# is this higher than the initial snr?
if(auto_method_final_snr_list_rel.max() > auto_adjust_allowed_snr_change):
# apply this method
ind_max_snr_auto_method = np.argmax(auto_method_final_snr_list)
optim_auto_method = data_rejection_method(ind_max_snr_auto_method)
log.info("best adjustment done with " + str(optim_auto_method) + " regarding SNR! (round #%d)" % iround_data_rej)
else:
# no methods could give a SNR above threshold
# so let's try to find at least a method that does not reduce SNR and gives a lower linewidth (intermediate case)
auto_method_final_snr_list_mask = (auto_method_final_snr_list_rel >= 0.0)
if(not auto_method_final_snr_list_mask.any()):
# that was a bit ambitious
log.info("sorry but only made your SNR worse...")
log.debug("the best SNR change we found was %.2f%% compared to initial :(" % auto_method_final_snr_list_rel.max())
log.info("automatic data rejection failed, no optimal method found, sorry! :(")
# no optimal method!
optim_auto_method = None
else:
# we found relative SNR changes equal (no change) or above 0 (little SNR enhancement, still below expectation)
# let's choose the method that gives the lowest LW
min_lw_auto_method = np.min(auto_method_final_lw_list_rel[auto_method_final_snr_list_mask])
ind_min_lw_auto_method = np.argmin(auto_method_final_lw_list_rel[auto_method_final_snr_list_mask])
# if LW was actually reduced
if(min_lw_auto_method < 0):
log.info("could not find a method that improves SNR above threshold, only reduces the peak linewidth! :)")
optim_auto_method = data_rejection_method(ind_min_lw_auto_method)
log.info("best adjustment done with " + str(optim_auto_method) + " regarding linewidth! (round #%d)" % iround_data_rej)
else:
# that was a bit ambitious, there was absolutely no SNR enhancement and no LW enhancement either! :(
log.info("automatic data rejection failed, no optimal method found, sorry! :(")
# no optimal method!
optim_auto_method = None
if(optim_auto_method is not None):
log.info("* Post-data-rejection SNR = %.2f" % auto_method_final_snr_list[optim_auto_method.value])
log.info("* Post-data-rejection linewidth = %.2f Hz" % auto_method_final_lw_list[optim_auto_method.value])
# apply automatically optimized bounds to rejection vectors
peak_prop_min[optim_auto_method.value] = peak_prop_min_auto_res[optim_auto_method.value]
peak_prop_max[optim_auto_method.value] = peak_prop_max_auto_res[optim_auto_method.value]
else:
optim_auto_method = None
# for each average, check if peak parameters are in the min / max bounds
mask_reject_data = np.full([s_ma.shape[0], 4], False)
pbar = log.progressbar("rejecting data", s_ma.shape[0])
for a in range(0, s_ma.shape[0]):
for p in range(4):
if(peak_prop_analyze[a, p] < peak_prop_min[p]):
mask_reject_data[a, p] = True
if(peak_prop_analyze[a, p] > peak_prop_max[p]):
mask_reject_data[a, p] = True
pbar.update(a)
pbar.finish("done")
# stats regarding data rejection, how many, for what reasons, overall percentage
log.info("data rejection: summary (round #%d)" % iround_data_rej)
log.info("number of averages rejected because of...")
log.info("amplitude = %d" % mask_reject_data[:, 0].sum())
log.info("linewidth = %d" % mask_reject_data[:, 1].sum())
log.info("frequency = %d" % mask_reject_data[:, 2].sum())
log.info("phase = %d" % mask_reject_data[:, 3].sum())
# actually reject data now
mask_reject_data_sumup = (mask_reject_data.sum(axis=1) > 0)
s_cor = s[(mask_reject_data_sumup == False), :]
# build rejected spectrum
s_rej = s[(mask_reject_data_sumup == True), :]
log.pause()
s_rej_avg = s_rej.correct_average_2d()
log.resume()
log.info("TOTAL data rejection = %d / %d (%.0f%%)" % (mask_reject_data_sumup.sum(), s_ma.shape[0], (mask_reject_data_sumup.sum() / s_ma.shape[0] * 100)))
# perform post-correction measurements
peak_prop_abs, peak_prop_rel2mean, peak_prop_rel2firstpt = s_ma._analyze_peak_2d(peak_analyze_range, allowed_apodization)
# first set the data according to relative option: this is a user option
if(peak_properties_rel2mean):
peak_prop_rel = peak_prop_rel2mean
else:
peak_prop_rel = peak_prop_rel2firstpt
# choose if absolute or relative will be analyzed: this is hard-coded
peak_prop_analyze_postcor = peak_prop_abs * 0.0
# amplitude: relative in %
peak_prop_analyze_postcor[:, 0] = peak_prop_rel[:, 0]
# linewidth: absolute in Hz
peak_prop_analyze_postcor[:, 1] = peak_prop_abs[:, 1]
# frequency: relative in ppm
peak_prop_analyze_postcor[:, 2] = peak_prop_rel[:, 2]
# phase: absolute in rad
peak_prop_analyze_postcor[:, 3] = peak_prop_rel[:, 3]
# final display
if(display):
fig_title = "Data discarding [%s]: summary" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 3, sharex='all')
k = 0
for ix in range(2):
for iy in range(2):
# original data
axs[ix, iy].plot(t_ma, peak_prop_analyze[:, k], 'k-x', linewidth=1)
# rejected data
t_ma_rej = t_ma[mask_reject_data[:, k]]
this_peak_prop_analyze_rej = peak_prop_analyze[mask_reject_data[:, k], k]
axs[ix, iy].plot(t_ma_rej, this_peak_prop_analyze_rej, 'ro', linewidth=1)
axs[ix, iy].axhline(y=peak_prop_min[k], color='r', linestyle='--')
axs[ix, iy].axhline(y=peak_prop_max[k], color='r', linestyle='--')
k = k + 1
axs[0, 0].set_ylabel('Rel. amplitude change (%)')
axs[0, 0].grid('on')
axs[0, 0].set_title("Rel. amplitude = %.2f ± %.2f %%" % (peak_prop_disp[:, 0].mean(), peak_prop_disp[:, 0].std()))
axs[0, 1].set_ylabel('Abs. linewidth (Hz)')
axs[0, 1].grid('on')
axs[0, 1].set_title("Abs. linewidth = %.1f ± %.1f Hz (%.3f ± %.3f ppm)" % (peak_prop_disp[:, 1].mean(), peak_prop_disp[:, 1].std(), (peak_prop_disp[:, 1] / s_ma.f0).mean(), (peak_prop_disp[:, 1] / s_ma.f0).std()))
axs[1, 0].set_xlabel('Acq. time (s)')
axs[1, 0].set_ylabel('Abs. frequency (ppm)')
axs[1, 0].grid('on')
axs[1, 0].set_title("Abs. frequency = %.2f ± %.2f ppm (± %.1f Hz)" % (peak_prop_disp[:, 2].mean(), peak_prop_disp[:, 2].std(), (peak_prop_disp[:, 2] * s_ma.f0).std()))
axs[1, 1].set_xlabel('Acq. time (s)')
axs[1, 1].set_ylabel('Abs. phase shift (rd)')
axs[1, 1].grid('on')
axs[1, 1].set_title("Abs. phase = %.2f ± %.2f rad" % (peak_prop_disp[:, 3].mean(), peak_prop_disp[:, 3].std()))
# nice plot showing all raw data
ax = plt.subplot(1, 3, 3)
ppm = s_ma.frequency_axis_ppm()
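# vertical offset between stacked spectra: the maximum of the mean real spectrum, rounded up to the next power of ten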
ystep = np.max(np.mean(s_ma.spectrum().real, axis=0))
ystep = np.power(10, 1 + (np.floor(np.log10(ystep))))
ampfactor = 4
for k in range(s_ma.shape[0]):
if(mask_reject_data_sumup[k]):
plt.plot(ppm, s_ma[k, :].spectrum().real * ampfactor + ystep * k, 'r-', linewidth=1)
else:
plt.plot(ppm, s_ma[k, :].spectrum().real * ampfactor + ystep * k, 'g-', linewidth=1)
# build lineshape segment
_, _, _, peak_seg_ppm, peak_seg_val = s_ma[k, :]._analyze_peak_1d(peak_analyze_range)
plt.plot(peak_seg_ppm, np.real(peak_seg_val) * ampfactor + ystep * k, 'k-', linewidth=1)
plt.xlim(peak_analyze_range[1], peak_analyze_range[0])
plt.xlabel('chemical shift (ppm)')
plt.ylabel('individual spectra')
# y ticks: spectrum index
# TODO: maybe need to calculate this automatically
n_yticks = 16
step_yticks = 2 ** np.ceil(np.sqrt(s.shape[0] / n_yticks))
yt_lbl_list = np.arange(0, s.shape[0], step_yticks).tolist()
if(yt_lbl_list[-1] != s.shape[0]):
yt_lbl_list = yt_lbl_list + [s.shape[0]]
yt_lbl_list = [("%d" % yt) for yt in yt_lbl_list]
yt_loc_list = np.arange(0, s.shape[0] * ystep, step_yticks * ystep).tolist()
if(yt_loc_list[-1] != (s.shape[0] * ystep)):
yt_loc_list = yt_loc_list + [s.shape[0] * ystep]
ax.set_yticks(yt_loc_list)
ax.set_yticklabels(labels=yt_lbl_list)
plt.grid('on')
fig.subplots_adjust()
fig.show()
# wait, are we removing all data ???
if(mask_reject_data_sumup.sum() == s.shape[0]):
log.error("all data is rejected! You need to readjust your rejection bounds...")
# estimate final SNR and linewidth
log.pause()
s_cor_avg = s_cor.correct_average_2d()
final_snr, _, _ = s_cor_avg.analyze_snr_1d(peak_snr_range)
final_lw = s_cor_avg.analyze_linewidth_1d(peak_lw_range)
log.resume()
log.info("* Final post-data-rejection SNR = %.2f" % final_snr)
log.info("* Final post-data-rejection linewidth = %.2f Hz" % final_lw)
# fill up dict about this data rejection
data_rej_dict = {}
data_rej_dict["Pre-rejection"] = {}
data_rej_dict["Pre-rejection"]["snr"] = initial_snr
data_rej_dict["Pre-rejection"]["lw"] = initial_lw
data_rej_dict["Pre-rejection"]["na"] = s.shape[0]
data_rej_dict["Pre-rejection"]["measurements"] = peak_prop_analyze
data_rej_dict["Post-rejection"] = {}
data_rej_dict["Post-rejection"]["snr"] = final_snr
data_rej_dict["Post-rejection"]["lw"] = final_lw
data_rej_dict["Post-rejection"]["na"] = s_cor.shape[0]
data_rej_dict["Post-rejection"]["measurements"] = peak_prop_analyze_postcor
# final rejection bounds
final_peak_properties_ranges = peak_properties_ranges.copy()
final_peak_properties_ranges["amplitude (%)"] = np.abs(peak_prop_max[0])
final_peak_properties_ranges["linewidth (Hz)"] = [peak_prop_min[1], peak_prop_max[1]]
final_peak_properties_ranges["chemical shift (ppm)"] = np.abs(peak_prop_max[2])
final_peak_properties_ranges["phase std. factor (%)"] = np.abs(peak_prop_max[3]) / phase_std * 100.0 # special for phase
data_rej_dict["Rejection bounds"] = final_peak_properties_ranges
# auto methods
data_rej_dict["Automatic data rejection methods"] = {}
data_rej_dict["Automatic data rejection methods"]["Methods tried"] = auto_method_list
data_rej_dict["Automatic data rejection methods"]["Best method"] = optim_auto_method
data_rej_dict["Automatic data rejection methods"]["SNR change threshold (%)"] = auto_adjust_allowed_snr_change
# check if empty or not (if first data rejection or not)
if(s_cor._data_rejection is None):
s_cor._data_rejection = [data_rej_dict]
else:
s_cor._data_rejection.append(data_rej_dict)
return(s_cor)
def correct_realign_2d(self, peak_range=[4.5, 5], moving_averages=1, inter_corr_mode=False, freq_shift_max=25, allowed_apodization=5.0, display=False, display_range=[1, 6]):
"""
Realign each signal of interest in frequency by taking as a reference the first spectra in absolute mode using pick-picking or inter-correlation (experimental).
* Works only with a 2D [averages,timepoints] signal.
* Returns a 2D [averages,timepoints] signal.
Parameters
----------
peak_range : list [2]
Range in ppm used to analyze peak phase
moving_averages : int
Number of averages to perform when using moving average, needs to be an odd number
inter_corr_mode : boolean
Use inter-correlation technique to adjust frequency shifts. Could be more robust when SNR is low.
freq_shift_max : float
Max allowed frequency shift during realignment (Hz).
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during correction process. However, the final corrected signal will not be apodized.
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_realigned : MRSData2 numpy array [averages,timepoints]
Resulting frequency realigned data stored in a MRSData2 object
"""
log.debug("frequency realigning [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
ppm = s.frequency_axis_ppm()
s_realigned = s.copy()
if(s.shape[0] == 1):
log.warning("single-shot signal, cannot realign this!")
else:
# build moving averaged data
s_ma = self._build_moving_average_data_2d(moving_averages)
# init
s_avg = np.mean(s, axis=0)
if(inter_corr_mode):
# let's fix a frequency shift search range of +/- the peak_range width, converted to Hz
f_shifts_min = - np.abs(peak_range[1] - peak_range[0]) * s_ma.f0
f_shifts_max = + np.abs(peak_range[1] - peak_range[0]) * s_ma.f0
# let's fix the resolution here for the inter-correlation tests
f_shifts_step = RECO_CORRECT_REALIGN_INTER_CORR_MODE_DF * s_ma.f0
f_shifts_list = np.arange(f_shifts_min, f_shifts_max, f_shifts_step)
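# e.g. (hypothetical numbers) with peak_range = [4.5, 5] and f0 around 300 MHz, shifts of roughly +/-150 Hz would be scanned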
else:
# find peak in average spectrum absolute mode
ppm_peak_avg, peak_val, _, _, _ = s_avg._analyze_peak_1d(peak_range, allowed_apodization)
log.debug("measuring peak properties at %0.2fppm!" % ppm_peak_avg)
# for each average in moving averaged data
s_realigned_ma = s_ma.copy()
df_trace = np.zeros(s_ma.shape[0])
pbar = log.progressbar("realigning", s_ma.shape[0])
for a in range(0, s_ma.shape[0]):
if(inter_corr_mode):
# compare this individual spectrum with the first, using inter-correlation
# zero-fill and apodize moving average signal if needed
# btw, I only do this in the case of the intercorrelation mode because it is done internally for the peak-picking mode by the method _analyze_peak_1d
log.pause()
s_ma_ic = s_ma.correct_zerofill_nd().correct_apodization_nd(allowed_apodization)
log.resume()
# first spectrum as reference
s_ma_ic_ref = np.abs(s_ma_ic[0, :].spectrum())
# use the peak_range as a range for inter-corr tests
cc_2d = f_shifts_list * 0.0
for ifs, fs in enumerate(f_shifts_list):
s_ma_ic_shifted = np.abs(s_ma_ic[a, :].adjust_frequency(fs).spectrum())
cc = np.corrcoef(s_ma_ic_ref, s_ma_ic_shifted)
cc_2d[ifs] = np.abs(cc[0, 1])
# find max correlation
optimal_fs_ind = np.argmax(cc_2d)
optimal_fs = f_shifts_list[optimal_fs_ind]
df_trace[a] = optimal_fs
else:
# measure shift on moving average data
ppm_peak, _, _, _, _ = s_ma[a, :]._analyze_peak_1d(peak_range, allowed_apodization)
# estimate frequency shift in Hz compared to average spectrum
dppm = -(ppm_peak_avg - ppm_peak)
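# convert the ppm offset to Hz: dppm (ppm) * f0 (MHz) = shift in Hz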
df_trace[a] = dppm * s_ma.f0
# check max shift
if(np.abs(df_trace[a]) > freq_shift_max):
# that is too much, do not realign this spectrum
df_trace[a] = 0.0
# correct moving averaged data (for display only, less heavy)
s_realigned_ma[a, :] = s_ma[a, :].adjust_frequency(df_trace[a])
# correct original data
s_realigned[a, :] = s[a, :].adjust_frequency(df_trace[a])
pbar.update(a)
pbar.finish("done")
# final display
if(display):
fig_title = "Realigning individual spectra [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 3, sharex='all', sharey='all')
# display original averaged spectrum
axs[0, 0].plot(ppm, np.abs(s_avg.spectrum()), 'k-', linewidth=1)
axs[0, 0].set_xlim(display_range[1], display_range[0])
axs[0, 0].set_ylabel('averaged')
axs[0, 0].grid('on')
if(not inter_corr_mode):
# add peak position
axs[0, 0].plot(ppm_peak_avg, np.abs(peak_val), 'ro')
axs[0, 0].axvline(x=ppm_peak_avg, color='r', linestyle='--')
# display original data
axs[0, 1].plot(ppm, np.abs(s_ma.spectrum().transpose()), 'k-', linewidth=1)
axs[0, 1].set_xlim(display_range[1], display_range[0])
axs[0, 1].set_ylabel('original')
axs[0, 1].grid('on')
# display corrected spectra
axs[1, 1].plot(s_realigned_ma.frequency_axis_ppm(), np.abs(s_realigned_ma.spectrum().transpose()), 'b-', linewidth=1)
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].set_xlabel('chemical shift (ppm)')
axs[1, 1].set_ylabel('corrected')
axs[1, 1].grid('on')
# display corrected averaged spectrum
axs[1, 0].plot(s_realigned_ma.frequency_axis_ppm(), np.abs(np.mean(s_realigned_ma, axis=0).spectrum().transpose()), 'b-', linewidth=1)
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('averaged & corrected')
axs[1, 0].grid('on')
plt.subplot(1, 3, 3)
plt.plot(df_trace, np.arange(s_ma.shape[0]), 'k-x', linewidth=1)
plt.xlabel('estimated frequency shift (Hz)')
plt.ylabel('average index')
plt.grid('on')
fig.subplots_adjust()
fig.show()
# convert back to MRSData2
s_realigned = self.inherit(s_realigned)
return(s_realigned)
def correct_average_2d(self, na=None, display=False, display_range=[1, 6]):
"""
Average all averages data into one 1D MRS signal.
* Works only with a 2D [averages,timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
na : int
Number of signals to average
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_mean : MRSData2 numpy array [timepoints]
Resulting frequency realigned data stored in a MRSData2 object
"""
log.debug("averaging [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 2):
log.error("this method only works for 2D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
if(s.shape[0] == 1):
log.warning("single-shot signal, nothing to average!")
s_mean = np.mean(s, axis=0)
log.warning("reshaped to a " + str(s_mean.shape) + " vector")
else:
log.debug("averaging data...")
if(na is not None):
log.debug("only " + str(na) + "...")
if(na == 1):
s_mean = s[0, :]
else:
s_mean = np.mean(s[0:na, :], axis=0)
else:
s_mean = np.mean(s, axis=0)
if(display):
ppm = s.frequency_axis_ppm()
ppm_mean = s_mean.frequency_axis_ppm()
fig_title = "Averaging [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
axs[0].plot(ppm, s.spectrum().real.transpose(), 'k-', linewidth=1)
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('all spectra')
axs[0].grid('on')
axs[1].plot(ppm_mean, s_mean.spectrum().real.transpose(), 'b-', linewidth=1)
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('averaged spectrum')
axs[1].grid('on')
fig.subplots_adjust()
fig.show()
# convert back to MRSData2
s_mean = self.inherit(s_mean)
# if any ref data available, we average it too (silently)
if(s_mean.data_ref is not None):
s_mean.data_ref = s_mean.data_ref.correct_average_2d(None, False)
return(s_mean)
def correct_phase_1d(self, suspect_method=suspect_phasing_method.MATCH_MAGNITUDE_REAL, ppm_range=[0, 6], allowed_apodization=1.0, display=False, display_range=[1, 6]):
"""
Phase signal using suspect's (hidden) phasing functions. You can choose between 3 different types of phasing methods. See suspect/processing/phase.py file.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
suspect_method : suspect_phasing_method
Suspect phasing method to use here
ppm_range : list [2]
Range in ppm when analyzing spectra for phasing
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during phase analysis process. However, the final corrected signal will not be apodized.
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_phased : MRSData2 numpy array [timepoints]
Resulting phased data stored in a MRSData2 object
"""
log.debug("phasing using suspect functions [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
s_analyze = self.correct_apodization_nd(allowed_apodization)
# estimate phases
if(suspect_method == suspect_phasing_method.MATCH_MAGNITUDE_REAL):
phi0, phi1 = suspect.processing.phase.mag_real(s_analyze, range_ppm=ppm_range)
elif(suspect_method == suspect_phasing_method.MIN_IMAG_INTEGRAL):
phi0, phi1 = suspect.processing.phase.ernst(s_analyze)
elif(suspect_method == suspect_phasing_method.ACME):
phi0, phi1 = suspect.processing.phase.acme(s_analyze, range_ppm=ppm_range)
else:
log.error("hey, I do not know this suspect phasing method!?")
# apply phase corrections
s_phased = s.adjust_phase(phi0, phi1)
# convert back to MRSData2
s_phased = self.inherit(s_phased)
if(display):
fig_title = "Phasing [%s] using suspect's routines" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
axs[0].plot(s.frequency_axis_ppm(), s.spectrum().real, 'k-', linewidth=1)
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('original')
axs[0].grid('on')
# add low/high cuts
axs[0].axvline(x=ppm_range[0], color='r', linestyle='--')
axs[0].axvline(x=ppm_range[1], color='r', linestyle='--')
axs[1].plot(s_phased.frequency_axis_ppm(), s_phased.spectrum().real, 'b-', linewidth=1)
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('phased')
axs[1].grid('on')
# add low/high cuts
axs[1].axvline(x=ppm_range[0], color='r', linestyle='--')
axs[1].axvline(x=ppm_range[1], color='r', linestyle='--')
fig.subplots_adjust()
fig.show()
# if any ref data available, we phase it too (silently)
if(s_phased.data_ref is not None):
s_phased.data_ref = s_phased.data_ref.correct_phase_1d(suspect_method, ppm_range)
return(s_phased)
def correct_first_order_phase_1d(self, coeff_rad_ppm=0.15, display=False, display_range=[1, 6]):
"""
Correct for first-order phase along the chemical shift axis using a coefficient set manually.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
coeff_rad_ppm : float
First-order phase coefficient in rad/ppm
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_phased : MRSData2 numpy array [timepoints]
Resulting phased data stored in a MRSData2 object
"""
log.debug("phasing using first-order phasing [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
# get spectrum and apply linear phase along ppm axis
sf_phased = s.spectrum() * np.exp(1j * np.pi * (s.frequency_axis_ppm() - s.ppm0) * coeff_rad_ppm)
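# note: the phase applied at each point is pi * coeff_rad_ppm * (ppm - ppm0) radians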
# fft back and convert back to MRSData2
s_phased = np.fft.ifft(np.fft.ifftshift(sf_phased))
# convert back to MRSData2
s_phased = self.inherit(s_phased)
if(display):
fig_title = "First-order Phasing [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='all', sharey='col')
axs[0, 0].plot(s.frequency_axis_ppm(), s.spectrum().real, 'k-', linewidth=1)
axs[0, 0].set_xlim(display_range[1], display_range[0])
axs[0, 0].set_xlabel('chemical shift (ppm)')
axs[0, 0].set_ylabel('original real spectrum')
axs[0, 0].grid('on')
axs[0, 1].plot(s.frequency_axis_ppm(), np.unwrap(np.angle(s.spectrum())), 'k-', linewidth=1)
axs[0, 1].set_xlim(display_range[1], display_range[0])
axs[0, 1].set_xlabel('chemical shift (ppm)')
axs[0, 1].set_ylabel('original unwrapped phase (rad)')
axs[0, 1].grid('on')
axs[1, 0].plot(s_phased.frequency_axis_ppm(), s_phased.spectrum().real, 'b-', linewidth=1)
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('phased real spectrum')
axs[1, 0].grid('on')
axs[1, 1].plot(s_phased.frequency_axis_ppm(), np.unwrap(np.angle(s_phased.spectrum())), 'b-', linewidth=1)
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].set_xlabel('chemical shift (ppm)')
axs[1, 1].set_ylabel('phased unwrapped phase (rad)')
axs[1, 1].grid('on')
fig.subplots_adjust()
fig.show()
# if any ref data available, we phase it too (silently)
if(s_phased.data_ref is not None):
s_phased.data_ref = s_phased.data_ref.correct_first_order_phase_1d(coeff_rad_ppm)
return(s_phased)
def correct_apodization_nd(self, apo_factor=1.0, display=False, display_range=[1, 6]):
"""
Apodize signal using an exponential window adjusted by a linewidth parameter in Hz.
* Works with multi-dimensional signals.
Parameters
----------
apo_factor : float
Apodization factor in Hz
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_apo : MRSData2 numpy array [whatever,...,timepoints]
Resulting apodized data stored in a MRSData2 object
"""
log.debug("apodizing [%s]..." % self.display_label)
# apodize each individual signal
s = self.copy()
# check apodization factor
if(apo_factor <= 0):
log.warning("apodization factor is zero or negative, skipping!")
return(s)
t = s.time_axis()
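# exponential apodization window: exp(-a*t) adds roughly a/pi Hz of Lorentzian FWHM broadening (about 1.6 Hz for a = 5 Hz)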
w_apo = np.exp(-apo_factor * t)
if(s.ndim == 1):
w_apo_nd = w_apo
else: # >1
w_apo_nd = np.tile(w_apo, list(s.shape[:-1]) + [1])
s_apo = s * w_apo_nd
if(display):
# reshaping
if(s.ndim == 3):
s_disp = s.reshape([s.shape[0] * s.shape[1], s.shape[2]])
s_apo_disp = s_apo.reshape([s_apo.shape[0] * s_apo.shape[1], s_apo.shape[2]])
else:
s_disp = s.copy()
s_apo_disp = s_apo.copy()
ppm = s_disp.frequency_axis_ppm()
ppm_apo = s_apo_disp.frequency_axis_ppm()
fig_title = "Apodizing [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='row', sharey='row')
axs[0, 0].plot(t, np.abs(s_disp).transpose(), 'k-', linewidth=1, label='fid')
axs[0, 0].plot(t, w_apo * np.abs(s_disp.max()), 'r-', linewidth=1, label='apodization window')
axs[0, 0].set_xlabel('time (s)')
axs[0, 0].set_ylabel('original')
axs[0, 0].grid('on')
axs[0, 1].plot(t, np.abs(s_apo_disp).transpose(), 'b-', linewidth=1)
axs[0, 1].set_xlabel('time (s)')
axs[0, 1].set_ylabel('apodized')
axs[0, 1].grid('on')
axs[1, 0].plot(ppm, s_disp.spectrum().real.transpose(), 'k-', linewidth=1)
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('original spectrum')
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].grid('on')
axs[1, 1].plot(ppm_apo, s_apo_disp.spectrum().real.transpose(), 'b-', linewidth=1)
axs[1, 1].set_xlabel("chemical shift (ppm)")
axs[1, 1].set_ylabel('apodized spectrum')
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].grid('on')
fig.subplots_adjust()
fig.show()
# convert back to MRSData2
s_apo = self.inherit(s_apo)
# if any ref data available, we apodize it too (silently)
if(s_apo.data_ref is not None):
s_apo.data_ref = s_apo.data_ref.correct_apodization_nd(apo_factor, False)
return(s_apo)
def correct_crop_1d(self, nPoints_final=6144, display=False, display_range=[1, 6]):
"""
Crop signal in time-domain to remove last points.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
nPoints_final : int
Final number of points (after crop)
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_crop : MRSData2 numpy array [timepoints]
Resulting cropped data stored in a MRSData2 object
"""
log.debug("cropping [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
# crop
if(nPoints_final < s.shape[0]):
log.debug("cropping data from %d to %d points..." % (s.shape[0], nPoints_final))
s_crop = s[0:nPoints_final]
else:
s_crop = self.copy()
log.debug("no cropping needed, getting bored...")
if(display):
t = s.time_axis()
t_crop = s_crop.time_axis()
ppm = s.frequency_axis_ppm()
ppm_crop = s_crop.frequency_axis_ppm()
fig_title = "Cropping [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2, sharex='row', sharey='row')
axs[0, 0].plot(t, np.abs(s), 'k-', linewidth=1, label='fid')
axs[0, 0].set_xlabel('time (s)')
axs[0, 0].set_ylabel('original')
axs[0, 0].grid('on')
axs[0, 1].plot(t_crop, np.abs(s_crop), 'b-', linewidth=1)
axs[0, 1].set_xlabel('time (s)')
axs[0, 1].set_ylabel('cropped')
axs[0, 1].grid('on')
axs[1, 0].plot(ppm, s.spectrum().real, 'k-', linewidth=1)
axs[1, 0].set_xlabel('chemical shift (ppm)')
axs[1, 0].set_ylabel('original spectrum')
axs[1, 0].set_xlim(display_range[1], display_range[0])
axs[1, 0].grid('on')
axs[1, 1].plot(ppm_crop, s_crop.spectrum().real, 'b-', linewidth=1)
axs[1, 1].set_xlabel("chemical shift (ppm)")
axs[1, 1].set_ylabel('cropped spectrum')
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].grid('on')
fig.subplots_adjust()
fig.show()
# convert back to MRSData2
s_crop = self.inherit(s_crop)
# now we have a MRSData2 obj, modify sequence attribute
if(s_crop.sequence is not None):
log.debug("updating sequence.npts...")
s_crop.sequence.npts = nPoints_final
s_crop.sequence._ready = False
# if any ref data available, we crop it too (silently)
if(s_crop.data_ref is not None):
s_crop.data_ref = s_crop.data_ref.correct_crop_1d(nPoints_final, False)
return(s_crop)
def correct_peak_removal_1d(self, hsvd_nComponents=5, hsvd_range=[4.6, 4.8], display=False, display_range=[1, 6]):
"""
Remove any peak(s) within a ppm range using HSVD. Usually used to remove residual water peak.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
hsvd_nComponents : int
Number of components for HSVD
hsvd_range : list [2]
Range in ppm of HSVD components
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_peak_removed : MRSData2 numpy array [timepoints]
Resulting water HSVD suppressed data stored in a MRSData2 object
"""
log.debug("removing peak for [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
ppm = s.frequency_axis_ppm()
pbar = log.progressbar("removing peak(s) with HSVD", 5)
# estimate HSVD components
components = suspect.processing.water_suppression.hsvd(s, hsvd_nComponents)
pbar.update(1)
# filter them by keeping the ones contributing to the residual water peak and its sidebands
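# note: 4.7 - component['frequency'] / f0 converts the component frequency (Hz) into ppm, assuming the carrier sits at 4.7 ppm (water)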
water_components = [component for component in components if ((4.7 - component["frequency"] / self.f0) > hsvd_range[0] and (4.7 - component["frequency"] / self.f0) < hsvd_range[1])]
pbar.update(2)
# reconstruct the estimated water peak
hsvd_fid = suspect.processing.water_suppression.construct_fid(water_components, s.time_axis())
pbar.update(3)
# rebuild object
hsvd_fid = s.inherit(hsvd_fid)
pbar.update(4)
# and substract it from the fid
s_peak_removed = s - hsvd_fid
pbar.update(5)
# display this over the data
if(display):
fig_title = "Removing some peak(s) [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
# original spectrum
axs[0].plot(ppm, s.spectrum().real, 'k-', linewidth=1, label='original data (real part)')
axs[0].plot(ppm, hsvd_fid.spectrum().real, 'r-', linewidth=1, label='estimated HSVD peak')
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('original spectrum')
axs[0].grid('on')
axs[0].legend()
# water removed spectrum
axs[1].plot(ppm, s_peak_removed.spectrum().real, 'b-', linewidth=1, label='peak removed data (real part)')
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('peak removed spectrum')
axs[1].grid('on')
axs[1].legend()
fig.subplots_adjust()
fig.show()
pbar.finish("done")
# convert back to MRSData2
s_peak_removed = self.inherit(s_peak_removed)
return(s_peak_removed)
def correct_freqshift_1d(self, peak_range=[4.5, 5], peak_real_ppm=4.7, allowed_apodization=1.0, display=False, display_range=[1, 6]):
"""
Shift the spectrum in frequency in order to get the right peaks at the right chemical shifts.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
peak_range : list [2]
Range in ppm used to find a peak of interest
peak_real_ppm : float
Chemical shift to set to the peak found
allowed_apodization : float/boolean
If >0 or !=False, apodize signal during peak analysis process. However, the final corrected signal will not be apodized.
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_shifted : MRSData2 numpy array [timepoints]
Resulting frequency calibrated data stored in a MRSData2 object
"""
log.debug("calibrating [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
ppm = s.frequency_axis_ppm()
# find maximum peak in range and its chemical shift
ppm_peak, peak_val, _, _, _ = s._analyze_peak_1d(peak_range, allowed_apodization)
log.debug("peak detected at %0.2fppm -> %0.2fppm!" % (ppm_peak, peak_real_ppm))
# estimate frequency shift in Hz
log.debug("frequency shifting data...")
dppm = (peak_real_ppm - ppm_peak)
df = dppm * s.f0
s_shifted = s.adjust_frequency(-df)
if(display):
fig_title = "Calibrating/frequency shifting [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 1, sharex='all', sharey='all')
axs[0].plot(ppm, s.spectrum().real, 'k-', linewidth=1)
axs[0].set_xlim(display_range[1], display_range[0])
axs[0].set_xlabel('chemical shift (ppm)')
axs[0].set_ylabel('original')
axs[0].grid('on')
# add peak position
axs[0].plot(ppm_peak, np.real(peak_val), 'ro')
axs[0].axvline(x=ppm_peak, color='r', linestyle='--')
axs[1].plot(ppm, s_shifted.spectrum().real, 'b-', linewidth=1)
axs[1].set_xlim(display_range[1], display_range[0])
axs[1].set_xlabel('chemical shift (ppm)')
axs[1].set_ylabel('shifted')
axs[1].grid('on')
# add new peak position
axs[1].plot(peak_real_ppm, np.real(peak_val), 'ro')
axs[1].axvline(x=peak_real_ppm, color='r', linestyle='--')
fig.subplots_adjust()
fig.show()
# convert back to MRSData2
s_shifted = self.inherit(s_shifted)
return(s_shifted)
def correct_bandpass_filtering_1d(self, range_ppm=[0, 6], window_func=np.hanning, display=False, display_range=[1, 6]):
"""
Filter the signal using FFT windowing.
* Works only with a 1D [timepoints] signal.
* Returns a 1D [timepoints] signal.
Parameters
----------
range_ppm : list [2]
Range in ppm used for band-pass filtering
window_func : numpy windowing function
Apodization window function
display : boolean
Display correction process (True) or not (False)
display_range : list [2]
Range in ppm used for display
Returns
-------
s_filtered : MRSData2 numpy array [timepoints]
Resulting frequency filtered signal
"""
log.debug("band-pass filtering [%s]: keeping the %.2f-%.2fppm region..." % (self.display_label, range_ppm[0], range_ppm[1]))
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.copy()
ppm = s.frequency_axis_ppm()
# build apodization window
ind_low = np.argmin(np.abs(ppm - range_ppm[0]))
ind_high = np.argmin(np.abs(ppm - range_ppm[1]))
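# note: the slicing below assumes the ppm axis decreases with index, i.e. ind_high < ind_low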
n_window = ind_low - ind_high
window_segment = window_func(n_window)
window_full = ppm * 0.0
window_full[ind_high:ind_low] = window_segment
# apply window
sf = s.spectrum()
sf_filtered = sf * window_full
s_filtered = np.fft.ifft(np.fft.ifftshift(sf_filtered))
# convert back to MRSData2
s_filtered = self.inherit(s_filtered)
if(display):
fig_title = "Band-pass filtering [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
axs = fig.subplots(2, 2)
axs[0, 0].plot(s.time_axis(), np.real(s), 'k-', linewidth=1)
axs[0, 0].set_xlabel('time (s)')
axs[0, 0].set_ylabel('original')
axs[0, 0].grid('on')
axs[0, 1].plot(ppm, s.spectrum().real, 'k-', linewidth=1)
axs[0, 1].plot(ppm, window_full * np.max(s.spectrum().real), 'r-', linewidth=1)
axs[0, 1].set_xlim(display_range[1], display_range[0])
axs[0, 1].set_xlabel('chemical shift (ppm)')
axs[0, 1].set_ylabel('original')
axs[0, 1].grid('on')
# add low/high cuts
axs[0, 1].axvline(x=range_ppm[0], color='r', linestyle='--')
axs[0, 1].axvline(x=range_ppm[1], color='r', linestyle='--')
axs[1, 0].plot(s.time_axis(), np.real(s_filtered), 'k-', linewidth=1)
axs[1, 0].set_xlabel('time (s)')
axs[1, 0].set_ylabel('filtered')
axs[1, 0].grid('on')
axs[1, 1].plot(ppm, s_filtered.spectrum().real, 'b-', linewidth=1)
axs[1, 1].set_xlim(display_range[1], display_range[0])
axs[1, 1].set_xlabel('chemical shift (ppm)')
axs[1, 1].set_ylabel('filtered')
axs[1, 1].grid('on')
# add low/high cuts
axs[1, 1].axvline(x=range_ppm[0], color='r', linestyle='--')
axs[1, 1].axvline(x=range_ppm[1], color='r', linestyle='--')
fig.subplots_adjust()
fig.show()
return(s_filtered)
def display_voi_anatomy_nd(self):
"""
Display the VOI on a anatomical image of your choice. Experimental for now, just following tutorial here: https://suspect.readthedocs.io/en/latest/notebooks/tut06_mpl.html
* Works with multi-dimensional signals.
"""
log.debug("displaying VOI [%s]..." % self.display_label)
# read dicom images
t2w = suspect.image.load_dicom_volume(self.anatomy_folderpath + "/original-primary-m-norm-nd_e01_0001.dcm")
# find best slice
pcg_centre = self.to_scanner(0, 0, 0)
pcg_centre_index = t2w.from_scanner(*pcg_centre).round().astype(int)
# VOI drawing coordinates
# TODO: this works only for sagittal images (spinal cord)
vx = self.sequence.voxel_size # mm (will need cm)
corner_coords_pcg = [[0, -vx[1] / 20.0, -vx[2] / 20.0],
[0, -vx[1] / 20.0, vx[2] / 20.0],
[0, vx[1] / 20.0, vx[2] / 20.0],
[0, vx[1] / 20.0, -vx[2] / 20.0],
[0, -vx[1] / 20.0, -vx[2] / 20.0]]
corner_coords = np.array([t2w.from_scanner(*self.to_scanner(*coord)) for coord in corner_coords_pcg])
fig_title = "Anatomical image display [%s]" % self.display_label
fig = plt.figure(fig_title)
fig.clf()
fig.suptitle(fig_title)
plt.imshow(t2w[pcg_centre_index[2]], cmap=plt.cm.gray)
plt.plot(corner_coords[:, 0], corner_coords[:, 1], 'red')
plt.xticks([])
plt.yticks([])
plt.show()
def display_spectrum_1d(self, ifig="Displaying final spectra", display_range=[1, 6], allowed_apodization=5.0, magnitude_mode=False):
"""
Display spectrum in figure 'ifig', overlaying if needed.
* Works only with a 1D [timepoints] signal.
Parameters
----------
ifig: int or str
The figure index that shoud host the plot
s : MRSData2 numpy array [timepoints]
MRS data to display
display_range : list [2]
Range in ppm used for display
allowed_apodization : float
Apodization factor used for display (Hz)
magnitude_mode : boolean
Displays in magnitude mode (True) or the real part (False)
Returns
-------
fig : matplotlib.figure
Resulting matplotlib figure
"""
log.debug("displaying [%s]..." % self.display_label)
# dimensions check
if(self.ndim != 1):
log.error("this method only works for 1D signals! You are feeding it with %d-dimensional data. :s" % self.ndim)
# init
s = self.correct_apodization_nd(allowed_apodization)
log.debug("displaying stuff!")
fig = plt.figure(ifig)
fig.suptitle("Displaying final spectra")
if(magnitude_mode):
plt.plot(s.frequency_axis_ppm(), np.abs(s.spectrum()) + self.display_offset, linewidth=1, label=self.display_label)
else:
plt.plot(s.frequency_axis_ppm(), s.spectrum().real + self.display_offset, linewidth=1, label=self.display_label)
# add ytick if offset
if(self.display_offset != 0):
yt = plt.yticks()
yt2 = np.hstack((yt[0], self.display_offset))
yt3 = np.sort(yt2)
plt.yticks(yt3)
if any(display_range):
plt.xlim(display_range[1], display_range[0])
plt.xlabel('chemical shift (ppm)')
plt.ylabel('spectrum')
plt.grid('on')
plt.legend()
plt.subplots_adjust()
plt.show()
return(plt.figure(ifig))
def save_ismrmd(self, h5_filepath):
"""
Save the MRSData2 object to a ISMRMRD format file.
Parameters
----------
h5_filepath: string
Full absolute file path pointing to h5 file
"""
io.write_ismrmd(self, h5_filepath)
def save_nifti_mrs(self, nifti_mrs_filepath):
"""
Save this MRS signal to a NIFTI MRS file.
Parameters
----------
nifti_mrs_filepath: string
Full absolute file path pointing to the nifti file
"""
io.write_nifti_mrs(self, nifti_mrs_filepath)
def save_mat(self, mat_filepath):
"""
Save the numpy array content to a MATLAB mat file.
Parameters
----------
mat_filepath: string
Full absolute file path pointing to the mat file
"""
io.write_mat(self, mat_filepath)
def save_pkl(self, pkl_filepath):
"""
Save the whole object to a pickle file.
Parameters
----------
pkl_filepath: string
Full absolute file path pointing to pkl file
"""
io.write_pkl(self, pkl_filepath)
def __reduce__(self):
"""Reduce internal pickling method used when dumping. Modified so that MRSData2 attributes are not forgotten. See for more info: https://docs.python.org/3/library/pickle.html ."""
# get numpy reduce tuple
rd = super().__reduce__()
# add MRSData2 attributes
rd2 = rd[2] + (self.__dict__,)
# return the new reduce tuple version
return(rd[0], rd[1], rd2)
def __setstate__(self, d):
"""Set new state to object. Internal pickling method used when loading. Modified so that MRSData2 attributes are not forgotten. See for more info: https://docs.python.org/3/library/pickle.html ."""
# load MRSData2 attributes
self.__dict__ = d[-1]
# load all the rest relative to numpy
super().__setstate__(d[0:-1])
return(self)
def to_dataframe(self, include_obj=True, prefix_str="data_"):
"""
Convert the object's attributes to dataframe. Can include the object itself.
Parameters
----------
include_obj : boolean
Include self to the dataframe row
prefix_str : string
Prefix string to add to column names
Returns
-------
df : Dataframe
Containing the attributes as columns (a single row)
"""
log.debug("converting to dataframe...")
# get all attributes but remove the private ones
df = pd.DataFrame.from_dict([vars(self)])
df = df.filter(regex=("^(?!_).*"))
# add some specific private attributes I need
df["display_label"] = self.display_label
df["noise_level"] = self.noise_level
df["rejection"] = [self.data_rejection] # can be a list
df["file_hash"] = self.data_file_hash
df["is_rawdata"] = self.is_rawdata
df = pd.concat([df, pd.DataFrame.from_dict([self.patient])], axis=1)
df = pd.concat([df, self.sequence.to_dataframe(True)], axis=1)
if(include_obj):
df["obj"] = [self]
df = df.add_prefix(prefix_str)
return(df)
class pipeline:
"""The pipeline class is used to store all the reconstruction parameters needed for a specific bunch of acquired signals. Once the parameters are set, the pipeline can be run using one of the methods."""
# frozen stuff: a technique to prevent creating new attributes
# (https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init)
__isfrozen = False
def __setattr__(self, key, value):
"""Overload of __setattr__ method to check that we are not creating a new attribute."""
if self.__isfrozen and not hasattr(self, key):
log.error_new_attribute(key)
object.__setattr__(self, key, value)
def __init__(self, template_name=None):
"""
Initialize the reconstruction pipeline, using a template if needed.
Parameters
----------
template_name: string
Reco pipeline template file
"""
# --- initializing dataset dict ---
self.dataset = [{}] * MAX_NUM_DATASETS
for i in range(MAX_NUM_DATASETS):
self.dataset[i] = {"legend": None,
"raw": {"files": [None, None], "data": None, "analysis_results": None, "ref_data_analysis_results": None},
"dcm": {"files": [None, None], "data": None, "analysis_results": None, "ref_data_analysis_results": None},
"physio_file": None,
"imaging_file": None,
"resp_bpm": None,
"heart_bpm": None,
"comment": None}
# --- global settings ---
self.settings = { # option to process only a set of datasets: list of indexes
"datasets_indexes": None,
# folder to search for supplementary datasets
"folder_additional_datasets": None,
# ppm scale reference
"ppm0": 4.7,
# ppm range to search for peak used for phasing, etc.
"POI_range_ppm": [4.5, 5.2],
# ppm range to search for ppm scale calibration
"POI_shift_range_ppm": [4.5, 5.2],
# real ppm value the above peak
"POI_shift_true_ppm": 4.7,
# ppm range to search for peak for SNR estimation
"POI_SNR_range_ppm": [1.8, 2.2],
# ppm range to search for peak for FWHM estimation
"POI_LW_range_ppm": [4.5, 5.2],
# ppm range to for SNR/LW estimation in ref. data
"POI_ref_range_ppm": [4.5, 5.2],
# apodization factor used during signal analysis, never actually applied for signal correction
"allowed_apodization": 1.0,
# no phasing using reference data for already reconstructed data
"no_phasing_using_ref_for_dcm_data": True,
# path to pkl file to store processed data
"storage_file": None,
# force display off if needed
"display": None,
# ppm range used for display
"display_range_ppm": [1, 6],
# y offset used for display
"display_offset": 0.0,
# automatically check which dataset is noWS or WS and swap if needed
"auto_detect_ref_scan": True,
# raise error if raw data looks worse than dcm
"raise_error_on_badreco": True}
# --- available jobs and their parameters ---
self.job = {}
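# note: parameters set to pipeline._get_setting below are placeholders, presumably resolved against self.settings when the corresponding job is run (see _get_setting)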
# --- job: spectrum final display ---
self.job["displaying"] = {"job_func": MRSData2.display_spectrum_1d, "job_name": "displaying",
# figure index
"fig_index": "Displaying final spectra",
# ppm range used for display
"display_range_ppm": pipeline._get_setting,
# apodization factor used for display (Hz)
"allowed_apodization": pipeline._get_setting,
# display spectrum in magnitude mode?
"magnitude_mode": False
}
# --- job: VOI display on anatomical images ---
self.job["displaying_anatomy"] = {"job_func": MRSData2.display_voi_anatomy_nd, "job_name": "overlaying VOI on anatomical image"
}
# --- job: automatic rephasing ---
self.job["phasing"] = {"job_func": MRSData2.correct_phase_3d, "job_name": "phasing",
# use reference data is available?
"using_ref_data": True,
# ppm range to look fo peak used to estimate phase
"POI_range_ppm": pipeline._get_setting,
# average all averages per channel
"average_per_channel_mode": False,
# measure phase from 1st time point
"first_point_fid_mode": False,
# order of phasing in time: 0th or 1st order
"order": 0,
# add an additional 0th order phase (rd)
"offset": 0.0,
# display all this process to check what the hell is going on
"display": False,
"display_range_ppm": pipeline._get_setting
}
# --- job: amplification ---
self.job["scaling"] = {"job_func": MRSData2.correct_intensity_scaling_nd, "job_name": "scaling intensity",
"scaling_factor_rawdata": 1e8,
"scaling_factor_dcm": 1.0
}
# --- job: FID modulus ---
self.job["FID modulus"] = {"job_func": MRSData2.correct_fidmodulus_nd, "job_name": "FID modulus"
}
# --- job: time_shifting ---
self.job["time_shifting"] = {"job_func": MRSData2.correct_time_shift_nd, "job_name": "time_shifting",
# time shift in us
"time_shift_us": 375,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: channel combination ---
self.job["channel_combining"] = {"job_func": MRSData2.correct_combine_channels_3d, "job_name": "channel-combining",
# use non water-suppressed data to recombine and rephase channels
"using_ref_data": True,
# should we rephase (0th order) data while combining?
"phasing": False,
# boolean mask to switch on/off some Rx channels
"weights": [True]
}
# --- job: concatenate ---
self.job["concatenate"] = {"job_func": MRSData2.concatenate_2d, "job_name": "concatenate"
}
# --- job: zero_filling ---
self.job["zero_filling"] = {"job_func": MRSData2.correct_zerofill_nd, "job_name": "zero-filling",
# number of signal points after zf
"npts": 8192 * 2,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: analyze physio signal ---
self.job["physio_analysis"] = {"job_func": MRSData2.analyze_physio_2d, "job_name": "analyzing physio. signals",
# ppm range to look for a peak to analyze
"POI_range_ppm": pipeline._get_setting,
# time range in (ms) to look around timestamp for correlation physio/MRS
"delta_time_ms": 1000.0,
# apodization factor used during signal analysis stage
"allowed_apodization": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True
}
# --- job: automatic data rejection based on criterias ---
self.job["data_rejecting"] = {"job_func": MRSData2.correct_analyze_and_reject_2d, "job_name": "data rejecting",
# ppm range to look for a peak to analyze
"POI_range_ppm": pipeline._get_setting,
# ppm range to estimate SNR
"POI_SNR_range_ppm": pipeline._get_setting,
# ppm range to estimate LW
"POI_LW_range_ppm": pipeline._get_setting,
# size of moving average window
"moving_averages": 1,
# if True, rejects if linewidth could not be estimated
"reject_when_linewidth_fails": True,
# rejection criterias for
# amplitude relative changes: keep data if within +/-val % range
# linewidth changes: keep data is below val Hz
# chemical shift changes: keep data is within +/-val ppm
# phase changes: keep data if within +/-val/100 * std(phase) rd
"ranges": {"amplitude (%)": None,
"linewidth (Hz)": None,
"chemical shift (ppm)": None,
"phase std. factor (%)": None},
# for amplitude, chemical shift and phase, the rejection of data is based on ranges of relative changes of those metrics. Relative to what? The mean value over the whole acquisition (True) or the first acquired point (False)
"rel2mean": True,
# method for automatic adjustement
"auto_method_list": [data_rejection_method.AUTO_AMPLITUDE,
data_rejection_method.AUTO_LINEWIDTH,
data_rejection_method.AUTO_FREQUENCY,
data_rejection_method.AUTO_PHASE],
# minimum allowed SNR change (%) when adjusting the linewidth criteria; this can be positive (we want to increase SNR by +10% by rejecting crappy data) or negative (we are ok with decreasing the SNR by -10% in order to get better resolved spectra)
"auto_allowed_snr_change": 1.0,
# apodization factor used during signal analysis stage
"allowed_apodization": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: automatic data frequency realignment ---
self.job["realigning"] = {"job_func": MRSData2.correct_realign_2d, "job_name": "frequency realigning",
# ppm range to look for a peak to analyze
"POI_range_ppm": pipeline._get_setting,
# size of moving average window
"moving_averages": 1,
# use correlation mode
"inter_corr_mode": False,
# maximum frequency shift allowed in Hz
"freq_shift_max": 25,
# apodization factor used during signal analysis stage
"allowed_apodization": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: spectral filtering ---
self.job["filtering"] = {"job_func": MRSData2.correct_bandpass_filtering_1d, "job_name": "FFT filtering",
# ppm range to keep
"range_ppm": [1, 6],
# type of apodization window (take it from numpy/scipy)
"window_func": signal.tukey,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: data averaging ---
self.job["averaging"] = {"job_func": MRSData2.correct_average_2d, "job_name": "averaging",
# number of averages to mean (None = all)
"na": None,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: phasing using suspect ---
self.job["phasing_suspect"] = { "job_func": MRSData2.correct_phase_1d, "job_name": "phasing (suspect)",
# phasing method
"suspect_method": suspect_phasing_method.MATCH_MAGNITUDE_REAL,
# ppm range to analyze phase
"range_ppm": [1, 6],
# apodization factor used during signal analysis stage
"allowed_apodization": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: first-order phasing ---
self.job["phasing_first_order"] = { "job_func": MRSData2.correct_first_order_phase_1d, "job_name": "first-order phasing",
# phasing coefficient
"coeff_rad_ppm": 0.15,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: noise level analysis ---
self.job["noise_estimation"] = {"job_func": MRSData2.analyze_noise_nd, "job_name": "estimating noise level",
# estimate noise std time-domain on the last 100 pts of the FID
"npts": 100
}
# --- job: data apodization ---
self.job["apodizing"] = {"job_func": MRSData2.correct_apodization_nd, "job_name": "apodizing",
# exponential damping factor for apodization (Hz)
"damping_hz": 5,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: data cropping ---
self.job["cropping"] = {"job_func": MRSData2.correct_crop_1d, "job_name": "cropping",
# final number of signal points after crop
"final_npts": 6144,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: water post-acquisition removal ---
self.job["water_removal"] = {"job_func": MRSData2.correct_peak_removal_1d, "job_name": "removing water peak",
# number of components when running HSVD
"hsvd_components": 5,
# ppm range where all components will be remove
"POI_range_ppm": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: spectrum chemical shift calibration ---
self.job["calibrating"] = {"job_func": MRSData2.correct_freqshift_1d, "job_name": "frequency shifting",
# ppm range to look for the peak of interest (NAA by default)
"POI_shift_range_ppm": pipeline._get_setting,
# real ppm value for this peak
"POI_shift_true_ppm": pipeline._get_setting,
# apodization factor used during signal analysis stage
"allowed_apodization": pipeline._get_setting,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: SNR analysis ---
self.job["analyzing_snr"] = {"job_func": MRSData2.analyze_snr_1d, "job_name": "analyzing SNR",
# ppm range to look for a peak to analyze
"POI_SNR_range_ppm": pipeline._get_setting,
# ppm range to look for pure noise
"n_range_ppm": [-2, -1],
# divide SNR by 2, like LCModel does
"half_factor": False,
# should we look at the magnitude or real spectrum?
"magnitude_mode": False,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: LW analysis ---
self.job["analyzing_lw"] = {"job_func": MRSData2.analyze_linewidth_1d, "job_name": "analyzing peak-linewidth",
# ppm range to look for a peak to analyze
"POI_LW_range_ppm": pipeline._get_setting,
# should we look at the magnitude or real spectrum?
"magnitude_mode": False,
# display all this process to check what the hell is going on
"display": True,
"display_range_ppm": pipeline._get_setting
}
# --- job: ref data SNR analysis ---
self.job["ref_data_analyzing_snr"] = {
"job_func": MRSData2.analyze_snr_1d, "job_name": "analyzing ref. data SNR",
# ppm range to look for a peak to analyze
"POI_ref_range_ppm": pipeline._get_setting,
# ppm range to look for pure noise
"n_range_ppm": [-2, -1],
# divide SNR by 2, like LCModel does
"half_factor": False,
# should we look at the magnitude or real spectrum?
"magnitude_mode": False,
# display all this process to check what the hell is going on
"display": False,
"display_range_ppm": pipeline._get_setting
}
# --- job: ref data LW analysis ---
self.job["ref_data_analyzing_lw"] = {
"job_func": MRSData2.analyze_linewidth_1d, "job_name": "analyzing ref. data peak-linewidth",
# ppm range to look for a peak to analyze
"POI_ref_range_ppm": pipeline._get_setting,
# should we look at the magnitude or real spectrum?
"magnitude_mode": False,
# display all this process to check what the hell is going on
"display": False,
"display_range_ppm": pipeline._get_setting
}
# --- job list ---
# list of data processing to apply to the data
# beware, you need to know what you are doing here
# also, be careful with the dimensionality of data 3D, 2D, 1D along the data processing
# order is important!
self.job_list = [self.job["phasing"],
self.job["scaling"],
self.job["FID modulus"],
self.job["channel_combining"],
self.job["concatenate"],
self.job["zero_filling"],
self.job["physio_analysis"],
self.job["data_rejecting"],
self.job["realigning"],
self.job["averaging"],
self.job["noise_estimation"],
self.job["apodizing"],
self.job["cropping"],
self.job["water_removal"],
self.job["calibrating"],
self.job["displaying"]]
# --- analyze job list ---
# SNR/LW analysis job list
self.analyze_job_list = [self.job["channel_combining"],
self.job["averaging"],
self.job["calibrating"]]
# --- SNR/LW analysis ---
self.analyze_enable = True
# --- template loading if needed ---
self.template_name = template_name
if(template_name is not None):
# overwrite everything above with the template
self.load_template(template_name)
# freeze the object and prevent the creation of new attributes
self.__isfrozen = True
def _get_setting(self, setting_key):
"""
Return the value of a setting from the settings dict. Sounds like a stupid method but I use it to replace pointers. To make it a little easier for the user, a settings dict contains all the main parameters that are repeatedly used during the reco, especially the ppm range to look for a peak of interest (POI). For each job defined above, the default parameter values are linked to this settings dict using this method.
Parameters
----------
setting_key : dict key from self.settings
Name of the setting
Returns
-------
self.settings[setting_key] : ?
Value of the setting
"""
return(self.settings[setting_key])
def _run_job(self, job, data, default_args=False):
"""
Run a job on some data, using either the job parameters stored in the pipeline or the job's default arguments.
Parameters
----------
job : dict entry from self.job
The job to run on the data
data : MRSData2 object [whatever,...,timepoints]
Data to process
default_args : boolean
Should we ignore the pipeline job parameters and run with default arguments (True)?
Returns
-------
job_result : ?
Stuff returned by the job
"""
# get job name
log.info_line________________________()
job_name = job["job_name"]
log.info("%s on [%s]..." % (job_name, data.display_label))
# get function
job_func = job["job_func"]
if(default_args):
job_args = [data]
else:
# get arguments
job_args = job.copy()
del job_args["job_name"]
del job_args["job_func"]
job_args = [data] + list(job_args.values())
# call job on data
job_result = job_func(*job_args)
# return
return(job_result)
def _analyze(self, data, current_job, already_done_jobs):
"""
Estimate SNR and/or peak linewidth for this dataset. Values are stored. A mini default pipeline is applied before SNR/LW measurements and can be set with self.analyze_job_list.
Parameters
----------
data : MRSData2 object [whatever,...,timepoints]
Dataset
current_job : MRSData2 method function
The job that was just applied to data before calling this function
already_done_jobs : list (stack)
List of already applied processing functions to this dataset
Returns
-------
data_snr : float
SNR estimated on data
data_lw : float
Peak linewidth estimated on data
ref_data_snr : float
SNR estimated on ref. data if any
ref_data_lw : float
Peak linewidth estimated on ref. data if any
"""
log.debug("estimating SNR and LW for [%s]..." % data.display_label)
# init job list
this_analyze_job_list = [j for j in self.analyze_job_list if j not in already_done_jobs]
# run mini-pipeline with default arguments (= no display)
# and with no log output
log.pause()
for j in this_analyze_job_list:
# run job on this dataset with default arguments
data = self._run_job(j, data, True)
# measure snr
this_job = self.job["analyzing_snr"]
data_snr, _, _ = self._run_job(this_job, data)
# measure lw
this_job = self.job["analyzing_lw"]
data_lw = self._run_job(this_job, data)
if(data.data_ref is not None):
# measure ref data snr
this_job = self.job["ref_data_analyzing_snr"]
ref_data_snr, _, _ = self._run_job(this_job, data.data_ref)
# measure ref data lw
this_job = self.job["ref_data_analyzing_lw"]
ref_data_lw = self._run_job(this_job, data.data_ref)
else:
ref_data_snr = np.nan
ref_data_lw = np.nan
# output
log.resume()
job_label = "post-" + current_job["job_name"]
log.debug(job_label + " SNR of [%s] = %.2f" % (data.display_label, data_snr))
log.debug(job_label + " LW of [%s] = %.2f" % (data.display_label, data_lw))
return(data_snr, data_lw, ref_data_snr, ref_data_lw)
def _detect_data_ref(self, s, s_ref):
"""
Detect highest SNR between these two datasets and return them in the right order.
Parameters
----------
s : MRSData2 object
Data supposed to be WS
s_ref : MRSData2 object
Data supposed to be noWS and used as a reference signal for phasing, etc.
Returns
-------
s2 : MRSData2 object
Data supposed to be WS
s_ref2 : MRSData2 object
Data supposed to be noWS and used as a reference signal for phasing, etc.
"""
# compare FID max in magnitude (dirty but robust)
s_ref_tmp = np.mean(np.abs(s_ref), axis=(0, 1))
s_tmp = np.mean(np.abs(s), axis=(0, 1))
if(np.max(s_tmp) > np.max(s_ref_tmp)):
# the supposedly water-suppressed data shows the strongest FID: it is most likely the noWS reference, so swap the two datasets
s2 = s_ref
s_ref2 = s
else:
s2 = s
s_ref2 = s_ref
return(s2, s_ref2)
import numpy as np
import tkinter as tk
import matplotlib.pyplot as plt
import trimesh as tr
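# unit conversions to meters and the vacuum permeability mu_0 = 4*pi*1e-7 T*m/A; const = mu_0 / (4*pi)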
mm=1/1000
inch=0.0254
myz=np.pi*4e-7
const=myz/(4*np.pi)
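# shrink the [mins, maxs] interval symmetrically by a small fraction of its width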
def get_fix_mins_maxs(mins, maxs):
deltas = (maxs - mins) / 12.
mins = mins + deltas / 4.
maxs = maxs - deltas / 4.
return [mins, maxs]
def make3D(inp,depth=3):
#takes any 2D matrix and copies it depth amount of layers
return np.tile(inp,(depth,1,1)).transpose((1,2,0))
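# e.g. make3D(np.zeros((2, 4)), depth=3).shape == (2, 4, 3)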
def repeat(inp,num):
#repeats input array num times
return np.tile(inp,(num,1))
def test_sanity(inp):
return np.nansum(np.logical_not(np.isfinite(inp)))
def difmat(a,b):
#calculates the difference vector between every point in a and b
alen=len(a)
blen=len(b)
return np.tile(a,(blen,1,1))-np.transpose(np.tile(b,(alen,1,1)),(1,0,2))
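# for a of shape (N, 3) and b of shape (M, 3), the result has shape (M, N, 3) with difmat(a, b)[i, j] == a[j] - b[i]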
def dist(a,b):
#calculates the distance between every point in a and b
diffmat= difmat(a,b)
return np.sqrt(np.nansum(diffmat**2,2))
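# note: the helper below shadows the built-in bool() within this module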
def bool(s):
if s in [True,'True','TRUE','true','1',1]:
return True
elif s in [False,'False','FALSE','false','0',0]:
return False
else:
print('error:no truth value detected')
return s
def bool_or_type(s,dtype):
if type(s) in [list,tuple,np.ndarray]:
out=[]
for item in s:
out.append(bool_or_type(item,dtype))
return out
else:
if s in [True,'True','TRUE','true','1',1]:
return True
elif s in [False,'False','FALSE','false','0',0]:
return False
else:
try:
return dtype(s)
except:
return s
def sanatise(s):
if type(s) in [list,tuple,np.ndarray]:
out=[]
for item in s:
out.append(sanatise(item))
return out
else:
try:
s_int=int(s)
s_float=float(s)
if s_int != s_float:
return s_float
else:
return s_int
except:
if s in [True,'True','TRUE','true']:
return True
elif s in [False,'False','FALSE','false']:
return False
else:
return s
class array_edit_widget:
def __init__(self,master,array,title=''):
self.outvars=[]
for index,item in enumerate(array):
if type(item) in [list,np.ndarray,tuple]:
self.outvars.append(array_edit_widget(master,item,title=str(index)))
else:
l = tk.Label(master, text=title+str(index))
l.pack()
self.outvars.append(tk.StringVar(master,value=str(item)))
tk.Entry(master,textvariable=self.outvars[-1]).pack()
def get(self,dtype=np.float32):
out=[]
for item in self.outvars:
out.append(sanatise(item.get()))
return np.array(out)
def destroy(self):
for var in self.outvars:
var.destroy()
def calc_kin_f(ch_pos,ch_mag):
#energy calculation test function
const=1
len_own=len(ch_pos)
diffmat = np.tile(ch_pos, (len_own, 1, 1)) - np.transpose(np.tile(ch_pos, (len_own, 1, 1)), (1, 0, 2))
distmat = np.sqrt(np.nansum(diffmat ** 2, 2)) # calculates the distance from every point to every other point
np.fill_diagonal(distmat, 1) # fills diagonal with ones to prevent zero divide for same charge distance
ch_mat = np.tile(ch_mag,(len_own,1))*(np.ones((len_own, len_own)) - np.identity(len_own))
charges = ch_mat*ch_mat.transpose()
energy = np.nansum(np.triu(const * charges / distmat,1))
force = np.nansum(const * np.tile(charges,(3,1,1)).transpose() * diffmat / np.tile(distmat,(3,1,1)).transpose() ** 3 , 1)
torque_m = np.cross(ch_pos, force)
return energy
def calc_energy(ch_pos,ch_mag):
#energy calculation test function
const=1
len_own = len(ch_pos)
diffmat = np.tile(ch_pos, (len_own, 1, 1)) - np.transpose(np.tile(ch_pos, (len_own, 1, 1)), (1, 0, 2))
distmat = np.sqrt(np.nansum(diffmat ** 2, 2)) # calculates the distance from every point to every other point
np.fill_diagonal(distmat, 1) # fills diagonal with ones to prevent zero divide for same charge distance
removediag = np.ones((len_own, len_own)) - np.identity(len_own) # matrix filled with ones and zeros along diagonal to remove same charge multiplication
ch_mat = np.tile(ch_mag,(len_own,1))*(np.ones((len_own, len_own)) *removediag)
##charges = np.tile(ch_mag, (len_own, 3, 1)).transpose((2, 0, 1)) * np.tile(ch_mag,(len_own, 3, 1)).transpose((0, 2, 1)) * removediag
charges = ch_mat*ch_mat.transpose()
energy = np.nansum(np.triu(const *charges / distmat,1)) # potential energy due to magnetic interaction of point charges
return energy
def calc_energy_safe(ch_pos,ch_mag,const=1):
#energy calculation test function
    # simple inefficient algorithm to calculate the Coulomb energy, used to check the validity of the vectorised version
len_own = len(ch_pos)
energy = 0
for l in range(len_own):
for k in range(len_own):
if l < k:
energy += const * ch_mag[l] * ch_mag[k] / np.linalg.norm(ch_pos[k] - ch_pos[l])
return energy
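# Illustrative cross-check (random example charges): the vectorised
# calc_energy() should agree with the explicit double loop in calc_energy_safe().
_rng_demo = np.random.default_rng(0)
_pos_demo = _rng_demo.random((5, 3))
_q_demo = _rng_demo.random(5)
assert np.isclose(calc_energy(_pos_demo, _q_demo),
                  calc_energy_safe(_pos_demo, _q_demo))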
def norm(inp):
#creates norm over last axes of input
return np.sqrt(np.nansum(inp**2,np.ndim(inp)-1))
def plot_and_save_motion_study(energy,torque,angle,name):
fig, ax1 = plt.subplots()
angle=180*angle/np.pi
color = 'tab:red'
ax1.set_xlabel('angle [deg]')
ax1.set_ylabel('Coulomb Energy [J]', color=color)
en=ax1.plot(angle, energy, marker='o', color=color,label='pot energy')
    ax1.plot(marker='v', color='tab:blue')
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Torque [Nm]', color=color) # we already handled the x-label with ax1
to=ax2.plot(angle, torque,marker='v', color=color,label='torque')
ax2.tick_params(axis='y', labelcolor=color)
fig.legend(['pot energy','torque'])
plt.xlim((0,180))
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
    np.savetxt(name+".csv",np.column_stack([energy,torque,angle]),header='Coulomb energy, torque and corresponding angle from a formation of '+name,delimiter=",")
plt.savefig(name+".svg")
def icosahedron(level):
from math import sin, cos, acos, sqrt, pi
from mpl_toolkits.mplot3d import Axes3D
s, c = 2 / sqrt(level), 1 / sqrt(level)
topPoints = [(0, 0, 1)] + [(s * cos(i * 2 * pi / 5.), s * sin(i * 2 * pi / 5.), c) for i in range(5)]
bottomPoints = [(-x, y, -z) for (x, y, z) in topPoints]
icoPoints = topPoints + bottomPoints
icoTriangs = [(0, i + 1, (i + 1) % 5 + 1) for i in range(5)] + \
[(6, i + 7, (i + 1) % 5 + 7) for i in range(5)] + \
[(i + 1, (i + 1) % 5 + 1, (7 - i) % 5 + 7) for i in range(5)] + \
[(i + 1, (7 - i) % 5 + 7, (8 - i) % 5 + 7) for i in range(5)]
icoPoints=np.array(icoPoints)
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, color='y', alpha=0.1)
ax.scatter(icoPoints[:,0],icoPoints[:,1],icoPoints[:,2])
return icoPoints
def D_scatter(points):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, color='y', alpha=0.1)
ax.scatter(points[:,0],points[:,1],points[:,2])
def polygon(n=8):
    #n equally spaced vertices on the unit circle
    phi = np.linspace(0, 2 * np.pi, num=n, endpoint=False)
    points = []
    for p in phi:
        points.append((np.sin(p), np.cos(p)))
    return np.array(points)
def sfr_centers(segments=8,
layers=3,
d=6*mm):
out=[]
r=segments*d/(2*np.pi)
for l in range(layers):
alpha=l*2*np.pi/(np.sqrt(2)*segments)
out.append(np.array((r*np.sin(alpha),r*(1-
|
np.cos(alpha)
|
numpy.cos
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_ProjectionVG [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionVG&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExVGProj).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, array, diff, linspace, abs, log, exp, sqrt, tile, atleast_2d, newaxis
from numpy import sum as npsum, min as npmin, max as npmax
from scipy.stats import norm
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, legend, ylabel, \
xlabel, title
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import struct_to_dict, datenum, save_plot
from intersect_matlab import intersect
from EffectiveScenarios import EffectiveScenarios
from ConditionalFP import ConditionalFP
from MMFP import MMFP
from VGpdf import VGpdf
from ParamChangeVG import ParamChangeVG
from ShiftedVGMoments import ShiftedVGMoments
from VG import VG
# -
# ## Upload databases
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_OptionStrategy'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_OptionStrategy'), squeeze_me=True)
OptionStrategy = struct_to_dict(db['OptionStrategy'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_VIX'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_VIX'), squeeze_me=True)
VIX = struct_to_dict(db['VIX'])
# invariants (daily P&L)
pnl = OptionStrategy.cumPL
epsi =
|
diff(pnl)
|
numpy.diff
|
import numpy as np
import random as rand
import pathlib
import sys
fileDir = pathlib.Path(__file__).parents[2]
code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
sys.path.append(str(code_library_folder))
from wpcd_partitioning_dictionaries import as_dict, cl_dict, pb_dict, hg_dict, n_dict, se_dict
def alox_modeling(alox, runs):
if alox == 0:
alox_removal = np.zeros(shape=(runs, 3))
alox_removal[:, 1] = np.ones(shape=runs)
as_alox = alox_removal
cl_alox = alox_removal
pb_alox = alox_removal
hg_alox = alox_removal
n_alox = alox_removal
se_alox = alox_removal
elif alox == 1:
alox_removal = np.zeros(shape=(runs, 3))
alox_removal[:, 1] = np.ones(shape=runs)
# For Arsenic.
as_alox_removal = np.column_stack((as_dict['AlOx']['removal'], as_dict['AlOx']['effluent']))
as_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(as_alox[:, 1]):
as_alox[i, :] = as_alox_removal[rand.randint(0, len(as_alox_removal) - 1), :]
i += 1
# For Chlorides.
cl_alox_removal = np.column_stack((cl_dict['AlOx']['removal'], cl_dict['AlOx']['effluent']))
cl_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(cl_alox[:, 1]):
cl_alox[i, :] = cl_alox_removal[rand.randint(0, len(cl_alox_removal) - 1), :]
i += 1
# For Lead.
pb_alox_removal = np.column_stack((pb_dict['AlOx']['removal'], pb_dict['AlOx']['effluent']))
pb_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(pb_alox[:, 1]):
pb_alox[i, :] = pb_alox_removal[rand.randint(0, len(pb_alox_removal) - 1), :]
i += 1
# For Mercury.
hg_alox_removal = np.column_stack((hg_dict['AlOx']['removal'], hg_dict['AlOx']['effluent']))
hg_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(hg_alox[:, 1]):
hg_alox[i, :] = hg_alox_removal[rand.randint(0, len(hg_alox_removal) - 1), :]
i += 1
# For Nitrogen.
n_alox_removal = np.column_stack((n_dict['AlOx']['removal'], n_dict['AlOx']['effluent']))
n_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(n_alox[:, 1]):
n_alox[i, :] = n_alox_removal[rand.randint(0, len(n_alox_removal) - 1), :]
i += 1
# For Selenium.
se_alox_removal = np.column_stack((se_dict['AlOx']['removal'], se_dict['AlOx']['effluent']))
se_alox = np.zeros(shape=(runs, 2))
i = 0
while i < len(se_alox[:, 1]):
se_alox[i, :] = se_alox_removal[rand.randint(0, len(se_alox_removal) - 1), :]
i += 1
return as_alox, cl_alox, pb_alox, hg_alox, se_alox #, n_alox
def bt_modeling(bt, runs):
if bt == 0:
bt_removal = np.zeros(shape=(runs, 3))
bt_removal[:, 1] = np.ones(shape=runs)
as_bt = bt_removal
cl_bt = bt_removal
pb_bt = bt_removal
hg_bt = bt_removal
n_bt = bt_removal
se_bt = bt_removal
elif bt == 1:
bt_removal = np.zeros(shape=(runs, 3))
bt_removal[:, 1] = np.ones(shape=runs)
# For Arsenic.
as_bt_removal = np.column_stack((as_dict['BT']['removal'], as_dict['BT']['effluent']))
as_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(as_bt[:, 1]):
as_bt[i, :] = as_bt_removal[rand.randint(0, len(as_bt_removal) - 1), :]
i += 1
# For Chlorides.
cl_bt_removal = np.column_stack((cl_dict['BT']['removal'], cl_dict['BT']['effluent']))
cl_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(cl_bt[:, 1]):
cl_bt[i, :] = cl_bt_removal[rand.randint(0, len(cl_bt_removal) - 1), :]
i += 1
# For Lead.
pb_bt_removal = np.column_stack((pb_dict['BT']['removal'], pb_dict['BT']['effluent']))
pb_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(pb_bt[:, 1]):
pb_bt[i, :] = pb_bt_removal[rand.randint(0, len(pb_bt_removal) - 1), :]
i += 1
# For Mercury.
hg_bt_removal = np.column_stack((hg_dict['BT']['removal'], hg_dict['BT']['effluent']))
hg_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(hg_bt[:, 1]):
hg_bt[i, :] = hg_bt_removal[rand.randint(0, len(hg_bt_removal) - 1), :]
i += 1
# For Nitrogen.
n_bt_removal = np.column_stack((n_dict['BT']['removal'], n_dict['BT']['effluent']))
n_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(n_bt[:, 1]):
n_bt[i, :] = n_bt_removal[rand.randint(0, len(n_bt_removal) - 1), :]
i += 1
# For Selenium.
se_bt_removal = np.column_stack((se_dict['BT']['removal'], se_dict['BT']['effluent']))
se_bt = np.zeros(shape=(runs, 2))
i = 0
while i < len(se_bt[:, 1]):
se_bt[i, :] = se_bt_removal[rand.randint(0, len(se_bt_removal) - 1), :]
i += 1
return as_bt, cl_bt, pb_bt, hg_bt, se_bt #, n_bt
def cp_modeling(cp, runs):
if cp == 0:
cp_removal = np.zeros(shape=(runs, 3))
cp_removal[:, 1] = np.ones(shape=runs)
as_cp = cp_removal
cl_cp = cp_removal
pb_cp = cp_removal
hg_cp = cp_removal
n_cp = cp_removal
se_cp = cp_removal
elif cp == 1:
cp_removal = np.zeros(shape=(runs, 3))
cp_removal[:, 1] = np.ones(shape=runs)
# For Arsenic.
as_cp_removal = np.column_stack((as_dict['CP']['removal'], as_dict['CP']['effluent']))
as_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(as_cp[:, 1]):
as_cp[i, :] = as_cp_removal[rand.randint(0, len(as_cp_removal) - 1), :]
i += 1
# For Chlorides.
cl_cp_removal = np.column_stack((cl_dict['CP']['removal'], cl_dict['CP']['effluent']))
cl_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(cl_cp[:, 1]):
cl_cp[i, :] = cl_cp_removal[rand.randint(0, len(cl_cp_removal) - 1), :]
i += 1
# For Lead.
pb_cp_removal = np.column_stack((pb_dict['CP']['removal'], pb_dict['CP']['effluent']))
pb_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(pb_cp[:, 1]):
pb_cp[i, :] = pb_cp_removal[rand.randint(0, len(pb_cp_removal) - 1), :]
i += 1
# For Mercury.
hg_cp_removal = np.column_stack((hg_dict['CP']['removal'], hg_dict['CP']['effluent']))
hg_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(hg_cp[:, 1]):
hg_cp[i, :] = hg_cp_removal[rand.randint(0, len(hg_cp_removal) - 1), :]
i += 1
# For Nitrogen.
n_cp_removal = np.column_stack((n_dict['CP']['removal'], n_dict['CP']['effluent']))
n_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(n_cp[:, 1]):
n_cp[i, :] = n_cp_removal[rand.randint(0, len(n_cp_removal) - 1), :]
i += 1
# For Selenium.
se_cp_removal = np.column_stack((se_dict['CP']['removal'], se_dict['CP']['effluent']))
se_cp = np.zeros(shape=(runs, 2))
i = 0
while i < len(se_cp[:, 1]):
se_cp[i, :] = se_cp_removal[rand.randint(0, len(se_cp_removal) - 1), :]
i += 1
return as_cp, cl_cp, pb_cp, hg_cp, se_cp #, n_cp
def crys_modeling(crys, runs):
if crys == 0:
crys_removal =
|
np.zeros(shape=(runs, 3))
|
numpy.zeros
|
# Built-in
import os
import warnings
import itertools as itt
import copy
import datetime as dtm # DB
# Common
import numpy as np
import scipy.optimize as scpopt
import scipy.interpolate as scpinterp
import scipy.constants as scpct
import scipy.sparse as sparse
from scipy.interpolate import BSpline
import scipy.stats as scpstats
import matplotlib.pyplot as plt
# ToFu-specific
import tofu.utils as utils
from . import _fit12d_funccostjac as _funccostjac
from . import _plot
__all__ = [
'fit1d_dinput', 'fit2d_dinput',
'fit12d_dvalid', 'fit12d_dscales',
'fit1d', 'fit2d',
'fit1d_extract', 'fit2d_extract',
]
_NPEAKMAX = 12
_DCONSTRAINTS = {
'bck_amp': False,
'bck_rate': False,
'amp': False,
'width': False,
'shift': False,
'double': False,
'symmetry': False,
}
_DORDER = ['amp', 'width', 'shift']
_SAME_SPECTRUM = False
_DEG = 2
_NBSPLINES = 13
_TOL1D = {'x': 1e-10, 'f': 1.e-10, 'g': 1.e-10}
_TOL2D = {'x': 1e-6, 'f': 1.e-6, 'g': 1.e-6}
_SYMMETRY_CENTRAL_FRACTION = 0.3
_BINNING = False
_POS = False
_SUBSET = False
_CHAIN = True
_METHOD = 'trf'
_LOSS = 'linear'
_D3 = {
'bck_amp': 'x',
'bck_rate': 'x',
'amp': 'x',
'coefs': 'lines',
'ratio': 'lines',
'Ti': 'x',
'width': 'x',
'vi': 'x',
'shift': 'lines', # necessarily by line for de-normalization (*lamb0)
}
_VALID_NSIGMA = 6.
_VALID_FRACTION = 0.8
_SIGMA_MARGIN = 3.
_ALLOW_PICKLE = True
_LTYPES = [int, float, np.int_, np.float_]
_DBOUNDS = {
'bck_amp': (0., 3.),
'bck_rate': (-3., 3.),
'amp': (0, 2),
'width': (0.01, 2.),
'shift': (-2, 2),
'dratio': (0., 2.),
'dshift': (-10., 10.),
'bs': (-10., 10.),
}
_DX0 = {
'bck_amp': 1.,
'bck_rate': 0.,
'amp': 1.,
'width': 1.,
'shift': 0.,
'dratio': 0.5,
'dshift': 0.,
'bs': 1.,
}
###########################################################
###########################################################
#
# Preliminary
# utility tools for 1d spectral fitting
#
###########################################################
###########################################################
def get_symmetry_axis_1dprofile(phi, data, cent_fraction=None):
""" On a series of 1d vertical profiles, find the best symmetry axis """
if cent_fraction is None:
cent_fraction = _SYMMETRY_CENTRAL_FRACTION
# Find the phi in the central fraction
phimin = np.nanmin(phi)
phimax = np.nanmax(phi)
phic = 0.5*(phimax + phimin)
dphi = (phimax - phimin)*cent_fraction
indphi = np.abs(phi-phic) <= dphi/2.
phiok = phi[indphi]
# Compute new phi and associated costs
phi2 = phi[:, None] - phiok[None, :]
phi2min = np.min([np.nanmax(np.abs(phi2 * (phi2 < 0)), axis=0),
np.nanmax(np.abs(phi2 * (phi2 > 0)), axis=0)], axis=0)
indout = np.abs(phi2) > phi2min[None, :]
phi2p = np.abs(phi2)
phi2n = np.abs(phi2)
phi2p[(phi2 < 0) | indout] = np.nan
phi2n[(phi2 > 0) | indout] = np.nan
nok = np.min([np.sum((~np.isnan(phi2p)), axis=0),
np.sum((~np.isnan(phi2n)), axis=0)], axis=0)
cost = np.full((data.shape[0], phiok.size), np.nan)
for ii in range(phiok.size):
indp = np.argsort(np.abs(phi2p[:, ii]))
indn = np.argsort(np.abs(phi2n[:, ii]))
cost[:, ii] = np.nansum(
(data[:, indp] - data[:, indn])[:, :nok[ii]]**2,
axis=1)
return phiok[np.nanargmin(cost, axis=1)]
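# Illustrative usage sketch (synthetic example profile): a Gaussian centred on
# phi = 0.1 should yield a symmetry axis close to 0.1 for each profile.
_phi_demo = np.linspace(-0.5, 0.5, 201)
_prof_demo = np.exp(-((_phi_demo - 0.1) / 0.15)**2)[None, :]
get_symmetry_axis_1dprofile(_phi_demo, _prof_demo)   # -> approx. array([0.1])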
###########################################################
###########################################################
#
# 1d spectral fitting from dlines
#
###########################################################
###########################################################
def _checkformat_dconstraints(dconstraints=None, defconst=None):
# Check constraints
if dconstraints is None:
dconstraints = defconst
# Check dconstraints keys
lk = sorted(_DCONSTRAINTS.keys())
c0 = (
isinstance(dconstraints, dict)
and all([k0 in lk for k0 in dconstraints.keys()])
)
if not c0:
msg = (
"\ndconstraints should contain constraints for spectrum fitting\n"
+ "It be a dict with the following keys:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided keys: {}".format(dconstraints.keys())
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstraints)
def _checkformat_dconstants(dconstants=None, dconstraints=None):
if dconstants is None:
return
lk = [kk for kk in sorted(dconstraints.keys()) if kk != 'symmetry']
if not isinstance(dconstants, dict):
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys: {}\n".format(lk)
+ "\t- provided : {}".format(type(dconstants))
)
raise Exception(msg)
# Check dconstraints keys
lc = [
k0 for k0, v0 in dconstants.items()
if not (
k0 in lk
and (
(
k0 in _DORDER
and isinstance(v0, dict)
and all([
k1 in dconstraints[k0].keys()
and type(v1) in _LTYPES
for k1, v1 in v0.items()
])
)
or (
k0 not in _DORDER
and type(v0) in _LTYPES
)
)
)
]
if len(lc) > 0:
dc0 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstraints[kk].keys()) if kk in _DORDER else float
)
for kk in lk
]
dc1 = [
'\t\t{}: {}'.format(
kk,
sorted(dconstants[kk].keys())
if kk in _DORDER else dconstants[kk]
)
for kk in sorted(dconstants.keys())
]
msg = (
"\ndconstants should be None or a dict with keys in:\n"
+ "\t- available keys:\n"
+ "\n".join(dc0)
+ "\n\t- provided keys:\n"
+ "\n".join(dc1)
)
raise Exception(msg)
# copy to avoid modifying reference
return copy.deepcopy(dconstants)
def _dconstraints_double(dinput, dconstraints, defconst=_DCONSTRAINTS):
dinput['double'] = dconstraints.get('double', defconst['double'])
c0 = (
isinstance(dinput['double'], bool)
or (
isinstance(dinput['double'], dict)
and all([
kk in ['dratio', 'dshift'] and type(vv) in _LTYPES
for kk, vv in dinput['double'].items()
])
)
)
if c0 is False:
msg = (
"dconstraints['double'] must be either:\n"
+ "\t- False: no line doubling\n"
+ "\t- True: line doubling with unknown ratio and shift\n"
+ "\t- {'dratio': float}: line doubling with:\n"
+ "\t \t explicit ratio, unknown shift\n"
+ "\t- {'dshift': float}: line doubling with:\n"
+ "\t \t unknown ratio, explicit shift\n"
+ "\t- {'dratio': float, 'dshift': float}: line doubling with:\n"
+ "\t \t explicit ratio, explicit shift"
)
raise Exception(msg)
def _width_shift_amp(
indict, dconstants=None,
keys=None, dlines=None, nlines=None, k0=None,
):
# ------------------------
# Prepare error message
msg = ''
pavail = sorted(set(itt.chain.from_iterable([
v0.keys() for v0 in dlines.values()
])))
# ------------------------
# Check case
c0 = indict is False
c1 = (
isinstance(indict, str)
and indict in pavail
)
c2 = (
isinstance(indict, dict)
and all([
isinstance(k1, str)
and (
(isinstance(v1, str)) # and v0 in keys)
or (
isinstance(v1, list)
and all([
isinstance(v2, str)
# and v1 in keys
for v2 in v1
])
)
)
for k1, v1 in indict.items()
])
)
c3 = (
isinstance(indict, dict)
and all([
# ss in keys
isinstance(vv, dict)
and all([s1 in ['key', 'coef', 'offset'] for s1 in vv.keys()])
and isinstance(vv['key'], str)
for ss, vv in indict.items()
])
)
c4 = (
isinstance(indict, dict)
and isinstance(indict.get('keys'), list)
and isinstance(indict.get('ind'), np.ndarray)
)
if not any([c0, c1, c2, c3, c4]):
msg = (
"dconstraints['{}'] shoud be either:\n".format(k0)
+ "\t- False ({}): no constraint\n".format(c0)
+ "\t- str ({}): key from dlines['<lines>'] ".format(c1)
+ "to be used as criterion\n"
+ "\t\t available crit: {}\n".format(pavail)
+ "\t- dict ({}): ".format(c2)
+ "{str: line_keyi or [line_keyi, ..., line_keyj}\n"
+ "\t- dict ({}): ".format(c3)
+ "{line_keyi: {'key': str, 'coef': , 'offset': }}\n"
+ "\t- dict ({}): ".format(c4)
+ "{'keys': [], 'ind': np.ndarray}\n"
+ " Available line_keys:\n{}\n".format(sorted(keys))
+ " You provided:\n{}".format(indict)
)
raise Exception(msg)
# ------------------------
# str key to be taken from dlines as criterion
if c0:
lk = keys
ind = np.eye(nlines)
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
if c1:
lk = sorted(set([dlines[k1].get(indict, k1) for k1 in keys]))
ind = np.array([
[dlines[k2].get(indict, k2) == k1 for k2 in keys]
for k1 in lk
])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c2:
lkl = []
for k1, v1 in indict.items():
if isinstance(v1, str):
v1 = [v1]
v1 = [k2 for k2 in v1 if k2 in keys]
c0 = (
len(set(v1)) == len(v1)
and all([k2 not in lkl for k2 in v1])
)
if not c0:
msg = (
"Inconsistency in indict[{}], either:\n".format(k1)
+ "\t- v1 not unique: {}\n".format(v1)
+ "\t- some v1 not in keys: {}\n".format(keys)
+ "\t- some v1 in lkl: {}".format(lkl)
)
raise Exception(msg)
indict[k1] = v1
lkl += v1
for k1 in set(keys).difference(lkl):
indict[k1] = [k1]
lk = sorted(set(indict.keys()))
ind = np.array([[k2 in indict[k1] for k2 in keys] for k1 in lk])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': np.ones((nlines,)),
'offset': np.zeros((nlines,)),
}
elif c3:
lk = sorted(set([v0['key'] for v0 in indict.values()]))
lk += sorted(set(keys).difference(indict.keys()))
ind = np.array([
[indict.get(k2, {'key': k2})['key'] == k1 for k2 in keys]
for k1 in lk
])
coefs = np.array([
indict.get(k1, {'coef': 1.}).get('coef', 1.) for k1 in keys
])
offset = np.array([
indict.get(k1, {'offset': 0.}).get('offset', 0.) for k1 in keys
])
outdict = {
'keys': np.r_[lk],
'ind': ind,
'coefs': coefs,
'offset': offset,
}
elif c4:
outdict = indict
if 'coefs' not in indict.keys():
outdict['coefs'] = np.ones((nlines,))
if 'offset' not in indict.keys():
outdict['offset'] = np.zeros((nlines,))
# ------------------------
# Ultimate conformity checks
if not c0:
assert sorted(outdict.keys()) == ['coefs', 'ind', 'keys', 'offset']
assert isinstance(outdict['ind'], np.ndarray)
assert outdict['ind'].dtype == np.bool_
assert outdict['ind'].shape == (outdict['keys'].size, nlines)
assert np.all(np.sum(outdict['ind'], axis=0) == 1)
assert outdict['coefs'].shape == (nlines,)
assert outdict['offset'].shape == (nlines,)
return outdict
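# Illustrative usage sketch (line names and wavelengths invented for the demo):
# grouping two lines by a shared criterion ('ION') so they share one width.
_dlines_demo = {
    'w': {'lambda0': 3.9492e-10, 'ION': 'ArXVII'},
    'z': {'lambda0': 3.9944e-10, 'ION': 'ArXVII'},
}
_keys_demo = np.array(['w', 'z'])
_width_demo = _width_shift_amp(
    'ION', keys=_keys_demo, dlines=_dlines_demo, nlines=2, k0='width',
)
# _width_demo['keys'] -> array(['ArXVII']); _width_demo['ind'] -> (1, 2) bool array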
###########################################################
###########################################################
#
# 2d spectral fitting from dlines
#
###########################################################
###########################################################
def _dconstraints_symmetry(
dinput,
dprepare=None,
symmetry=None,
cent_fraction=None,
defconst=_DCONSTRAINTS,
):
if symmetry is None:
symmetry = defconst['symmetry']
dinput['symmetry'] = symmetry
if not isinstance(dinput['symmetry'], bool):
msg = "dconstraints['symmetry'] must be a bool"
raise Exception(msg)
if dinput['symmetry'] is True:
dinput['symmetry_axis'] = get_symmetry_axis_1dprofile(
dprepare['phi1d'],
dprepare['dataphi1d'],
cent_fraction=cent_fraction,
)
###########################################################
###########################################################
#
# data, lamb, phi conformity checks
#
###########################################################
###########################################################
def _checkformat_data_fit12d_dlines_msg(data, lamb, phi=None, mask=None):
datash = data.shape if isinstance(data, np.ndarray) else type(data)
lambsh = lamb.shape if isinstance(lamb, np.ndarray) else type(lamb)
phish = phi.shape if isinstance(phi, np.ndarray) else type(phi)
masksh = mask.shape if isinstance(mask, np.ndarray) else type(mask)
shaped = '(nt, n1)' if phi is None else '(nt, n1, n2)'
shape = '(n1,)' if phi is None else '(n1, n2)'
msg = ("Args data, lamb, phi and mask must be:\n"
+ "\t- data: {} or {} np.ndarray\n".format(shaped, shape)
+ "\t- lamb, phi: both {} np.ndarray\n".format(shape)
+ "\t- mask: None or {}\n".format(shape)
+ " You provided:\n"
+ "\t - data: {}\n".format(datash)
+ "\t - lamb: {}\n".format(lambsh))
if phi is not None:
msg += "\t - phi: {}\n".format(phish)
msg += "\t - mask: {}\n".format(masksh)
return msg
def _checkformat_data_fit12d_dlines(
data, lamb, phi=None,
nxi=None, nxj=None, mask=None,
is2d=False,
):
# Check types
c0 = isinstance(data, np.ndarray) and isinstance(lamb, np.ndarray)
if is2d:
c0 &= isinstance(phi, np.ndarray)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 1
mindim = 1 if phi is None else 2
phi1d, lamb1d, dataphi1d, datalamb1d = None, None, None, None
if is2d:
# special case
c1 = lamb.ndim == phi.ndim == 1
if c1:
if nxi is None:
nxi = lamb.size
if nxj is None:
nxj = phi.size
lamb1d = np.copy(lamb)
phi1d = np.copy(phi)
lamb = np.repeat(lamb[None, :], nxj, axis=0)
phi = np.repeat(phi[:, None], nxi, axis=1)
c0 = (
data.ndim in mindim + np.r_[0, 1]
and (
lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
and lamb.shape == phi.shape
and lamb.shape in [(nxi, nxj), (nxj, nxi)]
)
)
else:
c0 = (
data.ndim in mindim + np.r_[0, 1]
and lamb.ndim == mindim
and lamb.shape == data.shape[-mindim:]
)
if not c0:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
# Check shapes 2
if data.ndim == mindim:
data = data[None, ...]
if is2d and c1:
dataphi1d = np.nanmean(data, axis=2)
datalamb1d = np.nanmean(data, axis=1)
if is2d and lamb.shape == (nxi, nxj):
lamb = lamb.T
phi = phi.T
data = np.swapaxes(data, 1, 2)
# mask
if mask is not None:
if mask.shape != lamb.shape:
if phi is not None and mask.T.shape == lamb.shape:
mask = mask.T
else:
msg = _checkformat_data_fit12d_dlines_msg(
data, lamb, phi=phi, mask=mask,
)
raise Exception(msg)
if is2d:
return lamb, phi, data, mask, phi1d, lamb1d, dataphi1d, datalamb1d
else:
return lamb, data, mask
###########################################################
###########################################################
#
# Domain limitation
#
###########################################################
###########################################################
def _checkformat_domain(domain=None, keys=['lamb', 'phi']):
if keys is None:
keys = ['lamb', 'phi']
if isinstance(keys, str):
keys = [keys]
if domain is None:
domain = {k0: {'spec': [np.inf*np.r_[-1., 1.]]} for k0 in keys}
return domain
c0 = (
isinstance(domain, dict)
and all([k0 in keys for k0 in domain.keys()])
)
if not c0:
msg = ("\nArg domain must be a dict with keys {}\n".format(keys)
+ "\t- provided: {}".format(domain))
raise Exception(msg)
domain2 = {k0: v0 for k0, v0 in domain.items()}
for k0 in keys:
domain2[k0] = domain2.get(k0, [np.inf*np.r_[-1., 1.]])
ltypesin = [list, np.ndarray]
ltypesout = [tuple]
for k0, v0 in domain2.items():
c0 = (
type(v0) in ltypesin + ltypesout
and (
(
all([type(v1) in _LTYPES for v1 in v0])
and len(v0) == 2
and v0[1] > v0[0]
)
or (
all([
type(v1) in ltypesin + ltypesout
and all([type(v2) in _LTYPES for v2 in v1])
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
)
)
)
if not c0:
msg = (
"domain[{}] must be either a:\n".format(k0)
+ "\t- np.ndarray or list of 2 increasing values: "
+ "inclusive interval\n"
+ "\t- tuple of 2 increasing values: exclusive interval\n"
+ "\t- a list of combinations of the above\n"
+ " provided: {}".format(v0)
)
raise Exception(msg)
if type(v0) in ltypesout:
v0 = [v0]
else:
c0 = all([
type(v1) in ltypesin + ltypesout
and len(v1) == 2
and v1[1] > v1[0]
for v1 in v0
])
if not c0:
v0 = [v0]
domain2[k0] = {
'spec': v0,
'minmax': [np.nanmin(v0), np.nanmax(v0)],
}
return domain2
def apply_domain(lamb=None, phi=None, domain=None):
lc = [lamb is not None, phi is not None]
if not lc[0]:
msg = "At least lamb must be provided!"
raise Exception(msg)
din = {'lamb': lamb}
if lc[1]:
din['phi'] = phi
domain = _checkformat_domain(domain=domain, keys=din.keys())
ind = np.ones(lamb.shape, dtype=bool)
for k0, v0 in din.items():
indin = np.zeros(v0.shape, dtype=bool)
indout = np.zeros(v0.shape, dtype=bool)
for v1 in domain[k0]['spec']:
indi = (v0 >= v1[0]) & (v0 <= v1[1])
if isinstance(v1, tuple):
indout |= indi
else:
indin |= indi
ind = ind & indin & (~indout)
return ind, domain
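# Illustrative usage sketch (arbitrary example wavelengths, in m): restrict a
# wavelength vector to one inclusive interval.
_lamb_demo = np.linspace(3.90e-10, 4.00e-10, 11)
_ind_demo, _domain_demo = apply_domain(
    lamb=_lamb_demo, domain={'lamb': [3.92e-10, 3.98e-10]},
)
# _ind_demo is a boolean mask keeping 3.92e-10 <= lamb <= 3.98e-10
# _domain_demo['lamb']['minmax'] -> [3.92e-10, 3.98e-10]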
###########################################################
###########################################################
#
# binning (2d only)
#
###########################################################
###########################################################
def _binning_check(
binning,
domain=None, nbsplines=None,
):
lk = ['phi', 'lamb']
lkall = lk + ['nperbin']
msg = (
"binning must be dict of the form:\n"
+ "\t- provide number of bins:\n"
+ "\t \t{'phi': int,\n"
+ "\t \t 'lamb': int}\n"
+ "\t- provide bin edges vectors:\n"
+ "\t \t{'phi': 1d np.ndarray (increasing),\n"
+ "\t \t 'lamb': 1d np.ndarray (increasing)}\n"
+ " provided:\n{}".format(binning)
)
# Check input
if binning is None:
binning = _BINNING
if nbsplines is None:
nbsplines = False
if nbsplines is not False:
c0 = isinstance(nbsplines, int) and nbsplines > 0
if not c0:
msg2 = (
"Both nbsplines and deg must be positive int!\n"
+ "\t- nbsplines: {}\n".format(nbsplines)
)
raise Exception(msg2)
# Check which format was passed and return None or dict
ltypes0 = _LTYPES
ltypes1 = [tuple, list, np.ndarray]
lc = [
binning is False,
(
isinstance(binning, dict)
and all([kk in lkall for kk in binning.keys()])
),
type(binning) in ltypes0,
type(binning) in ltypes1,
]
if not any(lc):
raise Exception(msg)
if binning is False:
return binning
elif type(binning) in ltypes0:
binning = {
'phi': {'nbins': int(binning)},
'lamb': {'nbins': int(binning)},
}
elif type(binning) in ltypes1:
binning = np.atleast_1d(binning).ravel()
binning = {
'phi': {'edges': binning},
'lamb': {'edges': binning},
}
for kk in lk:
if type(binning[kk]) in ltypes0:
binning[kk] = {'nbins': int(binning[kk])}
elif type(binning[kk]) in ltypes1:
binning[kk] = {'edges': np.atleast_1d(binning[kk]).ravel()}
c0 = all([
all([k1 in ['edges', 'nbins'] for k1 in binning[k0].keys()])
for k0 in lk
])
c0 = (
c0
and all([
(
(
binning[k0].get('nbins') is None
or type(binning[k0].get('nbins')) in ltypes0
)
and (
binning[k0].get('edges') is None
or type(binning[k0].get('edges')) in ltypes1
)
)
for k0 in lk
])
)
if not c0:
raise Exception(msg)
# Check dict
for k0 in lk:
c0 = all([k1 in ['nbins', 'edges'] for k1 in binning[k0].keys()])
if not c0:
raise Exception(msg)
if binning[k0].get('nbins') is not None:
binning[k0]['nbins'] = int(binning[k0]['nbins'])
if binning[k0].get('edges') is None:
binning[k0]['edges'] = np.linspace(
domain[k0]['minmax'][0], domain[k0]['minmax'][1],
binning[k0]['nbins'] + 1,
endpoint=True,
)
else:
binning[k0]['edges'] = np.atleast_1d(
binning[k0]['edges']).ravel()
if binning[k0]['nbins'] != binning[k0]['edges'].size - 1:
raise Exception(msg)
elif binning[k0].get('bin_edges') is not None:
binning[k0]['edges'] = np.atleast_1d(binning[k0]['edges']).ravel()
binning[k0]['nbins'] = binning[k0]['edges'].size - 1
else:
raise Exception(msg)
if not np.allclose(binning[k0]['edges'],
np.unique(binning[k0]['edges'])):
raise Exception(msg)
# Optional check vs nbsplines and deg
if nbsplines is not False:
if binning['phi']['nbins'] <= nbsplines:
msg = (
"The number of bins is too high:\n"
+ "\t- nbins = {}\n".format(binning['phi']['nbins'])
+ "\t- nbsplines = {}".format(nbsplines)
)
raise Exception(msg)
return binning
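# Illustrative usage sketch (arbitrary example bounds): a single integer is
# expanded into per-axis bin edges spanning the provided domain.
_dom_demo = {
    'phi': {'minmax': [-0.1, 0.1]},
    'lamb': {'minmax': [3.92e-10, 3.98e-10]},
}
_bin_demo = _binning_check(12, domain=_dom_demo, nbsplines=6)
# _bin_demo['phi']['edges'] -> 13 equally spaced edges over [-0.1, 0.1]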
def binning_2d_data(
lamb, phi, data, indok=None,
domain=None, binning=None,
nbsplines=None,
phi1d=None, lamb1d=None,
dataphi1d=None, datalamb1d=None,
):
# ------------------
# Checkformat input
binning = _binning_check(
binning,
domain=domain, nbsplines=nbsplines,
)
nspect = data.shape[0]
if binning is False:
if phi1d is None:
            phi1d_bins = np.linspace(
                domain['phi']['minmax'][0], domain['phi']['minmax'][1], 100,
            )
            lamb1d_bins = np.linspace(
                domain['lamb']['minmax'][0], domain['lamb']['minmax'][1], 100,
            )
            dataf = data.reshape((nspect, data.shape[1]*data.shape[2]))
            dataphi1d = scpstats.binned_statistic(
                phi.ravel(),
                dataf,
                statistic='sum',
                bins=phi1d_bins,
            )[0]
            datalamb1d = scpstats.binned_statistic(
                lamb.ravel(),
                dataf,
                statistic='sum',
                bins=lamb1d_bins,
            )[0]
phi1d = 0.5*(phi1d_bins[1:] + phi1d_bins[:-1])
lamb1d = 0.5*(lamb1d_bins[1:] + lamb1d_bins[:-1])
            # import pdb; pdb.set_trace() # DB
return (
lamb, phi, data, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
else:
nphi = binning['phi']['nbins']
nlamb = binning['lamb']['nbins']
bins = (binning['lamb']['edges'], binning['phi']['edges'])
# ------------------
# Compute
databin = np.full((nspect, nphi, nlamb), np.nan)
nperbin = np.full((nspect, nphi, nlamb), np.nan)
for ii in range(nspect):
databin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok[ii, ...]],
lamb[indok[ii, ...]],
data[indok[ii, ...]],
statistic='sum', bins=bins,
range=None, expand_binnumbers=True,
)[0]
nperbin[ii, ...] = scpstats.binned_statistic_2d(
phi[indok[ii, ...]],
lamb[indok[ii, ...]],
np.ones((indok[ii, ...].sum(),), dtype=int),
statistic='sum', bins=bins,
range=None, expand_binnumbers=True,
)[0]
binning['nperbin'] = nperbin
lambbin = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
phibin = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lambbin = np.repeat(lambbin[None, :], nphi, axis=0)
phibin = np.repeat(phibin[:, None], nlamb, axis=1)
indok = ~np.isnan(databin)
# dataphi1d
phi1d = phibin
lamb1d = lambbin
dataphi1d = np.nanmean(databin, axis=2)
datalamb1d = np.nanmean(databin, axis=1)
return (
lambbin, phibin, databin, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
)
###########################################################
###########################################################
#
# dprepare dict
#
###########################################################
###########################################################
def _get_subset_indices(subset, indlogical):
if subset is None:
subset = _SUBSET
if subset is False:
return indlogical
c0 = ((isinstance(subset, np.ndarray)
and subset.shape == indlogical.shape
and 'bool' in subset.dtype.name)
or (type(subset) in [int, float, np.int_, np.float_]
and subset >= 0))
if not c0:
msg = ("subset must be either:\n"
+ "\t- an array of bool of shape: {}\n".format(indlogical.shape)
+ "\t- a positive int (nb. of ind. to keep from indlogical)\n"
+ "You provided:\n{}".format(subset))
raise Exception(msg)
if isinstance(subset, np.ndarray):
indlogical = subset[None, ...] & indlogical
else:
subset = np.random.default_rng().choice(
indlogical.sum(), size=int(indlogical.sum() - subset),
replace=False, shuffle=False)
for ii in range(indlogical.shape[0]):
ind = indlogical[ii, ...].nonzero()
indlogical[ii, ind[0][subset], ind[1][subset]] = False
return indlogical
def _extract_lphi_spectra(
data, phi, lamb,
lphi=None, lphi_tol=None,
databin=None, binning=None, nlamb=None,
):
""" Extra several 1d spectra from 2d image at lphi """
# --------------
# Check input
if lphi is None:
lphi = False
if lphi is False:
lphi_tol = False
if lphi is not False:
lphi = np.atleast_1d(lphi).astype(float).ravel()
lphi_tol = float(lphi_tol)
if lphi is False:
return False, False
nphi = len(lphi)
# --------------
# Compute non-trivial cases
if binning is False:
if nlamb is None:
nlamb = lamb.shape[1]
lphi_lamb = np.linspace(lamb.min(), lamb.max(), nlamb+1)
        lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size-1), np.nan)
for ii in range(nphi):
indphi = np.abs(phi - lphi[ii]) < lphi_tol
lphi_spectra[:, ii, :] = scpstats.binned_statistic(
lamb[indphi], data[:, indphi], bins=lphi_lamb,
statistic='mean', range=None,
)[0]
else:
lphi_lamb = 0.5*(
binning['lamb']['edges'][1:] + binning['lamb']['edges'][:-1]
)
lphi_phi = 0.5*(
binning['phi']['edges'][1:] + binning['phi']['edges'][:-1]
)
lphi_spectra = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
lphi_spectra1 = np.full((data.shape[0], nphi, lphi_lamb.size), np.nan)
for ii in range(nphi):
datai = databin[:, np.abs(lphi_phi - lphi[ii]) < lphi_tol, :]
iok = np.any(~np.isnan(datai), axis=1)
for jj in range(datai.shape[0]):
if np.any(iok[jj, :]):
lphi_spectra[jj, ii, iok[jj, :]] = np.nanmean(
datai[jj, :, iok[jj, :]],
axis=1,
)
return lphi_spectra, lphi_lamb
def _checkformat_possubset(pos=None, subset=None):
if pos is None:
pos = _POS
c0 = isinstance(pos, bool) or type(pos) in _LTYPES
if not c0:
msg = ("Arg pos must be either:\n"
+ "\t- False: no positivity constraints\n"
+ "\t- True: all negative values are set to nan\n"
+ "\t- float: all negative values are set to pos")
raise Exception(msg)
if subset is None:
subset = _SUBSET
return pos, subset
def multigausfit1d_from_dlines_prepare(
data=None, lamb=None,
mask=None, domain=None,
pos=None, subset=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
lamb, data, mask = _checkformat_data_fit12d_dlines(
data, lamb, mask=mask,
)
# --------------
# Use valid data only and optionally restrict lamb
indok, domain = apply_domain(lamb, domain=domain)
if mask is not None:
indok &= mask
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
# Introduce time-dependence (useful for valid)
indok = indok[None, ...] & (~np.isnan(data))
# Recompute domain
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok, axis=0)]),
np.nanmax(lamb[np.any(indok, axis=0)])
]
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
indok = _get_subset_indices(subset, indok)
if np.any(np.isnan(data[indok])):
msg = (
"Some NaNs in data not caught by indok!"
)
raise Exception(msg)
# --------------
# Return
dprepare = {
'data': data,
'lamb': lamb,
'domain': domain,
'indok': indok,
'pos': pos,
'subset': subset,
}
return dprepare
def multigausfit2d_from_dlines_prepare(
data=None, lamb=None, phi=None,
mask=None, domain=None,
pos=None, binning=None,
nbsplines=None, deg=None, subset=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
):
# --------------
# Check input
pos, subset = _checkformat_possubset(pos=pos, subset=subset)
# Check shape of data (multiple time slices possible)
(
lamb, phi, data, mask,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = _checkformat_data_fit12d_dlines(
data, lamb, phi,
nxi=nxi, nxj=nxj, mask=mask, is2d=True,
)
# --------------
# Use valid data only and optionally restrict lamb / phi
indok, domain = apply_domain(lamb, phi, domain=domain)
if mask is not None:
indok &= mask
# Optional positivity constraint
if pos is not False:
if pos is True:
data[data < 0.] = np.nan
else:
data[data < 0.] = pos
# Introduce time-dependence (useful for valid)
indok = indok[None, ...] & (~np.isnan(data))
# Recompute domain
domain['lamb']['minmax'] = [
np.nanmin(lamb[np.any(indok, axis=0)]),
np.nanmax(lamb[np.any(indok, axis=0)])
]
domain['phi']['minmax'] = [
np.nanmin(phi[np.any(indok, axis=0)]),
np.nanmax(phi[np.any(indok, axis=0)])
]
# --------------
    # Optional 2d binning
(
lambbin, phibin, databin, indok, binning,
phi1d, lamb1d, dataphi1d, datalamb1d,
) = binning_2d_data(
lamb, phi, data, indok=indok,
binning=binning, domain=domain,
nbsplines=nbsplines,
phi1d=phi1d, lamb1d=lamb1d,
dataphi1d=dataphi1d, datalamb1d=datalamb1d,
)
# --------------
# Optionally fit only on subset
# randomly pick subset indices (replace=False => no duplicates)
indok = _get_subset_indices(subset, indok)
# --------------
# Optionally extract 1d spectra at lphi
lphi_spectra, lphi_lamb = _extract_lphi_spectra(
data, phi, lamb,
lphi, lphi_tol,
databin=databin,
binning=binning,
)
# --------------
# Return
dprepare = {
'data': databin, 'lamb': lambbin, 'phi': phibin,
'domain': domain, 'binning': binning, 'indok': indok,
'pos': pos, 'subset': subset, 'nxi': nxi, 'nxj': nxj,
'lphi': lphi, 'lphi_tol': lphi_tol,
'lphi_spectra': lphi_spectra, 'lphi_lamb': lphi_lamb,
'phi1d': phi1d, 'dataphi1d': dataphi1d,
'lamb1d': lamb1d, 'datalamb1d': datalamb1d,
}
return dprepare
def multigausfit2d_from_dlines_dbsplines(
knots=None, deg=None, nbsplines=None,
phimin=None, phimax=None,
symmetryaxis=None,
):
# Check / format input
if nbsplines is None:
nbsplines = _NBSPLINES
c0 = [nbsplines is False, isinstance(nbsplines, int)]
if not any(c0):
msg = "nbsplines must be a int (degree of bsplines to be used!)"
raise Exception(msg)
if nbsplines is False:
lk = ['knots', 'knots_mult', 'nknotsperbs', 'ptsx0', 'nbs', 'deg']
return dict.fromkeys(lk, False)
if deg is None:
deg = _DEG
if not (isinstance(deg, int) and deg <= 3):
msg = "deg must be a int <= 3 (the degree of the bsplines to be used!)"
raise Exception(msg)
if symmetryaxis is None:
symmetryaxis = False
if knots is None:
if phimin is None or phimax is None:
msg = "Please provide phimin and phimax if knots is not provided!"
raise Exception(msg)
if symmetryaxis is False:
knots = np.linspace(phimin, phimax, nbsplines + 1 - deg)
else:
phi2max = np.max(
np.abs(np.r_[phimin, phimax][None, :] - symmetryaxis[:, None])
)
knots = np.linspace(0, phi2max, nbsplines + 1 - deg)
if not np.allclose(knots, np.unique(knots)):
msg = "knots must be a vector of unique values!"
raise Exception(msg)
# Get knots for scipy (i.e.: with multiplicity)
if deg > 0:
knots_mult = np.r_[[knots[0]]*deg, knots, [knots[-1]]*deg]
else:
knots_mult = knots
nknotsperbs = 2 + deg
nbs = knots.size - 1 + deg
assert nbs == knots_mult.size - 1 - deg
if deg == 0:
ptsx0 = 0.5*(knots[:-1] + knots[1:])
elif deg == 1:
ptsx0 = knots
elif deg == 2:
num = (knots_mult[3:]*knots_mult[2:-1]
- knots_mult[1:-2]*knots_mult[:-3])
denom = (knots_mult[3:] + knots_mult[2:-1]
- knots_mult[1:-2] - knots_mult[:-3])
ptsx0 = num / denom
else:
# To be derived analytically for more accuracy
ptsx0 = np.r_[
knots[0],
np.mean(knots[:2]),
knots[1:-1],
np.mean(knots[-2:]),
knots[-1],
]
msg = ("degree 3 not fully implemented yet!"
+ "Approximate values for maxima positions")
warnings.warn(msg)
assert ptsx0.size == nbs
dbsplines = {
'knots': knots, 'knots_mult': knots_mult,
'nknotsperbs': nknotsperbs, 'ptsx0': ptsx0,
'nbs': nbs, 'deg': deg,
}
return dbsplines
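# Illustrative usage sketch (arbitrary example extent): 6 quadratic bsplines
# over phi in [-0.1, 0.1].
_dbs_demo = multigausfit2d_from_dlines_dbsplines(
    deg=2, nbsplines=6, phimin=-0.1, phimax=0.1,
)
# _dbs_demo['nbs'] -> 6; _dbs_demo['knots'].size -> 5;
# _dbs_demo['knots_mult'].size -> 9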
###########################################################
###########################################################
#
# dvalid dict (S/N ratio)
#
###########################################################
###########################################################
def _dvalid_checkfocus_errmsg(focus=None, focus_half_width=None,
lines_keys=None):
msg = ("Please provide focus as:\n"
+ "\t- str: the key of an available spectral line:\n"
+ "\t\t{}\n".format(lines_keys)
+ "\t- float: a wavelength value\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ " You provided:\n"
+ "{}\n\n".format(focus)
+ "Please provide focus_half_width as:\n"
+ "\t- float: a unique wavelength value for all focus\n"
+ "\t- a list / tuple / flat np.ndarray of such\n"
+ " You provided:\n"
+ "{}".format(focus_half_width))
return msg
def _dvalid_checkfocus(
focus=None,
focus_half_width=None,
lines_keys=None,
lines_lamb=None,
lamb=None,
):
""" Check the provided focus is properly formatted and convert it
focus specifies the wavelength range of interest in which S/N is evaluated
It can be provided as:
- a spectral line key (or list of such)
- a wavelength (or list of such)
For each wavelength, a spectral range centered on it, is defined using
the provided focus_half_width
The focus_half_width can be a unique value applied to all or a list of
values of the same length as focus.
    focus is then returned as a (n, 2) array where:
each line gives a central wavelength and halfwidth of interest
"""
if focus in [None, False]:
return False
# Check focus and transform to array of floats
lc0 = [
type(focus) in [str] + _LTYPES,
type(focus) in [list, tuple, np.ndarray]
]
if not any(lc0):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
if lc0[0] is True:
focus = [focus]
for ii in range(len(focus)):
if focus[ii] not in lines_keys and type(focus[ii]) not in _LTYPES:
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
focus = np.array([
lines_lamb[(lines_keys == ff).nonzero()[0][0]]
if ff in lines_keys else ff for ff in focus
])
# Check focus_half_width and transform to array of floats
if focus_half_width is None:
focus_half_width = (np.nanmax(lamb) - np.nanmin(lamb))/10.
lc0 = [
type(focus_half_width) in _LTYPES,
(
type(focus_half_width) in [list, tuple, np.ndarray]
and len(focus_half_width) == focus.size
and all([type(fhw) in _LTYPES for fhw in focus_half_width])
)
]
if not any(lc0):
msg = _dvalid_checkfocus_errmsg(
focus, focus_half_width, lines_keys,
)
raise Exception(msg)
if lc0[0] is True:
focus_half_width = np.full((focus.size,), focus_half_width)
return np.array([focus, np.r_[focus_half_width]]).T
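# Illustrative usage sketch (line names and wavelengths invented for the demo):
# a line key plus a half-width is converted into the (n, 2) focus array.
_fkeys_demo = np.array(['w', 'z'])
_flamb_demo = np.array([3.9492e-10, 3.9944e-10])
_dvalid_checkfocus(
    focus='w', focus_half_width=2e-13,
    lines_keys=_fkeys_demo, lines_lamb=_flamb_demo,
)
# -> array([[3.9492e-10, 2.0e-13]])  (one row per focus wavelength)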
def fit12d_dvalid(
data=None, lamb=None, phi=None,
indok=None, binning=None,
valid_nsigma=None, valid_fraction=None,
focus=None, focus_half_width=None,
lines_keys=None, lines_lamb=None, dphimin=None,
nbs=None, deg=None, knots_mult=None, nknotsperbs=None,
return_fract=None,
):
""" Return a dict of valid time steps and phi indices
    data points are considered valid if their signal is sufficient:
np.sqrt(data) >= valid_nsigma
data is supposed to be provided in counts (or photons).. TBC!!!
"""
# Check inputs
if valid_nsigma is None:
valid_nsigma = _VALID_NSIGMA
if valid_fraction is None:
valid_fraction = _VALID_FRACTION
if binning is None:
binning = False
if dphimin is None:
dphimin = 0.
if return_fract is None:
return_fract = False
data2d = data.ndim == 3
nspect = data.shape[0]
focus = _dvalid_checkfocus(
focus,
focus_half_width=focus_half_width,
lines_keys=lines_keys,
lines_lamb=lines_lamb,
lamb=lamb,
)
# Get indices of pts with enough signal
ind = np.zeros(data.shape, dtype=bool)
if indok is None:
isafe = (~np.isnan(data))
isafe[isafe] = data[isafe] >= 0.
# Ok with and w/o binning if data provided as counts / photons
# and binning was done by sum (and not mean)
ind[isafe] = np.sqrt(data[isafe]) > valid_nsigma
else:
ind[indok] = np.sqrt(data[indok]) > valid_nsigma
# Derive indt and optionally dphi and indknots
indbs, dphi = False, False
if focus is not False:
# TBC
lambok = np.rollaxis(
np.array([np.abs(lamb - ff[0]) < ff[1] for ff in focus]),
0,
lamb.ndim+1,
)
indall = ind[..., None] & lambok[None, ...]
if data2d is True:
# Make sure there are at least deg + 2 different phi
deltaphi = np.max(np.diff(knots_mult))
# Code ok with and without binning :-)
if focus is False:
fract = np.full((nspect, nbs), np.nan)
for ii in range(nbs):
iphi = (
(phi >= knots_mult[ii])
& (phi < knots_mult[ii+nknotsperbs-1])
)
fract[:, ii] = (
np.sum(np.sum(ind & iphi[None, ...], axis=-1), axis=-1)
/ np.sum(iphi)
)
indbs = fract > valid_fraction
else:
fract = np.full((nspect, nbs, len(focus)), np.nan)
for ii in range(nbs):
iphi = ((phi >= knots_mult[ii])
& (phi < knots_mult[ii+nknotsperbs-1]))
fract[:, ii, :] = (
np.sum(np.sum(indall & iphi[None, ..., None],
axis=1), axis=1)
/ np.sum(np.sum(iphi[..., None] & lambok,
axis=0), axis=0))
indbs = np.all(fract > valid_fraction, axis=2)
indt = np.any(indbs, axis=1)
dphi = deltaphi*(deg + indbs[:, deg:-deg].sum(axis=1))
else:
# 1d spectra
if focus is False:
fract = ind.sum(axis=-1) / ind.shape[1]
indt = fract > valid_fraction
else:
fract = np.sum(indall, axis=1) / lambok.sum(axis=0)[None, :]
indt = np.all(fract > valid_fraction, axis=1)
# Optional debug
if focus is not False and False:
indt_debug, ifocus = 40, 1
if data2d is True:
indall2 = indall.astype(int)
indall2[:, lambok] = 1
indall2[ind[..., None] & lambok[None, ...]] = 2
plt.figure()
plt.imshow(indall2[indt_debug, :, :, ifocus].T, origin='lower')
else:
plt.figure()
plt.plot(lamb[~indall[indt_debug, :, ifocus]],
data[indt_debug, ~indall[indt_debug, :, ifocus]], '.k',
lamb[indall[indt_debug, :, ifocus]],
data[indt_debug, indall[indt_debug, :, ifocus]], '.r')
plt.axvline(focus[ifocus, 0], ls='--', c='k')
if not np.any(indt):
msg = (
"\nThere is no valid time step with the provided constraints:\n"
+ "\t- valid_nsigma = {}\n".format(valid_nsigma)
+ "\t- valid_fraction = {}\n".format(valid_fraction)
+ "\t- focus = {}\n".format(focus)
+ "\t- fract = {}\n".format(fract)
)
raise Exception(msg)
# return
dvalid = {
'indt': indt, 'dphi': dphi, 'indbs': indbs, 'ind': ind,
'focus': focus, 'valid_fraction': valid_fraction,
'valid_nsigma': valid_nsigma,
}
if return_fract is True:
dvalid['fract'] = fract
return dvalid
###########################################################
###########################################################
#
# dlines dict (lines vs domain)
#
###########################################################
###########################################################
def _checkformat_dlines(dlines=None, domain=None):
if dlines is None:
dlines = False
if not isinstance(dlines, dict):
msg = "Arg dlines must be a dict!"
raise Exception(msg)
lc = [
(k0, type(v0)) for k0, v0 in dlines.items()
if not (
isinstance(k0, str)
and isinstance(v0, dict)
and 'lambda0' in v0.keys()
and (
type(v0['lambda0']) in _LTYPES
or (
isinstance(v0['lambda0'], np.ndarray)
and v0['lambda0'].size == 1
)
)
)
]
if len(lc) > 0:
lc = ["\t- {}: {}".format(*cc) for cc in lc]
msg = (
"Arg dlines must be a dict of the form:\n"
+ "\t{'line0': {'lambda0': float},\n"
+ "\t 'line1': {'lambda0': float},\n"
+ "\t ...\n"
+ "\t 'lineN': {'lambda0': float}}\n"
+ " You provided:\n{}".format('\n'.join(lc))
)
raise Exception(msg)
# Select relevant lines (keys, lamb)
lines_keys = np.array([k0 for k0 in dlines.keys()])
lines_lamb = np.array([float(dlines[k0]['lambda0']) for k0 in lines_keys])
if domain not in [None, False]:
ind = (
(lines_lamb >= domain['lamb']['minmax'][0])
& (lines_lamb <= domain['lamb']['minmax'][1])
)
lines_keys = lines_keys[ind]
lines_lamb = lines_lamb[ind]
inds = np.argsort(lines_lamb)
lines_keys, lines_lamb = lines_keys[inds], lines_lamb[inds]
nlines = lines_lamb.size
dlines = {k0: dict(dlines[k0]) for k0 in lines_keys}
return dlines, lines_keys, lines_lamb
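# Illustrative usage sketch (invented wavelengths): without a domain no line is
# filtered and the keys come back sorted by wavelength.
_dl_demo = {'z': {'lambda0': 3.9944e-10}, 'w': {'lambda0': 3.9492e-10}}
_dl2_demo, _k_demo, _l0_demo = _checkformat_dlines(dlines=_dl_demo)
# _k_demo -> array(['w', 'z']); _l0_demo -> array([3.9492e-10, 3.9944e-10])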
###########################################################
###########################################################
#
# dinput dict (lines + spectral constraints)
#
###########################################################
###########################################################
def fit1d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
data=None, lamb=None, mask=None,
domain=None, pos=None, subset=None,
same_spectrum=None, nspect=None, same_spectrum_dlamb=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit1d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
    - pos: bool or float, positivity constraint (True => data<0 set to nan, float => data<0 set to that value, False => no constraint)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit1d_from_dlines_prepare(
data=data, lamb=lamb,
mask=mask, domain=domain,
pos=pos, subset=subset,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# Check same_spectrum
if same_spectrum is None:
same_spectrum = _SAME_SPECTRUM
if same_spectrum is True:
        if type(nspect) not in [int, np.int_]:
msg = "Please provide nspect if same_spectrum = True"
raise Exception(msg)
if same_spectrum_dlamb is None:
same_spectrum_dlamb = min(
2*np.diff(dprepare['domain']['lamb']['minmax']),
dprepare['domain']['lamb']['minmax'][0],
)
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format double
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
# Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ion', '?') for k0 in lines_keys])
# ------------------------
# same_spectrum
# ------------------------
if same_spectrum is True:
        keysadd = np.array([[kk+'_bis{:04.0f}'.format(ii) for kk in lines_keys]
for ii in range(1, nspect)]).ravel()
lines_lamb = (
same_spectrum_dlamb*np.arange(0, nspect)[:, None]
+ lines_lamb[None, :]
)
        lines_keys = np.r_[lines_keys, keysadd]
for k0 in _DORDER:
# Add other lines to original group
keyk = dinput[k0]['keys']
offset = np.tile(dinput[k0]['offset'], nspect)
if k0 == 'shift':
ind = np.tile(dinput[k0]['ind'], (1, nspect))
coefs = (
dinput[k0]['coefs']
* lines_lamb[0, :] / lines_lamb
).ravel()
else:
coefs = np.tile(dinput[k0]['coefs'], nspect)
keysadd = np.array([
[kk+'_bis{:04.0f}'.format(ii) for kk in keyk]
for ii in range(1, nspect)
]).ravel()
ind = np.zeros((keyk.size*nspect, nlines*nspect))
for ii in range(nspect):
i0, i1 = ii*keyk.size, (ii+1)*keyk.size
j0, j1 = ii*nlines, (ii+1)*nlines
ind[i0:i1, j0:j1] = dinput[k0]['ind']
keyk = np.r_[keyk, keysadd]
dinput[k0]['keys'] = keyk
dinput[k0]['ind'] = ind
dinput[k0]['coefs'] = coefs
dinput[k0]['offset'] = offset
nlines *= nspect
lines_lamb = lines_lamb.ravel()
# update mz, symb, ion
mz = np.tile(mz, nspect)
symb = np.tile(symb, nspect)
ion = np.tile(ion, nspect)
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
dinput['same_spectrum'] = same_spectrum
if same_spectrum is True:
dinput['same_spectrum_nspect'] = nspect
dinput['same_spectrum_dlamb'] = same_spectrum_dlamb
else:
dinput['same_spectrum_nspect'] = False
dinput['same_spectrum_dlamb'] = False
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
indok=dprepare['indok'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit1d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
return dinput
def fit2d_dinput(
dlines=None, dconstraints=None, dconstants=None, dprepare=None,
deg=None, nbsplines=None, knots=None,
data=None, lamb=None, phi=None, mask=None,
domain=None, pos=None, subset=None, binning=None, cent_fraction=None,
focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None,
valid_return_fract=None,
dscales=None, dx0=None, dbounds=None,
nxi=None, nxj=None,
lphi=None, lphi_tol=None,
defconst=_DCONSTRAINTS,
):
""" Check and format a dict of inputs to be fed to fit2d()
This dict will contain all information relevant for solving the fit:
- dlines: dict of lines (with 'lambda0': wavelength at rest)
- lamb: vector of wavelength of the experimental spectrum
- data: experimental spectrum, possibly 2d (time-varying)
- dconstraints: dict of constraints on lines (amp, width, shift)
- pos: bool, consider only positive data (False => replace <0 with nan)
- domain:
- mask:
- subset:
- same_spectrum:
- focus:
"""
# ------------------------
# Check / format dprepare
# ------------------------
if dprepare is None:
dprepare = multigausfit2d_from_dlines_prepare(
data=data, lamb=lamb, phi=phi,
mask=mask, domain=domain,
pos=pos, subset=subset, binning=binning,
nbsplines=nbsplines, deg=deg,
nxi=nxi, nxj=nxj,
lphi=None, lphi_tol=None,
)
# ------------------------
# Check / format dlines
# ------------------------
dlines, lines_keys, lines_lamb = _checkformat_dlines(
dlines=dlines,
domain=dprepare['domain'],
)
nlines = lines_lamb.size
# ------------------------
# Check / format dconstraints
# ------------------------
dconstraints = _checkformat_dconstraints(
dconstraints=dconstraints, defconst=defconst,
)
dinput = {}
# ------------------------
# Check / format symmetry
# ------------------------
_dconstraints_symmetry(
dinput, dprepare=dprepare, symmetry=dconstraints.get('symmetry'),
cent_fraction=cent_fraction, defconst=defconst,
)
# ------------------------
# Check / format double (spectral line doubling)
# ------------------------
_dconstraints_double(dinput, dconstraints, defconst=defconst)
# ------------------------
# Check / format width, shift, amp (groups with possible ratio)
# ------------------------
for k0 in ['amp', 'width', 'shift']:
dinput[k0] = _width_shift_amp(
dconstraints.get(k0, defconst[k0]),
dconstants=dconstants,
keys=lines_keys, nlines=nlines,
dlines=dlines, k0=k0,
)
# ------------------------
# add mz, symb, ION, keys, lamb
# ------------------------
mz = np.array([dlines[k0].get('m', np.nan) for k0 in lines_keys])
symb = np.array([dlines[k0].get('symbol', k0) for k0 in lines_keys])
ion = np.array([dlines[k0].get('ion', '?') for k0 in lines_keys])
# ------------------------
# add lines and properties
# ------------------------
dinput['keys'] = lines_keys
dinput['lines'] = lines_lamb
dinput['nlines'] = nlines
dinput['mz'] = mz
dinput['symb'] = symb
dinput['ion'] = ion
# ------------------------
# Get dict of bsplines
# ------------------------
dinput.update(multigausfit2d_from_dlines_dbsplines(
knots=knots, deg=deg, nbsplines=nbsplines,
phimin=dprepare['domain']['phi']['minmax'][0],
phimax=dprepare['domain']['phi']['minmax'][1],
symmetryaxis=dinput.get('symmetry_axis')
))
# ------------------------
# S/N threshold indices
# ------------------------
dinput['valid'] = fit12d_dvalid(
data=dprepare['data'],
lamb=dprepare['lamb'],
phi=dprepare['phi'],
binning=dprepare['binning'],
indok=dprepare['indok'],
valid_nsigma=valid_nsigma,
valid_fraction=valid_fraction,
focus=focus, focus_half_width=focus_half_width,
lines_keys=lines_keys, lines_lamb=lines_lamb,
nbs=dinput['nbs'],
deg=dinput['deg'],
knots_mult=dinput['knots_mult'],
nknotsperbs=dinput['nknotsperbs'],
return_fract=valid_return_fract,
)
# Update with dprepare
dinput['dprepare'] = dict(dprepare)
# Add dind
dinput['dind'] = multigausfit2d_from_dlines_ind(dinput)
# Add dscales, dx0 and dbounds
dinput['dscales'] = fit12d_dscales(dscales=dscales, dinput=dinput)
dinput['dbounds'] = fit12d_dbounds(dbounds=dbounds, dinput=dinput)
dinput['dx0'] = fit12d_dx0(dx0=dx0, dinput=dinput)
dinput['dconstants'] = fit12d_dconstants(
dconstants=dconstants, dinput=dinput,
)
return dinput
###########################################################
###########################################################
#
# dind dict (indices storing for fast access)
#
###########################################################
###########################################################
def multigausfit1d_from_dlines_ind(dinput=None):
""" Return the indices of quantities in x to compute y """
# indices
# General shape: [bck, amp, widths, shifts]
# If double [..., double_shift, double_ratio]
# Except for bck, all indices should render nlines (2*nlines if double)
dind = {
'bck_amp': {'x': np.r_[0]},
'bck_rate': {'x': np.r_[1]},
'dshift': None,
'dratio': None,
}
nn = dind['bck_amp']['x'].size + dind['bck_rate']['x'].size
inddratio, inddshift = None, None
for k0 in _DORDER:
ind = dinput[k0]['ind']
lnl = np.sum(ind, axis=1).astype(int)
dind[k0] = {
'x': nn + np.arange(0, ind.shape[0]),
'lines': nn + np.argmax(ind, axis=0),
'jac': [
tuple(ind[ii, :].nonzero()[0]) for ii in range(ind.shape[0])
]
}
nn += dind[k0]['x'].size
sizex = dind['shift']['x'][-1] + 1
indx = np.r_[
dind['bck_amp']['x'],
dind['bck_rate']['x'],
dind['amp']['x'],
dind['width']['x'],
dind['shift']['x'],
]
assert np.all(np.arange(0, sizex) == indx)
# check if double
if dinput['double'] is True:
dind['dshift'] = {'x': -2}
dind['dratio'] = {'x': -1}
sizex += 2
elif isinstance(dinput['double'], dict):
if dinput['double'].get('dshift') is None:
dind['dshift'] = {'x': -1}
sizex += 1
elif dinput['double'].get('dratio') is None:
dind['dratio'] = {'x': -1}
sizex += 1
dind['sizex'] = sizex
dind['nbck'] = 2
# dind['shapey1'] = dind['bck']['x'].size + dinput['nlines']
# Ref line for amp (for dscales)
amp_x0 = np.zeros((dinput['amp']['ind'].shape[0],), dtype=int)
for ii in range(dinput['amp']['ind'].shape[0]):
indi = dinput['amp']['ind'][ii, :].nonzero()[0]
amp_x0[ii] = indi[np.argmin(np.abs(dinput['amp']['coefs'][indi]-1.))]
dind['amp_x0'] = amp_x0
return dind
def multigausfit2d_from_dlines_ind(dinput=None):
""" Return the indices of quantities in x to compute y """
# indices
# General shape: [bck, amp, widths, shifts]
# If double [..., double_shift, double_ratio]
# Except for bck, all indices should render nlines (2*nlines if double)
nbs = dinput['nbs']
dind = {
'bck_amp': {'x': np.r_[0]},
'bck_rate': {'x': np.r_[1]},
'dshift': None,
'dratio': None,
}
nn = dind['bck_amp']['x'].size + dind['bck_rate']['x'].size
inddratio, inddshift = None, None
for k0 in _DORDER:
# l0bs0, l0bs1, ..., l0bsN, l1bs0, ...., lnbsN
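# e.g. with 2 groups (l0, l1) and nbs = 3, the corresponding slots in the
# x vector are, in order: l0bs0, l0bs1, l0bs2, l1bs0, l1bs1, l1bs2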
ind = dinput[k0]['ind']
lnl = np.sum(ind, axis=1).astype(int)
dind[k0] = {
'x': (
nn
+ nbs*np.arange(0, ind.shape[0])[None, :]
+ np.arange(0, nbs)[:, None]
),
'lines': (
nn
+ nbs*np.argmax(ind, axis=0)[None, :]
+ np.arange(0, nbs)[:, None]
),
# TBF / TBC !!!
'jac': [ind[ii, :].nonzero()[0] for ii in range(ind.shape[0])],
}
nn += dind[k0]['x'].size
sizex = dind['shift']['x'][-1, -1] + 1
indx = np.r_[
dind['bck_amp']['x'],
dind['bck_rate']['x'],
dind['amp']['x'].T.ravel(),
dind['width']['x'].T.ravel(),
dind['shift']['x'].T.ravel(),
]
assert np.allclose(np.arange(0, sizex), indx)
# check if double
if dinput['double'] is True:
dind['dshift'] = {'x': -2}
dind['dratio'] = {'x': -1}
sizex += 2
elif isinstance(dinput['double'], dict):
if dinput['double'].get('dshift') is None:
dind['dshift'] = {'x': -1}
sizex += 1
elif dinput['double'].get('dratio') is None:
dind['dratio'] = {'x': -1}
sizex += 1
dind['sizex'] = sizex
dind['nbck'] = 2
# Ref line for amp (for x0)
# TBC !!!
amp_x0 = np.zeros((dinput['amp']['ind'].shape[0],), dtype=int)
for ii in range(dinput['amp']['ind'].shape[0]):
indi = dinput['amp']['ind'][ii, :].nonzero()[0]
amp_x0[ii] = indi[np.argmin(np.abs(dinput['amp']['coefs'][indi]-1.))]
dind['amp_x0'] = amp_x0
# Make bsplines selections easy
# if dinput['valid']['dphi'] is not False:
# dind['bs']['x'] =
# import pdb; pdb.set_trace() # DB
# pass
return dind
###########################################################
###########################################################
#
# Common checks and format for scales, x0, bounds
#
###########################################################
###########################################################
def _fit12d_checkformat_dscalesx0(
din=None, dinput=None,
name=None, is2d=False,
):
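""" Check that a user-provided dict (dscales / dx0 / dbounds / dconstants)
only contains known keys ('dratio', 'dshift', 'bck_amp', 'bck_rate',
'amp', 'width', 'shift') with float / array / sub-dict values,
and return a shallow copy of it
"""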
lkconst = ['dratio', 'dshift']
lk = ['bck_amp', 'bck_rate']
lkdict = _DORDER
if din is None:
din = {}
lkfalse = [
k0 for k0, v0 in din.items()
if not (
isinstance(din, dict)
and (
(k0 in lkconst and type(v0) in _LTYPES)
or (k0 in lk and type(v0) in _LTYPES + [np.ndarray])
or (
k0 in lkdict
and type(v0) in _LTYPES + [np.ndarray]
or (
isinstance(v0, dict)
and all([
k1 in dinput[k0]['keys']
and type(v1) in _LTYPES + [np.ndarray]
for k1, v1 in v0.items()
])
)
)
)
)
]
if len(lkfalse) > 0:
msg = (
"Arg {} must be a dict of the form:\n".format(name)
+ "\t- {}\n".format({
kk: 'float' if kk in lkconst+lk
else {k1: 'float' for k1 in dinput[kk]['keys']}
for kk in lkfalse
})
+ "\t- provided: {}".format({
kk: din[kk] for kk in lkfalse
})
)
raise Exception(msg)
return {
k0: dict(v0) if isinstance(v0, dict) else v0
for k0, v0 in din.items()
}
def _fit12d_filldef_dscalesx0_dict(
din=None, din_name=None,
key=None, vref=None,
nspect=None, dinput=None,
):
# Check vref
if vref is not None:
if type(vref) not in _LTYPES and len(vref) not in [1, nspect]:
msg = (
"Non-conform vref for "
+ "{}['{}']\n".format(din_name, key)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(vref)
)
raise Exception(msg)
if type(vref) in _LTYPES:
vref = np.full((nspect,), vref)
elif len(vref) == 1:
vref = np.full((nspect,), vref[0])
# check din[key]
if din.get(key) is None:
assert vref is not None
din[key] = {k0: vref for k0 in dinput[key]['keys']}
elif not isinstance(din[key], dict):
assert type(din[key]) in _LTYPES + [np.ndarray]
if hasattr(din[key], '__len__') and len(din[key]) == 1:
din[key] = din[key][0]
if type(din[key]) in _LTYPES:
din[key] = {
k0: np.full((nspect,), din[key])
for k0 in dinput[key]['keys']
}
elif din[key].shape == (nspect,):
din[key] = {k0: din[key] for k0 in dinput[key]['keys']}
else:
msg = (
"{}['{}'] not conform!".format(dd_name, key)
)
raise Exception(msg)
else:
for k0 in dinput[key]['keys']:
if din[key].get(k0) is None:
din[key][k0] = vref
elif type(din[key][k0]) in _LTYPES:
din[key][k0] = np.full((nspect,), din[key][k0])
elif len(din[key][k0]) == 1:
din[key][k0] = np.full((nspect,), din[key][k0][0])
elif din[key][k0].shape != (nspect,):
msg = (
"Non-conform value for "
+ "{}['{}']['{}']\n".format(din_name, key, k0)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(din[key][k0])
)
raise Exception(msg)
return din
def _fit12d_filldef_dscalesx0_float(
din=None, din_name=None,
key=None, vref=None,
nspect=None,
):
if din.get(key) is None:
if type(vref) in _LTYPES:
din[key] = np.full((nspect,), vref)
elif np.array(vref).shape == (1,):
din[key] = np.full((nspect,), vref[0])
elif np.array(vref).shape == (nspect,):
din[key] = np.array(vref)
else:
msg = (
"Non-conform vref for {}['{}']\n".format(din_name, key)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(vref)
)
raise Exception(msg)
else:
if type(din[key]) in _LTYPES:
din[key] = np.full((nspect,), din[key])
elif din[key].shape == (1,):
din[key] = np.full((nspect,), din[key][0])
elif din[key].shape != (nspect,):
msg = (
"Non-conform vref for {}['{}']\n".format(din_name, key)
+ "\t- expected: float or array (size {})\n".format(nspect)
+ "\t- provided: {}".format(din[key])
)
raise Exception(msg)
return din
###########################################################
###########################################################
#
# scales (for variables scaling)
#
###########################################################
###########################################################
def fit12d_dscales(dscales=None, dinput=None):
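""" Return the dict of scaling factors, one value per spectrum and per
unknown (bck_amp, bck_rate, amp, width, shift, dratio / dshift if double,
plus 'bs' bspline scales for 2d fits), used to normalize the unknowns
to ~unity; missing entries are estimated from dinput['dprepare'] """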
# --------------
# Input checks
dscales = _fit12d_checkformat_dscalesx0(
din=dscales, dinput=dinput, name='dscales',
)
data = dinput['dprepare']['data']
lamb = dinput['dprepare']['lamb']
nspect = data.shape[0]
# --------------
# 2d spectrum = 1d spectrum + vert. profile
is2d = data.ndim == 3
if is2d is True:
data = dinput['dprepare']['datalamb1d']
datavert = dinput['dprepare']['dataphi1d']
lamb = dinput['dprepare']['lamb1d']
phi = dinput['dprepare']['phi1d']
indok = np.any(dinput['dprepare']['indok'], axis=1)
# bsplines modulation of bck and amp, if relevant
# fit bsplines on datavert (vertical profile)
# to modulate scales (bck and amp)
dscales['bs'] = np.full((nspect, dinput['nbs']), np.nan)
if dinput['symmetry'] is True:
for ii in dinput['valid']['indt'].nonzero()[0]:
indnonan = (
(~np.isnan(datavert[ii, :]))
& (
np.abs(phi-dinput['symmetry_axis'][ii])
< dinput['knots'][-1]
)
).nonzero()[0]
indnonan = indnonan[
np.unique(
np.abs(phi[indnonan]-dinput['symmetry_axis'][ii]),
return_index=True,
)[1]
]
bs = scpinterp.LSQUnivariateSpline(
np.abs(phi[indnonan]-dinput['symmetry_axis'][ii]),
datavert[ii, indnonan],
dinput['knots'][1:-1],
k=dinput['deg'],
bbox=dinput['knots'][np.r_[0, -1]],
ext=0,
)
dscales['bs'][ii, :] = bs.get_coeffs()
else:
for ii in dinput['valid']['indt'].nonzero()[0]:
indnonan = (
(~np.isnan(datavert[ii, :]))
& (dinput['knots'][0] <= phi)
& (phi <= dinput['knots'][-1])
)
try:
bs = scpinterp.LSQUnivariateSpline(
phi[indnonan],
datavert[ii, indnonan],
dinput['knots'][1:-1],
k=dinput['deg'],
bbox=dinput['knots'][np.r_[0, -1]],
ext=0,
)
except Exception as err:
msg = "Could not fit vertical-profile bsplines (spectrum {}): {}".format(ii, err)
raise Exception(msg) from err
dscales['bs'][ii, :] = bs.get_coeffs()
# Normalize to avoid double-amplification when amp*bs
corr = np.max(dscales['bs'][dinput['valid']['indt'], :], axis=1)
dscales['bs'][dinput['valid']['indt'], :] /= corr[:, None]
else:
indok = dinput['dprepare']['indok']
# --------------
# Default values for filling missing fields
Dlamb = np.diff(dinput['dprepare']['domain']['lamb']['minmax'])
lambm = dinput['dprepare']['domain']['lamb']['minmax'][0]
if not (np.isfinite(Dlamb)[0] and Dlamb > 0):
msg = (
"lamb min, max seems to be non-finite or non-positive!\n"
+ "\t- dinput['dprepare']['domain']['lamb']['minmax'] = {}".format(
dinput['dprepare']['domain']['lamb']['minmax']
)
)
raise Exception(msg)
if lambm == 0:
lambm = Dlamb / 100.
# bck_amp
bck_amp = dscales.get('bck_amp')
bck_rate = dscales.get('bck_rate')
if bck_amp is None or bck_rate is None:
indbck = (data > np.nanmean(data, axis=1)[:, None]) | (~indok)
bcky = np.array(np.ma.masked_where(indbck, data).mean(axis=1))
bckstd = np.array(np.ma.masked_where(indbck, data).std(axis=1))
# bck_rate
if bck_rate is None:
bck_rate = (
np.log((bcky+bckstd)/bcky) / (lamb.max()-lamb.min())
)
if bck_amp is None:
# Assuming bck = A*exp(rate*(lamb-lamb.min()))
bck_amp = bcky
dscales = _fit12d_filldef_dscalesx0_float(
din=dscales, din_name='dscales', key='bck_amp',
vref=bck_amp, nspect=nspect,
)
dscales = _fit12d_filldef_dscalesx0_float(
din=dscales, din_name='dscales', key='bck_rate',
vref=bck_rate, nspect=nspect,
)
# amp
dscales['amp'] = dscales.get('amp', dict.fromkeys(dinput['amp']['keys']))
for ii, ij in enumerate(dinput['dind']['amp_x0']):
key = dinput['amp']['keys'][ii]
if dscales['amp'].get(key) is None:
conv = np.exp(-(lamb-dinput['lines'][ij])**2/(2*(Dlamb/20.)**2))
# indi = (
# indok
# & (np.abs(lamb-dinput['lines'][ij]) < Dlamb/20.)[None, :]
# )
# dscales['amp'][key] = np.array(np.ma.masked_where(
# ~indbck, data,
# ).mean(axis=1))
dscales['amp'][key] = np.nansum(data*conv, axis=1) / np.sum(conv)
else:
if type(dscales['amp'][key]) in _LTYPES:
dscales['amp'][key] = np.full((nspect,), dscales['amp'][key])
else:
assert dscales['amp'][key].shape == (nspect,)
# width
if dinput.get('same_spectrum') is True:
lambm2 = (
lambm
+ dinput['same_spectrum_dlamb']
* np.arange(0, dinput['same_spectrum_nspect'])
)
nw0 = int(dinput['width']['keys'].size / dinput['same_spectrum_nspect'])
lambmw = np.repeat(lambm2, nw0)
widthref = (Dlamb/(20*lambmw))**2
else:
widthref = (Dlamb/(20*lambm))**2
dscales = _fit12d_filldef_dscalesx0_dict(
din=dscales, din_name='dscales', key='width', vref=widthref,
nspect=nspect, dinput=dinput,
)
# shift
shiftref = Dlamb/(25*lambm)
dscales = _fit12d_filldef_dscalesx0_dict(
din=dscales, din_name='dscales', key='shift', vref=shiftref,
nspect=nspect, dinput=dinput,
)
# Double
if dinput['double'] is not False:
dratio = 1.
dshift = float(Dlamb/(40*lambm))
if dinput['double'] is True:
pass
else:
if dinput['double'].get('dratio') is not None:
dratio = dinput['double']['dratio']
if dinput['double'].get('dshift') is not None:
dshift = dinput['double']['dshift']
din = {'dratio': dratio, 'dshift': dshift}
for k0 in din.keys():
dscales = _fit12d_filldef_dscalesx0_float(
din=dscales, din_name='dscales', key=k0,
vref=din[k0], nspect=nspect,
)
return dscales
###########################################################
###########################################################
#
# x0 (initial guess)
#
###########################################################
###########################################################
def fit12d_dx0(dx0=None, dinput=None):
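""" Return the dict of initial guesses (in scaled units), filled with
defaults from _DX0 where not provided, and checked against
dinput['dbounds'] """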
# --------------
# Input checks
dx0 = _fit12d_checkformat_dscalesx0(
din=dx0, dinput=dinput, name='dx0',
is2d=dinput['dprepare']['data'].ndim == 3,
)
nspect = dinput['dprepare']['data'].shape[0]
# --------------
# 2d spectrum = 1d spectrum + vert. profile
data2d = dinput['dprepare']['data'].ndim == 3
if data2d is True:
dx0 = _fit12d_filldef_dscalesx0_float(
din=dx0, din_name='dx0', key='bs',
vref=_DX0['bs'], nspect=nspect,
)
# --------------
# Default values for filling missing fields
# bck
dx0 = _fit12d_filldef_dscalesx0_float(
din=dx0, din_name='dx0', key='bck_amp',
vref=_DX0['bck_amp'], nspect=nspect,
)
dx0 = _fit12d_filldef_dscalesx0_float(
din=dx0, din_name='dx0', key='bck_rate',
vref=_DX0['bck_rate'], nspect=nspect,
)
# amp, width, shift
for k0 in _DORDER:
dx0 = _fit12d_filldef_dscalesx0_dict(
din=dx0, din_name='dx0', key=k0, vref=_DX0[k0],
nspect=nspect, dinput=dinput,
)
# Double
if dinput['double'] is not False:
dratio = _DX0['dratio']
dshift = _DX0['dshift']
if dinput['double'] is True:
pass
else:
if dinput['double'].get('dratio') is not None:
dratio = dinput['double']['dratio']
if dinput['double'].get('dshift') is not None:
dshift = dinput['double']['dshift']
din = {'dratio': dratio, 'dshift': dshift}
for k0 in din.keys():
dx0 = _fit12d_filldef_dscalesx0_float(
din=dx0, din_name='dx0', key=k0,
vref=din[k0], nspect=nspect,
)
# -------------
# check
lmsg = []
for k0, v0 in dx0.items():
if isinstance(dx0[k0], np.ndarray):
c0 = (
np.any(dx0[k0] < dinput['dbounds']['min'][k0])
or np.any(dx0[k0] > dinput['dbounds']['max'][k0])
)
if c0:
lmsg.append("dx0['{}'] = {} (bounds = ({}, {}))".format(
k0, dx0[k0],
dinput['dbounds']['min'][k0],
dinput['dbounds']['max'][k0],
))
elif isinstance(dx0[k0], dict):
for k1, v1 in dx0[k0].items():
c0 = (
np.any(dx0[k0][k1] < dinput['dbounds']['min'][k0][k1])
or np.any(dx0[k0][k1] > dinput['dbounds']['max'][k0][k1])
)
if c0:
lmsg.append(
"dx0['{}']['{}'] = {} (bounds = ({}, {}))".format(
k0, k1, dx0[k0][k1],
dinput['dbounds']['min'][k0][k1],
dinput['dbounds']['max'][k0][k1],
)
)
if len(lmsg) > 0:
msg = (
"The following values for dx0 are out of bounds:\n"
+ "\n".join(["\t- {}".format(mm) for mm in lmsg])
)
raise Exception(msg)
return dx0
###########################################################
###########################################################
#
# bounds
#
###########################################################
###########################################################
def fit12d_dbounds(dbounds=None, dinput=None):
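""" Return the dict of lower / upper bounds ({'min': ..., 'max': ...},
in scaled units), filled with defaults from _DBOUNDS where not provided """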
# --------------
# Input checks
if dbounds is None:
dbounds = {'min': {}, 'max': {}}
c0 = (
isinstance(dbounds, dict)
and all([
kk in ['min', 'max'] and isinstance(vv, dict)
for kk, vv in dbounds.items()
])
)
if not c0:
msg = (
"Arg dbounds must be a dict of te form:\n"
+ "\t{'min': {...}, 'max': {}}"
)
raise Exception(msg)
dbounds['min'] = _fit12d_checkformat_dscalesx0(
din=dbounds['min'], dinput=dinput, name="dbounds['min']",
)
dbounds['max'] = _fit12d_checkformat_dscalesx0(
din=dbounds['max'], dinput=dinput, name="dbounds['max']",
)
nspect = dinput['dprepare']['data'].shape[0]
# --------------
# 2d spectrum = 1d spectrum + vert. profile
data2d = dinput['dprepare']['data'].ndim == 3
if data2d is True:
dbounds['min'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['min'], din_name="dbounds['min']",
key='bs', vref=_DBOUNDS['bs'][0], nspect=nspect,
)
dbounds['max'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['max'], din_name="dbounds['max']",
key='bs', vref=_DBOUNDS['bs'][1], nspect=nspect,
)
# --------------
# Default values for filling missing fields
# bck
dbounds['min'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['min'], din_name="dbounds['min']",
key='bck_amp', vref=_DBOUNDS['bck_amp'][0], nspect=nspect,
)
dbounds['max'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['max'], din_name="dbounds['max']",
key='bck_amp', vref=_DBOUNDS['bck_amp'][1], nspect=nspect,
)
dbounds['min'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['min'], din_name="dbounds['min']",
key='bck_rate', vref=_DBOUNDS['bck_rate'][0], nspect=nspect,
)
dbounds['max'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['max'], din_name="dbounds['max']",
key='bck_rate', vref=_DBOUNDS['bck_rate'][1], nspect=nspect,
)
for k0 in _DORDER:
dbounds['min'] = _fit12d_filldef_dscalesx0_dict(
din=dbounds['min'], din_name="dbounds['min']",
key=k0, vref=_DBOUNDS[k0][0], nspect=nspect,
dinput=dinput,
)
dbounds['max'] = _fit12d_filldef_dscalesx0_dict(
din=dbounds['max'], din_name="dbounds['max']",
key=k0, vref=_DBOUNDS[k0][1], nspect=nspect,
dinput=dinput,
)
# Double
if dinput['double'] is not False:
for k0 in ['dratio', 'dshift']:
dbounds['min'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['min'], din_name="dbounds['min']",
key=k0, vref=_DBOUNDS[k0][0], nspect=nspect,
)
dbounds['max'] = _fit12d_filldef_dscalesx0_float(
din=dbounds['max'], din_name="dbounds['max']",
key=k0, vref=_DBOUNDS[k0][1], nspect=nspect,
)
return dbounds
###########################################################
###########################################################
#
# constants
#
###########################################################
###########################################################
def fit12d_dconstants(dconstants=None, dinput=None):
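""" Return the dict of user-imposed constant values
(np.nan <=> the corresponding parameter is left free and fitted) """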
# --------------
# Input checks
dconstants = _fit12d_checkformat_dscalesx0(
din=dconstants, dinput=dinput, name="dconstants",
)
nspect = dinput['dprepare']['data'].shape[0]
# --------------
# 2d spectrum = 1d spectrum + vert. profile
data2d = dinput['dprepare']['data'].ndim == 3
# --------------
# Default values for filling missing fields
# bck
dconstants = _fit12d_filldef_dscalesx0_float(
din=dconstants, din_name="dconstants",
key='bck_amp', vref=np.nan, nspect=nspect,
)
dconstants = _fit12d_filldef_dscalesx0_float(
din=dconstants, din_name="dconstants",
key='bck_rate', vref=np.nan, nspect=nspect,
)
for k0 in _DORDER:
dconstants = _fit12d_filldef_dscalesx0_dict(
din=dconstants, din_name="dconstants",
key=k0, vref=np.nan, nspect=nspect,
dinput=dinput,
)
# Double
if dinput['double'] is not False:
for k0 in ['dratio', 'dshift']:
dconstants = _fit12d_filldef_dscalesx0_float(
din=dconstants, din_name="dconstants",
key=k0, vref=np.nan, nspect=nspect,
)
return dconstants
###########################################################
###########################################################
#
# dict to vector (scales, x0, bounds)
#
###########################################################
###########################################################
def _dict2vector_dscalesx0bounds(
dd=None, dd_name=None,
dinput=None,
):
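""" Flatten a dscales / dx0 / dbounds / dconstants dict into a
(nspect, sizex) array ordered like the unknowns vector x,
using the indices stored in dinput['dind'] """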
nspect = dinput['dprepare']['data'].shape[0]
x = np.full((nspect, dinput['dind']['sizex']), np.nan)
x[:, dinput['dind']['bck_amp']['x'][0]] = dd['bck_amp']
x[:, dinput['dind']['bck_rate']['x'][0]] = dd['bck_rate']
for k0 in _DORDER:
for ii, k1 in enumerate(dinput[k0]['keys']):
x[:, dinput['dind'][k0]['x'][ii]] = dd[k0][k1]
if dinput['double'] is not False:
if dinput['double'] is True:
x[:, dinput['dind']['dratio']['x']] = dd['dratio']
x[:, dinput['dind']['dshift']['x']] = dd['dshift']
else:
if dinput['double'].get('dratio') is None:
x[:, dinput['dind']['dratio']['x']] = dd['dratio']
if dinput['double'].get('dshift') is None:
x[:, dinput['dind']['dshift']['x']] = dd['dshift']
return x
###########################################################
###########################################################
#
# Load dinput
#
###########################################################
###########################################################
def _rebuild_dict(dd):
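""" Recursively convert 0-d numpy arrays (as returned by np.load on a
saved dict) back into native python objects """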
for k0, v0 in dd.items():
if isinstance(v0, np.ndarray) and v0.shape == ():
dd[k0] = v0.tolist()
if isinstance(dd[k0], dict):
_rebuild_dict(dd[k0])
def _checkformat_dinput(dinput, allow_pickle=True):
if isinstance(dinput, str):
if not (os.path.isfile(dinput) and dinput[-4:] == '.npz'):
msg = ("Arg dinput must be aither a dict or "
+ "the absolute path to a .npz\n"
+ " You provided: {}".format(dinput))
raise Exception(msg)
dinput = dict(np.load(dinput, allow_pickle=allow_pickle))
if not isinstance(dinput, dict):
msg = (
"dinput must be a dict!\n"
+ " You provided: {}".format(type(dinput))
)
raise Exception(msg)
_rebuild_dict(dinput)
return dinput
###########################################################
###########################################################
#
# Main fitting sub-routines
#
###########################################################
###########################################################
def _checkformat_options(chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev, verbose):
if chain is None:
chain = _CHAIN
if method is None:
method = _METHOD
assert method in ['trf', 'dogbox'], method
if tr_solver is None:
tr_solver = None
if tr_options is None:
tr_options = {}
if xtol is None:
xtol = _TOL1D['x']
if ftol is None:
ftol = _TOL1D['f']
if gtol is None:
gtol = _TOL1D['g']
if loss is None:
loss = _LOSS
if max_nfev is None:
max_nfev = None
if verbose is None:
verbose = 1
if verbose == 3:
verbscp = 2
else:
verbscp = 0
return (chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev, verbose, verbscp)
def multigausfit1d_from_dlines(
dinput=None,
method=None, tr_solver=None, tr_options=None,
xtol=None, ftol=None, gtol=None,
max_nfev=None, chain=None, verbose=None,
loss=None, jac=None,
):
""" Solve multi_gaussian fit in 1d from dlines
If double is True, all lines are double with common shift and ratio
Unknowns are:
x = [bck_amp, bck_rate, c0, ..., cN, w0, ..., wM, v0, ..., vK]
(see multigausfit1d_from_dlines_ind for the exact index layout)
- bck : constant background
- wi : spectral width of a group of lines (ion): wi^2 = 2kTi / m*c**2
This way, it is dimensionless
- vni : normalised velocity of the ion: vni = vi / c
- cij : normalised coef (intensity) of line: cij = Aij
Scaling is done so each quantity is close to unity:
- bck: np.mean(data[data < mean(data)/2])
- wi : Dlamb / 20
- vni: 10 km/s
- cij: np.mean(data)
"""
# ---------------------------
# Check format options
(
chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev,
verbose, verbscp,
) = _checkformat_options(
chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev, verbose,
)
# ---------------------------
# Load dinput if necessary
dinput = _checkformat_dinput(dinput)
dprepare, dind = dinput['dprepare'], dinput['dind']
nspect = dprepare['data'].shape[0]
# ---------------------------
# If same spectrum => consider a single data set
if dinput['same_spectrum'] is True:
lamb = (
dinput['same_spectrum_dlamb']*np.arange(0, nspect)[:, None]
+ dprepare['lamb'][None, :]
).ravel()
datacost = dprepare['data'].ravel()[None, :]
nspect = datacost.shape[0]
chain = False
else:
lamb = dprepare['lamb']
datacost = dprepare['data']
# ---------------------------
# Get scaling, x0, bounds from dict
scales = _dict2vector_dscalesx0bounds(
dd=dinput['dscales'], dd_name='dscales', dinput=dinput,
)
x0 = _dict2vector_dscalesx0bounds(
dd=dinput['dx0'], dd_name='dx0', dinput=dinput,
)
boundmin = _dict2vector_dscalesx0bounds(
dd=dinput['dbounds']['min'], dd_name="dbounds['min']", dinput=dinput,
)
boundmax = _dict2vector_dscalesx0bounds(
dd=dinput['dbounds']['max'], dd_name="dbounds['max']", dinput=dinput,
)
bounds = np.array([boundmin[0, :], boundmax[0, :]])
# ---------------------------
# Separate free from constant parameters
const = _dict2vector_dscalesx0bounds(
dd=dinput['dconstants'], dd_name='dconstants', dinput=dinput,
)
indx = np.any(np.isnan(const), axis=0)
const = const[:, ~indx]
x0[:, ~indx] = const / scales[:, ~indx]
# ---------------------------
# Get function, cost function and jacobian
(
func_detail, func_cost, func_jac,
) = _funccostjac.multigausfit1d_from_dlines_funccostjac(
lamb, dinput=dinput, dind=dind, jac=jac, indx=indx,
)
# ---------------------------
# Prepare output
sol_x = np.full((nspect, dind['sizex']), np.nan)
success = np.full((nspect,), np.nan)
time = np.full((nspect,), np.nan)
cost = np.full((nspect,), np.nan)
nfev = np.full((nspect,), np.nan)
validity = np.zeros((nspect,), dtype=int)
message = ['' for ss in range(nspect)]
errmsg = ['' for ss in range(nspect)]
# Prepare msg
if verbose in [1, 2]:
col = np.char.array([
'spect', 'time (s)', 'cost', 'nfev', 'njev', 'msg',
])
maxl = max(np.max(np.char.str_len(col)), 10)
msg = '\n'.join([' '.join([cc.ljust(maxl) for cc in col]),
' '.join(['-'*maxl]*6)])
print(msg)
# ---------------------------
# Main loop
end = '\r'
t0 = dtm.datetime.now() # DB
for ii in range(nspect):
if verbose == 3:
msg = "\nspect {} / {}".format(ii+1, nspect)
print(msg)
try:
dti = None
t0i = dtm.datetime.now() # DB
if not dinput['valid']['indt'][ii]:
continue
# optimization
res = scpopt.least_squares(
func_cost, x0[ii, indx],
jac=func_jac, bounds=bounds[:, indx],
method=method, ftol=ftol, xtol=xtol,
gtol=gtol, x_scale=1.0, f_scale=1.0,
loss=loss, diff_step=None,
tr_solver=tr_solver, tr_options=tr_options,
jac_sparsity=None, max_nfev=max_nfev,
verbose=verbscp, args=(),
kwargs={
'data': datacost[ii, :],
'scales': scales[ii, :],
'const': const[ii, :],
'indok': dprepare['indok'][ii, :],
},
)
dti = (dtm.datetime.now() - t0i).total_seconds()
if chain is True and ii < nspect-1:
x0[ii+1, indx] = res.x
# cost, message, time
success[ii] = res.success
cost[ii] = res.cost
nfev[ii] = res.nfev
message[ii] = res.message
time[ii] = round(
(dtm.datetime.now()-t0i).total_seconds(),
ndigits=3,
)
sol_x[ii, indx] = res.x
sol_x[ii, ~indx] = const[ii, :] / scales[ii, ~indx]
except Exception as err:
errmsg[ii] = str(err)
validity[ii] = -1
# Verbose
if verbose in [1, 2]:
if validity[ii] == 0:
col = np.char.array([
'{} / {}'.format(ii+1, nspect),
'{}'.format(dti),
'{:5.3e}'.format(res.cost),
str(res.nfev),
str(res.njev),
res.message,
])
else:
col = np.char.array([
'{} / {}'.format(ii+1, nspect),
'{}'.format(dti),
' - ', ' - ', ' - ',
errmsg[ii],
])
msg = ' '.join([cc.ljust(maxl) for cc in col])
if verbose == 1:
if ii == nspect-1:
end = '\n'
print(msg, end=end, flush=True)
else:
print(msg, end='\n')
# ---------------------------
# Reshape in case of same_spectrum
if dinput['same_spectrum'] is True:
nspect0 = dinput['same_spectrum_nspect']
def reshape_custom(aa, nspect0=nspect0):
return aa.reshape((nspect0, int(aa.size/nspect0)))
nlamb = int(lamb.size / nspect0)
nlines = int(dinput['nlines'] / nspect0)
lamb = lamb[:nlamb]
nxbis = int(
dind['bck_amp']['x'].size
+ dind['bck_rate']['x'].size
+ (dind['amp']['x'].size + dind['width']['x'].size)/nspect0
+ dind['shift']['x'].size
)
if dinput['double'] is not False:
if dinput['double'] is True:
nxbis += 2
else:
nxbis += (
(dinput['double'].get('dratio') is None)
+ (dinput['double'].get('dshift') is None)
)
nba = dind['bck_amp']['x'].size
nbr = dind['bck_rate']['x'].size
nb = nba+nbr
na = int(dind['amp']['x'].size/nspect0)
nw = int(dind['width']['x'].size/nspect0)
ns = dind['shift']['x'].size
x2 = np.full((nspect0, nxbis), np.nan)
x2[:, :nba] = sol_x[0, dind['bck_amp']['x']][None, :]
x2[:, nba:nb] = sol_x[0, dind['bck_rate']['x']][None, :]
x2[:, nb:nb+na] = reshape_custom(sol_x[0, dind['amp']['x']])
x2[:, nb+na:nb+na+nw] = reshape_custom(sol_x[0, dind['width']['x']])
x2[:, nb+na+nw:nb+na+nw+ns] = sol_x[:, dind['shift']['x']]
if dinput['double'] is True:
x2[:, dind['dratio']['x']] = sol_x[:, dind['dratio']['x']]
x2[:, dind['dshift']['x']] = sol_x[:, dind['dshift']['x']]
sol_x = x2
# Isolate dratio and dshift
dratio, dshift = None, None
if dinput['double'] is not False:
if dinput['double'] is True:
dratio = (
sol_x[:, dind['dratio']['x']] * scales[:, dind['dratio']['x']]
)
dshift = (
sol_x[:, dind['dshift']['x']] * scales[:, dind['dshift']['x']]
)
else:
if dinput['double'].get('dratio') is None:
dratio = (
sol_x[:, dind['dratio']['x']]
* scales[:, dind['dratio']['x']]
)
else:
dratio = np.full((nspect,), dinput['double']['dratio'])
if dinput['double'].get('dshift') is None:
dshift = (
sol_x[:, dind['dshift']['x']]
* scales[:, dind['dshift']['x']]
)
else:
dshift = np.full((nspect,), dinput['double']['dshift'])
if verbose > 0:
dt = (dtm.datetime.now()-t0).total_seconds()
msg = (
"Total computation time:"
+ "\t{} s for {} spectra ({} s per spectrum)".format(
round(dt, ndigits=3),
nspect,
round(dt/nspect, ndigits=3),
)
)
print(msg)
# ---------------------------
# Format output as dict
dfit = {
'dinput': dinput,
'scales': scales, 'x0': x0, 'bounds': bounds,
'jac': jac, 'sol_x': sol_x,
'dratio': dratio, 'dshift': dshift,
'indx': indx,
'time': time, 'success': success,
'validity': validity, 'errmsg': np.array(errmsg),
'cost': cost, 'nfev': nfev, 'msg': np.array(message),
}
return dfit
def multigausfit2d_from_dlines(
dinput=None,
method=None, tr_solver=None, tr_options=None,
xtol=None, ftol=None, gtol=None,
max_nfev=None, chain=None, verbose=None,
loss=None, jac=None,
):
""" Solve multi_gaussian fit in 1d from dlines
If double is True, all lines are double with common shift and ratio
Unknowns are:
x = [bck_amp, bck_rate, c0, ..., cN, w0, ..., wM, v0, ..., vK]
(see multigausfit2d_from_dlines_ind for the exact index layout)
- bck : constant background
- wi : spectral width of a group of lines (ion): wi^2 = 2kTi / m*c**2
This way, it is dimensionless
- vni : normalised velocity of the ion: vni = vi / c
- cij : normalised coef (intensity) of line: cij = Aij
Scaling is done so each quantity is close to unity:
- bck: np.mean(data[data < mean(data)/2])
- wi : Dlamb / 20
- vni: 10 km/s
- cij: np.mean(data)
"""
# ---------------------------
# Check format options
(
chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev,
verbose, verbscp,
) = _checkformat_options(
chain, method, tr_solver, tr_options,
xtol, ftol, gtol, loss, max_nfev, verbose,
)
# ---------------------------
# Load dinput if necessary
dinput = _checkformat_dinput(dinput)
dprepare, dind = dinput['dprepare'], dinput['dind']
nspect = dprepare['data'].shape[0]
# ---------------------------
# DEPRECATED?
lamb = dprepare['lamb']
if dinput['symmetry'] is True:
phi = np.abs(dprepare['phi'] - np.nanmean(dinput['symmetry_axis']))
else:
phi = dprepare['phi']
# ---------------------------
# Get scaling, x0, bounds from dict
scales = _dict2vector_dscalesx0bounds(
dd=dinput['dscales'], dd_name='dscales', dinput=dinput,
)
x0 = _dict2vector_dscalesx0bounds(
dd=dinput['dx0'], dd_name='dx0', dinput=dinput,
)
boundmin = _dict2vector_dscalesx0bounds(
dd=dinput['dbounds']['min'], dd_name="dbounds['min']", dinput=dinput,
)
boundmax = _dict2vector_dscalesx0bounds(
dd=dinput['dbounds']['max'], dd_name="dbounds['max']", dinput=dinput,
)
bounds = np.array([boundmin[0, :], boundmax[0, :]])
# ---------------------------
# Separate free from constant parameters
const = _dict2vector_dscalesx0bounds(
dd=dinput['dconstants'], dd_name='dconstants', dinput=dinput,
)
indx = np.any(np.isnan(const), axis=0)
const = const[:, ~indx]
x0[:, ~indx] = const / scales[:, ~indx]
# ---------------------------
# Get function, cost function and jacobian
(
func_detail, func_cost, func_jac,
) = _funccostjac.multigausfit2d_from_dlines_funccostjac(
lamb, phi,
dinput=dinput, dind=dind, jac=jac, indx=indx,
)
# TBF after multigausfit2d_from_dlines_funccostjac() is checked
# ---------------------------
# Prepare output
datacost = np.reshape(
dprepare['data'][:, dprepare['indok']],
(nspect, dprepare['indok'].sum()))
sol_x = np.full((nspect, dind['sizex']), np.nan)
success = np.full((nspect,), np.nan)
time = np.full((nspect,), np.nan)
cost = np.full((nspect,), np.nan)
nfev = np.full((nspect,), np.nan)
validity = np.zeros((nspect,), dtype=int)
message = ['' for ss in range(nspect)]
errmsg = ['' for ss in range(nspect)]
if dprepare.get('indok_var') is not None:
msg = ('indok_var not implemented yet!')
raise Exception(msg)
if dprepare['indok_var'].ndim == 3:
indok_var = dprepare['indok_var'].reshape(
(nspect, dprepare['lamb'].size))
else:
indok_var = [dprepare['indok_var'].ravel()]*nspect
else:
indok_var = [False]*nspect
dprepare['indok_var'] = indok_var
# Prepare msg
if verbose in [1, 2]:
col = np.char.array(['Spect', 'time (s)', 'cost',
'nfev', 'njev', 'msg'])
maxl = max(np.max(np.char.str_len(col)), 10)
msg = '\n'.join([' '.join([cc.ljust(maxl) for cc in col]),
' '.join(['-'*maxl]*6)])
print(msg)
# ---------------------------
# Minimize
end = '\r'
t0 = dtm.datetime.now() # DB
for ii in range(nspect):
if verbose == 3:
msg = "\nSpect {} / {}".format(ii+1, nspect)
print(msg)
try:
t0i = dtm.datetime.now() # DB
if not dinput['valid']['indt'][ii]:
continue
res = scpopt.least_squares(
func_cost, x0[ii, :],
jac=func_jac, bounds=bounds,
method=method, ftol=ftol, xtol=xtol,
gtol=gtol, x_scale=1.0, f_scale=1.0,
loss=loss, diff_step=None,
tr_solver=tr_solver, tr_options=tr_options,
jac_sparsity=None, max_nfev=max_nfev,
verbose=verbscp, args=(),
kwargs={'data': datacost[ii, :],
'scales': scales[ii, :],
'indok_var': indok_var[ii],
'ind_bs': dinput['valid']['indbs'][ii, :]})
dti = (dtm.datetime.now() - t0i).total_seconds()
if chain is True and ii < nspect-1:
x0[ii+1, :] = res.x
# cost, message, time
success[ii] = res.success
cost[ii] = res.cost
nfev[ii] = res.nfev
message[ii] = res.message
time[ii] = round((dtm.datetime.now()-t0i).total_seconds(),
ndigits=3)
sol_x[ii, :] = res.x
except Exception as err:
errmsg[ii] = str(err)
validity[ii] = -1
if verbose in [1, 2]:
if validity[ii] == 0:
col = np.char.array(['{} / {}'.format(ii+1, nspect),
'{}'.format(dti),
'{:5.3e}'.format(res.cost),
str(res.nfev), str(res.njev),
res.message])
else:
col = np.char.array(['{} / {}'.format(ii+1, nspect),
'{}'.format(dti),
' - ', ' - ', ' - ',
errmsg[ii]])
msg = ' '.join([cc.ljust(maxl) for cc in col])
if verbose == 1:
if ii == nspect-1:
end = '\n'
print(msg, end=end, flush=True)
else:
print(msg, end='\n')
# Isolate dratio and dshift
dratio, dshift = None, None
if dinput['double'] is not False:
if dinput['double'] is True:
dratio = (sol_x[:, dind['dratio']['x']]
* scales[:, dind['dratio']['x']])
dshift = (sol_x[:, dind['dshift']['x']]
* scales[:, dind['dshift']['x']])
else:
if dinput['double'].get('dratio') is None:
dratio = (sol_x[:, dind['dratio']['x']]
* scales[:, dind['dratio']['x']])
else:
dratio = np.full((nspect,), dinput['double']['dratio'])
if dinput['double'].get('dshift') is None:
dshift = (sol_x[:, dind['dshift']['x']]
* scales[:, dind['dshift']['x']])
else:
dshift = np.full((nspect,), dinput['double']['dshift'])
if verbose > 0:
dt = (dtm.datetime.now()-t0).total_seconds()
msg = ("Total computation time:"
+ "\t{} s for {} spectra ({} s per spectrum)".format(
round(dt, ndigits=3), nspect,
round(dt/nspect, ndigits=3)))
print(msg)
# ---------------------------
# Format output as dict
dfit = {'dinput': dinput,
'scales': scales, 'x0_scale': x0,
'bounds_scale': bounds, 'phi2': phi,
'jac': jac, 'sol_x': sol_x,
'dratio': dratio, 'dshift': dshift,
'time': time, 'success': success,
'validity': validity, 'errmsg': np.array(errmsg),
'cost': cost, 'nfev': nfev, 'msg': np.array(message)}
return dfit
###########################################################
###########################################################
#
# Main fit functions
#
###########################################################
###########################################################
def fit1d(
dinput=None,
method=None, tr_solver=None, tr_options=None,
xtol=None, ftol=None, gtol=None,
max_nfev=None, loss=None, chain=None,
dx0=None, x0_scale=None, bounds_scale=None,
jac=None, verbose=None, showonly=None,
save=None, name=None, path=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
plot=None, fs=None, wintit=None, tit=None, dmargin=None,
return_dax=None,
):
# ----------------------
# Check / format
if showonly is None:
showonly = False
if save is None:
save = False
if plot is None:
plot = False
if return_dax is None:
return_dax = False
# ----------------------
# Get dinput for 1d fitting from dlines, dconstraints, dprepare...
if not isinstance(dinput, dict):
msg = ("Please provide a properly formatted dict of inputs!\n"
+ "fit1d() needs the problem to be given as a dinput dict\n"
+ " => Use dinput = fit1d_dinput()")
raise Exception(msg)
# ----------------------
# Perform 2d fitting
if showonly is True:
msg = "TBF: lambfit and spect1d not defined"
raise Exception(msg)
dfit1d = {'shift': np.zeros((1, dinput['nlines'])),
'coefs': np.zeros((1, dinput['nlines'])),
'lamb': lambfit,
'data': spect1d,
'double': False,
'Ti': False,
'vi': False,
'ratio': None}
else:
dfit1d = multigausfit1d_from_dlines(
dinput=dinput,
method=method, max_nfev=max_nfev,
tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol, loss=loss,
chain=chain, verbose=verbose, jac=jac)
# ----------------------
# Optional saving
if save is True:
if name is None:
name = 'custom'
name = 'TFS_fit1d_doutput_{}.npz'.format(name)
if name[-4:] != '.npz':
name = name + '.npz'
if path is None:
path = './'
pfe = os.path.join(os.path.abspath(path), name)
np.savez(pfe, **dfit1d)
msg = ("Saved in:\n"
+ "\t{}".format(pfe))
print(msg)
# ----------------------
# Optional plotting
if plot is True:
dout = fit1d_extract(
dfit1d,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width, vi=vi, shift=shift,
pts_lamb_total=pts_lamb_total,
pts_lamb_detail=pts_lamb_detail,
)
# TBF
dax = _plot.plot_fit1d(
dfit1d=dfit1d, dout=dout, showonly=showonly,
fs=fs, dmargin=dmargin,
tit=tit, wintit=wintit)
# ----------------------
# return
if return_dax is True:
return dfit1d, dax
else:
return dfit1d
# TBF
def fit2d(
dinput=None, dprepare=None, dlines=None, dconstraints=None,
lamb=None, phi=None, data=None, mask=None,
domain=None, pos=None, subset=None, binning=None,
deg=None, knots=None, nbsplines=None,
method=None, tr_solver=None, tr_options=None,
xtol=None, ftol=None, gtol=None,
max_nfev=None, loss=None, chain=None,
dx0=None, x0_scale=None, bounds_scale=None,
jac=None, nxi=None, nxj=None, verbose=None, showonly=None,
save=None, name=None, path=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
plot=None, fs=None, wintit=None, tit=None, dmargin=None,
return_dax=None,
):
# ----------------------
# Check / format
if showonly is None:
showonly = False
if save is None:
save = False
if plot is None:
plot = False
if return_dax is None:
return_dax = False
# ----------------------
# Get dinput for 2d fitting from dlines, dconstraints, dprepare...
if dinput is None:
dinput = fit2d_dinput(
dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
data=data, lamb=lamb, phi=phi,
mask=mask, domain=domain,
pos=pos, subset=subset, binning=binning,
nxi=nxi, nxj=nxj, lphi=None, lphi_tol=None,
deg=deg, knots=knots, nbsplines=nbsplines)
# ----------------------
# Perform 2d fitting
if showonly is True:
# TBF
pass
else:
dfit2d = multigausfit2d_from_dlines(
dinput=dinput,
method=method, max_nfev=max_nfev,
tr_solver=tr_solver, tr_options=tr_options,
xtol=xtol, ftol=ftol, gtol=gtol, loss=loss,
chain=chain, verbose=verbose, jac=jac)
# ----------------------
# Optional saving
if save is True:
if name is None:
name = 'custom'
name = 'TFS_fit2d_doutput_{}_nbs{}.npz'.format(name, dinput['nbs'])
if name[-4:] != '.npz':
name = name + '.npz'
if path is None:
path = './'
pfe = os.path.join(os.path.abspath(path), name)
np.savez(pfe, **dfit2d)
msg = ("Saved in:\n"
+ "\t{}".format(pfe))
print(msg)
# ----------------------
# Optional plotting
if plot is True:
dout = fit2d_extract(dfit2d)
dax = None
# ----------------------
# return
if return_dax is True:
return dfit2d, dax
else:
return dfit2d
###########################################################
###########################################################
#
# Extract data from pre-computed dict of fitted results
#
###########################################################
###########################################################
def fit12d_get_data_checkformat(
dfit=None,
pts_phi=None, npts_phi=None,
bck=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_total=None, pts_detail=None,
allow_pickle=None,
):
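""" Check / format the arguments of fit1d_extract() and fit2d_extract()
(which quantities to extract and on which points), load dfit from a .npz
file if a path is provided, and return
(dfit, d3, pts_phi, pts_total, pts_detail) """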
# load file if str
if isinstance(dfit, str):
if not os.path.isfile(dfit) or not dfit[-4:] == '.npz':
msg = ("Provided dfit must be either a dict or "
+ "the absolute path to a saved .npz\n"
+ " You provided: {}".format(dfit))
raise Exception(msg)
if allow_pickle is None:
allow_pickle = _ALLOW_PICKLE
dfit = dict(np.load(dfit, allow_pickle=allow_pickle))
_rebuild_dict(dfit)
# check dfit basic structure
lk = ['dinput', 'sol_x', 'jac', 'scales']
c0 = isinstance(dfit, dict) and all([ss in dfit.keys() for ss in lk])
if not c0:
msg = ("\ndfit must be a dict with at least the following keys:\n"
+ "\t- {}\n".format(lk)
+ "\t- provided: {}".format(dfit))
raise Exception(msg)
# Identify if fit1d or fit2d
is2d = 'nbsplines' in dfit['dinput'].keys()
if is2d is True and 'phi2' not in dfit.keys():
msg = "dfit is a fit2d output but does not have key 'phi2'!"
raise Exception(msg)
# Extract dinput and dprepare (more readable)
dinput = dfit['dinput']
dprepare = dfit['dinput']['dprepare']
# ratio
if ratio is None:
ratio = False
if ratio is not False:
coefs = True
# Check / format amp, Ti, vi
d3 = {
'bck_amp': [bck, 'bck_amp'],
'bck_rate': [bck, 'bck_rate'],
'amp': [amp, 'amp'],
'coefs': [coefs, 'amp'],
'Ti': [Ti, 'width'],
'width': [width, 'width'],
'vi': [vi, 'shift'],
'shift': [shift, 'shift'],
}
# amp, Ti, vi
for k0 in d3.keys():
if d3[k0][0] is None:
d3[k0][0] = True
if d3[k0][0] is True:
d3[k0][0] = _D3[k0]
if d3[k0][0] is False:
d3[k0] = d3[k0][0]
continue
if 'bck' in k0:
continue
lc = [
d3[k0][0] in ['lines', 'x'],
isinstance(d3[k0][0], str),
(
isinstance(d3[k0][0], list)
and all([isinstance(ss, str) for ss in d3[k0][0]])
)
]
if not any(lc):
msg = (
"\nArg {} must be either:\n".format(k0)
+ "\t- 'x': return all unique {}\n".format(k0)
+ "\t- 'lines': return {} for all lines (inc. duplicates)\n"
+ "\t- str: a key in:\n"
+ "\t\t{}\n".format(dinput['keys'])
+ "\t\t{}\n".format(dinput[d3[k0][1]]['keys'])
+ "\t- list: a list of keys (see above)\n"
+ "Provided: {}".format(d3[k0][0])
)
raise Exception(msg)
if lc[0]:
if d3[k0][0] == 'lines':
d3[k0][0] = {
'type': d3[k0][0],
'ind': np.arange(0, dinput['nlines']),
}
else:
d3[k0][0] = {
'type': d3[k0][0],
'ind': np.arange(0, dinput[d3[k0][1]]['keys'].size),
}
elif lc[1]:
d3[k0][0] = [d3[k0][0]]
if isinstance(d3[k0][0], list):
lc = [
all([ss in dinput['keys'] for ss in d3[k0][0]]),
all([ss in dinput[d3[k0][1]]['keys'] for ss in d3[k0][0]]),
]
if not any(lc):
msg = (
"\nArg must contain either keys from:\n"
+ "\t- lines keys: {}\n".format(dinput['keys'])
+ "\t- {} keys: {}".format(k0, dinput[d3[k0][1]]['keys']),
)
raise Exception(msg)
if lc[0]:
d3[k0][0] = {
'type': 'lines',
'ind': np.array(
[
(dinput['keys'] == ss).nonzero()[0][0]
for ss in d3[k0][0]
],
dtype=int,
)
}
else:
d3[k0][0] = {
'type': 'x',
'ind': np.array(
[
(dinput[d3[k0][1]]['keys'] == ss).nonzero()[0][0]
for ss in d3[k0][0]
],
dtype=int),
}
d3[k0][0]['field'] = d3[k0][1]
d3[k0] = d3[k0][0]
# Ratio
if ratio is not False:
lkeys = dfit['dinput']['keys']
lc = [
isinstance(ratio, tuple),
isinstance(ratio, list),
isinstance(ratio, np.ndarray),
]
msg = (
"\nArg ratio (spectral lines magnitude ratio) must be either:\n"
+ "\t- False: no line ration computed\n"
+ "\t- tuple of len=2: upper and lower keys of the lines\n"
+ "\t- list of tuple of len=2: upper and lower keys pairs\n"
+ "\t- np.ndarray of shape (2, N): upper keys and lower keys\n"
+ " You provided: {}\n".format(ratio)
+ " Available keys: {}".format(lkeys)
)
if not any(lc):
raise Exception(msg)
if lc[0]:
c0 = (
len(ratio) == 2
and all([ss in lkeys for ss in ratio])
)
if not c0:
raise Exception(msg)
ratio = np.reshape(ratio, (2, 1))
elif lc[1]:
c0 = all([
isinstance(tt, tuple)
and len(tt) == 2
and all([ss in lkeys for ss in tt])
for tt in ratio
])
if not c0:
raise Exception(msg)
ratio = np.array(ratio).T
c0 = (
isinstance(ratio, np.ndarray)
and ratio.ndim == 2
and ratio.shape[0] == 2
and all([ss in lkeys for ss in ratio[0, :]])
and all([ss in lkeys for ss in ratio[1, :]])
)
if not c0:
raise Exception(msg)
d3['ratio'] = ratio
# pts_phi, npts_phi
if is2d is True:
c0 = any([v0 is not False for v0 in d3.values()])
c1 = [pts_phi is not None, npts_phi is not None]
if all(c1):
msg = "Arg pts_phi and npts_phi cannot be both provided!"
raise Exception(msg)
if not any(c1):
npts_phi = (2*dinput['deg']-1)*(dinput['knots'].size-1) + 1
if npts_phi is not None:
npts_phi = int(npts_phi)
pts_phi = np.linspace(dprepare['domain']['phi']['minmax'][0],
dprepare['domain']['phi']['minmax'][1],
npts_phi)
else:
pts_phi = np.array(pts_phi).ravel()
# pts_total, pts_detail
if pts_total is None:
if dprepare is None:
pts_total = False
else:
if is2d is True:
pts_total = np.array([dprepare['lamb'], dprepare['phi']])
else:
pts_total = dprepare['lamb']
if pts_detail is None:
pts_detail = False
if pts_detail is True and pts_total is not False:
pts_detail = pts_total
if pts_detail is not False:
pts_detail = np.array(pts_detail)
if pts_total is not False:
pts_total = np.array(pts_total)
return dfit, d3, pts_phi, pts_total, pts_detail
def fit1d_extract(
dfit1d=None,
bck=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_total=None, pts_lamb_detail=None,
):
# -------------------
# Check format input
(
dfit1d, d3, pts_phi,
pts_lamb_total, pts_lamb_detail,
) = fit12d_get_data_checkformat(
dfit=dfit1d,
bck=bck,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width,
vi=vi, shift=shift,
pts_total=pts_lamb_total,
pts_detail=pts_lamb_detail,
)
# Extract dprepare and dind (more readable)
dprepare = dfit1d['dinput']['dprepare']
dind = dfit1d['dinput']['dind']
nspect = dprepare['data'].shape[0]
# Prepare extract func
def _get_values(key, pts_phi=None,
d3=d3, nspect=nspect, dinput=dfit1d['dinput'],
dind=dind, sol_x=dfit1d['sol_x'], scales=dfit1d['scales']):
if d3[key]['type'] == 'lines':
keys = dinput['keys'][d3[key]['ind']]
else:
keys = dinput[d3[key]['field']]['keys'][d3[key]['ind']]
indbis = dind[d3[key]['field']][d3[key]['type']][d3[key]['ind']]
val = sol_x[:, indbis] * scales[:, indbis]
return keys, val
# -------------------
# Prepare output
lk = [
'bck_amp', 'bck_rate',
'amp', 'coefs', 'ratio', 'Ti', 'width', 'vi', 'shift',
'dratio', 'dshift',
]
dout = dict.fromkeys(lk, False)
# bck
if d3['bck_amp'] is not False:
dout['bck_amp'] = {
'values': (
dfit1d['sol_x'][:, dind['bck_amp']['x'][0]]
* dfit1d['scales'][:, dind['bck_amp']['x'][0]]
),
'units': 'a.u.',
}
dout['bck_rate'] = {
'values': (
dfit1d['sol_x'][:, dind['bck_rate']['x'][0]]
* dfit1d['scales'][:, dind['bck_rate']['x'][0]]
),
'units': 'a.u.',
}
# amp
if d3['amp'] is not False:
keys, val = _get_values('amp')
dout['amp'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# coefs
if d3['coefs'] is not False:
keys, val = _get_values('coefs')
dout['coefs'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# ratio
if d3['ratio'] is not False:
nratio = d3['ratio'].shape[1]
indup = np.r_[[(dout['coefs']['keys'] == kk).nonzero()[0][0]
for kk in d3['ratio'][0, :]]]
indlo = np.r_[[(dout['coefs']['keys'] == kk).nonzero()[0][0]
for kk in d3['ratio'][1, :]]]
val = (dout['coefs']['values'][:, indup]
/ dout['coefs']['values'][:, indlo])
lab = np.r_[['{} / {}'.format(dfit1d['dinput']['symb'][indup[ii]],
dfit1d['dinput']['symb'][indlo[ii]])
for ii in range(nratio)]]
dout['ratio'] = {'keys': d3['ratio'], 'values': val,
'lab': lab, 'units': 'a.u.'}
# Ti
if d3['Ti'] is not False:
keys, val = _get_values('Ti')
conv = np.sqrt(scpct.mu_0*scpct.c / (2.*scpct.h*scpct.alpha))
indTi = np.array([iit[0] for iit in dind['width']['jac']])
# if d3['Ti']['type'] == 'lines':
# indTi = np.arange(0, dfit1d['dinput']['nlines'])
indTi = indTi[d3['Ti']['ind']]
val = (conv * val
* dfit1d['dinput']['mz'][indTi][None, :]
* scpct.c**2)
dout['Ti'] = {'keys': keys, 'values': val, 'units': 'eV'}
# width
if d3['width'] is not False:
keys, val = _get_values('width')
dout['width'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# vi
if d3['vi'] is not False:
keys, val = _get_values('vi')
val = val * scpct.c
dout['vi'] = {'keys': keys, 'values': val, 'units': 'm.s^-1'}
# shift
if d3['shift'] is not False:
keys, val = _get_values('shift')
val = val * dfit1d['dinput']['lines'][None, :]
dout['shift'] = {'keys': keys, 'values': val, 'units': 'm'}
# double
if dfit1d['dinput']['double'] is not False:
double = dfit1d['dinput']['double']
if double is True or double.get('dratio') is None:
dout['dratio'] = dfit1d['sol_x'][:, dind['dratio']['x']]
else:
dout['dratio'] = np.full((nspect,), double['dratio'])
if double is True or double.get('dshift') is None:
dout['dshift'] = dfit1d['sol_x'][:, dind['dshift']['x']]
else:
dout['dshift'] = np.full((nspect,), double['dshift'])
# -------------------
# sol_detail and sol_tot
sold, solt = False, False
if pts_lamb_detail is not False or pts_lamb_total is not False:
(func_detail,
func_cost, _) = _funccostjac.multigausfit1d_from_dlines_funccostjac(
dprepare['lamb'],
dinput=dfit1d['dinput'],
dind=dind, jac=dfit1d['jac'])
if pts_lamb_detail is not False:
shape = tuple(np.r_[nspect, pts_lamb_detail.shape,
dfit1d['dinput']['nlines']+1])
sold = np.full(shape, np.nan)
for ii in range(nspect):
sold[ii, dprepare['indok'][ii, :], :] = func_detail(
dfit1d['sol_x'][ii, :],
scales=dfit1d['scales'][ii, :],
indok=dprepare['indok'][ii, :],
)
# indok_var=dprepare['indok_var'][ii])
if pts_lamb_total is not False:
shape = tuple(np.r_[nspect, pts_lamb_total.shape])
solt = np.full(shape, np.nan)
for ii in range(nspect):
solt[ii, dprepare['indok'][ii, :]] = func_cost(
dfit1d['sol_x'][ii, :],
scales=dfit1d['scales'][ii, :],
indok=dprepare['indok'][ii, :],
data=0.)
# Double-check consistency if possible
c0 = (pts_lamb_detail is not False
and np.allclose(pts_lamb_total, pts_lamb_detail))
if c0:
if not np.allclose(solt, np.sum(sold, axis=-1),
equal_nan=True):
msg = "Inconsistent computations detail vs total"
raise Exception(msg)
dout['sol_detail'] = sold
dout['sol_tot'] = solt
dout['units'] = 'a.u.'
# -------------------
# Add input args
dout['d3'] = d3
dout['pts_lamb_detail'] = pts_lamb_detail
dout['pts_lamb_total'] = pts_lamb_total
return dout
def _get_phi_profile(key,
nspect=None, dinput=None,
dind=None, sol_x=None, scales=None,
typ=None, ind=None, pts_phi=None):
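""" Evaluate, for each spectrum, the bspline profile vs phi of the
selected quantity (lines or unique x, depending on typ / ind) at pts_phi,
using the fitted bspline coefficients stored in sol_x """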
ncoefs = ind.size
val = np.full((nspect, pts_phi.size, ncoefs), np.nan)
BS = BSpline(dinput['knots_mult'],
np.ones((dinput['nbs'], ncoefs), dtype=float),
dinput['deg'],
extrapolate=False, axis=0)
if typ == 'lines':
keys = dinput['keys'][ind]
else:
keys = dinput[key]['keys'][ind]
indbis = dind[key][typ][:, ind]
for ii in range(nspect):
BS.c = sol_x[ii, indbis] * scales[ii, indbis]
val[ii, :, :] = BS(pts_phi)
return keys, val
def fit2d_extract(dfit2d=None,
amp=None, coefs=None, ratio=None,
Ti=None, width=None,
vi=None, shift=None,
pts_lamb_phi_total=None, pts_lamb_phi_detail=None):
# -------------------
# Check format input
out = fit12d_get_data_checkformat(
dfit=dfit2d,
amp=amp, coefs=coefs, ratio=ratio,
Ti=Ti, width=width,
vi=vi, shift=shift,
pts_total=pts_lamb_phi_total,
pts_detail=pts_lamb_phi_detail)
dfit2d, d3, pts_phi, pts_lamb_phi_total, pts_lamb_phi_detail = out
# Extract dprepare and dind (more readable)
dprepare = dfit2d['dinput']['dprepare']
dind = dfit2d['dinput']['dind']
nspect = dprepare['data'].shape[0]
# Prepare extract func
# TBF
def _get_values(key, pts_phi=None,
d3=d3, nspect=nspect, dinput=dfit2d['dinput'],
dind=dind, sol_x=dfit2d['sol_x'], scales=dfit2d['scales']):
if d3[key]['type'] == 'lines':
keys = dinput['keys'][d3[key]['ind']]
else:
keys = dinput[d3[key]['field']]['keys'][d3[key]['ind']]
indbis = dind[d3[key]['field']][d3[key]['type']][d3[key]['ind']]
# 1d vs 2d
if pts_phi is None:
val = sol_x[:, indbis] * scales[:, indbis]
else:
ncoefs = keys.size    # mirrors _get_phi_profile()
val = np.full((nspect, pts_phi.size, ncoefs), np.nan)
BS = BSpline(dinput['knots_mult'],
np.ones((dinput['nbs'], ncoefs), dtype=float),
dinput['deg'],
extrapolate=False, axis=0)
for ii in range(nspect):
BS.c = sol_x[ii, indbis] * scales[ii, indbis]
val[ii, :, :] = BS(pts_phi)
return keys, val
# -------------------
# Prepare output
lk = ['amp', 'coefs', 'ratio', 'Ti', 'width', 'vi', 'shift',
'dratio', 'dshift']
dout = dict.fromkeys(lk, False)
# amp
if d3['amp'] is not False:
keys, val = _get_values('amp')
dout['amp'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# coefs
if d3['coefs'] is not False:
keys, val = _get_values('coefs')
dout['coefs'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# ratio
if d3['ratio'] is not False:
nratio = d3['ratio'].shape[1]
indup = np.r_[[(dout['coefs']['keys'] == kk).nonzero()[0][0]
for kk in d3['ratio'][0, :]]]
indlo = np.r_[[(dout['coefs']['keys'] == kk).nonzero()[0][0]
for kk in d3['ratio'][1, :]]]
val = (dout['coefs']['values'][:, indup]
/ dout['coefs']['values'][:, indlo])
        lab = np.r_[['{} / {}'.format(dfit2d['dinput']['symb'][indup[ii]],
                                      dfit2d['dinput']['symb'][indlo[ii]])
for ii in range(nratio)]]
        dout['ratio'] = {'keys': d3['ratio'], 'values': val,
'lab': lab, 'units': 'a.u.'}
dout = {}
# amp
if d3['amp'] is not False:
keys, val = _get_phi_profile(
d3['amp']['field'], nspect=nspect,
dinput=dfit2d['dinput'],
dind=dfit2d['dind'], sol_x=dfit2d['sol_x'],
scales=dfit2d['scales'], pts_phi=pts_phi,
typ=d3['amp']['type'], ind=d3['amp']['ind'])
dout['amp'] = {'keys': keys, 'values': val, 'units': 'a.u.'}
# Ti
if d3['Ti'] is not False:
keys, val = _get_phi_profile(
d3['Ti']['field'], nspect=nspect,
dinput=dfit2d['dinput'],
dind=dfit2d['dind'], sol_x=dfit2d['sol_x'],
scales=dfit2d['scales'], pts_phi=pts_phi,
typ=d3['Ti']['type'], ind=d3['Ti']['ind'])
conv = np.sqrt(scpct.mu_0*scpct.c / (2.*scpct.h*scpct.alpha))
if d3['Ti']['type'] == 'lines':
indTi = np.arange(0, dfit2d['dinput']['nlines'])
else:
indTi = np.array([iit[0]
for iit in dfit2d['dind']['width']['jac']])
indTi = indTi[d3['Ti']['ind']]
val = (conv * val
* dfit2d['dinput']['mz'][indTi][None, None, :]
* scpct.c**2)
dout['Ti'] = {'keys': keys, 'values': val, 'units': 'eV'}
# vi
if d3['vi'] is not False:
keys, val = _get_phi_profile(
d3['vi']['field'], nspect=nspect,
dinput=dfit2d['dinput'],
dind=dfit2d['dind'], sol_x=dfit2d['sol_x'],
scales=dfit2d['scales'], pts_phi=pts_phi,
typ=d3['vi']['type'], ind=d3['vi']['ind'])
val = val * scpct.c
dout['vi'] = {'keys': keys, 'values': val, 'units': 'm.s^-1'}
# -------------------
# sol_detail and sol_tot
sold, solt = False, False
if pts_lamb_phi_detail is not False or pts_lamb_phi_total is not False:
func_detail = _funccostjac.multigausfit2d_from_dlines_funccostjac(
dfit2d['dprepare']['lamb'], dfit2d['phi2'],
indok=dfit2d['dprepare']['indok'],
binning=dfit2d['dprepare']['binning'],
dinput=dfit2d['dinput'],
dind=dfit2d['dind'], jac=dfit2d['jac'])[0]
if pts_lamb_phi_detail is not False:
shape = tuple(np.r_[nspect, pts_lamb_phi_detail.shape,
dfit2d['dinput']['nlines']+1,
dfit2d['dinput']['nbs']])
sold = np.full(shape, np.nan)
if pts_lamb_phi_total is not False:
shape = tuple(np.r_[nspect, pts_lamb_phi_total.shape])
solt = np.full(shape, np.nan)
for ii in range(nspect):
# Separate and reshape output
fd = func_detail(dfit2d['sol_x'][ii, :],
scales=dfit2d['scales'][ii, :],
indok_var=dfit2d['dprepare']['indok_var'][ii])
if pts_lamb_phi_detail is not False:
sold[ii, ...] = fd
if pts_lamb_phi_total is not False:
solt[ii, ...] = np.nansum(np.nansum(fd, axis=-1), axis=-1)
dout['sol_detail'] = sold
dout['sol_tot'] = solt
dout['units'] = 'a.u.'
# -------------------
# Add input args
dout['d3'] = d3
dout['pts_phi'] = pts_phi
dout['pts_lamb_phi_detail'] = pts_lamb_phi_detail
dout['pts_lamb_phi_total'] = pts_lamb_phi_total
return dout
###########################################################
###########################################################
#
# Plot fitted data from pre-computed dict of fitted results
#
###########################################################
###########################################################
def fit2d_plot(dout=None):
# ----------------------
# Optional plotting
if plot is True:
if plotmode is None:
plotmode = 'transform'
if indspect is None:
indspect = 0
if spect1d is not None:
# Compute lambfit / phifit and spectrum1d
if nlambfit is None:
nlambfit = 200
((spect1d, fit1d), lambfit,
phifit, _, phiminmax) = self._calc_spect1d_from_data2d(
[dataflat[indspect, :], dfit2d['sol_tot'][indspect, :]],
lambflat, phiflat,
nlambfit=nlambfit, nphifit=10,
spect1d=spect1d, mask=None, vertsum1d=False)
else:
fit1d, lambfit, phiminmax = None, None, None
dax = _plot_optics.CrystalBragg_plot_data_fit2d(
xi=xi, xj=xj, data=dfit2d['data'],
lamb=dfit2d['lamb'], phi=dfit2d['phi'], indspect=indspect,
indok=indok, dfit2d=dfit2d,
dax=dax, plotmode=plotmode, angunits=angunits,
cmap=cmap, vmin=vmin, vmax=vmax,
spect1d=spect1d, fit1d=fit1d,
lambfit=lambfit, phiminmax=phiminmax,
dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)
return dax
###########################################################
###########################################################
#
# 1d vertical fitting for noise analysis
#
###########################################################
###########################################################
def get_noise_costjac(deg=None, nbsplines=None, dbsplines=None, phi=None,
phiminmax=None, symmetryaxis=None, sparse=None):
if sparse is None:
sparse = False
if dbsplines is None:
dbsplines = multigausfit2d_from_dlines_dbsplines(
knots=None, deg=deg, nbsplines=nbsplines,
phimin=phiminmax[0], phimax=phiminmax[1],
symmetryaxis=symmetryaxis)
def cost(x,
km=dbsplines['knots_mult'],
deg=dbsplines['deg'],
data=0., phi=phi):
return scpinterp.BSpline(km, x, deg,
extrapolate=False, axis=0)(phi) - data
jac = np.zeros((phi.size, dbsplines['nbs']), dtype=float)
km = dbsplines['knots_mult']
kpb = dbsplines['nknotsperbs']
lind = [(phi >= km[ii]) & (phi < km[ii+kpb-1])
for ii in range(dbsplines['nbs'])]
if sparse is True:
def jac_func(x, jac=jac, km=km, data=None,
phi=phi, kpb=kpb, lind=lind):
for ii in range(x.size):
jac[lind[ii], ii] = scpinterp.BSpline.basis_element(
km[ii:ii+kpb], extrapolate=False)(phi[lind[ii]])
return scpsparse.csr_matrix(jac)
else:
def jac_func(x, jac=jac, km=km, data=None,
phi=phi, kpb=kpb, lind=lind):
for ii in range(x.size):
jac[lind[ii], ii] = scpinterp.BSpline.basis_element(
km[ii:ii+kpb], extrapolate=False)(phi[lind[ii]])
return jac
return cost, jac_func
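def _demo_noise_costjac():
    # Minimal usage sketch of get_noise_costjac(), assuming a hand-built
    # dbsplines dict with the keys used above ('knots_mult', 'deg', 'nbs',
    # 'nknotsperbs'); fits a smooth bump sampled on phi with least_squares.
    import numpy as np
    import scipy.optimize as scpopt
    deg, nbs = 2, 6
    knots = np.linspace(0., 1., nbs - deg + 1)
    km = np.r_[[knots[0]]*deg, knots, [knots[-1]]*deg]   # repeated edge knots
    dbsplines = {'knots_mult': km, 'deg': deg, 'nbs': nbs,
                 'nknotsperbs': deg + 2}
    phi = np.linspace(0.02, 0.98, 200)
    data = np.exp(-(phi - 0.5)**2 / 0.05) + 0.01*np.random.normal(size=phi.size)
    func_cost, func_jac = get_noise_costjac(phi=phi, dbsplines=dbsplines,
                                            sparse=False)
    res = scpopt.least_squares(func_cost, np.ones(nbs), jac=func_jac,
                               kwargs={'data': data})
    return res.x          # bspline coefficients approximating the bump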
def _basic_loop(ilambu=None, ilamb=None, phi=None, data=None, mask=None,
domain=None, nbs=None, dbsplines=None, nspect=None,
method=None, tr_solver=None, tr_options=None, loss=None,
xtol=None, ftol=None, gtol=None, max_nfev=None, verbose=None):
# ---------------
# Check inputs
if method is None:
method = _METHOD
assert method in ['trf', 'dogbox', 'lm'], method
if tr_solver is None:
tr_solver = None
if tr_options is None:
tr_options = {}
if xtol is None:
xtol = _TOL2D['x']
if ftol is None:
ftol = _TOL2D['f']
if gtol is None:
gtol = _TOL2D['g']
if loss is None:
loss = _LOSS
if max_nfev is None:
max_nfev = None
x0 = 1. - (2.*np.arange(nbs)/nbs - 1.)**2
# ---------------
# Prepare outputs
dataint = np.full((nspect, ilambu.size), np.nan)
fit = np.full(data.shape, np.nan)
indsort = np.zeros((2, phi.size), dtype=int)
indout_noeval = np.zeros(phi.shape, dtype=bool)
chi2n = np.full((nspect, ilambu.size), np.nan)
chi2_meandata = np.full((nspect, ilambu.size), np.nan)
# ---------------
# Main loop
i0, indnan = 0, []
for jj in range(ilambu.size):
ind = ilamb == ilambu[jj]
nind = ind.sum()
isort = i0 + np.arange(0, nind)
# skips cases with no points
if not np.any(ind):
continue
inds = np.argsort(phi[ind])
inds_rev = np.argsort(inds)
indsort[0, isort] = ind.nonzero()[0][inds]
indsort[1, isort] = ind.nonzero()[1][inds]
phisort = phi[indsort[0, isort], indsort[1, isort]]
datasort = data[:, indsort[0, isort], indsort[1, isort]]
dataint[:, jj] = np.nanmean(datasort, axis=1)
# skips cases with to few points
indok = ~np.any(np.isnan(datasort), axis=0)
if mask is not None:
indok &= mask[indsort[0, isort], indsort[1, isort]]
# Check there are enough phi vs bsplines
indphimin = np.searchsorted(np.linspace(domain['phi']['minmax'][0],
domain['phi']['minmax'][1],
nbs + 1),
phisort[indok])
if np.unique(indphimin).size < nbs:
indout_noeval[ind] = True
continue
indout_noeval[ind] = ~indok[inds_rev]
# get bsplines func
func_cost, func_jac = get_noise_costjac(phi=phisort[indok],
dbsplines=dbsplines,
sparse=False,
symmetryaxis=False)
for tt in range(nspect):
if verbose > 0:
msg = ("\tlambbin {} / {}".format(jj+1, ilambu.size)
+ " "
+ "time step = {} / {}".format(tt+1, nspect))
print(msg.ljust(50), end='\r', flush=True)
if dataint[tt, jj] == 0.:
continue
datai = datasort[tt, indok] / dataint[tt, jj]
res = scpopt.least_squares(
func_cost, x0, jac=func_jac,
method=method, ftol=ftol, xtol=xtol, gtol=gtol,
x_scale='jac', f_scale=1.0, loss=loss, diff_step=None,
tr_solver=tr_solver, tr_options={}, jac_sparsity=None,
max_nfev=max_nfev, verbose=0, args=(),
kwargs={'data': datai})
# Store in original shape
fit[tt, ind] = (
func_cost(res.x, phi=phisort, data=0.)
* dataint[tt, jj]
)[inds_rev]
chi2_meandata[tt, jj] = np.nanmean(fit[tt, ind])
chi2n[tt, jj] = np.nanmean(func_cost(x=res.x, data=datai)**2)
i0 += nind
indnan.append(i0)
return (fit, dataint, indsort, np.array(indnan), indout_noeval,
chi2n, chi2_meandata)
def noise_analysis_2d(
data, lamb, phi, mask=None, margin=None, valid_fraction=None,
deg=None, knots=None, nbsplines=None, nxerrbin=None,
nlamb=None, loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
ms=None, dcolor=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None, fmt=None,
return_dax=None,
):
# -------------
# Check inputs
if not isinstance(nbsplines, int):
msg = "Please provide a (>0) integer value for nbsplines"
raise Exception(msg)
if deg is None:
deg = 2
if plot is None:
plot = True
if verbose is None:
verbose = 1
if return_dax is None:
return_dax = False
c0 = lamb.shape == phi.shape == data.shape[1:]
if c0 is not True:
msg = (
"input data, lamb, phi are non-conform!\n"
+ "\t- expected lamb.shape == phi.shape == data.shape[1:]\n"
+ "\t- provided:\n"
+ "\t\tlamb.shape = {}\n".format(lamb.shape)
+ "\t\tphi.shape = {}\n".format(phi.shape)
+ "\t\tdata.shape = {}\n".format(data.shape)
)
raise Exception(msg)
nspect = data.shape[0]
domain = {'lamb': {'minmax': [np.nanmin(lamb), np.nanmax(lamb)]},
'phi': {'minmax': [np.nanmin(phi), np.nanmax(phi)]}}
if nlamb is None:
if lamb.ndim == 2:
nlamb = lamb.shape[0]
else:
msg = ("Please provide a value for nlamb (nb of bins)!")
raise Exception(msg)
nlamb = int(nlamb)
# -------------
# lamb binning
lambedges = np.linspace(domain['lamb']['minmax'][0],
domain['lamb']['minmax'][1], nlamb+1)
ilamb = np.searchsorted(lambedges, lamb)
ilambu = np.unique(ilamb)
# -------------
# bspline dict and plotting utilities
dbsplines = multigausfit2d_from_dlines_dbsplines(
knots=None, deg=deg, nbsplines=nbsplines,
phimin=domain['phi']['minmax'][0],
phimax=domain['phi']['minmax'][1],
symmetryaxis=False)
# plotting utils
bs_phi = np.linspace(domain['phi']['minmax'][0],
domain['phi']['minmax'][1], 101)
bs_val = np.array([
scpinterp.BSpline.basis_element(
dbsplines['knots_mult'][ii:ii+dbsplines['nknotsperbs']],
extrapolate=False)(bs_phi)
for ii in range(nbsplines)]).T
# -------------
# Perform fits
(fit, dataint, indsort, indnan, indout_noeval,
chi2n, chi2_meandata) = _basic_loop(
ilambu=ilambu, ilamb=ilamb, phi=phi, data=data, mask=mask,
domain=domain, nbs=nbsplines, dbsplines=dbsplines, nspect=nspect,
method=method, tr_solver=tr_solver, tr_options=tr_options, loss=loss,
xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, verbose=verbose)
# -------------
# Identify outliers with respect to noise model
(mean, var, xdata, const,
indout_var, _, margin, valid_fraction) = get_noise_analysis_var_mask(
         fit=fit, data=data,
         mask=(~indout_noeval if mask is None else (mask & ~indout_noeval)),
margin=margin, valid_fraction=valid_fraction)
# Safety check
if mask is None:
indout_mask = np.zeros(lamb.shape, dtype=bool)
else:
indout_mask = ~mask
indout_noeval[~mask] = False
    indout_tot = np.array([indout_mask,
                           indout_noeval,
                           np.any(indout_var, axis=0)])
c0 = np.all(np.sum(indout_tot.astype(int), axis=0) <= 1)
if not c0:
msg = "Overlapping indout!"
raise Exception(msg)
indin = ~np.any(indout_tot, axis=0)
# -------------
# output dict
dnoise = {
'data': data, 'phi': phi, 'fit': fit,
'chi2n': chi2n, 'chi2_meandata': chi2_meandata, 'dataint': dataint,
'domain': domain, 'indin': indin, 'indout_mask': indout_mask,
'indout_noeval': indout_noeval, 'indout_var': indout_var,
'mask': mask, 'ind_noeval': None,
'indsort': indsort, 'indnan': np.array(indnan),
'nbsplines': nbsplines, 'bs_phi': bs_phi, 'bs_val': bs_val,
        'deg': deg, 'lambedges': lambedges,
'ilamb': ilamb, 'ilambu': ilambu,
'var_mean': mean, 'var': var, 'var_xdata': xdata,
'var_const': const, 'var_margin': margin,
'var_fraction': valid_fraction}
# Plot
if plot is True:
try:
dax = _plot.plot_noise_analysis(
dnoise=dnoise,
ms=ms, dcolor=dcolor,
dax=dax, fs=fs, dmargin=dmargin,
wintit=wintit, tit=tit, sublab=sublab,
save=save_fig, name=name_fig, path=path_fig, fmt=fmt)
except Exception as err:
msg = ("Plotting failed: {}".format(str(err)))
warnings.warn(msg)
if return_dax is True:
return dnoise, dax
else:
return dnoise
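def _demo_noise_analysis_2d():
    # Hedged usage sketch on synthetic data: a smooth phi-profile plus counting
    # noise on a (nspect, nphi, nlamb) stack; assumes the rest of this module
    # (e.g. multigausfit2d_from_dlines_dbsplines, get_noise_analysis_var_mask)
    # is available, and skips plotting.
    import numpy as np
    nspect, n0, n1 = 2, 64, 40
    lamb = np.tile(np.linspace(3.94, 4.00, n1), (n0, 1))
    phi = np.tile(np.linspace(-0.1, 0.1, n0)[:, None], (1, n1))
    profile = np.exp(-phi**2 / 2.e-3)
    data = 1000.*profile[None, :, :] + np.random.poisson(50., (nspect, n0, n1))
    mask = np.ones((n0, n1), dtype=bool)
    dnoise = noise_analysis_2d(data, lamb, phi, mask=mask,
                               deg=2, nbsplines=5, nlamb=20, plot=False)
    return dnoise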
def noise_analysis_2d_scannbs(
data, lamb, phi, mask=None, nxerrbin=None,
deg=None, knots=None, nbsplines=None, lnbsplines=None,
nlamb=None, loss=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
method=None, tr_solver=None, tr_options=None,
verbose=None, plot=None,
dax=None, fs=None, dmargin=None,
wintit=None, tit=None, ms=None, sublab=None,
save_fig=None, name_fig=None, path_fig=None,
fmt=None, return_dax=None,
):
# -------------
# Check inputs
if lnbsplines is None:
lnbsplines = np.arange(5, 21)
else:
lnbsplines = np.atleast_1d(lnbsplines).ravel().astype(int)
if nbsplines is None:
nbsplines = int(lnbsplines.size/2)
if nbsplines is not None:
nbsplines = np.unique(np.atleast_1d(nbsplines)).astype(int)
nlnbs = lnbsplines.size
if nxerrbin is None:
nxerrbin = 100
if deg is None:
deg = 2
if plot is None:
plot = True
if verbose is None:
verbose = 1
if return_dax is None:
return_dax = False
c0 = lamb.shape == phi.shape == data.shape[1:]
if c0 is not True:
msg = ("input data, lamb, phi are non-conform!\n"
+ "\t- expected lamb.shape == phi.shape == data.shape[1:]\n"
+ "\t- provided: ")
raise Exception(msg)
nspect = data.shape[0]
domain = {'lamb': {'minmax': [np.nanmin(lamb), np.nanmax(lamb)]},
'phi': {'minmax': [np.nanmin(phi), np.nanmax(phi)]}}
if nlamb is None:
if lamb.ndim == 2:
nlamb = lamb.shape[0]
else:
msg = ("Please provide a value for nlamb (nb of bins)!")
raise Exception(msg)
nlamb = int(nlamb)
# -------------
# lamb binning
lambedges = np.linspace(domain['lamb']['minmax'][0],
domain['lamb']['minmax'][1], nlamb+1)
ilamb = np.searchsorted(lambedges, lamb)
ilambu = np.unique(ilamb)
# -------------
# Perform fits
xdata_edge = np.linspace(0, np.nanmax(data[:, mask]), nxerrbin+1)
xdata = 0.5*(xdata_edge[1:] + xdata_edge[:-1])
dataint = np.full((nspect, ilambu.size), np.nan)
# fit = np.full(data.shape, np.nan)
indsort = np.zeros((2, phi.size), dtype=int)
# indout_noeval = np.zeros(phi.shape, dtype=bool)
chi2n = np.full((nlnbs, nspect, ilambu.size), np.nan)
chi2_meandata = np.full((nlnbs, nspect, ilambu.size), np.nan)
const = np.full((nlnbs,), np.nan)
mean = np.full((nlnbs, nxerrbin), np.nan)
var = np.full((nlnbs, nxerrbin), np.nan)
bs_phidata, bs_data, bs_fit, bs_indin = [], [], [], []
for ii in range(lnbsplines.size):
nbs = int(lnbsplines[ii])
# -------------
# bspline dict and plotting utilities
dbsplines = multigausfit2d_from_dlines_dbsplines(
knots=None, deg=deg, nbsplines=nbs,
phimin=domain['phi']['minmax'][0],
phimax=domain['phi']['minmax'][1],
symmetryaxis=False,
)
# -------------
# Perform fits
if verbose > 0:
msg = "nbs = {} ({} / {})".format(nbs, ii+1, lnbsplines.size)
print(msg)
(fiti, dataint, indsort, indnan, indout_noeval,
chi2n[ii, ...], chi2_meandata[ii, ...]) = _basic_loop(
ilambu=ilambu, ilamb=ilamb, phi=phi, data=data, mask=mask,
domain=domain, nbs=nbs, dbsplines=dbsplines, nspect=nspect,
method=method, tr_solver=tr_solver, tr_options=tr_options,
loss=loss, xtol=xtol, ftol=ftol, gtol=gtol,
max_nfev=max_nfev, verbose=verbose)
if ii == 0:
ind_intmax = np.unravel_index(
|
np.argmax(dataint, axis=None)
|
numpy.argmax
|
import tensorflow as tf
from galaxy_CapsNet import *
import cv2
import os
import sys
import numpy as np
from galaxy_data import multigalaxy_generate_sample_alexnet
batch_size = 10
is_multi_galaxy = True
is_shift_ag = True
steps = 20000
|
np.random.seed(1234)
|
numpy.random.seed
|
import os
import numpy as np
import shutil
import math
import targets
try:
import flopy
from flopy.utils.lgrutil import Lgr
except ImportError:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
mf6exe = os.path.abspath(targets.target_dict["mf6"])
name = "gwf"
mvr_scens = ["mltmvr", "mltmvr5050", "mltmvr7525"]
ws = os.path.join("temp", name)
exdirs = [ws]
sim_workspaces = []
gwf_names = []
# ----------------
# Universal input
# ----------------
numdays = 1
perlen = [1] * numdays
nper = len(perlen)
nstp = [1] * numdays
tsmult = [1.0] * numdays
icelltype = [1, 0, 0]
# Aquifer properties
hk = 1
k33 = 1
# Solver settings
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 0.97
# ------------------------------------------
# Static input associated with parent model
# ------------------------------------------
nlayp = 3
nrowp = 15
ncolp = 15
delrp = 1544.1 / ncolp
delcp = 1029.4 / nrowp
x = [round(x, 3) for x in np.linspace(50.0, 45.0, ncolp)]
topp = np.repeat(x, nrowp).reshape((15, 15)).T
z = [round(z, 3) for z in np.linspace(50.0, 0.0, nlayp + 1)]
botmp = [topp - z[len(z) - 2], topp - z[len(z) - 3], topp - z[0]]
idomainp = np.ones((nlayp, nrowp, ncolp), dtype=np.int32)
# Zero out where the child grid will reside
idomainp[0:2, 6:11, 2:8] = 0
# ------------------------------------------
# Common SFR data for all parent models
# ------------------------------------------
# Package_data information
sfrcells = [
(0, 0, 1),
(0, 1, 1),
(0, 2, 1),
(0, 2, 2),
(0, 3, 2),
(0, 4, 2),
(0, 4, 3),
(0, 5, 3),
(0, 8, 8),
(0, 8, 9),
(0, 8, 10),
(0, 8, 11),
(0, 7, 11),
(0, 7, 12),
(0, 6, 12),
(0, 6, 13),
(0, 6, 14),
(0, 5, 14),
]
rlen = [
65.613029,
72.488609,
81.424789,
35.850410,
75.027390,
90.887520,
77.565651,
74.860397,
120.44695,
112.31332,
109.00368,
91.234566,
67.486000,
24.603355,
97.547943,
104.97595,
8.9454498,
92.638367,
]
rwid = 5
rgrd1 = 0.12869035e-02
rgrd2 = 0.12780087e-02
rbtp = [
49.409676,
49.320812,
49.221775,
49.146317,
49.074970,
48.968212,
48.859821,
48.761742,
45.550678,
45.401943,
45.260521,
45.132568,
45.031143,
44.972298,
44.894241,
44.764832,
44.692032,
44.627121,
]
rbth = 1.5
rbhk = 0.1
man = 0.04
ustrf = 1.0
ndv = 0
# -----------------------------------------------
# Child model SFR data (common to all scenarios)
# -----------------------------------------------
connsc = []
for i in
|
np.arange(89)
|
numpy.arange
|
import numpy as np
from hmmlearn import hmm
def states_probability(a, states, initial=1.0):
    return initial * np.prod(a[states[:-1], states[1:]])
def forward_algorithm(a, b, observations, initial_probs=None):
state_n, _ = a.shape
current_probs = initial_probs
if initial_probs is None:
current_probs = np.full(state_n, 1.0 / state_n, dtype=np.float64)
for observation in observations:
observation_probs = b[:, observation]
current_probs = (a @ current_probs) * observation_probs
return current_probs.sum()
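def _demo_forward_algorithm():
    # Hedged worked example with toy numbers (not from the original code):
    # a 2-state HMM with transition matrix a and emission matrix b, scored on
    # a short observation sequence with the functions defined above.
    a = np.array([[0.7, 0.3],
                  [0.4, 0.6]])          # transition probabilities
    b = np.array([[0.9, 0.1],
                  [0.2, 0.8]])          # emission probabilities per state
    observations = np.array([0, 1, 0])  # observed symbol indices
    p_path = states_probability(a, np.array([0, 0, 1]))  # a[0, 0] * a[0, 1] = 0.21
    score = forward_algorithm(a, b, observations)        # summed forward probabilities
    return p_path, score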
class HMMGMMClassifier:
def __init__(self):
self.models = None
self.unique_labels = None
def fit(self, features, labels, *args, **kwargs):
self.unique_labels =
|
np.unique(labels)
|
numpy.unique
|
import doctest
import re
import pytest
import warnings
import numpy as np
from numpy.testing import assert_array_equal
from datascience import *
import pandas as pd
#########
# Utils #
#########
@pytest.fixture(scope='function')
def table():
"""Setup Scrabble table"""
return Table().with_columns([
'letter', ['a', 'b', 'c', 'z'],
'count', [9, 3, 3, 1],
'points', [1, 2, 2, 10],
])
@pytest.fixture(scope='function')
def table2():
"""Setup second table"""
return Table().with_columns([
['points', (1, 2, 3)],
['names', ('one', 'two', 'three')],
])
@pytest.fixture(scope='function')
def table3():
"""Setup third table; same labels as first but in a different order."""
return Table().with_columns([
'count', [0, 54, 5],
'points', [3, 10, 24],
'letter', ['x', 'y', 'z'],
])
@pytest.fixture(scope='function')
def table4():
"""Setup fourth table; three overlapping columns with table."""
return Table().with_columns([
'letter', ['a', 'b', 'c', '8', 'a'],
'count', [9, 3, 2, 0, 9],
'different label', [1, 4, 2, 1, 1],
'name', ['Gamma', 'Delta', 'Epsilon', 'Alpha', 'Beta']
])
@pytest.fixture(scope='function')
def table5():
"""Setup fifth table; has NaNs in it"""
return Table().with_columns([
'letter', ['a', 'b', 'c', 'd', 'y', 'z'],
'count', [9, 3, 3, 4, 2, 1],
'points', [1, 2, 2, 2, 4, 10],
'person_id', [np.float64('nan'), np.float64('nan'), np.float64('nan'), 1, 2, 3]
])
@pytest.fixture(scope='function')
def numbers_table():
"""Setup table containing only numbers"""
return Table().with_columns([
'count', [9, 3, 3, 1],
'points', [1, 2, 2, 10],
])
@pytest.fixture(scope='function')
def categories_table():
"""Setup a table with a column to serve as pivot keys and
a columns of values to bin for each key."""
return Table(['key', 'val']).with_rows([
['a', 1],
['a', 1],
['a', 2],
['b', 1],
['b', 2],
['b', 2]])
@pytest.fixture(scope='module')
def t():
"""Create one table for entire module"""
return table()
@pytest.fixture(scope='module')
def u():
"""Setup second alphanumeric table"""
return table2()
@pytest.fixture(scope='function')
def scrabble_table2():
"""Setup Scrabble table"""
return Table().with_columns([
'letter', ['a', 'b', 'c', 'z'],
'count', [9, 3, 3, 1],
'count_2', [9, 3, 3, 1],
'pointsplus1', [2, 3, 3, 11],
])
def assert_equal(string1, string2):
string1, string2 = str(string1), str(string2)
whitespace = re.compile(r'\s')
purify = lambda s: whitespace.sub('', s)
assert purify(string1) == purify(string2), "\n%s\n!=\n%s" % (string1, string2)
############
# Doctests #
############
def test_doctests():
results = doctest.testmod(tables, optionflags=doctest.NORMALIZE_WHITESPACE)
assert results.failed == 0
############
# Overview #
############
def test_basic(table):
"""Tests that t works"""
t = table
assert_equal(t, """
letter | count | points
a | 9 | 1
b | 3 | 2
c | 3 | 2
z | 1 | 10
""")
def test_column(table):
"""Test table.values()"""
t = table
assert_array_equal(t.column('letter'), np.array(['a', 'b', 'c', 'z']))
assert_array_equal(t.column(1), np.array([9, 3, 3, 1]))
with pytest.raises(ValueError):
t.column(-1)
with pytest.raises(ValueError):
t.column('abc')
def test_values():
t1 = Table().with_columns({
'row1': ['a', 'b', 'c'],
'row2': ['d', 'e', 'f'],
})
assert_array_equal(t1.values, np.array(t1.columns, None).T)
t2 = Table().with_columns({
'row1': ['x', 'y', 'z'],
'row2': [1, 2, 3],
})
assert_array_equal(t2.values, np.array(t2.columns, object).T)
def test_basic_points(table):
t = table
assert_array_equal(t['points'], np.array([1, 2, 2, 10]))
def test_basic_rows(table):
t = table
assert_equal(
t.rows[2],
"Row(letter='c', count=3, points=2)")
def test_row_conversion_to_np_array(table):
t = table
t_subset = t.select("count", "points")
assert_array_equal(np.array(t_subset.row(0)), np.array([9, 1]))
def test_select(table):
t = table
test = t.select('points', 1)
assert_equal(test, """
points | count
1 | 9
2 | 3
2 | 3
10 | 1
""")
def test_drop(table):
t = table
test = t.drop(['points', 1])
assert_equal(test, """
letter
a
b
c
z
""")
def test_take(table):
t = table
test = t.take([1, 2])
assert_equal(test, """
letter | count | points
b | 3 | 2
c | 3 | 2
""")
def test_take_slice(table):
t = table
test = t.take[1:3]
assert_equal(test, """
letter | count | points
b | 3 | 2
c | 3 | 2
""")
def test_take_slice_single(table):
t = table
test = t.take[1]
assert_equal(test, """
letter | count | points
b | 3 | 2
""")
def test_take_iterable(table):
t = table
test = t.take[0, 2]
assert_equal(test, """
letter | count | points
a | 9 | 1
c | 3 | 2
""")
def test_take_floating_args(table):
t = table
test = t.take(0, 2)
assert_equal(test, """
letter | count | points
a | 9 | 1
c | 3 | 2
""")
def test_exclude(table):
t = table
test = t.exclude([1, 3])
assert_equal(test, """
letter | count | points
a | 9 | 1
c | 3 | 2
""")
def test_exclude_slice(table):
t = table
test = t.exclude[1:3]
assert_equal(test, """
letter | count | points
a | 9 | 1
z | 1 | 10
""")
def test_exclude_slice_single(table):
t = table
test = t.exclude[1]
assert_equal(test, """
letter | count | points
a | 9 | 1
c | 3 | 2
z | 1 | 10
""")
def test_exclude_iterable(table):
t = table
test = t.exclude[0, 2]
assert_equal(test, """
letter | count | points
b | 3 | 2
z | 1 | 10
""")
def test_exclude_floating_args(table):
t = table
test = t.exclude(1, 3)
assert_equal(test, """
letter | count | points
a | 9 | 1
c | 3 | 2
""")
def test_stats(table):
t = table
test = t.stats()
assert_equal(test, """
statistic | letter | count | points
min | a | 1 | 1
max | z | 9 | 10
median | | 3 | 2
sum | | 16 | 15
""")
def test_stats_with_numpy(table):
t = table
test = t.stats([np.mean, np.std, np.var])
assert_equal(test, """
statistic | letter | count | points
mean | | 4 | 3.75
std | | 3 | 3.63146
var | | 9 | 13.1875""")
def test_where(table):
t = table
test = t.where('points', 2)
assert_equal(test, """
letter | count | points
b | 3 | 2
c | 3 | 2
""")
test = t.where(2, 2)
assert_equal(test, """
letter | count | points
b | 3 | 2
c | 3 | 2
""")
def test_where_conditions(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.where(t['totals'] > 8)
assert_equal(test, """
letter | count | points | totals
a | 9 | 1 | 9
z | 1 | 10 | 10
""")
def test_where_predicates(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.where('totals', are.between(9, 11))
assert_equal(test, """
letter | count | points | totals
a | 9 | 1 | 9
z | 1 | 10 | 10
""")
@pytest.mark.filterwarnings("error")
def test_where_predicates_nowarning_on_str(table):
t = table
test = t.where('letter', are.equal_to('a'))
assert_equal(test, """
letter | count | points
a | 9 | 1
""")
def test_where_predicates_warning(table, capsys):
t1 = table.copy()
count1 = t1['count'] - 1
count1[0] += 1
t1['count1'] = count1
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with (pytest.raises(ValueError)):
test = t1.where('count', are.equal_to(t1.column("count1")))
assert len(w) == 1
assert "Do not pass an array or list to a predicate." in str(w[-1].message)
test = t1.where('count', are.equal_to, t1.column('count1'))
assert_equal(test, """
letter | count | points | count1
a | 9 | 1 | 9
""")
def test_sort(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.sort('points')
assert_equal(test, """
letter | count | points | totals
a | 9 | 1 | 9
b | 3 | 2 | 6
c | 3 | 2 | 6
z | 1 | 10 | 10
""")
test = t.sort(3)
assert_equal(test, """
letter | count | points | totals
b | 3 | 2 | 6
c | 3 | 2 | 6
a | 9 | 1 | 9
z | 1 | 10 | 10
""")
def test_sort_args(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.sort('points', descending=True, distinct=True)
assert_equal(test, """
letter | count | points | totals
z | 1 | 10 | 10
b | 3 | 2 | 6
a | 9 | 1 | 9
""")
def test_sort_descending(table):
sorted_table = table.sort('points', descending=True)
assert_equal(sorted_table, """
letter | count | points
z | 1 | 10
b | 3 | 2
c | 3 | 2
a | 9 | 1
""")
def test_sort_syntax(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.sort(-t['totals'])
assert_equal(test, """
letter | count | points | totals
z | 1 | 10 | 10
a | 9 | 1 | 9
b | 3 | 2 | 6
c | 3 | 2 | 6
""")
def test_group(table, table5):
t = table
test = t.group('points')
assert_equal(test, """
points | count
1 | 1
2 | 2
10 | 1
""")
test = t.group(2)
assert_equal(test, """
points | count
1 | 1
2 | 2
10 | 1
""")
def test_group_nans(table5):
t = table5
test = t.group('person_id')
assert_equal(test, """
person_id | count
nan | 3
1 | 1
2 | 1
3 | 1
""")
def test_group_with_func(table):
t = table
t['totals'] = t['points'] * t['count']
test = t.group('points', sum)
assert_equal(test, """
points | letter sum | count sum | totals sum
1 | | 9 | 9
2 | | 6 | 12
10 | | 1 | 10
""")
def test_groups(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.groups(['points', 'early'])
assert_equal(test, """
points | early | count
1 | False | 1
1 | True | 1
2 | True | 2
10 | False | 1
""")
def test_groups_using_group(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.group(['points', 'early'])
assert_equal(test, """
points | early | count
1 | False | 1
1 | True | 1
2 | True | 2
10 | False | 1
""")
def test_groups_list(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.groups(['points', 'early'], lambda s: s)
assert_equal(test, """
points | early | letter | count | totals
1 | False | ['e'] | [12] | [12]
1 | True | ['a'] | [9] | [9]
2 | True | ['b' 'c'] | [3 3] | [6 6]
10 | False | ['z'] | [1] | [10]
""")
def test_groups_collect(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.select(['points', 'early', 'count']).groups(['points', 'early'], sum)
assert_equal(test, """
points | early | count sum
1 | False | 12
1 | True | 9
2 | True | 6
10 | False | 1
""")
def test_groups_nans(table5):
t = table5
test = t.group(['person_id', 'points'])
assert_equal(test, """
    person_id | points | count
nan | 1 | 1
nan | 2 | 2
1 | 2 | 1
2 | 4 | 1
3 | 10 | 1
""")
def test_join(table, table2):
"""Tests that join works, not destructive"""
t = table
u = table2
t['totals'] = t['points'] * t['count']
assert_equal(t.join('points', u), """
points | letter | count | totals | names
1 | a | 9 | 9 | one
2 | b | 3 | 6 | two
2 | c | 3 | 6 | two
""")
assert_equal(u, """
points | names
1 | one
2 | two
3 | three
""")
assert_equal(t, """
letter | count | points | totals
a | 9 | 1 | 9
b | 3 | 2 | 6
c | 3 | 2 | 6
z | 1 | 10 | 10
""")
def test_join_html(table, table2):
"""Test that join doesn't crash with formatting."""
t = table
u = table2
t = t.set_format('count', NumberFormatter)
t.as_html()
u.join('points', t, 'points').as_html()
def test_pivot_counts(table, table2):
t = table.copy()
u = table2
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.pivot('points', 'early')
assert_equal(test, """
early | 1 | 2 | 10
False | 1 | 0 | 1
True | 1 | 2 | 0
""")
def test_pivot_counts_with_indices(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
test = t.pivot(2, 4)
assert_equal(test, """
early | 1 | 2 | 10
False | 1 | 0 | 1
True | 1 | 2 | 0
""")
def test_pivot_values(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
t['exists'] = 2
summed = t.pivot('points', 'early', 'exists', sum)
assert_equal(summed, """
early | 1 | 2 | 10
False | 2 | 0 | 2
True | 2 | 4 | 0
""")
maxed = t.pivot('points', 'early', 'exists', max, -1)
assert_equal(maxed, """
early | 1 | 2 | 10
False | 2 | -1 | 2
True | 2 | 2 | -1
""")
def test_pivot_multiple_rows(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
t['late'] = t['letter'] > 'c'
t['exists'] = 1
test = t.pivot('points', ['early', 'late'], 'exists', sum)
assert_equal(test, """
early | late | 1 | 2 | 10
False | True | 1 | 0 | 1
True | False | 1 | 2 | 0
""")
def test_pivot_sum(table):
t = table.copy()
t['totals'] = t['points'] * t['count']
t.append(('e', 12, 1, 12))
t['early'] = t['letter'] < 'd'
t['exists'] = 1
test = t.pivot('points', 'early', 'exists', sum)
assert_equal(test, """
early | 1 | 2 | 10
False | 1 | 0 | 1
True | 1 | 2 | 0
""")
def test_apply(table):
t = table.copy()
assert_array_equal(t.apply(lambda x, y: x * y, 'count', 'points'),
|
np.array([9, 6, 6, 10])
|
numpy.array
|
import time
import numpy as np
from deeprl_lib.core import ExpReplay
def test_reset_buffer():
bf = ExpReplay(10000)
shapes = (
(2, 5),
(2, 2),
(1,),
(3, 9)
)
types = (
np.uint8,
np.float32,
int,
bool
)
bf.reset_buffer(shapes, types)
assert bf.full == False
assert bf.index == 0
assert bf.buffer_state.shape == (10000, 2, 5)
assert bf.buffer_action.shape == (10000, 2, 2)
assert bf.buffer_reward.shape == (10000, 1)
assert bf.buffer_done.shape == (10000, 3, 9)
assert bf.buffer_state.dtype == np.uint8
assert bf.buffer_action.dtype == np.float32
assert bf.buffer_reward.dtype == int
assert bf.buffer_done.dtype == bool
def test_append_one():
bf = ExpReplay(2)
shapes = ((2, 2), (1,), (1,), (1,))
types = (np.float32, np.int32, np.uint8, bool)
bf.reset_buffer(shapes, types)
state = np.zeros((2, 2), dtype=np.float32) + 5
action = np.array([3], dtype=np.int32)
reward = np.array([2], dtype=np.uint8)
done =
|
np.array([True], dtype=bool)
|
numpy.array
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
class MCLoss(nn.Module):
def __init__(self, num_classes=200, cnums=[10, 11], cgroups=[152, 48], p=0.4, lambda_=10):
super().__init__()
if isinstance(cnums, int): cnums = [cnums]
elif isinstance(cnums, tuple): cnums = list(cnums)
        assert isinstance(cnums, list), "Error: cnums should be int or a list of int, not {}".format(type(cnums))
        assert sum(cgroups) == num_classes, "Error: sum(cgroups) != num_classes."
self.cnums = cnums
self.cgroups = cgroups
self.p = p
self.lambda_ = lambda_
self.celoss = nn.CrossEntropyLoss()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, feat, targets):
n, c, h, w = feat.size()
sp = [0]
tmp = np.array(self.cgroups) * np.array(self.cnums)
for i in range(len(self.cgroups)):
sp.append(sum(tmp[:i + 1]))
# L_div branch
feature = feat
feat_group = []
for i in range(1, len(sp)):
feat_group.append(F.softmax(feature[:, sp[i - 1]:sp[i]].view(n, -1, h * w), dim=2).view(n, -1, h, w)) # Softmax
l_div = 0.
for i in range(len(self.cnums)):
features = feat_group[i]
features = F.max_pool2d(features.view(n, -1, h * w), kernel_size=(self.cnums[i], 1), stride=(self.cnums[i], 1))
l_div = l_div + (1.0 - torch.mean(torch.sum(features, dim=2)) / (self.cnums[i] * 1.0))
# L_dis branch
mask = self._gen_mask(self.cnums, self.cgroups, self.p).expand_as(feat)
if feat.is_cuda: mask = mask.cuda()
feature = mask * feat # CWA
feat_group = []
for i in range(1, len(sp)):
feat_group.append(feature[:, sp[i - 1]:sp[i]])
dis_branch = []
for i in range(len(self.cnums)):
features = feat_group[i]
features = F.max_pool2d(features.view(n, -1, h * w), kernel_size=(self.cnums[i], 1), stride=(self.cnums[i], 1))
dis_branch.append(features)
dis_branch = torch.cat(dis_branch, dim=1).view(n, -1, h, w) # CCMP
dis_branch = self.avgpool(dis_branch).view(n, -1) # GAP
l_dis = self.celoss(dis_branch, targets)
return l_dis + self.lambda_ * l_div
def _gen_mask(self, cnums, cgroups, p):
"""
:param cnums:
:param cgroups:
:param p: float, probability of random deactivation
"""
bar = []
for i in range(len(cnums)):
foo =
|
np.ones((cgroups[i], cnums[i]), dtype=np.float32)
|
numpy.ones
|
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* K-NN
Input to Proposed Solution:
---------------------------
* Directories of training and testing data in csv file format
* These two types of data should be stored in n x m pattern in csv file format.
Typical Example:
----------------
n x m samples in training csv file (n: number of subjects, m: number of connectivity features)
k x s samples in testing csv file (k: number of subjects, s: number of connectivity features)
Output of Proposed Solution:
----------------------------
* Predictions generated by learning model for testing set
* They are stored in "results_team12.csv" file. (Change the name file if needed)
Code Owner:
-----------
* Copyright © Team 12. All rights reserved.
* Copyright © Istanbul Technical University, Learning From Data Spring/Fall 2020. All rights reserved.
"""
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.neighbors import NearestNeighbors
from scipy.stats.stats import pearsonr
import random as r
r.seed(1)
np.random.seed(1)
import warnings
warnings.filterwarnings('ignore')
def load_data(csv):
"""
    The method reads one data set (train or test) from its csv file,
    drops the 'ID' index column and returns the remaining columns as
    an n x m numpy array.
    Parameters
    ----------
    csv: path of the csv file in which the data set is located
"""
# reading the data from the csv files
df = pd.read_csv(csv, sep=',')
# ignoring the index column of the data (0,...,149 or 0,...,79)
df = df.drop(columns=['ID'])
df_np = df.to_numpy()
return df_np
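def _demo_load_data():
    # Small usage sketch, assuming a csv laid out like the competition files:
    # an 'ID' index column followed by the feature columns.
    demo = pd.DataFrame({'ID': [0, 1, 2],
                         'f1': [0.10, 0.20, 0.30],
                         'f2': [1.00, 0.50, 0.25]})
    demo.to_csv('demo_connectivity.csv', index=False)
    arr = load_data('demo_connectivity.csv')   # -> shape (3, 2) numpy array
    return arr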
def train_model(train_t0, neighbourCount):
"""
The method creates a learning model and trains it by using training data.
Parameters
----------
train_t0: x
    neighbourCount: number of neighbours in KNN
"""
nbrs = []
train_t0_single = np.transpose(train_t0)
for i in range(train_t0_single.shape[0]):
nbrs.append(NearestNeighbors(n_neighbors=neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1)))
return nbrs
def predict(train_t0, train_t1, test_t0, nbrs):
"""
The method makes predictions for testing data samples by using trained learning model.
Parameters
----------
train_t0: x
train_t1: y
test_t0: x_test
    nbrs: Nearest Neighbors model for each feature
"""
train_t0_single = np.transpose(train_t0)
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
for i in range(train_t0_single.shape[0]):
distances, indices = nbrs[i].kneighbors(test_t0_single[i].reshape(-1,1))
distances = np.ones_like(distances)* 0.7 - distances
mul = np.multiply(distances, train_t1_single[i,indices])
pred = np.divide(np.mean(mul, axis =1), np.mean(distances, axis = 1))
prediction[:,i] = pred.reshape(-1)
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0
return prediction
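def _demo_knn_pipeline():
    # Hedged end-to-end sketch on synthetic data (not the competition csv
    # files): one NearestNeighbors model per feature is fit on t0, then t1 is
    # predicted for unseen subjects with the distance-weighted average above.
    rng = np.random.RandomState(1)
    train_t0 = rng.rand(80, 12)                    # 80 subjects x 12 features
    train_t1 = train_t0 + 0.05*rng.randn(80, 12)   # t1 stays close to t0
    test_t0 = rng.rand(20, 12)
    nbrs = train_model(train_t0, neighbourCount=5)
    pred_t1 = predict(train_t0, train_t1, test_t0, nbrs)
    return pred_t1                                 # shape (20, 12)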
def cv5(data_t0, data_t1, neighbourCount):
kf = KFold(n_splits=5 , shuffle = True, random_state=1)
prediction_all = np.zeros_like(data_t1)
mses= []
maes = []
pears = []
for trainIndex, testIndex in kf.split(data_t0):
train_t0, test_t0 = data_t0[trainIndex], data_t0[testIndex] #Split Data into train and test sets
train_t1, test_t1 = data_t1[trainIndex], data_t1[testIndex]
train_t0_single = np.transpose(train_t0) # Use features as rows and subjects as columns
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
preds = []
for i in range(train_t0_single.shape[0]): #Loop through each feature
nbrs = NearestNeighbors(n_neighbors= neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1))
distances, indices = nbrs.kneighbors(test_t0_single[i].reshape(-1,1))# Calculate the distances and indices of K closest neighbours of test subjects and train subjects in t0
distances = np.ones_like(distances)* 0.7 - distances # Set distances to (0.7 - d). Neighbours with low distance get larger values and vice versa
mul = np.multiply(distances, train_t1_single[i,indices]) # Use the changed distances as weights and multiply the corresponding t1 of the neighbours
pred = np.divide(np.mean(mul,axis =1),np.mean(distances, axis = 1)) #Take the mean of the weighted t1's and divide by the mean of distances to normalize
prediction[:,i] = pred.reshape(-1) #This is the prediction for this feature acroos all test subjects
preds.append(pred.reshape(-1))
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0 # Set nan locations to 0
preds = np.asarray(preds)
preds =
|
np.transpose(preds)
|
numpy.transpose
|
import sys
import timeit
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import preprocess as pp
class AtomNet(nn.Module):
def __init__(self, dim):
super(AtomNet, self).__init__()
self.fc1 = nn.Linear(dim, dim)
self.fc2 = nn.Linear(dim, dim)
self.fc3 = nn.Linear(dim, dim)
self.fc4 = nn.Linear(dim, dim)
self.fc5 = nn.Linear(dim, dim)
self.fc6 = nn.Linear(dim, dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = h + F.relu(self.fc2(h))
h = h + F.relu(self.fc3(h))
h = h + F.relu(self.fc4(h))
h = h + F.relu(self.fc5(h))
return self.fc6(h)
class MolecularGraphNeuralNetwork(nn.Module):
def __init__(self, N_atoms, dim, layer_hidden, layer_output):
super(MolecularGraphNeuralNetwork, self).__init__()
self.embed_atom = nn.Embedding(N_atoms, dim)
self.gamma = nn.ModuleList([nn.Embedding(N_atoms, 1)
for _ in range(layer_hidden)])
self.W_atom = nn.ModuleList([nn.Linear(dim, dim)
for _ in range(layer_hidden)])
self.W_output = nn.ModuleList([nn.Linear(dim, dim)
for _ in range(layer_output)])
self.W_property = nn.Linear(dim, 1)
self.AtomNet = AtomNet(dim)
def pad(self, matrices, pad_value):
"""Pad the list of matrices
with a pad_value (e.g., 0) for batch processing.
For example, given a list of matrices [A, B, C],
we obtain a new matrix [A00, 0B0, 00C],
where 0 is the zero (i.e., pad value) matrix.
"""
shapes = [m.shape for m in matrices]
M, N = sum([s[0] for s in shapes]), sum([s[1] for s in shapes])
zeros = torch.FloatTensor(np.zeros((M, N))).to(device)
pad_matrices = pad_value + zeros
i, j = 0, 0
for k, matrix in enumerate(matrices):
m, n = shapes[k]
pad_matrices[i:i+m, j:j+n] = matrix
i += m
j += n
return pad_matrices
def update(self, matrix, vectors, layer):
hidden_vectors = torch.relu(self.W_atom[layer](vectors))
return self.AtomNet(hidden_vectors + torch.matmul(matrix, hidden_vectors))
def sum(self, vectors, axis):
sum_vectors = [torch.sum(v, 0) for v in torch.split(vectors, axis)]
return torch.stack(sum_vectors)
def forward(self, inputs):
"""Cat or pad each input data for batch processing."""
atoms, distance_matrices, molecular_sizes = inputs
atoms = torch.cat(atoms)
distance_matrix = self.pad(distance_matrices, 1e6)
"""GNN layer (update the atom vectors)."""
atom_vectors = self.embed_atom(atoms)
for l in range(layer_hidden):
gammas = torch.squeeze(self.gamma[l](atoms))
M = torch.exp(-gammas*distance_matrix**2)
atom_vectors = self.update(M, atom_vectors, l)
atom_vectors = F.normalize(atom_vectors, 2, 1) # normalize.
"""Output layer."""
for l in range(layer_output):
atom_vectors = torch.relu(self.W_output[l](atom_vectors))
"""Molecular vector by sum of the atom vectors."""
molecular_vectors = self.sum(atom_vectors, molecular_sizes)
"""Molecular property."""
properties = self.W_property(molecular_vectors)
return properties
def __call__(self, data_batch, train):
inputs = data_batch[:-1]
correct_properties = torch.cat(data_batch[-1])
if train:
predicted_properties = self.forward(inputs)
loss = F.mse_loss(predicted_properties, correct_properties)
return loss
else:
with torch.no_grad():
predicted_properties = self.forward(inputs)
ts = correct_properties.to('cpu').data.numpy()
ys = predicted_properties.to('cpu').data.numpy()
ts, ys = np.concatenate(ts), np.concatenate(ys)
sum_absolute_error = sum(np.abs(ts-ys))
return sum_absolute_error
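def _demo_pad():
    # Minimal sketch of the block-diagonal padding described in pad()'s
    # docstring; pad() reads the module-level name `device`, which is assumed
    # to be set by the training script (fall back to cpu here if it is not).
    global device
    device = globals().get('device', torch.device('cpu'))
    model = MolecularGraphNeuralNetwork(N_atoms=10, dim=4,
                                        layer_hidden=2, layer_output=1)
    A = torch.ones(2, 2)
    B = 2.*torch.ones(3, 3)
    out = model.pad([A, B], 0)   # 5x5 tensor: A top-left, B bottom-right, 0 elsewhere
    return out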
class Trainer(object):
def __init__(self, model):
self.model = model
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
def train(self, dataset):
|
np.random.shuffle(dataset)
|
numpy.random.shuffle
|
# Copyright 2021 Mechanics of Microstructures Group
# at The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from defdap import plotting
from typing import Union, Tuple, List, Optional
class Quat(object):
"""Class used to define and perform operations on quaternions. These
are interpreted in the passive sense.
"""
__slots__ = ['quatCoef']
def __init__(self, *args, allow_southern: Optional[bool] = False) -> None:
"""
Construct a Quat object from 4 quat coefficients or an array of
quat coefficients.
Parameters
----------
*args
Variable length argument list.
allow_southern
if False, move quat to northern hemisphere.
"""
# construct with array of quat coefficients
if len(args) == 1:
if len(args[0]) != 4:
raise TypeError("Arrays input must have 4 elements")
self.quatCoef = np.array(args[0], dtype=float)
# construct with quat coefficients
elif len(args) == 4:
self.quatCoef = np.array(args, dtype=float)
else:
raise TypeError("Incorrect argument length. Input should be "
"an array of quat coefficients or idividual "
"quat coefficients")
# move to northern hemisphere
if not allow_southern and self.quatCoef[0] < 0:
self.quatCoef = self.quatCoef * -1
@classmethod
def fromEulerAngles(cls, ph1: float, phi: float, ph2: float) -> 'Quat':
"""Create a quat object from 3 Bunge euler angles.
Parameters
----------
ph1
First Euler angle, rotation around Z in radians.
phi
Second Euler angle, rotation around new X in radians.
ph2
Third Euler angle, rotation around new Z in radians.
Returns
-------
defdap.quat.Quat
Initialised Quat object.
"""
# calculate quat coefficients
quatCoef = np.array([
np.cos(phi / 2.0) * np.cos((ph1 + ph2) / 2.0),
-np.sin(phi / 2.0) * np.cos((ph1 - ph2) / 2.0),
-np.sin(phi / 2.0) * np.sin((ph1 - ph2) / 2.0),
-np.cos(phi / 2.0) * np.sin((ph1 + ph2) / 2.0)
], dtype=float)
# call constructor
return cls(quatCoef)
@classmethod
def fromAxisAngle(cls, axis: np.ndarray, angle: float) -> 'Quat':
"""Create a quat object from a rotation around an axis. This
creates a quaternion to represent the passive rotation (-ve axis).
Parameters
----------
axis
Axis that the rotation is applied around.
angle
Magnitude of rotation in radians.
Returns
-------
defdap.quat.Quat
Initialised Quat object.
"""
# normalise the axis vector
axis = np.array(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
# calculate quat coefficients
quatCoef = np.zeros(4, dtype=float)
quatCoef[0] = np.cos(angle / 2)
quatCoef[1:4] = -np.sin(angle / 2) * axis
# call constructor
return cls(quatCoef)
def eulerAngles(self) -> np.ndarray:
"""Calculate the Euler angle representation for this rotation.
Returns
-------
eulers : numpy.ndarray, shape 3
Bunge euler angles (in radians).
References
----------
<NAME>. et al., 'Conversion of EBSD data by a quaternion
based algorithm to be used for grain structure simulations',
Technische Mechanik, 30(4)401 – 413
<NAME> al., 'Consistent representations of and
conversions between 3D rotations',
Model. Simul. Mater. Sci. Eng., 23(8)
"""
eulers = np.empty(3, dtype=float)
q = self.quatCoef
q03 = q[0]**2 + q[3]**2
q12 = q[1]**2 + q[2]**2
chi = np.sqrt(q03 * q12)
if chi == 0 and q12 == 0:
eulers[0] = np.arctan2(-2 * q[0] * q[3], q[0]**2 - q[3]**2)
eulers[1] = 0
eulers[2] = 0
elif chi == 0 and q03 == 0:
eulers[0] = np.arctan2(2 * q[1] * q[2], q[1]**2 - q[2]**2)
eulers[1] = np.pi
eulers[2] = 0
else:
cosPh1 = (-q[0] * q[1] - q[2] * q[3]) / chi
sinPh1 = (-q[0] * q[2] + q[1] * q[3]) / chi
cosPhi = q[0]**2 + q[3]**2 - q[1]**2 - q[2]**2
sinPhi = 2 * chi
cosPh2 = (-q[0] * q[1] + q[2] * q[3]) / chi
sinPh2 = (q[1] * q[3] + q[0] * q[2]) / chi
eulers[0] = np.arctan2(sinPh1, cosPh1)
eulers[1] = np.arctan2(sinPhi, cosPhi)
eulers[2] = np.arctan2(sinPh2, cosPh2)
if eulers[0] < 0:
eulers[0] += 2 * np.pi
if eulers[2] < 0:
eulers[2] += 2 * np.pi
return eulers
def rotMatrix(self) -> np.ndarray:
"""Calculate the rotation matrix representation for this rotation.
Returns
-------
rotMatrix : numpy.ndarray, shape (3, 3)
Rotation matrix.
References
----------
<NAME>. et al., 'Conversion of EBSD data by a quaternion
based algorithm to be used for grain structure simulations',
Technische Mechanik, 30(4)401 – 413
<NAME>. et al., 'Consistent representations of and
conversions between 3D rotations',
Model. Simul. Mater. Sci. Eng., 23(8)
"""
rotMatrix = np.empty((3, 3), dtype=float)
q = self.quatCoef
qbar = q[0]**2 - q[1]**2 - q[2]**2 - q[3]**2
rotMatrix[0, 0] = qbar + 2 * q[1]**2
rotMatrix[0, 1] = 2 * (q[1] * q[2] - q[0] * q[3])
rotMatrix[0, 2] = 2 * (q[1] * q[3] + q[0] * q[2])
rotMatrix[1, 0] = 2 * (q[1] * q[2] + q[0] * q[3])
rotMatrix[1, 1] = qbar + 2 * q[2]**2
rotMatrix[1, 2] = 2 * (q[2] * q[3] - q[0] * q[1])
rotMatrix[2, 0] = 2 * (q[1] * q[3] - q[0] * q[2])
rotMatrix[2, 1] = 2 * (q[2] * q[3] + q[0] * q[1])
rotMatrix[2, 2] = qbar + 2 * q[3]**2
return rotMatrix
# show components when the quat is printed
def __repr__(self) -> str:
return "[{:.4f}, {:.4f}, {:.4f}, {:.4f}]".format(*self.quatCoef)
def __str__(self) -> str:
return self.__repr__()
def _plotIPF(
self,
direction: np.ndarray,
symGroup: str,
**kwargs
) -> 'plotting.PolePlot':
        return Quat.plotIPF([self], direction, symGroup, **kwargs)
# overload * operator for quaternion product and vector product
def __mul__(self, right: 'Quat', allow_southern: bool = False) -> 'Quat':
if isinstance(right, type(self)): # another quat
newQuatCoef = np.zeros(4, dtype=float)
newQuatCoef[0] = (
self.quatCoef[0] * right.quatCoef[0] -
np.dot(self.quatCoef[1:4], right.quatCoef[1:4])
)
newQuatCoef[1:4] = (
self.quatCoef[0] * right.quatCoef[1:4] +
right.quatCoef[0] * self.quatCoef[1:4] +
|
np.cross(self.quatCoef[1:4], right.quatCoef[1:4])
|
numpy.cross
|
#!/usr/bin/env python
# coding: utf-8
# # Use keras to classify Sea Lions
#
# - I am using the first picture to extract Sea Lion coordinates using blob detection
# - I extract 32 by 32 images centered on the extracted coordinates
# - I train a simple keras model
#
# **The test accuracy is for Sea Lions in the first image only and without negative examples**
# In[1]:
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import skimage.feature
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, Cropping2D
from keras.utils import np_utils
get_ipython().run_line_magic('matplotlib', 'inline')
# ### Initialize variables
# In[2]:
classes = ["adult_males", "subadult_males", "adult_females", "juveniles", "pups"]
file_names = os.listdir("../input/Train/")
file_names = sorted(file_names, key=lambda
item: (int(item.partition('.')[0]) if item[0].isdigit() else float('inf'), item))
# select a subset of files to run on
file_names = file_names[0:1]
# dataframe to store results in
coordinates_df = pd.DataFrame(index=file_names, columns=classes)
# ### Extract coordinates
# In[3]:
for filename in file_names:
# read the Train and Train Dotted images
image_1 = cv2.imread("../input/TrainDotted/" + filename)
image_2 = cv2.imread("../input/Train/" + filename)
# absolute difference between Train and Train Dotted
image_3 = cv2.absdiff(image_1,image_2)
# mask out blackened regions from Train Dotted
mask_1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2GRAY)
mask_1[mask_1 < 20] = 0
mask_1[mask_1 > 0] = 255
mask_2 = cv2.cvtColor(image_2, cv2.COLOR_BGR2GRAY)
mask_2[mask_2 < 20] = 0
mask_2[mask_2 > 0] = 255
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_1)
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_2)
# convert to grayscale to be accepted by skimage.feature.blob_log
image_3 = cv2.cvtColor(image_3, cv2.COLOR_BGR2GRAY)
# detect blobs
blobs = skimage.feature.blob_log(image_3, min_sigma=3, max_sigma=4, num_sigma=1, threshold=0.02)
adult_males = []
subadult_males = []
pups = []
juveniles = []
adult_females = []
for blob in blobs:
# get the coordinates for each blob
y, x, s = blob
# get the color of the pixel from Train Dotted in the center of the blob
g,b,r = image_1[int(y)][int(x)][:]
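        # note: OpenCV stores pixels in BGR order, so here g actually holds the
        # blue channel, b the green channel and r the red channel; the colour
        # comments below refer to the true dot colour in Train Dotted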
# decision tree to pick the class of the blob by looking at the color in Train Dotted
if r > 200 and g < 50 and b < 50: # RED
adult_males.append((int(x),int(y)))
elif r > 200 and g > 200 and b < 50: # MAGENTA
subadult_males.append((int(x),int(y)))
elif r < 100 and g < 100 and 150 < b < 200: # GREEN
pups.append((int(x),int(y)))
elif r < 100 and 100 < g and b < 100: # BLUE
juveniles.append((int(x),int(y)))
elif r < 150 and g < 50 and b < 100: # BROWN
adult_females.append((int(x),int(y)))
coordinates_df["adult_males"][filename] = adult_males
coordinates_df["subadult_males"][filename] = subadult_males
coordinates_df["adult_females"][filename] = adult_females
coordinates_df["juveniles"][filename] = juveniles
coordinates_df["pups"][filename] = pups
# ### Extract 32 by 32 images
# In[4]:
x = []
y = []
for filename in file_names:
image = cv2.imread("../input/Train/" + filename)
for lion_class in classes:
for coordinates in coordinates_df[lion_class][filename]:
thumb = image[coordinates[1]-16:coordinates[1]+16,coordinates[0]-16:coordinates[0]+16,:]
if np.shape(thumb) == (32, 32, 3):
x.append(thumb)
y.append(lion_class)
x = np.array(x)
y =
|
np.array(y)
|
numpy.array
|
import sklearn
import os
import numpy as np
from discovery_imaging_utils import dictionary_utils
import matplotlib.pyplot as plt
def calc_triple_network_model(parcellated_timeseries, parcel_ids):
salience_ids = []
control_ids = []
dmn_ids = []
salience_identifier = 'SalVentAttnA'
executive_control_identifier = 'ContA'
dmn_identifier = 'DefaultA'
for i, temp_label in enumerate(parcel_ids):
if salience_identifier in temp_label:
salience_ids.append(i)
if executive_control_identifier in temp_label:
control_ids.append(i)
if dmn_identifier in temp_label:
dmn_ids.append(i)
salience_signal = np.mean(parcellated_timeseries[salience_ids,:],axis=0)
control_signal = np.mean(parcellated_timeseries[control_ids,:], axis=0)
dmn_signal = np.mean(parcellated_timeseries[dmn_ids,:], axis=0)
nii_mean, nii_std, nii_corr, z_corr_1, z_corr_2 = calc_network_interaction_index(control_signal, dmn_signal, salience_signal, 0.8)
return nii_mean, nii_std, nii_corr
def calc_network_interaction_index(timeseries_1, timeseries_2, timeseries_a, TR, window_length_seconds=40, slide_step_seconds=2, decay_constant=0.333):
import numpy as np
#This function calculates the network interaction index as defined
#by the paper Dysregulated Brain Dynamics in a Triple-Network Saliency
#Model of Schizophrenia and Its Relation to Psychosis published in Biological
#Psychiatry by Supekar et. al., which follows from Time-Resolved Resting-State
#Brain Networks published in PNAS by Zalesky et. al.
#For the triple salience model, timeseries_1 should be a cleaned central executive
#network time signal, timeseries_2 should be a default mode time signal, and
#timeseries_a should be the salience signal. TR must be defined. The window length
#defaults to 40s, and slide step defaults to 2s. The decay constant for the window
#weights defaults to 0.333, and higher values make the weighting approach linearity.
    #The window length and slide step are truncated to a whole number of TRs.
#The function will output a nii_mean, nii_std, and nii_corr, where (without including
#notation for the decaying weights) the values are described as:
# mean_over_sliding_windows(zcorr_i(timeseries_1, timeseries_a) - z_corr_i(timeseries_2, timeseries_a))
# std_over_sliding_windows(zcorr_i(timeseries_1, timeseries_a) - z_corr_i(timeseries_2, timeseries_a))
# corr_over_sliding_windows(z_corr_i(timeseries_1, timeseries_a), z_corr_i(timeseries_2, timeseries_a))
#The function will calculate nii_mean, nii_std, and nii_corr which is the
#Calculate window length and slide step length in number of TRs
window_length = int(window_length_seconds/TR)
slide_step_length = int(slide_step_seconds/TR)
#Calculate the number of windows and make arrays
#to store the correlations within each window
num_steps = int(np.floor((len(timeseries_1) - window_length)/slide_step_length))
corr_1 = np.zeros(num_steps)
corr_2 = np.zeros(num_steps)
#Calculate the tapered weights for the sliding windows
weights = calc_tapered_weights(window_length, decay_constant)
#Calculate the pearson product moment correlation
#for each window
for i in range(len(corr_1)):
beginning = int(i*slide_step_length)
end = int(i*slide_step_length+window_length)
corr_1[i] = calc_pearson_product_moment_correlation(timeseries_1[beginning:end], timeseries_a[beginning:end], weights)
corr_2[i] = calc_pearson_product_moment_correlation(timeseries_2[beginning:end], timeseries_a[beginning:end], weights)
#Calculate fisher transformation
z_corr_1 = np.arctanh(corr_1)
z_corr_2 = np.arctanh(corr_2)
#Calculate mean difference, std difference, and corr across all windows
nii_mean = np.mean(np.subtract(z_corr_1, z_corr_2))
nii_std = np.std(np.subtract(z_corr_1, z_corr_2))
nii_corr = np.corrcoef(z_corr_1, z_corr_2)[1,0]
return nii_mean, nii_std, nii_corr, np.mean(z_corr_1), np.mean(z_corr_2)
#This function calculates tapered weights for sliding-window
#analyses as described in "Time-Resolved Resting-State Brain
#Networks" by Zalesky et. al in PNAS 2014
def calc_tapered_weights(window_length, decay_constant = 0.333):
#Calculates tapered weights for sliding window connectivity
#analyses. Uses exponentially tapered window as defined in:
#"Time-Resolved Resting-State Brain Networks" by Zalesky et. al
#in PNAS 2014. Window length should be in number of TRs, and decay
#constant should be relative to window_length. Default decay_constant
#is 0.333, which is equivalent to one third of the window_length.
#Returns an array with the weights for each timepoint.
#Decay constants << 1 will be more non-linear, and decay constants
# >> 1 will approach linearity
if decay_constant < 0:
raise NameError('Error: Decay Constant must be positive')
decay_constant = window_length*decay_constant
w0 = (1 - np.exp(-1/decay_constant))/(1 - np.exp(-1*window_length/decay_constant))
window_indices = np.linspace(1, window_length, window_length, dtype=int)
weights = np.zeros(len(window_indices))
for i in window_indices:
weights[i - 1] = w0*np.exp((i - len(weights))/decay_constant)
return weights
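# Illustrative check (added here, not part of the original analysis code): by
# construction w0 normalizes the exponential taper, so the weights sum to one
# and increase toward the most recent sample in the window.
def _demo_tapered_weights(window_length=50, decay_constant=0.333):
    weights = calc_tapered_weights(window_length, decay_constant)
    assert np.isclose(np.sum(weights), 1.0)   # normalized
    assert np.all(np.diff(weights) > 0)       # monotonically increasing taper
    return weights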
#These functions implement the pearson product-moment correlation
#as described in "Time-Resolved Resting-State Brain Networks" by
#Zalesky et. al in PNAS 2014
def calc_weighted_mean(time_signal, weights):
#print('Weighted_Mean: ' + str(np.sum(np.multiply(time_signal, weights))))
return np.sum(
|
np.multiply(time_signal, weights)
|
numpy.multiply
|
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import sys
import os
import cv2
import math
import random
import numpy as np
import scipy.misc
import _pickle as cPickle
import tensorflow as tf   # needed by box_refinement_graph below
from ctypes import *
import copy
import glob
import time
from aligning import estimateSimilarityTransform
#sys.path.append('./cocoapi/PythonAPI')
#from pycocotools.cocoeval import COCOeval
#from pycocotools import mask as maskUtils
import matplotlib.pyplot as plt
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
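# Usage sketch (illustrative, not from the original repository): boxes are given
# as (y1, x1, y2, x2). An identical box gives IoU 1.0, while a box shifted by
# half its side gives 25 / (100 + 100 - 25) ~= 0.143.
def _demo_compute_iou():
    box = np.array([0, 0, 10, 10])
    boxes = np.array([[0, 0, 10, 10],
                      [5, 5, 15, 15]])
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return compute_iou(box, boxes, box_area, boxes_area)  # approx [1.0, 0.143]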
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_overlaps_masks(masks1, masks2):
'''Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
'''
# flatten masks
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
intersections = np.dot(masks1.T, masks2)
union = area1[:, None] + area2[None, :] - intersections
overlaps = intersections / union
return overlaps
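# Small sanity sketch (illustrative only): a mask covering the top half of a 4x4
# grid and one covering the left half intersect on a 2x2 corner, so the expected
# IoU is 4 / (8 + 8 - 4) = 1/3.
def _demo_compute_overlaps_masks():
    m1 = np.zeros((4, 4, 1)); m1[:2, :, 0] = 1   # top half
    m2 = np.zeros((4, 4, 1)); m2[:, :2, 0] = 1   # left half
    return compute_overlaps_masks(m1, m2)        # approx [[0.333]]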
def compute_mean_l1_coord_diff(mask1, mask2, coord1, coord2, synset, cls_id):
'''Computes the mean L1 difference between two coordinate maps over the region where both masks overlap.
mask1, mask2: [Height, Width]
coord1, coord2: [Height, Width, 3]
'''
# flatten masks
num_pixels = mask1.shape[0] * mask1.shape[1]
mask1 = np.reshape(mask1 > .5, (-1)).astype(np.float32)
mask2 = np.reshape(mask2 > .5, (-1)).astype(np.float32)
coord1 = np.reshape(coord1, (-1, 3)).astype(np.float32)
coord2 = np.reshape(coord2, (-1, 3)).astype(np.float32)
# intersections and union
intersections = np.logical_and(mask1, mask2)
num_pixel_intersection = len(np.where(intersections)[0])
pts1 = coord1[intersections, :].transpose() - 0.5
pts2 = coord2[intersections, :].transpose() - 0.5
def rotation_y_matrix(theta):
rotation_matrix = \
np.array([ np.cos(theta), 0, np.sin(theta),
0, 1, 0,
-np.sin(theta), 0, np.cos(theta)])
rotation_matrix = np.reshape(rotation_matrix, (3, 3))
return rotation_matrix
if synset[cls_id] in ['bottle', 'bowl', 'can']:
M = 20
pts1_symmetry = np.zeros(pts1.shape+(M,)) ## shape: (3, N, 6)
for i in range(M):
rotated_pts1 = rotation_y_matrix(float(i)*np.float32(2*math.pi/M)) @ pts1
pts1_symmetry[:, :, i] = rotated_pts1
pts2_reshape = pts2.reshape([3, -1, 1])
mean_dists = np.mean(np.linalg.norm(pts1_symmetry - pts2_reshape, axis=0), axis=0)
mean_dist = np.amin(mean_dists)
elif synset[cls_id] in ['phone']:
pts1_symmetry = np.zeros(pts1.shape+(2,))
for i in range(2):
rotated_pts1 = rotation_y_matrix(float(i)*np.float32(2*math.pi/2)) @ pts1
#print(rotated_pts1)
pts1_symmetry[:, :, i] = rotated_pts1
pts2_reshape = pts2.reshape([3, -1, 1])
mean_dists = np.mean(np.linalg.norm(pts1_symmetry - pts2_reshape, axis=0), axis=0)
mean_dist = np.amin(mean_dists)
else:
#print(synset[cls_id])
diff = pts1 - pts2
dist = np.linalg.norm(diff, axis=0)
assert dist.shape[0] == num_pixel_intersection
mean_dist = np.mean(dist)
mean_l1_coord_diff = mean_dist
#print(mean_l1_coord_diff, pts1.shape[0])
return mean_l1_coord_diff
def compute_3d_iou(bbox_3d_1, bbox_3d_2, handle_visibility, class_name_1, class_name_2):
'''Computes IoU overlaps between two 3d bboxes.
bbox_3d_1, bbox_3d_1: [3, 8]
'''
# flatten masks
def asymmetric_3d_iou(bbox_3d_1, bbox_3d_2):
bbox_1_max = np.amax(bbox_3d_1, axis=0)
bbox_1_min = np.amin(bbox_3d_1, axis=0)
bbox_2_max = np.amax(bbox_3d_2, axis=0)
bbox_2_min = np.amin(bbox_3d_2, axis=0)
overlap_min = np.maximum(bbox_1_min, bbox_2_min)
overlap_max = np.minimum(bbox_1_max, bbox_2_max)
# intersections and union
if np.amin(overlap_max - overlap_min) <0:
intersections = 0
else:
intersections = np.prod(overlap_max - overlap_min)
union = np.prod(bbox_1_max - bbox_1_min) + np.prod(bbox_2_max - bbox_2_min) - intersections
overlaps = intersections / union
return overlaps
if bbox_3d_1 is None or bbox_3d_2 is None:
return -1
symmetry_flag = False
if class_name_1 in ['bottle', 'bowl', 'can'] and class_name_1 == class_name_2:
symmetry_flag = True
if class_name_1 == 'mug' and class_name_1 == class_name_2 and handle_visibility==0:
symmetry_flag = True
if symmetry_flag:
print('*'*10)
n = 20
theta = 2*math.pi/n
y_rotation_matrix = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
max_iou = 0
for i in range(n):
bbox_center = np.mean(bbox_3d_1, -1, keepdims=True)
bbox_3d_1 = y_rotation_matrix @ (bbox_3d_1 - bbox_center) + bbox_center
max_iou = max(max_iou, asymmetric_3d_iou(bbox_3d_1, bbox_3d_2))
return max_iou
else:
return asymmetric_3d_iou(bbox_3d_1, bbox_3d_2)
def compute_3d_iou_new(RT_1, RT_2, scales_1, scales_2, handle_visibility, class_name_1, class_name_2):
'''Computes IoU overlaps between two 3d bboxes.
bbox_3d_1, bbox_3d_1: [3, 8]
'''
# flatten masks
def asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2):
noc_cube_1 = get_3d_bbox(scales_1, 0)
bbox_3d_1 = transform_coordinates_3d(noc_cube_1, RT_1)
noc_cube_2 = get_3d_bbox(scales_2, 0)
bbox_3d_2 = transform_coordinates_3d(noc_cube_2, RT_2)
bbox_1_max = np.amax(bbox_3d_1, axis=0)
bbox_1_min = np.amin(bbox_3d_1, axis=0)
bbox_2_max = np.amax(bbox_3d_2, axis=0)
bbox_2_min = np.amin(bbox_3d_2, axis=0)
overlap_min = np.maximum(bbox_1_min, bbox_2_min)
overlap_max = np.minimum(bbox_1_max, bbox_2_max)
# intersections and union
if np.amin(overlap_max - overlap_min) <0:
intersections = 0
else:
intersections = np.prod(overlap_max - overlap_min)
union = np.prod(bbox_1_max - bbox_1_min) + np.prod(bbox_2_max - bbox_2_min) - intersections
overlaps = intersections / union
return overlaps
if RT_1 is None or RT_2 is None:
return -1
symmetry_flag = False
if (class_name_1 in ['bottle', 'bowl', 'can'] and class_name_1 == class_name_2) or (class_name_1 == 'mug' and class_name_1 == class_name_2 and handle_visibility==0):
print('*'*10)
noc_cube_1 = get_3d_bbox(scales_1, 0)
noc_cube_2 = get_3d_bbox(scales_2, 0)
bbox_3d_2 = transform_coordinates_3d(noc_cube_2, RT_2)
def y_rotation_matrix(theta):
return np.array([[np.cos(theta), 0, np.sin(theta), 0],
[0, 1, 0 , 0],
[-np.sin(theta), 0, np.cos(theta), 0],
[0, 0, 0 , 1]])
n = 20
max_iou = 0
for i in range(n):
rotated_RT_1 = RT_1@y_rotation_matrix(2*math.pi*i/float(n))
max_iou = max(max_iou,
asymmetric_3d_iou(rotated_RT_1, RT_2, scales_1, scales_2))
else:
max_iou = asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2)
return max_iou
def compute_RT_distances(RT_1, RT_2):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:return: theta: angle difference of R in degree, shift: l2 difference of T in centimeter
'''
#print(RT_1[3, :], RT_2[3, :])
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
R1 = RT_1[:3, :3]/np.cbrt(np.linalg.det(RT_1[:3, :3]))
T1 = RT_1[:3, 3]
R2 = RT_2[:3, :3]/np.cbrt(np.linalg.det(RT_2[:3, :3]))
T2 = RT_2[:3, 3]
R = R1 @ R2.transpose()
theta = np.arccos((np.trace(R) - 1)/2) * 180/np.pi
shift = np.linalg.norm(T1-T2) * 100
# print(theta, shift)
if theta < 5 and shift < 5:
return 10 - theta - shift
else:
return -1
def compute_RT_degree_cm_symmetry(RT_1, RT_2, class_id, handle_visibility, synset_names):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:return: theta: angle difference of R in degree, shift: l2 difference of T in centimeter
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'cap', # 5
'phone', # 6
'monitor', # 7
'laptop', # 8
'mug' # 9
]
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'laptop', # 5
'mug' # 6
]
'''
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
exit()
R1 = RT_1[:3, :3] / np.cbrt(np.linalg.det(RT_1[:3, :3]))
T1 = RT_1[:3, 3]
R2 = RT_2[:3, :3] / np.cbrt(np.linalg.det(RT_2[:3, :3]))
T2 = RT_2[:3, 3]
print(T1,T2)
# try:
# assert np.abs(np.linalg.det(R1) - 1) < 0.01
# assert np.abs(np.linalg.det(R2) - 1) < 0.01
# except AssertionError:
# print(np.linalg.det(R1), np.linalg.det(R2))
if synset_names[class_id] in ['bottle', 'can', 'bowl']: ## symmetric when rotating around y-axis
y = np.array([0, 1, 0])
y1 = R1 @ y
y2 = R2 @ y
theta = np.arccos(y1.dot(y2) / (np.linalg.norm(y1) * np.linalg.norm(y2)))
elif synset_names[class_id] == 'mug' and handle_visibility==0: ## symmetric when rotating around y-axis
y = np.array([0, 1, 0])
y1 = R1 @ y
y2 = R2 @ y
theta = np.arccos(y1.dot(y2) / (np.linalg.norm(y1) * np.linalg.norm(y2)))
elif synset_names[class_id] in ['phone', 'eggbox', 'glue']:
y_180_RT = np.diag([-1.0, 1.0, -1.0])
R = R1 @ R2.transpose()
R_rot = R1 @ y_180_RT @ R2.transpose()
theta = min(np.arccos((np.trace(R) - 1) / 2),
np.arccos((np.trace(R_rot) - 1) / 2))
else:
R = R1 @ R2.transpose()
theta = np.arccos((np.trace(R) - 1) / 2)
theta *= 180 / np.pi
shift = np.linalg.norm(T1 - T2) * 100
result = np.array([theta, shift])
return result
def compute_RT_projection_2d_symmetry(RT_1, RT_2, class_id, handle_visibility, mesh_vertices, intrinsics, synset_names, num_rotation=20):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:param vertices: [3, N].
:param intrinsics: [4, 4]
:return: mean 2d projection distance in pixel
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'laptop', # 5
'mug' # 6
]
'''
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
exit()
RT_1[:3, :3] = RT_1[:3, :3]/np.cbrt(np.linalg.det(RT_1[:3, :3]))
R1 = RT_1[:3, :3]
#T1 = RT_1[:3, 3]
RT_2[:3, :3] = RT_2[:3, :3]/np.cbrt(np.linalg.det(RT_2[:3, :3]))
R2 = RT_2[:3, :3]
#T2 = RT_2[:3, 3]
try:
assert np.abs(np.linalg.det(R1) - 1) < 0.01
assert np.abs(np.linalg.det(R2) - 1) < 0.01
except AssertionError:
print(np.linalg.det(R1), np.linalg.det(R2))
# check the vertices are in meter unit
vertices = np.copy(mesh_vertices)/1000
assert np.amax(vertices) < 0.5, np.amax(vertices)
assert np.amax(vertices) > 0, np.amax(vertices)
assert np.amin(vertices) < 0, np.amin(vertices)
assert np.amin(vertices) > -0.5, np.amin(vertices)
assert vertices.shape[0] == 3
num_vertices = vertices.shape[1]
coords_3d_1 = transform_coordinates_3d(vertices, RT_1)
projected_1 = calculate_2d_projections(coords_3d_1, intrinsics)
coords_3d_2 = transform_coordinates_3d(vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
# calculate reprojection 2d error
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = np.mean(dists)
## take care of symmetry categories
# freely rotate around y axis
if (synset_names[class_id] in ['bottle', 'can', 'bowl']) or (synset_names[class_id] == 'mug' and handle_visibility==0):
def y_rotation_matrix(theta):
return np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
for i in range(1, num_rotation):
theta = 2*math.pi*i/float(num_rotation)
coords_3d_2 = transform_coordinates_3d(y_rotation_matrix(theta)@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
# rotate 180 around y axis
elif synset_names[class_id] in ['phone']:
y_180_RT = np.diag([-1.0, 1.0, -1.0])
coords_3d_2 = transform_coordinates_3d(y_180_RT@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
# rotate 180 around z axis
elif synset_names[class_id] in ['eggbox', 'glue']:
z_180_RT = np.diag([-1.0, -1.0, 1.0])
coords_3d_2 = transform_coordinates_3d(z_180_RT@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
else: ## normal asymmetric objects
min_mean_dist = min_mean_dist
return min_mean_dist
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum suppression and returns indices of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add 1 to get
# indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
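# Illustration (added, not from the original code): with a 0.5 IoU threshold the
# lower-scoring member of a heavily overlapping pair is suppressed, while a
# disjoint box is kept.
def _demo_non_max_suppression():
    boxes = np.array([[0, 0, 10, 10],
                      [1, 1, 11, 11],
                      [20, 20, 30, 30]], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7])
    return non_max_suppression(boxes, scores, 0.5)  # -> indices [0, 2]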
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.log(gt_height / height)
dw = tf.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
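# Round-trip sketch (illustrative): box_refinement encodes a ground-truth box
# relative to an anchor, and apply_box_deltas decodes it again, so the two are
# inverses up to floating-point error.
def _demo_box_roundtrip():
    anchors = np.array([[0, 0, 10, 10]], dtype=np.float32)
    gt = np.array([[2, 3, 12, 9]], dtype=np.float32)
    deltas = box_refinement(anchors, gt)
    decoded = apply_box_deltas(anchors, deltas)
    assert np.allclose(decoded, gt, atol=1e-4)
    return decoded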
def get_3d_bbox(scale, shift = 0):
"""
Input:
scale: [3] or scalar
shift: [3] or scalar
Return
bbox_3d: [3, N]
"""
if hasattr(scale, "__iter__"):
bbox_3d = np.array([[scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, -scale[2] / 2]]) + shift
else:
bbox_3d = np.array([[scale / 2, +scale / 2, scale / 2],
[scale / 2, +scale / 2, -scale / 2],
[-scale / 2, +scale / 2, scale / 2],
[-scale / 2, +scale / 2, -scale / 2],
[+scale / 2, -scale / 2, scale / 2],
[+scale / 2, -scale / 2, -scale / 2],
[-scale / 2, -scale / 2, scale / 2],
[-scale / 2, -scale / 2, -scale / 2]]) +shift
bbox_3d = bbox_3d.transpose()
return bbox_3d
def transform_coordinates_3d(coordinates, RT):
"""
Input:
coordinates: [3, N]
RT: [4, 4]
Return
new_coordinates: [3, N]
"""
assert coordinates.shape[0] == 3
coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])
new_coordinates = RT @ coordinates
new_coordinates = new_coordinates[:3, :]/new_coordinates[3, :]
return new_coordinates
def calculate_2d_projections(coordinates_3d, intrinsics):
"""
Input:
coordinates: [3, N]
intrinsics: [3, 3]
Return
projected_coordinates: [N, 2]
"""
projected_coordinates = intrinsics @ coordinates_3d
projected_coordinates = projected_coordinates[:2, :] / projected_coordinates[2, :]
projected_coordinates = projected_coordinates.transpose()
projected_coordinates = np.array(projected_coordinates, dtype=np.int32)
return projected_coordinates
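# End-to-end sketch (illustrative; the pinhole intrinsics below are made up, not
# taken from any dataset): build a canonical 3D box, place it one metre in front
# of the camera, and project its eight corners to pixel coordinates.
def _demo_project_bbox():
    bbox = get_3d_bbox(np.array([0.1, 0.2, 0.1]))             # [3, 8] corners
    RT = np.eye(4)
    RT[2, 3] = 1.0                                            # translate 1 m along +z
    intrinsics = np.array([[600., 0., 320.],
                           [0., 600., 240.],
                           [0., 0., 1.]])
    corners_cam = transform_coordinates_3d(bbox, RT)          # [3, 8]
    return calculate_2d_projections(corners_cam, intrinsics)  # [8, 2] pixels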
############################################################
# IMAGE AUGMENTATION
############################################################
def calculate_rotation(image_size, angle):
image_center = tuple(np.array(image_size) / 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
)
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
(
|
np.array([-image_w2, -image_h2])
|
numpy.array
|
import numpy as np
from PIL import Image
Single_Num=320 # number of each digits in SVHN 320*10=3200=64*50
batch_size=64
batch_num=50
width=32
height=32
channel=3
classes=10
Total_Num=Single_Num*(classes)
SVHNArrayTemp=np.empty(shape=(Total_Num, width, height, channel)) # holds Single_Num images for each of the 10 digit classes
SVHN_LabelArrayTemp=np.zeros(shape=(Total_Num, classes))
OnedigitArray=np.empty(shape=(10, width, height, channel)) # save one digit
OnedigitArrayMasks=np.empty(shape=(10, width, height, 1)) # save one digit
# adding
one_cnt=0
cnt=0
for imgFolderName in range(1,classes+1,1):
for k in range(2000,2000+Single_Num):
temp = Image.open('SVHNimages/{}/{:06d}.png'.format(imgFolderName,k+1))
temp_arr=np.array(temp,dtype=np.float)
SVHNArrayTemp[cnt] = temp_arr
if imgFolderName==10:
# OnedigitArray[one_cnt] = temp_arr
# one_cnt=one_cnt+1
SVHN_LabelArrayTemp[cnt, 0] = 1
else:
# SVHNArrayTemp[cnt] = temp_arr
SVHN_LabelArrayTemp[cnt, imgFolderName]=1
cnt=cnt+1
print(cnt) # debug end
# print(one_cnt)
# SVHN_LabelArray1[:, 0] = 1
# print(SVHN_LabelArray1[0:3])
# print(SVHN_LabelArray1[307:322])
# print(SVHN_LabelArray1[3195:3200])
# exit
state1=np.random.get_state()
np.random.shuffle(SVHNArrayTemp) # don't shuffle with test
np.random.set_state(state1)
np.random.shuffle(SVHN_LabelArrayTemp)
"""select digits"""
temp = Image.open('onesample_VI_data/004065_0.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[0] = temp_arr
temp = Image.open('onesample_VI_data/013671_1.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[1] = temp_arr
temp = Image.open('onesample_VI_data/010555_2.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[2] = temp_arr
temp = Image.open('onesample_VI_data/007640_3.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[3] = temp_arr
temp = Image.open('onesample_VI_data/006708_4.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[4] = temp_arr
temp = Image.open('onesample_VI_data/006687_5.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[5] = temp_arr
temp = Image.open('onesample_VI_data/005710_6.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[6] = temp_arr
temp = Image.open('onesample_VI_data/005178_7.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[7] = temp_arr
temp = Image.open('onesample_VI_data/005045_8.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[8] = temp_arr
temp = Image.open('onesample_VI_data/003504_9.png')
temp_arr=np.array(temp,dtype=np.float)
OnedigitArray[9] = temp_arr
temp = Image.open('onesample_VI_data/004065_0_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[0] = temp_arr
temp = Image.open('onesample_VI_data/013671_1_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[1] = temp_arr
temp = Image.open('onesample_VI_data/010555_2_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[2] = temp_arr
temp = Image.open('onesample_VI_data/007640_3_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[3] = temp_arr
temp = Image.open('onesample_VI_data/006708_4_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[4] = temp_arr
temp = Image.open('onesample_VI_data/006687_5_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[5] = temp_arr
temp = Image.open('onesample_VI_data/005710_6_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[6] = temp_arr
temp = Image.open('onesample_VI_data/005178_7_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[7] = temp_arr
temp = Image.open('onesample_VI_data/005045_8_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[8] = temp_arr
temp = Image.open('onesample_VI_data/003504_9_mask.png')
temp_arr=np.array(temp,dtype=np.float).reshape((32,32,1))
OnedigitArrayMasks[9] = temp_arr
use_num=1000
imgs_arr1=np.empty(shape=(use_num,width,height,channel),dtype=np.float) # 0-9 digits, each digits 100
imgs_arr2=np.empty(shape=(use_num,width,height,channel),dtype=np.float) # random img
imgs_arr1_labels=np.empty(shape=(use_num,classes),dtype=np.int)
imgs_arr2_labels=np.empty(shape=(use_num,classes),dtype=np.int)
SVHN_maskArray=
|
np.empty(shape=(use_num,width,height,channel))
|
numpy.empty
|
from keras import Model
from keras.models import Sequential
from keras.layers import Input, Conv2D, MaxPooling2D, Activation, UpSampling2D, BatchNormalization, Conv2DTranspose
import tensorflow as tf
# Define model
model = Sequential()
# Uncomment if needed later
# model.add(BatchNormalization())
#model.add(MaxPooling2D(2))
#model.add(Conv2D(2, 32))
#model.add(UpSampling2D(2))
#model.add(Conv2D(32, 2))
#model.add(UpSampling2D(2))
#model.add(Conv2D(4, 2))
#model.add(Activation("relu"))
model.add(Input(shape=(64, 64, 3)))
model.add(Conv2D(64, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2D(128, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2D(256, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2DTranspose(256, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2DTranspose(64, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2DTranspose(128, 3, strides=(2, 2)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2DTranspose(32, 3, strides=(1, 1)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv2D(4, 2))
model.add(Activation("tanh"))
# Compile model
#optimizer = "rmsprop"
optimizer = "adam"
#loss = "categorical_crossentropy"
loss = "mse"
#loss = "mae"
#loss = "binary_crossentropy"
#loss = "sparse_categorical_crossentropy"
metrics = [
"accuracy"
]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
INPUT_WIDTH = model.input_shape[1]
INPUT_HEIGHT = model.input_shape[2]
INPUT_DIMENTIONS = model.input_shape[3]
OUTPUT_WIDTH = model.output_shape[1]
OUTPUT_HEIGHT = model.output_shape[2]
OUTPUT_DIMENTIONS = model.output_shape[3]
# Model summary
model.summary()
from google.colab import drive
drive.mount('/content/drive')
import random
import os
from PIL import Image
import numpy as np
INPUT_DIRECTORY = "drive/My Drive/Colab Notebooks/MINECRAFT_MODEL/dataset/input"
OUTPUT_DIRECTORY = "drive/My Drive/Colab Notebooks/MINECRAFT_MODEL/dataset/output"
def binary_search(values, key):
lower = 0
upper = len(values)
while lower + 1 < upper:
half = int((lower + upper) / 2)
if key == values[half]:
return half
elif key > values[half]:
lower = half
elif key < values[half]:
upper = half
# Check the remaining candidate explicitly and return -1 when the key is
# absent, so callers can reliably test "index > -1" for membership.
if values and values[lower] == key:
    return lower
return -1
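# Usage sketch (illustrative): binary_search expects a sorted list and returns
# the index of the key when present, or -1 when absent, which is the convention
# load_data_index below relies on via its "o > -1" test.
def _demo_binary_search():
    names = ["a.png", "b.png", "d.png"]
    assert binary_search(names, "b.png") == 1
    assert binary_search(names, "c.png") == -1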
def load_data_index():
input_files = []
input_paths = []
for filename in os.listdir(INPUT_DIRECTORY):
path = os.path.join(INPUT_DIRECTORY, filename)
if os.path.isfile(path):
input_files.append(filename)
input_paths.append(path)
output_files = []
output_paths = []
for filename in os.listdir(OUTPUT_DIRECTORY):
path = os.path.join(OUTPUT_DIRECTORY, filename)
if os.path.isfile(path):
output_files.append(filename)
output_paths.append(path)
input_files.sort()
input_paths.sort()
output_files.sort()
output_paths.sort()
data_index = []
for i, input_file in enumerate(input_files):
o = binary_search(output_files, input_file)
if o > -1:
data_index.append(
[input_paths[i], output_paths[o]]
)
return data_index
def load_data(metadata):
[input_path, output_path] = metadata
input_image = Image.open(input_path)
#input_image = input_image.resize((32, 32))
output_image = Image.open(output_path)
return (np.asarray(input_image),
np.asarray(output_image.convert(mode="RGBA")))
data_index = load_data_index()
from datetime import datetime
# train model
BATCH_SIZE = 3
EPOCHS = 5
processed = 0
total = len(data_index)
random.shuffle(data_index)
data_index_iterator = iter(data_index)
try:
metadata = next(data_index_iterator)
except StopIteration:
metadata = None
while metadata:
batch_size = BATCH_SIZE
x_batch = []
y_batch = []
for _ in range(BATCH_SIZE):
try:
metadata = next(data_index_iterator)
[x_data, y_data] = load_data(metadata)
x_batch.append(x_data)
y_batch.append(y_data)
except StopIteration:
metadata = None
batch_size = len(x_batch)
processed += batch_size
x_batch =
|
np.array(x_batch)
|
numpy.array
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import numpy.fft as fft
import scipy.signal as sig
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
import csv
import datetime
#design output
#v=0 critical current v stuff
#time series for quiet squid
#time series for d
# In[2]:
import time, sys
from IPython.display import clear_output
def update_progress(progress):
bar_length = 20
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
# In[3]:
def noisyRK4(s,th,tau,derivsRK,par,vn10,vn20,vn11,vn21,vn12,vn22):
"""RK4 integrator modified to use noise
DEPENDENCIES
derivsRK - RHS of ODE, fn defined somewhere
INPUTS
s - state vector
th - time, theta
tau - time step size
derivsRK - RHS of ODE, fn defined somewhere
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
sout - new state vector new time
[delta_1,delta_2,ddelta_1,ddelta_2,d^2delta_1,d^2delta_2]"""
# parse out parameter array
alpha = par[0]; beta = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
# noisySQUIDrk(s,th,alpha,beta,eta,rho,i,phia,vn1,vn2)
half_tau = 0.5*tau
F1 = derivsRK(s,th,par,vn10,vn20) # use current voltage noise
th_half = th + half_tau
stemp = s + half_tau*F1
F2 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
stemp = s + half_tau*F2
F3 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
th_full = th + tau
stemp = s + tau*F3
F4 = derivsRK(stemp,th_full,par,vn12,vn22) # use full-tau step voltage noise
sout = s + tau/6.*(F1 + F4 + 2.*(F2 + F3))
return sout
# In[4]:
def noisySQUIDrk(s,th,par,vn1,vn2):
"""Returns RHS of ODE
DEPENDENCIES
numpy as np
INPUTS
s - state vector [del1(theta), del2(theta)]
th - time, theta
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
nv1,nv2 - noise values at each junction
OUTPUTS
deriv - array
[ddel1/dth, ddel2/dth, d^2del1/dth^2, d^2del2/dth^2]"""
# parse out parameter array
alpha = par[0]; beta = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
#del1 = s[0] # del_1(theta)
#del2 = s[1] # del_2(theta)
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*beta) - eta*i/2
dddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC)
dddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC)
ddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*dddel1)/(1-rho) + vn1 # ddel1/dth
ddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*dddel2)/(1+rho) + vn2 # ddel2/dth
deriv = np.array([ddel1,ddel2,dddel1,dddel2])
return(deriv)
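# Quick sketch (illustrative parameter values only, not from any published run):
# evaluate the RHS once for a symmetric, noise-free SQUID at zero phase with a
# small applied flux, just to show the layout of the par and state vectors.
def _demo_noisySQUIDrk():
    #               [alpha, betaL, eta, rho,   i, phia, Gamma, betaC, kappa]
    par = np.array([0., 1., 0., 0., 1.0, 0.25, 0., 0.5, 0.])
    s = np.zeros(4)   # [del1, del2, ddel1, ddel2]
    return noisySQUIDrk(s, 0., par, 0., 0.)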
# In[5]:
def noisySQUID(nStep,tau,s,par):
"""Handles RK4 solver, returns time series sim of SQUID
DEPENDENCIES
noisySQUIDrk - modified RK4 solver
numpy as np
INPUTS
nStep - number of steps
tau - time step size
s - initial state vector
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
OUTPUTS
S - time series state vector
[theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v]"""
#parse out the parameter vector
alpha=par[0]; betaL=par[1]; eta=par[2]; rho=par[3]
i=par[4]; phia=par[5]; Gamma=par[6]; betaC=par[7]
kappa=par[8]
# change state vector s to include all the derivs
# little s denotes a 1-d vector of, current values
# big S denotes the output array of all s, a 2-d array in time
## NOISE ##
# set an appropriate variance based on Gamma.
# variance is twice normal because freq of noise
# is twice that of the sampling freq so that rk4 has
# a noise value to use at each half tau step
var = 4*Gamma/tau
sd = var**.5
# make two time series of noise voltages
# lowercase designators are current values, uppercase are arrays in time
VN1 = np.zeros(2*nStep+1)
VN2 = np.zeros(2*nStep+1)
for ist in range(2*nStep+1):
VN1[ist] = np.random.normal(0,sd)
VN2[ist] = np.random.normal(0,sd)
# DATA STRUCTURE
# S = [theta,del1,del2,ddel1,ddel2,dddel1,dddel2,j,v]
S = np.zeros([8,nStep],float)
# set initial conditions
theta = 0.
S[0,0] = theta
S[1,0] = s[0] # del1
S[2,0] = s[1] # del2
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*betaL) - eta*i/2
S[3,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*s[4])/(1-rho) # ddel1
S[4,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*s[5])/(1+rho) # ddel2
S[5,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC) # dddel1
S[6,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC) # dddel2
s = np.copy(S[1:5,0])
for iStep in range(1,nStep):
vn10 = VN1[2*iStep-2]
vn20 = VN2[2*iStep-2]
vn11 = VN1[2*iStep-1]
vn21 = VN2[2*iStep-1]
vn12 = VN1[2*iStep]
vn22 = VN2[2*iStep]
# noisyRK4(s,th,alpha,beta,eta,rho,i,phia,tau,derivsRK,vn10,vn20,vn11,vn21,vn12,vn22)
s = noisyRK4(s,theta,tau,noisySQUIDrk,par,vn10,vn20,vn11,vn21,vn12,vn22)
S[0,iStep] = theta # time theta
S[1,iStep] = s[0] # del1
S[2,iStep] = s[1] # del2
S[3,iStep] = s[2] # ddel1
S[4,iStep] = s[3] # ddel2
#S[5,iStep] = # dddel1
#S[6,iStep] = # dddel2
theta = theta + tau
# S[5,:] =
# S[6,:] =
S[6] = S[3]*(1+eta)/2 + S[4]*(1-eta)/2
return(S)
# In[9]:
def vj_timeseries(nStep,tau,s,par):
"""Returns time series simulation of squid, figure and csv
DEPENDENCIES
qSQUID()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
figure - plots of
voltage time series w average
circulating current time series w average
output to screen
png 'timeseriesdatetime.png' saved to parent directory
csv - time series csv file containing
theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v
csv 'timeseriesdatetime.csv' saved to parent directory
"""
# run sim
S = noisySQUID(nStep,tau,s,par)
# chop off first 10% of time series to remove any transient
md = int(.1*len(S[0,:]))
# build figure title with parameters used
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s'% (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)))+'\n'+ r'$\rho$=%s, $i$=%s, $\phi_a$=%s' % (str(round(par[3],3)),str(round(par[4],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
# plot
fig, ax = plt.subplots(2,1,figsize=(3,7))
fig.suptitle(ti)
ax1 = plt.subplot(2,1,1)
ax1.plot(S[0,md:],S[6,md:])
ax1.hlines((sum(S[6,md:])/len(S[6,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax1.set(ylabel="Voltage, v",
xticklabels=([]))
ax2 = plt.subplot(2,1,2)
ax2.plot(S[0,md:],S[3,md:])
ax2.hlines((sum(S[3,md:])/len(S[3,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax2.set(ylabel="Circ Current, j",
xlabel=r"Time,$\theta$")
# create output file metadata
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
meta2 = ['# nStep=%s'%nStep,'tau=%s'%tau]
header = ['theta','delta_1','delta_2','j','ddel1/dth','ddel2/dth','v']
csvtime = datetime.datetime.now()
timestr = [datetime.datetime.strftime(csvtime, '# %Y/%m/%d, %H:%M:%S')]
timeti = str(datetime.datetime.strftime(csvtime, '%Y%m%d%H%M%S'))
csvtitle='timeseries'+timeti+'.csv'
pngtitle='timeseries'+timeti+'.png'
Sf = np.matrix.transpose(S)
# create, write, output(close) csv file
with open(csvtitle, 'w') as csvFile:
filewr = csv.writer(csvFile,delimiter=',')
filewr.writerow(timestr)
filewr.writerow(meta1)
filewr.writerow(meta2)
filewr.writerow(header)
filewr.writerows(Sf)
csvFile.close()
# save figure
fig.savefig(pngtitle)
print('csv file written out:', csvtitle)
print('png file written out:', pngtitle)
# In[11]:
def iv_curve(nStep,tau,s,par,alpha=0,betaL=0,eta=0,rho=0,phia=0,Gamma=0,betaC=0,kappa=0):
"""Returns contour plot and data file for IV curves
DEPENDENCIES
qSQUID()
update_progress()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha, beta_L, eta, rho, i, phia]
input parameter LIST - alpha, beta, eta, rho, phia
multiple values of input parameter as list
draws contour for each
if given, overwrites value in par
if not given, value from par is used for one contour
ONLY SUPPLY maximum of one input list here
OUTPUTS
plot - IV contours at levels given in input param array
output to screen
png 'IVdatetime.png' saved to parent directory
csv - IV contours at levels given
csv 'IVdatetime.csv' saved to parent directory
"""
# create currents to sweep
i = np.arange(0.,6.,.1)
ch = 0 # check that only one parameter is swept.
k = 1 # set 0 axis dim to 1 at min
md = int(0.1*nStep) # cut off the first 10 percent of the time series to remove the transient
# check if an array was given for an input parameter
# k - length of input parameter array (number of contours)
# parj - build a list of parameters to pass at each array value of that parameter
# la, lc - plot label and csv header lable
# lb - rename parameter array to add in plot and header later
# ti - plot title
# meta1 - csv metadata
# ch - check value, check for only one input parameter array, or none for one contour
if alpha != 0:
alpha = np.array(alpha)
k = len(alpha)
parj = np.zeros([k,9])
la = r'$\alpha$'; lc = 'alpha'
lb = np.copy(alpha)
ti = r'$\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# add input array values to iteration parameters as appropriate
for j in range(k):
parj[j,:] = np.array([alpha[j],par[1],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if betaL != 0:
betaL = np.array(betaL)
k = len(betaL)
parj = np.zeros([k,9])
la = r'$\beta_L$'; lc = 'betaL'
lb = np.copy(betaL)
ti = r'$\alpha$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],betaL[j],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if eta != 0:
eta = np.array(eta)
k = len(eta)
parj = np.zeros([k,9])
la = r'$\eta$'; lc = 'eta'
lb = np.copy(eta)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],eta[j],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if rho != 0:
rho = np.array(rho)
k = len(rho)
parj = np.zeros([k,9])
la = r'$\rho$'; lc = 'rho'
lb = np.copy(rho)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],rho[j],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if phia != 0:
phia = np.array(phia)
k = len(phia)
parj = np.zeros([k,9])
la = r'$\phi_a$'; lc = 'phi_a'
lb = np.copy(phia)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,phia[j],par[6],par[7],par[8]])
ch = ch + 1
if Gamma != 0:
Gamma = np.array(Gamma)
k = len(Gamma)
parj = np.zeros([k,9])
la = r'$\Gamma$'; lc = 'Gamma'
lb = np.copy(Gamma)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s, $\beta_C$=%s, $\kappa$=%s' % (par[0],par[1],par[2],par[3],par[5],par[7],par[8])
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],Gamma[j],par[7],par[8]])
ch = ch + 1
if betaC != 0:
betaC = np.array(betaC)
k = len(betaC)
parj = np.zeros([k,9])
la = r'$\beta_C$'; lc = 'betaC'
lb = np.copy(betaC)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\kappa$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],betaC[j],par[8]])
ch = ch + 1
if kappa != 0:
kappa = np.array(kappa)
k = len(kappa)
parj = np.zeros([k,9])
la = r'$\kappa$'; lc = 'kappa'
lb = np.copy(kappa)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],par[7],kappa[j]])
ch = ch + 1
# if check value is more than one, too many input parameter arrays given
if ch > 1:
return('Please supply at most one parameter to sweep')
# if check value zero, assume plotting only one contour
if ch == 0:
parj = np.zeros([2,9])
parj[0,:] = par
parj[1,:] = par
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+ '\n' + r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' % (str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# build sim output array of appropriate size
# needs as many rows as contours determined by input parameter array
if k > 1:
V = np.zeros([k,len(i)])
else:
V = np.zeros([2,len(i)])
# cp - check progress, total outputs in V
cp = k*len(i)
# loop over k rows and len(i) colums of V
# fill V with average voltage from time series for given params
# parjj - parameter array for this time series
# S - state array output from sim
for j in range(k):
parjj = parj[j,:]
for m in range(len(i)):
parjj[4] = i[m]
S = noisySQUID(nStep,tau,s,parjj)
V[j,m] = sum(S[6,md:])/len(S[6,md:])
# new progress bar current iter/total iters
update_progress((m + j*len(i))/cp)
# fill out progress bar
update_progress(1)
# build output for csv
# join i values and average Voltage matrix
Sf = np.concatenate((np.matrix(i),V),axis=0)
# flip independent axis, i, from horizontal to vertical
Sf = np.matrix.transpose(Sf)
# convert from matrix to array to ease csv output
Sf = np.array(Sf)
# make a figure
# header - csv header info, param input value for contour
fig,ax = plt.subplots()
# one contour, or
if k == 1:
ax.plot(V[0],i)
header = ['i','V']
# k contours
else:
header = ['i']*(k+1)
for j in range(k):
ax.plot(V[j],i,label= la + '=%s' % str(round(lb[j],3)))
header[j+1] = lc + '=%s' % str(round(lb[j],3))
# ic = 0 line for comparison
ax.plot(np.arange(0,2.6,.1),np.arange(0,5.2,.2),'--',
label=r"$i_c=0$")
ax.set(title=ti,
xlabel=r"Average voltage, $\bar{v}$",
ylabel="Bias current, i",
xlim=[0,2.5],ylim=[0,6.])
ax.legend()
fig.tight_layout()
# build rest of metadata needed for csv
meta2 = ['# nStep=%s'%nStep,'tau=%s'%tau]
csvtime = datetime.datetime.now()
timestr = [datetime.datetime.strftime(csvtime, '# %Y/%m/%d, %H:%M:%S')]
timeti = str(datetime.datetime.strftime(csvtime, '%Y%m%d%H%M%S'))
csvtitle='IV'+timeti+'.csv'
pngtitle='IV'+timeti+'.png'
# create, write, and save(close) csv
with open(csvtitle, 'w') as csvFile:
filewr = csv.writer(csvFile,delimiter=',')
filewr.writerow(timestr)
filewr.writerow(meta1)
filewr.writerow(meta2)
filewr.writerow(header)
filewr.writerows(Sf)
csvFile.close()
# save figure
fig.savefig(pngtitle)
print('csv file written out:', csvtitle)
print('png file written out:', pngtitle)
# In[8]:
def vphi_curve(nStep,tau,s,par,alpha=0,betaL=0,eta=0,rho=0,i=0,Gamma=0,betaC=0,kappa=0):
"""Returns contour plot and data file for IV curves
DEPENDENCIES
qSQUID()
update_progress()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha, beta_L, eta, rho, i, phia]
input parameter LIST - alpha, beta, eta, rho, phia
multiple values of input parameter as list
draws contour for each
if given, overwrites value in par
if not given, value from par is used for one contour
ONLY SUPPLY maximum of one input list here
OUTPUTS
plot - V-phi contours at levels given in input param array
output to screen
png 'IVdatetime.png' saved to parent directory
csv - V-phi contours at levels given
csv 'IVdatetime.csv' saved to parent directory
"""
# create applied flux values to sweep
phia = np.arange(0.,1.05,.05)
ch = 0 # check that only one parameter is swept.
k = 1 # set 0 axis dim to 1 at min
md = int(0.1*nStep) # cut off the first 10 percent of the time series to remove the transient
# check if an array was given for an input parameter
# k - length of input parameter array (number of contours)
# parj - build a list of parameters to pass at each array value of that parameter
# la, lc - plot label and csv header lable
# lb - rename parameter array to add in plot and header later
# ti - plot title
# meta1 - csv metadata
# ch - check value, check for only one input parameter array, or none for one contour
if alpha != 0:
alpha = np.array(alpha)
k = len(alpha)
parj = np.zeros([k,9])
la = r'$\alpha$'; lc = 'alpha'
lb = np.copy(alpha)
ti = r'$\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# add input array values to iteration parameters as appropriate
for j in range(k):
parj[j,:] = np.array([alpha[j],par[1],par[2],par[3],par[4],0.,par[6],par[7],par[8]])
ch = ch + 1
if betaL != 0:
betaL = np.array(betaL)
k = len(betaL)
parj = np.zeros([k,9])
la = r'$\beta_L$'; lc = 'betaL'
lb = np.copy(betaL)
ti = r'$\alpha$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],betaL[j],par[2],par[3],par[4],0.,par[6],par[7],par[8]])
ch = ch + 1
if eta != 0:
eta = np.array(eta)
k = len(eta)
parj = np.zeros([k,9])
la = r'$\eta$'; lc = 'eta'
lb = np.copy(eta)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],eta[j],par[3],par[4],0.,par[6],par[7],par[8]])
ch = ch + 1
if rho != 0:
rho =
|
np.array(rho)
|
numpy.array
|
from scipy.stats import gamma,norm,beta,truncnorm
import numpy as np
def transform_uniform(x,a,b):
return a + (b-a)*x
def transform_loguniform(x,a,b):
la=np.log(a)
lb=np.log(b)
return np.exp(la + x*(lb-la))
def transform_normal(x,mu,sigma):
return norm.ppf(x,loc=mu,scale=sigma)
def transform_beta(x,a,b):
return beta.ppf(x,a,b)
def transform_exponential(x,a=1.):
return gamma.ppf(x, a)
def transform_truncated_normal(x,mu,sigma,a=0.,b=1.):
ar, br = (a - mu) / sigma, (b - mu) / sigma
return truncnorm.ppf(x,ar,br,loc=mu,scale=sigma)
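# Worked example (added for illustration): these transforms map points sampled
# uniformly on [0, 1] (e.g. by a nested sampler) onto the target prior, so 0.5
# lands on each distribution's median.
def _demo_prior_transforms():
    assert np.isclose(transform_uniform(0.5, 10., 20.), 15.)
    assert np.isclose(transform_loguniform(0.5, 1., 100.), 10.)
    assert np.isclose(transform_normal(0.5, 0., 1.), 0.)
    assert np.isclose(transform_truncated_normal(0.5, 1., 0.1, a=0., b=2.), 1.)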
def readlc(fname):
fin = open(fname,'r')
ts = np.array([])
fs = np.array([])
ferrs = np.array([])
instruments = np.array([])
# Arguments of an optional linear model. This will save the regression matrix "X" in a model of the form X*theta = y, where theta
# are the coefficients:
lm_arguments = {}
# This will save a True or False for each instrument --- True if there are
# inputs and therefore we want a linear model, False if not:
lm_boolean = {}
instrument_names = []
while True:
line = fin.readline()
if line != '':
all_vals = line.split()
t,f,ferr,instrument = all_vals[0:4]
lm_variables = all_vals[4:]
ts = np.append(ts,np.double(t))
fs = np.append(fs,np.double(f))
ferrs = np.append(ferrs,np.double(ferr))
instruments = np.append(instruments,instrument.split()[0])
if instrument.split()[0] not in instrument_names:
instrument_names.append(instrument.split()[0])
if len(lm_variables)>0:
lm_arguments[instrument.split()[0]] = np.array([])
lm_boolean[instrument.split()[0]] = True
else:
lm_boolean[instrument.split()[0]] = False
if lm_boolean[instrument.split()[0]]:
if len(lm_arguments[instrument.split()[0]]) == 0:
lm_arguments[instrument.split()[0]] = np.array(lm_variables).astype(np.double)
else:
lm_arguments[instrument.split()[0]] = np.vstack((lm_arguments[instrument.split()[0]],\
np.array(lm_variables).astype(np.double)))
else:
break
# Identify instrument indeces:
indexes = {}
for instrument in instrument_names:
indexes[instrument] = np.where(instruments == instrument)[0]
return ts,fs,ferrs,instruments,indexes,len(instrument_names),instrument_names,lm_boolean,lm_arguments
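# Format sketch (illustrative only; the file name and values are made up): readlc
# expects whitespace-separated columns of time, flux, flux error and instrument
# name, optionally followed by linear-model regressors for that instrument.
def _demo_readlc(tmpname='example_lc.dat'):
    lines = ['2458001.0 1.0002 0.0005 TESS 0.1\n',
             '2458001.1 0.9998 0.0005 TESS 0.2\n',
             '2458002.0 1.0010 0.0010 LCOGT\n']
    with open(tmpname, 'w') as fout:
        fout.writelines(lines)
    t, f, ferr, instruments, indexes, n_inst, names, lm_boolean, lm_arguments = readlc(tmpname)
    # TESS rows carry one regressor each, LCOGT rows carry none.
    return n_inst, lm_boolean, lm_arguments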
def readeparams(fname,RV=False):
fin = open(fname,'r')
GPDictionary = {}
ftime = True
while True:
line = fin.readline()
if line != '':
if line[0] != '#':
vector = line.split()
if RV:
variables = vector
if ftime:
GPDictionary['variables'] = np.double(np.array(variables))
ftime = False
else:
GPDictionary['variables'] = np.vstack((GPDictionary['variables'],np.double(np.array(variables))))
else:
variables,instrument = vector[:-1],vector[-1].split()[0]
if instrument in GPDictionary.keys():
GPDictionary[instrument]['variables'] = np.vstack((GPDictionary[instrument]['variables'],np.double(np.array(variables))))
else:
GPDictionary[instrument] = {}
GPDictionary[instrument]['variables'] = np.double(np.array(variables))
else:
break
return GPDictionary
def readpriors(priorname):
"""
This function takes either a string or a dict and spits out information about the prior. If a string, it
reads a prior file. If a dict, it assumes the input dictionary has already defined all the variables and
distributions and simply spits out information about the system (e.g., number of transiting planets, RV
planets, etc.)
"""
input_dict = False
if type(priorname) == str:
fin = open(priorname)
priors = {}
else:
counter = -1
priors = priorname
input_dict = True
all_parameters = list(priors.keys())
n_allkeys = len(all_parameters)
n_transit = 0
n_rv = 0
n_params = 0
numbering_transit = np.array([])
numbering_rv = np.array([])
while True:
if not input_dict:
line = fin.readline()
else:
# Dummy variable so we enter the while:
line = 'nc'
counter += 1
if line != '':
if line[0] != '#':
if not input_dict:
out = line.split()
parameter,prior_name,vals = line.split()
parameter = parameter.split()[0]
prior_name = prior_name.split()[0]
vals = vals.split()[0]
priors[parameter] = {}
else:
param = all_parameters[counter]
parameter,prior_name = param,priors[param]['distribution'],
pvector = parameter.split('_')
# Check if parameter/planet is from a transiting planet:
if pvector[0] == 'r1' or pvector[0] == 'p':
pnumber = int(pvector[1][1:])
numbering_transit = np.append(numbering_transit,pnumber)
n_transit += 1
# Check if parameter/planet is from a RV planet:
if pvector[0] == 'K':
pnumber = int(pvector[1][1:])
numbering_rv = np.append(numbering_rv,pnumber)
n_rv += 1
#if parameter == 'r1_p'+str(n_transit+1) or parameter == 'p_p'+str(n_transit+1):
# numbering_transit = np.append(numbering_transit,n_transit+1)
# n_transit += 1
#if parameter == 'K_p'+str(n_rv+1):
# numbering_rv = np.append(numbering_rv,n_rv+1)
# n_rv += 1
if prior_name.lower() == 'fixed':
if not input_dict:
priors[parameter]['type'] = prior_name.lower()
priors[parameter]['value'] = np.double(vals)
priors[parameter]['cvalue'] = np.double(vals)
else:
n_params += 1
if not input_dict:
priors[parameter]['type'] = prior_name.lower()
if priors[parameter]['type'] != 'truncatednormal':
v1,v2 = vals.split(',')
priors[parameter]['value'] = [
|
np.double(v1)
|
numpy.double
|
import os
import numpy as np
from pylightcurve.errors import *
from pylightcurve.__databases__ import plc_data
from pylightcurve.processes.files import open_dict
def _get_filter(photometric_filter):
if photometric_filter not in plc_data.all_filters():
raise PyLCInputError('{0} is not available. Available filters: {1}'.format(
photometric_filter, ','.join(plc_data.all_filters())))
def fp_over_fs(rp_over_rs, sma_over_rs, albedo, emissivity, stellar_temperature, filter_name):
_get_filter(filter_name)
def _black_body(w, t):
# w in mu
w = w / (10 ** 6)
h = 6.62607004 * (10 ** (-34))
c = 3 * (10 ** 8)
w5 = w ** 5
k = 1.38064852 * (10 ** (-23))
return (2 * h * c * c / w5) / (np.exp(h * c / w / k / t) - 1)
planet_temperature = stellar_temperature * np.sqrt(0.5 / sma_over_rs) * (((1 - albedo) / emissivity) ** 0.25)
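# The line above is the equilibrium-temperature relation
# T_p = T_* sqrt(R_*/(2a)) ((1 - A)/epsilon)^(1/4), with sma_over_rs = a/R_*.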
wavelength_array, band = np.loadtxt(os.path.join(plc_data.photometry(), filter_name + '.pass'), unpack=True)
binsedge = 0.5 * (wavelength_array[:-1] + wavelength_array[1:])
binsedge1 =
|
np.append(wavelength_array[0] - (binsedge[0] - wavelength_array[0]), binsedge)
|
numpy.append
|
import numpy as np
from menpo.shape import PointCloud
def rescale_wrt_min_dim(image, min_size):
r"""
Method that rescales a given image, so that its minimum dimension equals a
given value.
Parameters
----------
image : `menpo.image.Image`
The input menpo image object.
min_size : `int`
The desired size of the minimum dimension.
Returns
-------
rescaled_image : `menpo.image.Image`
The rescaled image.
"""
# Compute the scale factor by enforcing that the minimum image
# dimension equals the provided size.
scale_factor = min_size / min(image.shape)
# Rescale the image
return image.rescale(scale_factor)
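# Minimal usage sketch (any menpo.image.Image works here; `image` is assumed
# to be one):
#
#   resized = rescale_wrt_min_dim(image, 256)
#   # min(resized.shape) is now ~256 (up to rounding), aspect ratio preserved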
def random_centered_crops(image, crop_shape, n_crops):
r"""
Method that generates centered crops that are randomly sampled from a normal
distribution. The method performs the sampling only over the maximum
dimension, because it is assumed that the minimum dimension of the image
equals to the crop shape of that dimension. This method can be used for data
augmentation.
Parameters
----------
image : `menpo.image.Image`
The input menpo image object.
crop_shape : (`int`, `int`)
The desired crop shape.
n_crops : `int`
The number of randomly sampled cropped images to generate. Note that the
number of returned images is ``n_crops + 1``, as it also includes the
region cropped around the image's center.
Returns
-------
cropped_images : `list` of `menpo.image.Image`
The `list` of cropped images.
"""
# Check whether the image has the same shape as the desired crop shape,
# in which case no crop should be performed.
if image.shape == crop_shape:
cropped_images = [image]
else:
# Get image centre
centre = image.centre()
# Get dimension over which to randomly sample
max_dim = np.argmax(image.shape)
# Sample from a normal distribution.
# The mean of the distribution is the image center and the std the
# margin that is left for sampling.
gau_m = centre[max_dim]
gau_std = np.sqrt(
|
np.abs(image.shape[max_dim] - crop_shape[max_dim])
|
numpy.abs
|
#!/usr/bin/env python3
"""References:
* <NAME>., & <NAME>. 2016. Optimal round
trip lunar missions based on the patched-conic approximation.
Computational and Applied Mathematics, 35(3),
753–787. https://doi.org/10.1007/s40314-015-0247-y
"""
import numpy as np
np.seterr(divide='raise', invalid='raise')
#from scipy.optimize import newton
from scipy.linalg import norm
from orbit import Orbit
def rotate_2d(theta):
return np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta),
|
np.cos(theta)
|
numpy.cos
|
import os
import os.path
import pickle
import numpy as np
import tensorflow as tf
from dnnlib import tflib
from visualizer import HtmlPageVisualizer
def Vis(bname,suffix,out,rownames=None,colnames=None):
num_images=out.shape[0]
step=out.shape[1]
if colnames is None:
colnames=[f'Step {i:02d}' for i in range(1, step + 1)]
if rownames is None:
rownames=[str(i) for i in range(num_images)]
visualizer = HtmlPageVisualizer(
num_rows=num_images, num_cols=step + 1, viz_size=256)
visualizer.set_headers(
['Name'] +colnames)
for i in range(num_images):
visualizer.set_cell(i, 0, text=rownames[i])
for i in range(num_images):
for k in range(step):
image=out[i,k,:,:,:]
visualizer.set_cell(i, 1+k, image=image)
# Save results.
visualizer.save(f'./html/'+bname+'_'+suffix+'.html')
def LoadData(img_path):
tmp=img_path+'S'
with open(tmp, "rb") as fp: #Pickling
s_names,all_s=pickle.load( fp)
dlatents=all_s
pindexs=[]
mindexs=[]
for i in range(len(s_names)):
name=s_names[i]
if not('ToRGB' in name):
mindexs.append(i)
else:
pindexs.append(i)
tmp=img_path+'S_mean_std'
with open(tmp, "rb") as fp: #Pickling
m,std=pickle.load( fp)
return dlatents,s_names,mindexs,pindexs,m,std
def LoadModel(model_path,model_name):
# Initialize TensorFlow.
tflib.init_tf()
tmp=os.path.join(model_path,model_name)
with open(tmp, 'rb') as f:
_, _, Gs = pickle.load(f)
Gs.print_layers()
return Gs
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False):
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
Can be used as an output transformation for Network.run().
"""
if nchw_to_nhwc:
images = np.transpose(images, [0, 2, 3, 1])
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
np.clip(images, 0, 255, out=images)
images=images.astype('uint8')
return images
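# With the default drange=[-1, 1] the mapping above uses scale = 127.5 and an
# offset of 0.5 - (-1)*127.5 = 128, so -1.0 -> 0.5 -> 0 and +1.0 -> 255.5 -> 255
# after clipping and the uint8 cast.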
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
Can be used as an input transformation for Network.run().
"""
if nhwc_to_nchw:
images=np.rollaxis(images, 3, 1)
return images/ 255 *(drange[1] - drange[0])+ drange[0]
class Manipulator():
def __init__(self,dataset_name='ffhq'):
self.file_path='./'
self.img_path=self.file_path+'npy/'+dataset_name+'/'
self.model_path=self.file_path+'model/'
self.dataset_name=dataset_name
self.model_name=dataset_name+'.pkl'
self.alpha=[0] #manipulation strength
self.num_images=10
self.img_index=0 #which image to start
self.viz_size=256
self.manipulate_layers=None #which layer to manipulate, list
self.dlatents,self.s_names,self.mindexs,self.pindexs,self.code_mean,self.code_std=LoadData(self.img_path)
self.sess=tf.InteractiveSession()
init = tf.global_variables_initializer()
self.sess.run(init)
self.Gs=LoadModel(self.model_path,self.model_name)
self.num_layers=len(self.dlatents)
self.Vis=Vis
self.noise_constant={}
for i in range(len(self.s_names)):
tmp1=self.s_names[i].split('/')
if not 'ToRGB' in tmp1:
tmp1[-1]='random_normal:0'
size=int(tmp1[1].split('x')[0])
tmp1='/'.join(tmp1)
tmp=(1,1,size,size)
self.noise_constant[tmp1]=np.random.random(tmp)
tmp=self.Gs.components.synthesis.input_shape[1]
d={}
d['G_synthesis_1/dlatents_in:0']=np.zeros([1,tmp,512])
names=list(self.noise_constant.keys())
tmp=tflib.run(names,d)
for i in range(len(names)):
self.noise_constant[names[i]]=tmp[i]
self.fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
self.img_size=self.Gs.output_shape[-1]
def GenerateImg(self,codes):
num_images,step=codes[0].shape[:2]
out=np.zeros((num_images,step,self.img_size,self.img_size,3),dtype='uint8')
for i in range(num_images):
for k in range(step):
d={}
for m in range(len(self.s_names)):
d[self.s_names[m]]=codes[m][i,k][None,:] #need to change
d['G_synthesis_1/4x4/Const/Shape:0']=np.array([1,18, 512], dtype=np.int32)
d.update(self.noise_constant)
img=tflib.run('G_synthesis_1/images_out:0', d)
image=convert_images_to_uint8(img, nchw_to_nhwc=True)
out[i,k,:,:,:]=image[0]
return out
def MSCode(self,dlatent_tmp,boundary_tmp):
step=len(self.alpha)
dlatent_tmp1=[tmp.reshape((self.num_images,-1)) for tmp in dlatent_tmp]
dlatent_tmp2=[
|
np.tile(tmp[:,None],(1,step,1))
|
numpy.tile
|
"""Utilities for creating or modifying graphs matrices."""
import numpy as np
from scipy.sparse import csr_matrix, kron
from tqdm import tqdm
from sklearn.neighbors import NearestNeighbors
import networkx as nx
def adj_matrix_ring(N=None, weights=None):
"""Return the adjacency matrix of a path graph.
Parameters
----------
N: int, default=None
Number of graph nodes.
weights: array, default=None
Array with edge weights.
Returns
-------
A : np.ndarray, shape=(N,N)
"""
assert N is not None or weights is not None, (
"Either 'N' or 'weights' must be given."
)
if N is not None and weights is not None:
print("Ignoring 'N' since 'weights' was also given.")
if weights is None:
weights = np.zeros(N) + 1
return np.roll(np.diag(weights), shift=1, axis=1)
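# Worked example: the unweighted 4-node case is a directed cycle,
#   >>> adj_matrix_ring(4)
#   array([[0., 1., 0., 0.],
#          [0., 0., 1., 0.],
#          [0., 0., 0., 1.],
#          [1., 0., 0., 0.]])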
def coords_ring_graph(N):
"""Return the vertices coordinates of a ring graph.
Parameters
----------
N: int
Number of graph nodes.
"""
coords = np.zeros((N, 2))
n = np.arange(N)
coords[:, 0] = np.cos(2.0*np.pi*n/N)
coords[:, 1] = -np.sin(2.0*np.pi*n/N)
return coords
def adj_matrix_path(N=None, weights=None, directed=False):
"""Return the adjacency matrix of a path graph.
Parameters
----------
N: int, default=None
Number of graph nodes.
weights: array, default=None
Array with edge weights. If None, unit weights are used.
If not None, then the given value of N is replaced by
len(weights) + 1.
directed: bool, default=False
If True, a directed graph is created.
Returns
-------
A : np.ndarray, shape=(N,N)
"""
assert N is not None or weights is not None, (
"Either 'N' or 'weights' must be given."
)
if N is not None and weights is not None:
print("Ignoring 'N' since 'weights' was also given.")
if weights is None:
A = np.tri(N, k=1) - np.tri(N, k=-2) - np.eye(N)
else:
assert isinstance(weights, np.ndarray)
N = len(weights) + 1
A = np.zeros((N, N), dtype=weights.dtype)
A[:-1, 1:] = np.diag(weights)
A = A + A.transpose()
if directed:
A = np.tril(A)
return A
def coords_path(N):
"""Coordinates of the vertices in the path graph.
Parameters
----------
N : int
Number of graph vertices.
Returns
-------
coords : np.ndarray, shape=(N,2)
"""
coords = np.array([[i, 0] for i in range(N)])
return coords
def make_path(N, weights=None, directed=False):
"""Create adjacency matrix and coordinates of a path graph.
Parameters
----------
N: int
Number of graph nodes.
weights: array, default=None
Array with edge weights. If None, unit weights are used.
If not None, then the given value of N is replaced by
len(weights) + 1.
directed: bool, default=False
If True, a directed graph is created.
Returns
-------
A : np.ndarray, shape=(N,N)
coords : np.ndarray, shape=(N,2)
"""
if weights is not None:
assert N == len(weights) + 1, (
"Length of weights array is {}, not compatible with "
"{} vertices.".format(len(weights), N)
)
A = adj_matrix_path(N, weights=weights, directed=directed)
coords = coords_path(N)
return A, coords
def make_sensor(N, n_neighbors=5, seed=42, directed=False):
"""Make a sensor graph.
Parameters
----------
N : int
Number of nodes.
n_neighbors : int, default=5
seed : int, default=42
directed : bool, default=False
Example
-------
>>> A, coords = make_sensor(10, n_neighbors=5, directed=True)
>>> A.sum(axis=1)
array([1.44572985, 1.4334267 , 1.4211341 , 1.42434198, 1.30134343,
       1.62947431, 1.96808648, 1.32391798, 1.37514894, 1.25401296])
>>> coords
array([[0.37454012, 0.95071431],
[0.73199394, 0.59865848],
[0.15601864, 0.15599452],
[0.05808361, 0.86617615],
[0.60111501, 0.70807258],
[0.02058449, 0.96990985],
[0.83244264, 0.21233911],
[0.18182497, 0.18340451],
[0.30424224, 0.52475643],
[0.43194502, 0.29122914]])
"""
rnd = np.random.RandomState(seed=seed)  # use the seed argument rather than a hard-coded value
coords = rnd.uniform(low=0.0, high=1.0, size=(N, 2))
A = nearest_neighbors(coords, n_neighbors=n_neighbors)
A = A.toarray()
if not directed:
A = A + A.T
return A, coords
def make_grid(rows, columns, weights_r=None, weights_c=None):
"""Create a grid graph.
By "grid graph" we mean the underlying domain of digital images,
as usually modelled by a graph in which each pixel rests on a node
and each node is only connected to its direct neighbors, in the
vertical and horizontal directions.
Parameters
----------
rows: int
Number of rows in the grid.
columns: int
Number of columns in the grid.
weights_r: array, default=None
Weights in the rows. If None, unit weights are considered.
weights_c: array, default=None
Weights in the columns. If None, unit weights are considered.
Returns
-------
A : scipy.sparse.csr_matrix, shape=(N,N)
coords : np.ndarray, shape=(N,2)
"""
A1, coords1 = make_path(columns, weights=weights_c)
A2, coords2 = make_path(rows, weights=weights_r)
N1 = len(A1)
N2 = len(A2)
# Using the property that the grid graph is the cartesian product
# of two path graphs.
A = kron(
csr_matrix(A1), csr_matrix(np.eye(N2))
) + kron(
csr_matrix(np.eye(N1)), csr_matrix(A2)
)
coords = list()
for c1 in coords1[:, 0].ravel():
for c2 in coords2[:, 0].ravel():
coords.append([c1, c2])
coords = np.array(coords)
return A, coords
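# Usage sketch: a 2x3 grid has 6 nodes and 7 undirected edges
# (3 vertical edges between the 2 rows plus 4 horizontal edges along the 3 columns).
#   >>> A, coords = make_grid(2, 3)
#   >>> A.shape, coords.shape
#   ((6, 6), (6, 2))
#   >>> A.toarray().sum() / 2
#   7.0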
def nearest_neighbors(
X, n_neighbors=20, algorithm='ball_tree', mode='distance'):
"""Return the nearest neighbors' graph weighted adjacency matrix.
This is a wrapper for the Scikit-learn NearestNeighbors.kneighbors_graph
method.
Parameters
----------
X : np.ndarray()
Array of features.
n_neighbors : int, optional, default: 20
algorithm : str, optional, default: 'ball_tree'
mode : str, optional, default: 'distance'
Return
------
W : weighted adjacency matrix in CSR (Compressed Sparse Row) format
"""
nbrs = NearestNeighbors(
n_neighbors=n_neighbors, algorithm=algorithm).fit(X)
W = nbrs.kneighbors_graph(X, mode=mode)
return W
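# Usage sketch: a 5-neighbour distance graph over 100 random 2-D points.
#   >>> rng = np.random.RandomState(0)
#   >>> W = nearest_neighbors(rng.rand(100, 2), n_neighbors=5)
#   >>> W.shape
#   (100, 100)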
def adj_matrix_from_coords(coords, theta, verbose=False):
"""Create a gaussian-weighted adjacency matrix using euclidean distance.
Nodes for which the distance is greater than 2*theta are ignored.
Parameters
----------
coords : array
(N, 2) array of coordinates.
theta : float
Variance of the weight distribution.
"""
[N, M] = coords.shape
A = np.zeros((N, N))
for i in (tqdm(np.arange(1, N)) if verbose else np.arange(1, N)):
for j in np.arange(i):
x1 = coords[i, 0]
y1 = coords[i, 1]
x2 = coords[j, 0]
y2 = coords[j, 1]
distance = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
if distance < 2 * theta:
A[i, j] = np.exp(-(distance ** 2)/(2 * theta ** 2))
if verbose:
print("adj_matrix_from_coords process is completed.")
return A + A.transpose()
def adj_matrix_from_coords_limited(coords, limit, theta=1, verbose=False):
"""Create a nearest-neighbors graph with gaussian weights.
Parameters
----------
coords : array
(N, 2) array of coordinates.
limit : int
Minimum number of neighbors.
theta : float
Variance of the gaussian weight distribution.
"""
[N, M] = coords.shape
A = np.zeros((N, N))
for i in (tqdm(np.arange(1, N)) if verbose else
|
np.arange(1, N)
|
numpy.arange
|
import re,os,sys,warnings,numpy,scipy,math,itertools;
from scipy import stats;
from numpy import *;
from multiprocessing import Pool;
from scipy.optimize import fmin_cobyla
from scipy.optimize import fmin_l_bfgs_b
from math import log;
numpy.random.seed(1231);
warnings.filterwarnings('ignore');
#MultiProcessor
MultiProcessor=1;
if len(sys.argv)>=4:
MultiProcessor=int(sys.argv[3]);
#splicing difference cutoff
cutoff=0.1;
if len(sys.argv)>=5:
cutoff=float(sys.argv[4]);
#binomial MLE optimization functions
def logit(x):
if x<0.01:
x=0.01;
if x>0.99:
x=0.99;
return(log(x/(1-x)));
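# e.g. logit(0.5) == 0.0, and because of the clipping above logit(0.999) equals
# logit(0.99) = log(0.99/0.01) ~= 4.595 (and likewise at the lower end).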
def veclogit(x):
res=[];
for i in x:
res.append(logit(i));
return(res);
#START: log Marginal distribution for the paired analysis
#this is the full log likelihood function within the Laplace approximation of the marginal distribution of paired replicates
#for one exon only
def myfunc_marginal_full_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sigma=matrix([[var1,rho*sqrt(var1)*sqrt(var2)],[rho*sqrt(var1)*sqrt(var2),var2]]);
sigma_inv=sigma.getI();
vec=matrix([[logit(psi1)-logit(beta1)],[logit(psi2)-logit(beta2)]]);
new_psi1=inclusion_length*psi1/(inclusion_length*psi1+skipping_length*(1-psi1));
new_psi2=inclusion_length*psi2/(inclusion_length*psi2+skipping_length*(1-psi2));
#print('vec');print(vec);
temp1=0;temp2=0;temp3=0;temp4=0;
temp1+=-0.5*log(abs(linalg.det(sigma)));
temp2+=-0.5*dot(dot(vec.getT(),sigma_inv),vec);
temp3+=I1*log(new_psi1)+S1*log(1-new_psi1)+log(psi1)+log(1-psi1);
temp4+=I2*log(new_psi2)+S2*log(1-new_psi2)+log(psi2)+log(1-psi2);
return(temp1+temp2+temp3+temp4);
#return the det of sigma1 matrix for one exon
def myfunc_marginal_sigma1_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sigma=matrix([[var1,rho*sqrt(var1)*sqrt(var2)],[rho*sqrt(var1)*sqrt(var2),var2]]);
sigma_inv=sigma.getI();
vec=matrix([[logit(psi1)-logit(beta1)],[logit(psi2)-logit(beta2)]]);
vec_der=matrix([[1/psi1/(1-psi1)],[1/psi2/(1-psi2)]]);
vec_2der=matrix([[(-1+2*psi1)/pow(psi1,2)/pow(1-psi1,2)],[(-1+2*psi2)/pow(psi2,2)/pow(1-psi2,2)]]);
new_psi1=inclusion_length*psi1/(inclusion_length*psi1+skipping_length*(1-psi1));
new_psi2=inclusion_length*psi2/(inclusion_length*psi2+skipping_length*(1-psi2));
a_apostrophe=sigma_inv[0,0]*pow(vec_der[0,0],2)+(sigma_inv[0,0]*vec[0,0]+sigma_inv[0,1]*vec[1,0])*vec_2der[0,0];
a_apostrophe+=-1*pow(psi1,-2)+pow(1-psi1,-2);
a_apostrophe+=-1*I1*skipping_length*((2*inclusion_length+skipping_length)*psi1+skipping_length*(1-psi1))/pow(psi1,2)/pow(inclusion_length*psi1+skipping_length*(1-psi1),2);
a_apostrophe+=-1*S1*inclusion_length*((inclusion_length+2*skipping_length)*(1-psi1)+inclusion_length*psi1)/pow(1-psi1,2)/pow(inclusion_length*psi1+skipping_length*(1-psi1),2);
b_apostrophe=sigma_inv[0,1]*vec_der[0,0]*vec_der[1,0];
d_apostrophe=sigma_inv[1,1]*pow(vec_der[1,0],2)+(sigma_inv[1,1]*vec[1,0]+sigma_inv[0,1]*vec[0,0])*vec_2der[1,0];
d_apostrophe+=-1*pow(psi2,-2)+pow(1-psi2,-2);
d_apostrophe+=-1*I2*skipping_length*((2*inclusion_length+skipping_length)*psi2+skipping_length*(1-psi2))/pow(psi2,2)/pow(inclusion_length*psi2+skipping_length*(1-psi2),2);
d_apostrophe+=-1*S2*inclusion_length*((inclusion_length+2*skipping_length)*(1-psi2)+inclusion_length*psi2)/pow(1-psi2,2)/pow(inclusion_length*psi2+skipping_length*(1-psi2),2);
sigma_1=matrix([[a_apostrophe,b_apostrophe],[b_apostrophe,d_apostrophe]]);
return(-1*sigma_1);
def myfunc_marginal_paired_rho0(x, *args):
beta1=x[0];beta2=x[1];rho=0;
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
var1=args[8];var2=args[9];
sum=0;temp1=matrix([[1]]);temp2=0;
sum_temp1=0;sum_temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum+=(-0.5*log(abs(linalg.det(temp1))+1)+temp2);
sum_temp1+=temp1;sum_temp2+=temp2;
return(-1*sum);
def myfunc_marginal_der_paired_rho0(x, *args):
beta1=x[0];beta2=x[1];rho=0;
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
var1=args[8];var2=args[9];
sum=array([0.0,0.0]);
sum_temp1=array([0.0,0.0]);sum_temp2=array([0.0,0.0]);
temp1=0;temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length,1);
temp2=myfunc_marginal_full_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum_temp1+=temp1[0:2];sum_temp2+=temp2[0:2];
sum=sum+temp1[0:2]+temp2[0:2];
sigma1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
#if len(args)>10:
# ofile=open('test_output.txt','a');ofile.write(str(temp1[0])+'\t'+str(temp1[1])+'\t'+str(psi1[i])+'\t'+str(psi2[i])+'\t'+str(linalg.det(sigma1))+'\t'+str(-0.5/(linalg.det(sigma1)+1))+'\t'+str(list(sigma1))+'\n');
return(-1*array(sum));
def myfunc_marginal_paired_rho(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
var1=args[8];var2=args[9];
sum=0;temp1=matrix([[1]]);temp2=0;
sum_temp1=0;sum_temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum+=(-0.5*log(abs(linalg.det(temp1))+1)+temp2);
return(-1*sum);
def myfunc_marginal_der_paired_rho(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
var1=args[8];var2=args[9];
sum=array([0.0,0.0,0.0]);
sum_temp1=array([0.0,0.0,0.0,0.0,0.0]);sum_temp2=array([0.0,0.0,0.0,0.0,0.0]);
temp1=0;temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum=sum+(temp1[0:3]+temp2[0:3]);
return(-1*array(sum));
def myfunc_marginal_paired_fixvar(x, *args):
beta1=x[0];beta2=x[1];
#beta1=x[0];beta2=args[11];
#beta2=x[0];beta1=args[11];
rho=args[8];var1=args[9];var2=args[10];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sum=0;temp1=matrix([[1]]);temp2=0;
sum_temp1=0;sum_temp2=0;
#print('fixvar_beta1');print(beta1);print('fixvar_beta2');print(beta2);
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum+=(0.5*log(abs(linalg.det(temp1))+1)-temp2);
sum_temp1+=temp1;sum_temp2+=temp2;
#print('sum_temp1');print(sum_temp1);print('sum_temp2');print(sum_temp2);print('sum');print(sum);
return(sum);
def myfunc_marginal_der_paired_fixvar(x, *args):
beta1=x[0];beta2=x[1];
#beta1=x[0];beta2=args[11];
#beta2=x[0];beta1=args[11];
rho=args[8];var1=args[9];var2=args[10];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sum=array([0.0,0.0]);
#sum_temp1=array([0.0,0.0,0.0,0.0,0.0]);sum_temp2=array([0.0,0.0,0.0,0.0,0.0]);
temp1=0;temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
#sum_temp1=sum_temp1-1*temp1;sum_temp2=sum_temp2-1*temp2;
sum=sum+(-1*temp1[0:2]-1*temp2[0:2]);
#print('sum_temp1_der');print(sum_temp1);print('sum_temp2_der');print(sum_temp2);
return(array(sum));
def myfunc_marginal_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sum=0;temp1=matrix([[1]]);temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum+=(-0.5*log(abs(linalg.det(temp1))+1)+temp2);
return(-1*sum);
#END: log Marginal distribution for the paired analysis
#START: Derivative of the log Marginal distribution for the paired analysis
def myfunc_marginal_full_der_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sigma=matrix([[var1,rho*sqrt(var1)*sqrt(var2)],[rho*sqrt(var1)*sqrt(var2),var2]]);
sigma_inv=sigma.getI();
vec=matrix([[logit(psi1)-logit(beta1)],[logit(psi2)-logit(beta2)]]);
temp=-1*dot(transpose(vec),dot(sigma_inv,matrix([[-1/beta1/(1-beta1),0],[0,-1/beta2/(1-beta2)]])));
temp1=temp[0,0];temp2=temp[0,1];
diag=(-2*rho-1+pow(rho,2))/sqrt(var1)/sqrt(var2);
temp3=-0.5*dot(transpose(vec),dot(matrix([[2*rho/var1,diag],[diag,2*rho/var2]]),vec))/pow(1-rho*rho,2)-0.5/linalg.det(sigma)*(-1*var1*var2);
#temp4=(-1/pow(var1,2)*pow(vec[0,0],2)+rho/pow(var1,1.5)/pow(var2,0.5)*vec[0,0]*vec[1,0])/(1-rho*rho)+(1-rho*rho)*var2/linalg.det(sigma);
#temp5=(-1/pow(var2,2)*pow(vec[1,0],2)+rho/pow(var2,1.5)/pow(var1,0.5)*vec[0,0]*vec[1,0])/(1-rho*rho)+(1-rho*rho)*var1/linalg.det(sigma);
return(array([float(temp1),float(temp2),float(temp3)]));
def myfunc_marginal_sigma1_der_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sigma=matrix([[var1,rho*sqrt(var1)*sqrt(var2)],[rho*sqrt(var1)*sqrt(var2),var2]]);
sigma_inv=sigma.getI();
#print('sigma_inv');print(sigma_inv);
vec=matrix([[logit(psi1)-logit(beta1)],[logit(psi2)-logit(beta2)]]);
vec_der=matrix([[1/psi1/(1-psi1)],[1/psi2/(1-psi2)]]);
vec_2der=matrix([[(-1+2*psi1)/pow(psi1,2)/pow(1-psi1,2)],[(-1+2*psi2)/pow(psi2,2)/pow(1-psi2,2)]]);
sigma1=myfunc_marginal_sigma1_paired([beta1,beta2,rho,var1,var2],I1,S1,psi1,I2,S2,psi2,inclusion_length,skipping_length);
temp1=sigma_inv[0,0]*(-1/beta1/(1-beta1))*vec_2der[0,0]*sigma1[1,1]*(-1)+sigma_inv[0,1]*(-1/beta1/(1-beta1))*vec_2der[1,0]*sigma1[0,0]*(-1);
temp2=sigma_inv[1,1]*(-1/beta2/(1-beta2))*vec_2der[1,0]*sigma1[0,0]*(-1)+sigma_inv[0,1]*(-1/beta2/(1-beta2))*vec_2der[0,0]*sigma1[1,1]*(-1);
#temp1=(-1/var1/beta1/(1-beta1)*vec_2der[0,0])*sigma1[1,1]/(1-rho*rho)*(-1)-(rho/sqrt(var1)/sqrt(var2)/beta1/(1-beta1)*vec_2der[1,0])*sigma1[0,0]/(1-rho*rho);
#temp2=(-1/var2/beta2/(1-beta2)*vec_2der[1,0])*sigma1[0,0]/(1-rho*rho)*(-1)-(rho/sqrt(var1)/sqrt(var2)/beta2/(1-beta2)*vec_2der[0,0])*sigma1[1,1]/(1-rho*rho);
temp3=(-1/sqrt(var1)/sqrt(var2)*vec[1,0]*vec_2der[0,0])*sigma1[1,1]/(1-rho*rho)*(-1);
temp3+=(-1/sqrt(var1)/sqrt(var2)*vec[0,0]*vec_2der[1,0])*sigma1[0,0]/(1-rho*rho)*(-1);
temp3+=-2*(-1/sqrt(var1)/sqrt(var2)*vec_der[0,0]*vec_der[1,0])*sigma1[0,1]/(1-rho*rho)*(-1);
temp3+=4*rho/(1-rho*rho)*linalg.det(sigma1);
#print('test_der');print(linalg.det(sigma1));print(x);print(-0.5/(linalg.det(sigma1))*array([float(temp1),float(temp2),float(temp3)]));
#if len(args)>8:
# ofile=open('test_output_2.txt','a');ofile.write(str([float(temp1),float(temp2),float(temp3)])+'\n');
if linalg.det(sigma1)>0:
return(-0.5/(linalg.det(sigma1)+1)*array([float(temp1),float(temp2),float(temp3)]));
else:
return(-0.5/(linalg.det(sigma1)-1)*array([float(temp1),float(temp2),float(temp3)]));
def myfunc_marginal_der_paired(x, *args):
beta1=x[0];beta2=x[1];rho=x[2];var1=x[3];var2=x[4];
I1=args[0];S1=args[1];psi1=args[2];
I2=args[3];S2=args[4];psi2=args[5];
inclusion_length=args[6];skipping_length=args[7];
sum=array([0.0,0.0,0.0,0.0,0.0]);
sum_temp1=array([0.0,0.0,0.0,0.0,0.0]);sum_temp2=array([0.0,0.0,0.0,0.0,0.0]);
temp1=0;temp2=0;
for i in range(len(psi1)):
temp1=myfunc_marginal_sigma1_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
temp2=myfunc_marginal_full_der_paired([beta1,beta2,rho,var1,var2],I1[i],S1[i],psi1[i],I2[i],S2[i],psi2[i],inclusion_length,skipping_length);
sum_temp1=sum_temp1-1*temp1;sum_temp2=sum_temp2-1*temp2;
sum=sum+(-1*temp1-1*temp2);
return(array(sum[0:5]));
#END: Derivative of the log Marginal distribution for the paired analysis
#START: log-likelihood function of each individual replicate
def myfunc_individual_paired(x, *args):
psi1=x[0];psi2=x[1];
I1=args[0];S1=args[1];beta1=args[2];var1=args[3];
I2=args[4];S2=args[5];beta2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
rho=args[10];
temp=myfunc_marginal_full_paired([beta1,beta2,rho,var1,var2],I1,S1,psi1,I2,S2,psi2,inclusion_length,skipping_length);
return(-1*temp);
def myfunc_individual_der_paired(x,*args):
psi1=x[0];psi2=x[1];
I1=args[0];S1=args[1];beta1=args[2];var1=args[3];
I2=args[4];S2=args[5];beta2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
rho=args[10];
sigma=matrix([[var1,rho*sqrt(var1)*sqrt(var2)],[rho*sqrt(var1)*sqrt(var2),var2]]);
sigma_inv=sigma.getI();
vec=matrix([[logit(psi1)-logit(beta1)],[logit(psi2)-logit(beta2)]]);
vec_der=matrix([[1/psi1/(1-psi1)],[1/psi2/(1-psi2)]]);
new_psi1=inclusion_length*psi1/(inclusion_length*psi1+skipping_length*(1-psi1));
new_psi1_der=inclusion_length*skipping_length/pow(inclusion_length*psi1+skipping_length*(1-psi1),2);
new_psi2=inclusion_length*psi2/(inclusion_length*psi2+skipping_length*(1-psi2));
new_psi2_der=inclusion_length*skipping_length/pow(inclusion_length*psi2+skipping_length*(1-psi2),2);
temp1=0;temp2=0;
temp1+=-1*(sigma_inv[0,0]*vec[0,0]*vec_der[0,0]+sigma_inv[0,1]*vec_der[0,0]*vec[1,0]);
temp1+=1*(I1/new_psi1*new_psi1_der-S1/(1-new_psi1)*new_psi1_der+1/psi1-1/(1-psi1));
temp2+=-1*(sigma_inv[1,1]*vec[1,0]*vec_der[1,0]+sigma_inv[0,1]*vec_der[1,0]*vec[0,0]);
temp2+=1*(I2/new_psi2*new_psi2_der-S2/(1-new_psi2)*new_psi2_der+1/psi2-1/(1-psi2));
return(-1*array([temp1,temp2]));
#END: log-likelihood function of each individual replicate
def myfunc_multivar(x,*args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
#print('psi1');print(psi1);print('psi2');print(psi2);
sum1=0;sum2=0;
for i in psi1:
sum1+=pow(logit(i)-logit(x[0]),2);
sum1=sum1/var1/2;
for i in psi2:
sum2+=pow(logit(i)-logit(x[1]),2);
sum2=sum2/var2/2;
return(sum1+sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(pow(stats.norm.ppf(x[0]),2)+pow(stats.norm.ppf(x[1]),2)-2*rho*stats.norm.ppf(x[0])*stats.norm.ppf(x[1])));
def myfunc_multivar_der(x,*args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
sum1=0;sum2=0;
for i in psi1:
sum1+=-2*(logit(i)-logit(x[0]))/x[0]/(1-x[0]);
sum1=sum1/var1/2;
res1=sum1+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x[0])-2*rho*stats.norm.ppf(x[1]))/stats.norm.pdf(stats.norm.ppf(x[0]));
#print('1');print(x[1]);print(res1);print(stats.norm.pdf(stats.norm.ppf(x[0])));
for i in psi2:
sum2+=-2*(logit(i)-logit(x[1]))/x[1]/(1-x[1]);
sum2=sum2/var2/2;
res2=sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x[1])-2*rho*stats.norm.ppf(x[0]))/stats.norm.pdf(stats.norm.ppf(x[1]));
return(numpy.array([res1,res2]));
def myfunc_1(x, *args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
sum1=0;sum2=0;
for i in psi1:
sum1+=pow(logit(i)-logit(x+cutoff),2);
sum1=sum1/var1/2;
for i in psi2:
sum2+=pow(logit(i)-logit(x),2);
sum2=sum2/var2/2;
return(sum1+sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(pow(stats.norm.ppf(x+cutoff),2)+pow(stats.norm.ppf(x),2)-2*rho*stats.norm.ppf(x+cutoff)*stats.norm.ppf(x)));
def myfunc_der_1(x, *args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
sum1=0;sum2=0;
for i in psi1:
sum1+=-2*(logit(i)-logit(x+cutoff))/(x+cutoff)/(1-x-cutoff);
sum1=sum1/var1/2;
res1=sum1+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x+cutoff)-2*rho*stats.norm.ppf(x))/stats.norm.pdf(stats.norm.ppf(x+cutoff));
for i in psi2:
sum2+=-2*(logit(i)-logit(x))/x/(1-x);
sum2=sum2/var2/2;
res2=sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x)-2*rho*stats.norm.ppf(x+cutoff))/stats.norm.pdf(stats.norm.ppf(x));
return(res1+res2);
def myfunc_2(x, *args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
sum1=0;sum2=0;
for i in psi1:
sum1+=pow(logit(i)-logit(x),2);
sum1=sum1/var1/2;
for i in psi2:
sum2+=pow(logit(i)-logit(x+cutoff),2);
sum2=sum2/var2/2;
return(sum1+sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(pow(stats.norm.ppf(x+cutoff),2)+pow(stats.norm.ppf(x),2)-2*rho*stats.norm.ppf(x+cutoff)*stats.norm.ppf(x)));
def myfunc_der_2(x, *args):
psi1=args[0];psi2=args[1];var1=args[2];var2=args[3];
sum1=0;sum2=0;
for i in psi1:
sum1+=-2*(logit(i)-logit(x))/(x)/(1-x);
sum1=sum1/var1/2;
res1=sum1+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x+cutoff)-2*rho*stats.norm.ppf(x))/stats.norm.pdf(stats.norm.ppf(x+cutoff));
for i in psi2:
sum2+=-2*(logit(i)-logit(x+cutoff))/(x+cutoff)/(1-x-cutoff);
sum2=sum2/var2/2;
res2=sum2+0.1*0.5*(pow(rho,2))/(1-pow(rho,2))*(2*stats.norm.ppf(x)-2*rho*stats.norm.ppf(x+cutoff))/stats.norm.pdf(stats.norm.ppf(x));
return(res1+res2);
def myfunc_marginal_2der(x, args):
I=args[0];S=args[1];beta=args[2];var=args[3];
inclusion_length=args[4];
skipping_length=args[5]
temp1=1/pow(x,2)/pow(1-x,2)*(((2*x-1)*(logit(beta)-logit(x))-1)/var-1);#temp1=(2*x-1)/pow(x,2)/pow(1-x,2)*((logit(beta)-logit(x)-1/(2*x-1))/var+1);
temp2=I*skipping_length*((2*inclusion_length+skipping_length)*x+skipping_length*(1-x))/pow(x,2)/pow(inclusion_length*x+skipping_length*(1-x),2);
temp3=S*inclusion_length*((inclusion_length+2*skipping_length)*(1-x)+inclusion_length*x)/pow(1-x,2)/pow(inclusion_length*x+skipping_length*(1-x),2);
#print('test');print(beta);print(x);print(var);print(temp1);print(temp2);print(temp3);
return(temp1-temp2-temp3);
def myfunc_marginal(x, *args):
beta=x;
I=args[0];S=args[1];psi=args[2];var=args[3];
inclusion_length=args[4];skipping_length=args[5];
sum=0;temp1=0;temp2=0;
for i in range(len(psi)):
new_psi=inclusion_length*psi[i]/(inclusion_length*psi[i]+skipping_length*(1-psi[i]));
f1=I[i]*log(new_psi)+S[i]*log(1-new_psi)-pow(logit(psi[i])-logit(beta),2)/2/var-log(psi[i])-log(1-psi[i]);
temp2+=-1*f1;
f1_2der=abs(myfunc_marginal_2der(psi[i],[I[i],S[i],beta,var,inclusion_length,skipping_length]));
temp1+=0.5*log(abs(f1_2der));
sum+=(0.5*log(abs(f1_2der)+0.00001)-f1);
#print('test_temp1');print(temp1);print(temp2);
return(sum);
def myfunc_marginal_der(x, *args):
beta=x;
I=args[0];S=args[1];psi=args[2];var=args[3];
inclusion_length=args[4];skipping_length=args[5];
sum=0;
for i in range(len(psi)):
new_psi=inclusion_length*psi[i]/(inclusion_length*psi[i]+skipping_length*(1-psi[i]));
f1_3der=1*(2*psi[i]-1)/pow(psi[i],2)/pow(1-psi[i],2)/beta/(1-beta)/var;
f1_2der=myfunc_marginal_2der(psi[i],[I[i],S[i],beta,var,inclusion_length,skipping_length]);
#print('test2');print(f1_2der);
f1_der=(logit(psi[i])-logit(beta))/beta/(1-beta)/var;
f1=I[i]*log(new_psi)+S[i]*log(1-new_psi)-pow(logit(psi[i])-logit(beta),2)/2/var+log(psi[i])+log(1-psi[i]);
sum+=(0.5*f1_3der/(f1_2der)-f1_der);
return(sum);
def myfunc_marginal_1(x, *args):
beta2=x;beta1=x+cutoff;
I1=args[0];S1=args[1];psi1=args[2];var1=args[3];
I2=args[4];S2=args[5];psi2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
return(myfunc_marginal(beta1,I1,S1,psi1,var1,inclusion_length,skipping_length)+myfunc_marginal(beta2,I2,S2,psi2,var2,inclusion_length,skipping_length));
def myfunc_marginal_1_der(x, *args):
beta2=x;beta1=x+cutoff;
I1=args[0];S1=args[1];psi1=args[2];var1=args[3];
I2=args[4];S2=args[5];psi2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
return(myfunc_marginal_der(beta1,I1,S1,psi1,var1,inclusion_length,skipping_length)+myfunc_marginal_der(beta2,I2,S2,psi2,var2,inclusion_length,skipping_length));
def myfunc_marginal_2(x, *args):
beta2=x+cutoff;beta1=x;
I1=args[0];S1=args[1];psi1=args[2];var1=args[3];
I2=args[4];S2=args[5];psi2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
return(myfunc_marginal(beta1,I1,S1,psi1,var1,inclusion_length,skipping_length)+myfunc_marginal(beta2,I2,S2,psi2,var2,inclusion_length,skipping_length));
def myfunc_marginal_2_der(x, *args):
beta2=x+cutoff;beta1=x;
I1=args[0];S1=args[1];psi1=args[2];var1=args[3];
I2=args[4];S2=args[5];psi2=args[6];var2=args[7];
inclusion_length=args[8];skipping_length=args[9];
return(myfunc_marginal_der(beta1,I1,S1,psi1,var1,inclusion_length,skipping_length)+myfunc_marginal_der(beta2,I2,S2,psi2,var2,inclusion_length,skipping_length));
def myfunc_individual(x,*args):
I=args[0];S=args[1];beta=args[2];var=args[3];
inclusion_length=args[4];
skipping_length=args[5]
new_psi=inclusion_length*x/(inclusion_length*x+skipping_length*(1-x));
return(-1*(I*log(new_psi)+S*log(1-new_psi)-(logit(x)-logit(beta))*(logit(x)-logit(beta))/2/var-log(x)-log(1-x)));
def myfunc_individual_der(x,*args):
I=args[0];S=args[1];beta=args[2];var=args[3];
inclusion_length=args[4];
skipping_length=args[5];
new_psi=inclusion_length*x/(inclusion_length*x+skipping_length*(1-x));
new_psi_der=inclusion_length*skipping_length/pow(inclusion_length*x+skipping_length*(1-x),2);
return(-1*(I/new_psi*new_psi_der-S/(1-new_psi)*new_psi_der-(logit(x)-logit(beta))/var/x/(1-x)-1/x+1/(1-x) ));
def myfunc_likelihood(x, args):
I=args[0];S=args[1];beta=args[2];var=args[3];sum=0;N=I+S;
#return(-1*(-log(sqrt((I+S)*x*(1-x)))-(I-(I+S)*x)*(I-(I+S)*x)/2/((I+S)*x*(1-x))-log(sqrt(var))-(x-beta)*(x-beta)/2/var));
#print('debug');print(N);print(var);print(x);print(beta);
if N==0:
return(0);
return(-0.5*((I-N*x)*(I-N*x)/(N*x)+(S-N*(1-x))*(S-N*(1-x))/(N*(1-x)))-log(sqrt(var))-(x-beta)*(x-beta)/2/var);
def MLE_iteration_constrain(i1,i2,s1,s2,effective_inclusion_length,effective_skipping_length):
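# Alternating optimisation under the constraint |beta_0 - beta_1| = cutoff:
# each pass first refits the group-level means (beta_0, beta_1) with
# fmin_l_bfgs_b while pinning them exactly `cutoff` apart, then refits the
# per-replicate psi values given those means; the loop stops once the
# objective changes by less than 0.01 or after 100 passes.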
psi1=vec2psi(i1,s1,effective_inclusion_length,effective_skipping_length);psi2=vec2psi(i2,s2,effective_inclusion_length,effective_skipping_length);
iter_cutoff=1;iter_maxrun=100;count=0;previous_sum=0;
beta_0=sum(psi1)/len(psi1);
beta_1=sum(psi2)/len(psi2);
var1=10*scipy.var(numpy.array(psi1)-beta_0);
var2=10*scipy.var(numpy.array(psi2)-beta_1);
if var1<=0.01:
var1=0.01;
if var2<=0.01:
var2=0.01;
print('var1');print(var1);print('var2');print(var2);
while((iter_cutoff>0.01)&(count<=iter_maxrun)):
count+=1;
#iteration of beta
beta_0=sum(psi1)/len(psi1);
beta_1=sum(psi2)/len(psi2);
print('var1');print(var1);print('var2');print(var2);
#if abs(sum(psi1)/len(psi1)-sum(psi2)/len(psi2))>cutoff:
if (sum(psi1)/len(psi1))>(sum(psi2)/len(psi2)):#minize psi2 if this is the case
xopt = fmin_l_bfgs_b(myfunc_1,[sum(psi2)/len(psi2)],myfunc_der_1,args=[psi1,psi2,var1,var2],bounds=[[0.001,0.999-cutoff]],iprint=-1)
theta2 = max(min(float(xopt[0]),1-cutoff),0);theta1=theta2+cutoff;
else:#minize psi1 if this is the case
xopt = fmin_l_bfgs_b(myfunc_2,[sum(psi1)/len(psi1)],myfunc_der_2,args=[psi1,psi2,var1,var2],bounds=[[0.001,0.999-cutoff]],iprint=-1)
theta1 = max(min(float(xopt[0]),1-cutoff),0);theta2=theta1+cutoff;
print('constrain_1xopt');print('theta');print(theta1);print(theta2);print(xopt);
#else:
# theta1=sum(psi1)/len(psi1);theta2=sum(psi2)/len(psi2);
beta_0=theta1;beta_1=theta2;
#iteration of psi
new_psi1=[];new_psi2=[];current_sum=0;likelihood_sum=0;
print('constrain_2xopt');
for i in range(len(psi1)):
xopt = fmin_l_bfgs_b(myfunc_individual,[psi1[i]],myfunc_individual_der,args=[i1[i],s1[i],beta_0,var1,effective_inclusion_length,effective_skipping_length],bounds=[[0.01,0.99]],iprint=-1);
new_psi1.append(float(xopt[0]));current_sum+=float(xopt[1]);print(xopt);
#likelihood_sum+=myfunc_marginal(new_psi1[i],[i1[i],s1[i],beta_0,var1,effective_inclusion_length,effective_skipping_length]);
for i in range(len(psi2)):
xopt = fmin_l_bfgs_b(myfunc_individual,[psi2[i]],myfunc_individual_der,args=[i2[i],s2[i],beta_1,var2,effective_inclusion_length,effective_skipping_length],bounds=[[0.01,0.99]],iprint=-1);
new_psi2.append(float(xopt[0]));current_sum+=float(xopt[1]);print(xopt);
#likelihood_sum+=myfunc_marginal(new_psi2[i],[i2[i],s2[i],beta_1,var2,effective_inclusion_length,effective_skipping_length]);
print('new_psi[0]');print(new_psi1[0]);print(new_psi2[0]);
psi1=new_psi1;psi2=new_psi2;
print('count');print(count);print('previous_sum');print(previous_sum);print('current_sum');print(current_sum);
if count>1:
iter_cutoff=abs(previous_sum-current_sum);
previous_sum=current_sum;
#print('constrain');print(theta1);print(theta2);print(psi1);print(psi2);print(current_sum);print(likelihood_sum);
#print(xopt);
return([current_sum,[psi1,psi2,beta_0,beta_1,var1,var2]]);
#return([likelihood_sum,[psi1,psi2,beta_0,beta_1,var1,var2]]);
def MLE_iteration(i1,i2,s1,s2,effective_inclusion_length,effective_skipping_length):
psi1=vec2psi(i1,s1,effective_inclusion_length,effective_skipping_length);psi2=vec2psi(i2,s2,effective_inclusion_length,effective_skipping_length);
iter_cutoff=1;iter_maxrun=100;count=0;previous_sum=0;
beta_0=sum(psi1)/len(psi1);
beta_1=sum(psi2)/len(psi2);
var1=10*scipy.var(
|
numpy.array(psi1)
|
numpy.array
|
"""
analysis.py
Author: <NAME>
Affiliation: McGill University
Created on: Wed 16 Dec 2020 16:16:41 EST
Description:
"""
import pickle
import numpy as np
from .models import BubbleModel
from scipy.ndimage import gaussian_filter
from .inference import tanh_generic, power_law, power_law_max1, \
broken_power_law, broken_power_law_max1, double_power_law, \
extract_params, power_law_lognorm, erf_Q, power_law_Q, lin_Q
from .util import labels, bin_e2c, bin_c2e, get_error_2d
try:
import matplotlib.pyplot as pl
from matplotlib.cm import ScalarMappable
from matplotlib.colors import LogNorm, Normalize
except ImportError:
pass
_default_modes =
|
np.logspace(-1, 0., 21)
|
numpy.logspace
|
import numpy as np
from smp_manifold_learning.differentiable_models.ecmnn import EqualityConstraintManifoldNeuralNetwork
from smp_manifold_learning.motion_planner.feature import Feature
from smp_manifold_learning.motion_planner.smp_star import SMPStar
from smp_manifold_learning.motion_planner.util import read_cfg
from smp_manifold_learning.motion_planner.task import Task
class LearnedManifold(Feature):
def __init__(self, model_path):
Feature.__init__(self, "Sphere", dim_ambient=3, dim_feature=1)
self.r = 1
self.ecmnn = EqualityConstraintManifoldNeuralNetwork(input_dim=3,
hidden_sizes=[36, 24, 18, 10],
output_dim=1,
use_batch_norm=True, drop_p=0.0,
is_training=False, device='cpu')
self.ecmnn.load(model_path)
def y(self, x):
return self.ecmnn.y(x)
def J(self, x):
return self.ecmnn.J(x)
def param_to_xyz(self, param):
theta = param[0]
phi = param[1]
if np.isscalar(theta):
x = self.r * np.cos(theta) * np.sin(phi)
y = self.r * np.sin(theta) * np.sin(phi)
z = self.r * np.cos(phi)
else:
x = self.r * np.outer(np.cos(theta), np.sin(phi))
y = self.r * np.outer(np.sin(theta), np.sin(phi))
z = self.r * np.outer(np.ones_like(theta),
|
np.cos(phi)
|
numpy.cos
|
import cv2
import argparse
import configparser
import time
import os.path
import numpy as np
import skimage.morphology
### Module imports ###
import sys
sys.path.append('../../')
from common.utility import *
class BgDetector:
"""
Class implementation for detecting fish keypoints.
Utilizes an extracted background image (From ExtractBackground.py)
Images are thresholded using either an entropy split (front view) or an intermodes split (top view).
"""
def __init__(self, camId, dataPath):
"""
Initialize object
Input:
camId: Camera view of the video to be analysed. 1 = Top, 2 = Front
dataPath: Path to the video files
"""
self.timer = False
self.camId = camId
self.onlyHeads = (camId == 1)
self.loadSettings(dataPath)
# Load static background and downsample it
bgPath = os.path.join(dataPath, 'background_cam{0}.png'.format(self.camId))
bg = cv2.imread(bgPath)
self.bg = bg[::self.downsample,::self.downsample]
# Frame at different stages
self.frame = None # Original frame
self.dif = None # After background subtraction
self.blur = None # After applying blur
self.thresh = None # After thresholding (i.e. binary)
self.thin = None # After skeletonization
def loadSettings(self, path):
"""
Load settings from config file in the provided path.
Config file includes information on the following, which is set in the object:
downsample_factor: How much the images should be downsampled during processing
blur_size: Size of the median blur filter
min_blob_size: Minimum size of the detected blobs
Input:
path: String path to the folder where the settings.ini file is located
"""
config = readConfig(path)
c = config['Detector']
self.n_fish = c.getint('n_fish')
self.detectorType = c.get('cam{}_type'.format(self.camId))
self.downsample = c.getint('downsample_factor') # How much to downsample the image by
self.blurSize = c.getint('blur_size') # Size of median blur
self.minBlobSize = c.getint('min_blob_size') # used to filter BLOBs in the "blob" function
self.minPatchArea = c.getint("min_patch_area") # used to filter BLOBs in calcEig
self.minSkeletonSize = c.getint("min_skeleton_length") # minimum length between two keypoints in the skeleton (cam1) for the distance to be considered when finding the best keypoint
self.winSize = c.getint("window_size") # Size of window around keypoint in calcEig
self.nms_thresh = c.getfloat("nms_threshold") # Threshold for how large an overlap there can be before applying NMS
if self.camId == 1:
self.max_frame = c.getint("cam1_maxframe")
self.min_frame = c.getint("cam1_minframe")
else:
self.max_frame = c.getint("cam2_maxframe")
self.min_frame = c.getint("cam2_minframe")
tl, br = getROI(path, self.camId)
self.tl = tl // self.downsample
self.br = br // self.downsample
print(self.tl, self.br)
def detect(self, frame, bboxes):
"""
Performs the detection step
Input:
frame: The current frame
bboxes: Bounding boxes to consider; they are restricted to the ROI via applyROIBBs
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
c_x: x center coordinate of the original bounding box
c_y: y center coordinate of the original bounding box
w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
## Downsample video
_start = time.time()
self.frame = frame[::self.downsample,::self.downsample]
_end = time.time()
if(self.timer):
print("Downsample time: {0}".format(_end-_start))
## Subtract background
self.diff = self.bgSubtract(self.frame)
## Blur image
_start = time.time()
self.blur = cv2.medianBlur(self.diff,self.blurSize)
_end = time.time()
if(self.timer):
print("Blur time: {0}".format(_end-_start))
## Threshold image. Method is dependent on camera view
if(self.camId == 1):
# Threshold image using intermodes algorithm
th = self.intermodesSplit(self.blur)
self.thresh = self.blur > th
elif(self.camId == 2):
# Threshold image using max entropy
th = self.entropySplit(self.blur)
self.thresh = self.blur > th
# Remove everything outside of the ROI
self.thresh = self.applyROIMat(self.thresh)
self.bboxes = applyROIBBs(bboxes, self.tl, self.br)
# Find keypoints and boundingbox of the objects based on the detector method
if(self.detectorType == 'blob'):
filtered, bbs = self.blob()
elif(self.detectorType == 'skeleton'):
filtered, bbs = self.skeleton()
else:
print("Error. Unknown detector type. Check settings.ini to see whether detector type is [blob] or [skeleton].")
sys.exit()
return filtered, bbs
def applyROIMat(self, mat):
"""
Sets everything outside of the ROI to 0
Input:
Mat: Input image
Output:
mat: ROI output image
"""
if mat.ndim == 2:
mat[:,:self.tl[0]] = 0
mat[:,self.br[0]+1:] = 0
mat[:self.tl[1]] = 0
mat[self.br[1]+1:] = 0
elif mat.ndim == 3:
mat[:,:self.tl[0],:] = 0
mat[:,self.br[0]+1:,:] = 0
mat[:self.tl[1],:] = 0
mat[self.br[1]+1:,:] = 0
return mat
def blob(self):
"""
Detection method that finds the BLOBs consisting of the most pixels
Input:
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
c_x: x center coordinate of the original bounding box
c_y: y center coordinate of the original bounding box
w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
## Find BLOBs
img = self.thresh.astype(np.uint8)*255
ret, self.labels = cv2.connectedComponents(img)
## Sort BLOBs based on their pixel count. Assuming the background (label = 0) is the largest
unq_labels, counts = np.unique(self.labels, return_counts = True)
unq_labels = unq_labels[1:]
counts = counts[1:]
sorted_indecies = np.argsort(counts)[::-1]
unq_labels = unq_labels[sorted_indecies]
counts = counts[sorted_indecies]
counts = counts[counts > self.minBlobSize] # Ignore tiny BLOBs
# Find the largest BLOBs
numBlobs = self.n_fish * 2
if len(counts) < numBlobs:
numBlobs = len(counts)
unq_labels = unq_labels[:numBlobs]
## Find rotated bounding boxes of the detected keypoints
bbs = self.findRotatedBB(unq_labels)
## Keypoints are determined by the center-point
filtered = []
for b in bbs:
filtered.append(cv2.KeyPoint(x=b["c_x"],y=b["c_y"], _size = 1))
return filtered, bbs
def skeleton(self):
"""
Detection method that finds keypoints in the skeleton of the BLOBs
Input:
Output:
filtered: The detected keypoints after filtering. List of cv2.KeyPoints
bbs: The rotated bounding boxes of the filtered keypoints. List of dicts, with the following keys, containing floats:
tl_x: Top left x coordinate of rotated bounding box
tl_y: Top left y coordinate of rotated bounding box
c_x: x center coordinate of the original bounding box
c_y: y center coordinate of the original bounding box
w: Width of the rotated bounding box
h: Height of the rotated bounding box
theta: The angle of the rotated bounding box
"""
## Fill holes in the thresholded BLOBs
self.thresh = self.fillHoles(self.thresh)
## Extract skeletons of BLOBs
self.thin = skimage.morphology.skeletonize(self.thresh)
## Detect potential keypoints
detections = self.interestPoints(findJunctions=True)
filtered = []
for label in detections:
kps = detections[label]
kps.sort(key=lambda x: x[0].size)
# Remove small detections
kps = [x for x in kps if x[0].size > 1]
# Find the largest of the two keypoints placed furthest from each other
bestkp = self.filterKeypoints(kps)
# Remove the smallest half of the keypoints (in order to remove tail-points etc)
if(self.onlyHeads and len(kps) > 1):
numPts = len(kps)//2
kps = kps[-numPts:]
# If the bestkp has been removed, add it again (largest of the two keypoints placed furthest from each other)
if bestkp and (not bestkp[0] in kps):
kps.extend(bestkp)
#kps.sort(key=lambda x: x.size)
#filtered += [kps[-1]]
filtered += kps
## Find rotated bounding boxes of the detected keypoints
bbs = self.findRotatedBB(filtered)
filtered = [x[0] for x in filtered]
return filtered, bbs
def findRotation(self, img):
"""
Calculates the rotation of the foreground pixels in the binary image
Input:
img: Binary input image
Output:
theta : The orientation in degrees, in the range (-90, 90)
"""
_, cov = self.estimateGaussian(img)
## Get the eigenvalues/vectors and sort them by descending eigenvalues
U, S, _ = np.linalg.svd(cov)
x_v1, y_v1 = U[:,0]
theta = np.arctan((y_v1)/(x_v1)) # arctan vs arctan2: arctan2 would handle x_v1 = 0 without a division by zero
return np.rad2deg(theta)
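# Sanity-check sketch (assumes estimateGaussian returns the mean and covariance
# of the foreground pixel (x, y) coordinates): an elongated horizontal bar of
# ones should give an angle near 0 degrees and a vertical bar an angle near
# +/-90 degrees, i.e. the angle follows the blob's long axis.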
def closestPos(self, img, target):
"""
Finds the closest non-zero pixel position in the image to the given target position
Input:
img: Input image
target: Tuple with the x, y coordinates of the target position
Output:
pos: The position of the closest non-zero pixel
dist: The distance between the target and pos pixels
"""
y, x = np.nonzero(img)
distances = np.sqrt((x-target[0])**2 + (y-target[1])**2)
nearest_index = np.argmin(distances)
dist = np.min(distances)
pos = (x[nearest_index], y[nearest_index])
return pos, dist
def getBB(self, img):
"""
Computes the axis-aligned bounding box for the provided image.
It is assumed that the input image is a binary image with a single BLOB in it
Input:
img: Input binary image
Output:
tl: Tuple containing the coordinates of the top left point of the BB
br: Tuple containing the coordinates of the bottom right point of the BB
center: Tuple containing the coordinates of the center of the BB
"""
y, x = np.nonzero(img)
if len(y) == 0:
return (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1)
tl = (np.min(x), np.min(y))
br = (np.max(x),
|
np.max(y)
|
numpy.max
|
#!/usr/bin/python3
# Example usage python3 render_suncg.py --min=1 --nc=1
import argparse
import os
import os.path as osp
import sys
import time
import multiprocessing as mp
import queue
import numpy as np
import cv2
import csv
import logging
from rendering import *
from utils import *
from voxel import *
import random
import json
from pywavefront import Wavefront
from ssc.data.suncg import SUNCGLabels
import shutil
from House3D import objrender, create_default_config
from House3D.objrender import Camera, RenderMode, Vec3
from matplotlib.patches import Polygon, Rectangle
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
import binvox_rw
import itertools
suncg_labels = SUNCGLabels()
def image_folder_is_complete(nbr_cameras, res_dir):
#Check for images
for ci in range(nbr_cameras):
for mode_str in image_modes.keys():
# logger.info('checking {}'.format(osp.join(res_dir, "{:04}_{}.png".format(ci, mode_str))))
if not osp.isfile(osp.join(res_dir, mode_str, "{:04}.png".format(ci))):
return False
return True
def voxel_folder_is_complete(nbr_cameras, res_dir):
#Check for voxels
for ci in range(nbr_cameras):
if not osp.isfile(osp.join(res_dir, 'vox', "{:04}.npz".format(ci))):
return False
return True
def render_scene_images(cameras, api, suncg_dir, house_id, result_dir):
mappingFile = cfg['modelCategoryFile']
colormapFile = cfg['colorFile']
modelBlacklistFile = cfg.get('modelBlacklistFile', None)
#Generate folders for image types
for mode_str in image_modes.keys():
os.makedirs(osp.join(result_dir, mode_str), exist_ok = True)
#Load house
house_dir = osp.join(suncg_dir,'house',house_id)
obj_path = osp.join(house_dir,'house.obj')
make_tmp_obj = not osp.isfile(obj_path)
# Generate .obj and .mtl if not done already
if make_tmp_obj:
logger.warning('Generating .mtl and .obj file, precompute with get_data.py for increased speed')
gen_house_obj_mtl(house_dir)
if modelBlacklistFile:
api.loadSceneNoCache(obj_path,mappingFile, colormapFile, modelBlacklistFile)
else:
api.loadSceneNoCache(obj_path,mappingFile, colormapFile)
cam = api.getCamera()
# logger.debug('Setup took: {}ms'.format(int(1e3*(time.time() - start))))
# start = time.time()
#Render Cameras
camera_params = []
K = constructK()
for i, cr in enumerate(cameras):
# Parse camera position
suncg_cam = SUNCGCamera(cr)
camera_params.append({'K':K.tolist(), 'P':suncg_cam.P.tolist()})
pos = Vec3(*suncg_cam.pos)
front = Vec3(*suncg_cam.front)
up = Vec3(*suncg_cam.up)
#Render
cam.set(pos, front, up)
imgs = render_images(api, '{:04}'.format(i), result_dir, image_modes)
with open(osp.join(result_dir, 'camera_params.json'), 'w') as f:
json.dump(camera_params, f)
if make_tmp_obj:
rm_house_obj_mtl(house_dir)
logger.info('Done rendering {}'.format(house_id))
# Ported from SSC Net.
def generate_scene_voxel_grids(cameras, suncg_dir, house_id, result_dir, obj_cache):
# debug_dir = '/data/debug2/{}'.format(house_id)
debug_dir = None
if debug_dir:
shutil.rmtree(debug_dir, ignore_errors=True)
os.makedirs(debug_dir)
#Generate result dir
result_dir = osp.join(result_dir, 'vox')
os.makedirs(result_dir, exist_ok=True)
#Load house
house_json = osp.join(suncg_dir,'house',house_id, 'house.json')
with open(house_json) as f:
house = json.load(f)
#config Params
# voxSize = np.array([20,10,20])
# voxUnit = 0.3
# voxSize = np.array([240,144,240])
# voxUnit = 0.02
# voxSize = np.array([120,72,120])
# voxUnit = 0.04
# voxSize = np.array([60,40,60])
voxSize = np.array([60,40,60])
voxUnit = 0.08
camK = constructK()
im_w = 640
im_h = 480
# Confusing facts:
# Camera coordinate system has as usual Z forward and Y downward
# SUNCG coordinate system has Y facing up
# Output coordinate system has X facing up
# Select grid based on camera location
for camera_idx, cr in enumerate(cameras):
cam = SUNCGCamera(cr)
if debug_dir:
cam_debug_dir = osp.join(debug_dir, 'camera{}'.format(camera_idx))
os.makedirs(cam_debug_dir, exist_ok=True)
else:
cam_debug_dir = None
# Put grid center half the grid length in front of the camera, moving in the XZ-plane
xz_front = cam.front*np.array([1,0,1])
xz_front /= np.linalg.norm(xz_front)
voxOriginWorld = cam.pos + xz_front*voxSize[0]*voxUnit/2
#Correct box center so we always get some floor.
if voxOriginWorld[1] + voxUnit/2 > voxUnit*voxSize[1]/2:
voxOriginWorld[1] = voxUnit*voxSize[1]/2 - voxUnit/2
voxWorldMin = voxOriginWorld - (voxSize*voxUnit/2)
voxWorldMax = voxOriginWorld + (voxSize*voxUnit/2)
gridPtsWorld = np.array(
np.meshgrid(*[np.linspace(voxWorldMin[i], voxWorldMax[i], voxSize[i]) for i in range(3)], indexing='ij'))
gridPtsWorldList = gridPtsWorld.view().reshape((3,-1))
# Create views
gridPtsWorldXZ = gridPtsWorldList[[0,2],:]
gridPtsWorldXY = gridPtsWorldList[:2,:]
gridPtsWorldYZ = gridPtsWorldList[1:,:]
gridPtsWorldY = gridPtsWorldList[1,:]
#Output
gridPtsLabel = np.zeros(gridPtsWorldList.shape[1], dtype=np.uint32)
xz = [0, 2]
for houseLevel in house['levels']:
for node in houseLevel['nodes']:
if node['type'].lower() != 'room':
continue
#Check if we need the room
try:
bbox_min =
|
np.array(node['bbox']['min'])
|
numpy.array
|
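Editor's note: the row above completes `bbox_min =` with `numpy.array`. A minimal, self-contained sketch of that call, with a made-up `node` dict standing in for a SUNCG house.json room node:

```python
import numpy as np

# Hypothetical room node; real values would come from house.json
node = {'bbox': {'min': [0.0, 0.0, 0.0], 'max': [4.5, 2.8, 6.0]}}
bbox_min = np.array(node['bbox']['min'])   # shape (3,), dtype float64
bbox_max = np.array(node['bbox']['max'])
print(bbox_min, bbox_max - bbox_min)       # room origin and extents
```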
import os
import sys
import argparse
import numpy as np
import csv
import cv2
from matplotlib import pyplot as plt
from params import ParamsKITTI, ParamsEuroc
from dataset import KITTIOdometry, EuRoCDataset
FIRST_FRAME = 0
SECOND_FRAME = 1
DEFAULT = 2
class VO:
def __init__(self, path, cam, start_idx=0):
self.stage = FIRST_FRAME
self.curr_idx = start_idx
self.num_processed = 0
self.max_track_length = 20
# dataset-dependent params
self.params = ParamsEuroc()
self.dataset = EuRoCDataset(path)
self.detector = cv2.ORB_create(nfeatures=200, scaleFactor=1.2, nlevels=1, edgeThreshold=31)
self.ffdetector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
self.extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(bytes=32, use_orientation=False)
self.bf_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
# kpts and descriptors of all frames seen so far
self.kpts = []
self.des = []
self.matches = []
# params for Shi-Tomasi corner detection
self.detector_params = dict(maxCorners = 150,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7)
# tracker params
self.tracker_params = dict(winSize = (21, 21),
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
# hash table to find 3d points across frames
#self.pts_3d = {}
self.pts_3d = []
self.good_idxs = []
self.good_trks = []
# relevant images seen so far
self.prev_img = self.dataset.left[start_idx]
self.curr_img = self.dataset.left[start_idx]
# camera model
self.f = (cam.fx + cam.fy) / 2 # avg of both focal lengths
self.pp = (cam.cx, cam.cy)
self.K = np.append(cam.intrinsic_matrix, np.array([[0, 0, 0]]).T, axis=1) # 3x4 ndarray
# trajectory
self.poses = []
self.viz = True
self.tracks = []
self.new_tracks = []
self.done = False
def detect(self):
print("detecting")
mask = np.zeros_like(self.curr_img)
mask[:] = 255
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(mask, (x, y), 5, 0, -1)
self.new_tracks = []
p = cv2.goodFeaturesToTrack(self.curr_img, mask = mask, **(self.detector_params))
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
self.new_tracks.append([(x, y)])
def track(self):
print("tracking")
img0, img1 = self.prev_img, self.curr_img
p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **(self.tracker_params))
p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **(self.tracker_params))
d = abs(p0 - p0r).reshape(-1, 2).max(-1)
good = d < 1
new_tracks = []
self.good_trks = np.zeros((np.asarray(self.tracks).shape[0],), dtype=np.int8) # should be Nx1
idx = 0
for tr, (x, y), is_good in zip(self.tracks, p1.reshape(-1, 2), good):
#print(self.good_trks)
if not is_good:
idx += 1
continue
self.good_trks[idx] = 1
tr.append((x, y))
if len(tr) > self.max_track_length:
del tr[0]
new_tracks.append(tr)
idx += 1
self.tracks = new_tracks
def extract_rel_pose(self, prev_kpts, curr_kpts):
E, mask = cv2.findEssentialMat(np.array(prev_kpts),
np.array(curr_kpts),
focal=self.f,
pp=self.pp,
method=cv2.RANSAC,
prob=0.99,
threshold=0.5)
_, R, t, _ = cv2.recoverPose(E,
np.array(prev_kpts),
np.array(curr_kpts),
focal=self.f,
pp=self.pp)
return R, t
def solve_pnp(self, kpts):
print("pts_3d shape:", self.pts_3d.shape[1])
print("good_trks shape:", len(self.good_trks))
good_idxs_pnp = self.good_trks[:self.pts_3d.shape[1] + 1].astype(bool)
print("good_idxs_pnp", good_idxs_pnp, "shape:", len(good_idxs_pnp))
print("kpts shape:", np.asarray(kpts).shape)
_, rot, t, inliers = cv2.solvePnPRansac(np.asarray(self.pts_3d[:, good_idxs_pnp]).T,
np.asarray(kpts)[good_idxs_pnp, :], self.K[:,:3], None,
None, None, False, 50, 2.0, 0.9, None)
R = cv2.Rodrigues(rot)[0]
return R, t
def draw(self):
vis = cv2.cvtColor(self.curr_img, cv2.COLOR_GRAY2BGR)
for x, y in [np.int32(tr[-1]) for tr in self.tracks]:
cv2.circle(vis, (x, y), 3, (0, 0, 255), -1)
cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0), 1)
cv2.imshow('LK tracker', vis)
c = cv2.waitKey(0)
return c
def update(self):
print("\n-------------------------------------\nprocessing frame", self.num_processed)
self.prev_img = self.curr_img
self.curr_img = self.dataset.left[self.curr_idx]
if self.stage == FIRST_FRAME:
R = np.array([[1.0, 0, 0],
[0, 1.0, 0],
[0, 0, 1.0]]) # rotation matrix
t = np.array([0, 0, 0]) # translation vector
self.poses.append((R, t))
self.stage = SECOND_FRAME
if self.num_processed % 10 == 0 or len(self.tracks) < 10:
self.detect()
print("{} existing tracks, {} new tracks".format(len(self.tracks), len(self.new_tracks)))
if len(self.tracks) > 0:
self.track()
kpts1 = []
kpts2 = []
print("{} tracks kept after tracking".format(len(self.tracks)))
for tr in self.tracks:
if len(tr) < 2:
continue
kpts1.append(tr[-2])
kpts2.append(tr[-1])
if self.stage == SECOND_FRAME:
R, t = self.extract_rel_pose(kpts1, kpts2)
self.stage = DEFAULT
#print(R, t)
elif self.stage == DEFAULT:
R, t = self.solve_pnp(kpts2)
#print(R, t)
self.pts_3d, self.good_idxs = self.triangulate_points(R, t, kpts1, kpts2)
print("self.good_idxs.shape:", len(self.good_idxs))
print("self.good_idxs BEFORE:", len(self.good_idxs))
self.good_idxs = self.good_idxs[list(self.good_trks)]
print("self.good_idxs AFTER:", len(self.good_idxs))
for tr in self.new_tracks:
self.tracks.append(tr)
self.new_tracks = []
if self.viz:
c = self.draw()
if c == 27:
self.done = True
self.num_processed += 1
self.curr_idx += 1
self.prev_img = self.curr_img
def triangulate_points(self, R, t, kpts1, kpts2):
P_1 = self.K.dot(np.linalg.inv(self.T_from_Rt(R, t)))
P_2 = self.K # assume camera 2 is at origin
pts_hom = cv2.triangulatePoints(P_1, P_2, np.asarray(kpts1).T, np.asarray(kpts2).T) # in homogeneous coords
pts = pts_hom / np.tile(pts_hom[-1, :], (4, 1)) # 4xN
good_idxs = (pts[3,:] > 0) & (
|
np.abs(pts[2, :])
|
numpy.abs
|
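Editor's note: the `numpy.abs` completion above filters triangulated homogeneous points by depth. A small sketch under assumed values (the 50.0 cutoff is illustrative, not taken from the prompt):

```python
import numpy as np

# Normalised homogeneous points, one column per 3D point (made-up values)
pts = np.array([[0.1, 2.0, -0.5],
                [0.2, 1.0,  0.3],
                [1.5, 80.0, 2.0],    # Z (depth) row
                [1.0, 1.0,  1.0]])   # homogeneous scale after normalisation
good_idxs = (pts[3, :] > 0) & (np.abs(pts[2, :]) < 50.0)
print(good_idxs)                     # [ True False  True]
```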
""" Class for f(N) constraints
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import imp
import pdb
from pyigm.fN.fnmodel import FNModel
from pyigm.fN.constraints import FNConstraint
from pyigm.fN import tau_eff
# Path for pyigm
pyigm_path = imp.find_module('pyigm')[1]
def tst_fn_data(fN_model=None, model_two=None, data_list=None, outfil=None):
""" Make a matplotlib plot like the final figure from P14
See the Notebook for Bokeh plots
Taken from xastropy without actually trying to run
Parameters
----------
fN_model
model_two
data_list
outfil
"""
import matplotlib as mpl
mpl.rcParams['font.family'] = 'STIXGeneral-Regular' # Not for PDF
mpl.rcParams['lines.linewidth'] = 2
from matplotlib import pyplot as plt
#from matplotlib.backends.backend_pdf import PdfPages
# fN data
all_fN_cs = FNConstraint.load_defaults()
# Remove K12
if data_list is None:
fN_cs = [fN_c for fN_c in all_fN_cs if ((fN_c.ref != 'K02') & (fN_c.ref != 'PW09'))]
else:
fN_cs = [fN_c for fN_c in all_fN_cs if fN_c.ref in data_list]
fN_dtype = [fc.fN_dtype for fc in fN_cs]
fig = plt.figure(figsize=(8, 5))
fig.clf()
main = fig.add_axes( [0.1, 0.1, 0.8, 0.8] ) # xypos, xy-size
# f(N) data
main.set_ylabel(r'$\log f(N_{\rm HI})$')
main.set_xlabel(r'$\log N_{\rm HI}$')
main.set_ylim(-25., -9)
for fN_c in fN_cs:
if fN_c.fN_dtype == 'fN':
# Length
ip = range(fN_c.data['NPT'])
val = np.where(fN_c.data['FN'][ip] > -90)[0]
if len(val) > 0:
ipv =
|
np.array(ip)
|
numpy.array
|
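Editor's note: the `numpy.array(ip)` completion above turns a Python range of row indices into an ndarray so the valid positions in `val` can be applied as fancy indexing. A self-contained sketch with made-up f(N) values:

```python
import numpy as np

fn = np.array([-12.1, -95.0, -13.4, -99.0])   # made-up log f(N); values <= -90 flag missing data
ip = range(fn.size)
val = np.where(fn[ip] > -90)[0]               # positions of the valid points
ipv = np.array(ip)[val]                       # indices kept for plotting
print(ipv)                                    # [0 2]
```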
import numpy as np
from mrcnn import model as modellib, utils
from mrcnn import visualize
from mrcnn.model import log
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import skimage, skimage.color
from .image_tools import *
from .metrics import *
def get_occlusions(image, size=64):
occlusion_pixels = np.full((size, size, image.shape[2]), [0], image.dtype)
occlusions = []
for x in range(image.shape[0] // size):
occlusions.append([])
for y in range(image.shape[1] // size):
image_new = image.copy()
image_new[x*size:(x+1)*size, y*size:(y+1)*size, :] = occlusion_pixels
occlusions[x].append(image_new)
return occlusions
def plot_occlusion_map(image, heatmap):
# rescale heatmap to [0..1]
heatmap = heatmap - heatmap.min()
heatmap = heatmap/heatmap.max()
heatmap = 1 - heatmap
heatmap = ndimage.gaussian_filter(heatmap, sigma=(5, 5), order=0)
# plot image, heatmap and overlap
plt.subplot(1, 3, 1)
plt.title("Image", fontsize=9)
plt.axis('off')
plt.imshow(image)
plt.subplot(1, 3, 2)
plt.title("Occlusion map", fontsize=9)
plt.axis('off')
plt.imshow(heatmap)
plt.subplot(1, 3, 3)
plt.title("Occlusion map overlap", fontsize=9)
plt.axis('off')
plt.imshow(image)
plt.imshow(heatmap, alpha=0.6)
plt.show()
def visualize_occlusions(dataset_val, inference_config, model):
# Test on a random image
#image_id = 833
image_id =
|
np.random.choice(dataset_val.image_ids)
|
numpy.random.choice
|
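Editor's note: the `numpy.random.choice` completion above draws one random image id from the validation set. A minimal stand-alone version, with `np.arange(100)` standing in for `dataset_val.image_ids`:

```python
import numpy as np

image_ids = np.arange(100)              # stand-in for dataset_val.image_ids
image_id = np.random.choice(image_ids)  # one id drawn uniformly at random
print(image_id)
```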
## copied from nonlinear_transform_gen.py
''' A class for the distribution of a non-linear monotonic transformation of a continuous random variable
simplest usage:
example: create log-gamma distribution, i.e. y = log(x),
where x is gamma distributed (also available in scipy.stats)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp)
example: what is the distribution of the discount factor y=1/(1+x)
where interest rate x is normally distributed with N(mux, stdx**2)?
(just to come up with a story that implies a nice transformation)
invnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, a=-np.inf)
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
Note: I'm working from my version of scipy.stats.distribution.
But this script runs under scipy 0.6.0 (checked with numpy: 1.2.0rc2 and python 2.4)
This is not yet thoroughly tested, polished or optimized
TODO:
* numargs handling is not yet working properly, numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution is untested and incomplete
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
Created on Tuesday, October 28, 2008, 12:40:37 PM
Author: josef-pktd
License: BSD
'''
from scipy import stats
from scipy.stats import distributions
import numpy as np
def get_u_argskwargs(**kwargs):
#Todo: What's this? wrong spacing, used in Transf_gen TransfTwo_gen
u_kwargs = dict((k.replace('u_','',1),v) for k,v in kwargs.items()
if k.startswith('u_'))
u_args = u_kwargs.pop('u_args',None)
return u_args, u_kwargs
class Transf_gen(distributions.rv_continuous):
'''a class for non-linear monotonic transformation of a continuous random variable
'''
def __init__(self, kls, func, funcinv, *args, **kwargs):
#print(args
#print(kwargs
self.func = func
self.funcinv = funcinv
#explicit for self.__dict__.update(kwargs)
#need to set numargs because inspection does not work
self.numargs = kwargs.pop('numargs', 0)
#print(self.numargs
name = kwargs.pop('name','transfdist')
longname = kwargs.pop('longname','Non-linear transformed distribution')
extradoc = kwargs.pop('extradoc',None)
a = kwargs.pop('a', -np.inf)
b = kwargs.pop('b', np.inf)
self.decr = kwargs.pop('decr', False)
#defines whether it is a decreasing (True)
# or increasing (False) monotonic transformation
self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
self.kls = kls #(self.u_args, self.u_kwargs)
# possible to freeze the underlying distribution
super(Transf_gen,self).__init__(a=a, b=b, name = name,
shapes=kls.shapes,
longname = longname,
extradoc = extradoc)
def _cdf(self,x,*args, **kwargs):
#print(args
if not self.decr:
return self.kls._cdf(self.funcinv(x),*args, **kwargs)
#note scipy _cdf only take *args not *kwargs
else:
return 1.0 - self.kls._cdf(self.funcinv(x),*args, **kwargs)
def _ppf(self, q, *args, **kwargs):
if not self.decr:
return self.func(self.kls._ppf(q,*args, **kwargs))
else:
return self.func(self.kls._ppf(1-q,*args, **kwargs))
def inverse(x):
return np.divide(1.0,x)
mux, stdx = 0.05, 0.1
mux, stdx = 9.0, 1.0
def inversew(x):
return 1.0/(1+mux+x*stdx)
def inversew_inv(x):
return (1.0/x - 1.0 - mux)/stdx #.np.divide(1.0,x)-10
def identit(x):
return x
invdnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, #a=-np.inf,
numargs = 0, name = 'discf', longname = 'normal-based discount factor',
extradoc = '\ndistribution of discount factor y=1/(1+x)) with x N(0.05,0.1**2)')
lognormalg = Transf_gen(stats.norm, np.exp, np.log,
numargs = 2, a=0, name = 'lnnorm',
longname = 'Exp transformed normal',
extradoc = '\ndistribution of y = exp(x), with x standard normal'
'precision for moments and stats is not very high, 2-3 decimals')
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp, numargs=1)
## copied form nonlinear_transform_short.py
'''univariate distribution of a non-linear monotonic transformation of a
random variable
'''
class ExpTransf_gen(distributions.rv_continuous):
'''Distribution based on log/exp transformation
the constructor can be called with a distribution class
and generates the distribution of the transformed random variable
'''
def __init__(self, kls, *args, **kwargs):
#print(args
#print(kwargs
#explicit for self.__dict__.update(kwargs)
if 'numargs' in kwargs:
self.numargs = kwargs['numargs']
else:
self.numargs = 1
if 'name' in kwargs:
name = kwargs['name']
else:
name = 'Log transformed distribution'
if 'a' in kwargs:
a = kwargs['a']
else:
a = 0
super(ExpTransf_gen,self).__init__(a=a, name=name)
self.kls = kls
def _cdf(self,x,*args):
#print(args
return self.kls._cdf(np.log(x),*args)
def _ppf(self, q, *args):
return np.exp(self.kls._ppf(q,*args))
class LogTransf_gen(distributions.rv_continuous):
'''Distribution based on log/exp transformation
the constructor can be called with a distribution class
and generates the distribution of the transformed random variable
'''
def __init__(self, kls, *args, **kwargs):
#explicit for self.__dict__.update(kwargs)
if 'numargs' in kwargs:
self.numargs = kwargs['numargs']
else:
self.numargs = 1
if 'name' in kwargs:
name = kwargs['name']
else:
name = 'Log transformed distribution'
if 'a' in kwargs:
a = kwargs['a']
else:
a = 0
super(LogTransf_gen,self).__init__(a=a, name = name)
self.kls = kls
def _cdf(self,x, *args):
#print(args
return self.kls._cdf(np.exp(x),*args)
def _ppf(self, q, *args):
return np.log(self.kls._ppf(q,*args))
def examples_transf():
##lognormal = ExpTransf(a=0.0, xa=-10.0, name = 'Log transformed normal')
##print(lognormal.cdf(1)
##print(stats.lognorm.cdf(1,1)
##print(lognormal.stats()
##print(stats.lognorm.stats(1)
##print(lognormal.rvs(size=10)
print('Results for lognormal')
lognormalg = ExpTransf_gen(stats.norm, a=0, name = 'Log transformed normal general')
print(lognormalg.cdf(1))
print(stats.lognorm.cdf(1,1))
print(lognormalg.stats())
print(stats.lognorm.stats(1))
print(lognormalg.rvs(size=5))
##print('Results for loggamma'
##loggammag = ExpTransf_gen(stats.gamma)
##print(loggammag._cdf(1,10)
##print(stats.loggamma.cdf(1,10)
print('Results for expgamma')
loggammaexpg = LogTransf_gen(stats.gamma)
print(loggammaexpg._cdf(1,10))
print(stats.loggamma.cdf(1,10))
print(loggammaexpg._cdf(2,15))
print(stats.loggamma.cdf(2,15))
# this requires change in scipy.stats.distribution
#print(loggammaexpg.cdf(1,10)
print('Results for loglaplace')
loglaplaceg = LogTransf_gen(stats.laplace)
print(loglaplaceg._cdf(2,10))
print(stats.loglaplace.cdf(2,10))
loglaplaceexpg = ExpTransf_gen(stats.laplace)
print(loglaplaceexpg._cdf(2,10))
## copied from transformtwo.py
'''
Created on Apr 28, 2009
@author: <NAME>
'''
''' A class for the distribution of a non-linear u-shaped or hump shaped transformation of a
continuous random variable
This is a companion to the distributions of non-linear monotonic transformation to the case
when the inverse mapping is a 2-valued correspondence, for example for absolute value or square
simplest usage:
example: create squared distribution, i.e. y = x**2,
where x is normal or t distributed
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
This verifies for normal - chi2, normal - halfnorm, foldnorm, and t - F
TODO:
* numargs handling is not yet working properly,
numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution works in t distribution example
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
'''
class TransfTwo_gen(distributions.rv_continuous):
'''Distribution based on a non-monotonic (u- or hump-shaped transformation)
the constructor can be called with a distribution class, and functions
that define the non-linear transformation.
and generates the distribution of the transformed random variable
Note: the transformation, its inverse and derivatives need to be fully
specified: func, funcinvplus, funcinvminus, derivplus, derivminus.
Currently no numerical derivatives or inverse are calculated
This can be used to generate distribution instances similar to the
distributions in scipy.stats.
'''
#a class for non-linear non-monotonic transformation of a continuous random variable
def __init__(self, kls, func, funcinvplus, funcinvminus, derivplus,
derivminus, *args, **kwargs):
#print(args
#print(kwargs
self.func = func
self.funcinvplus = funcinvplus
self.funcinvminus = funcinvminus
self.derivplus = derivplus
self.derivminus = derivminus
#explicit for self.__dict__.update(kwargs)
#need to set numargs because inspection does not work
self.numargs = kwargs.pop('numargs', 0)
#print(self.numargs
name = kwargs.pop('name','transfdist')
longname = kwargs.pop('longname','Non-linear transformed distribution')
extradoc = kwargs.pop('extradoc',None)
a = kwargs.pop('a', -np.inf) # attached to self in super
b = kwargs.pop('b', np.inf) # self.a, self.b would be overwritten
self.shape = kwargs.pop('shape', False)
#defines whether it is a `u` shaped or `hump' shaped
# transformation
self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
self.kls = kls #(self.u_args, self.u_kwargs)
# possible to freeze the underlying distribution
super(TransfTwo_gen,self).__init__(a=a, b=b,
name = name,
shapes=kls.shapes,
longname = longname,
extradoc = extradoc)
def _rvs(self, *args):
self.kls._size = self._size #size attached to self, not function argument
return self.func(self.kls._rvs(*args))
def _pdf(self,x,*args, **kwargs):
#print(args
if self.shape == 'u':
signpdf = 1
elif self.shape == 'hump':
signpdf = -1
else:
raise ValueError('shape can only be `u` or `hump`')
return signpdf * (self.derivplus(x)*self.kls._pdf(self.funcinvplus(x),*args, **kwargs) -
self.derivminus(x)*self.kls._pdf(self.funcinvminus(x),*args, **kwargs))
#note scipy _cdf only take *args not *kwargs
def _cdf(self,x,*args, **kwargs):
#print(args
if self.shape == 'u':
return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
#note scipy _cdf only take *args not *kwargs
else:
return 1.0 - self._sf(x,*args, **kwargs)
def _sf(self,x,*args, **kwargs):
#print(args
if self.shape == 'hump':
return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
#note scipy _cdf only take *args not *kwargs
else:
return 1.0 - self._cdf(x, *args, **kwargs)
def _munp(self, n,*args, **kwargs):
return self._mom0_sc(n,*args)
# ppf might not be possible in general case?
# should be possible in symmetric case
# def _ppf(self, q, *args, **kwargs):
# if self.shape == 'u':
# return self.func(self.kls._ppf(q,*args, **kwargs))
# elif self.shape == 'hump':
# return self.func(self.kls._ppf(1-q,*args, **kwargs))
#TODO: rename these functions to have unique names
class SquareFunc(object):
'''class to hold quadratic function with inverse function and derivative
using instance methods instead of class methods, if we want extension
to parametrized function
'''
def inverseplus(self, x):
return np.sqrt(x)
def inverseminus(self, x):
return 0.0 - np.sqrt(x)
def derivplus(self, x):
return 0.5/np.sqrt(x)
def derivminus(self, x):
return 0.0 - 0.5/np.sqrt(x)
def squarefunc(self, x):
return np.power(x,2)
sqfunc = SquareFunc()
squarenormalg = TransfTwo_gen(stats.norm, sqfunc.squarefunc, sqfunc.inverseplus,
sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
shape='u', a=0.0, b=np.inf,
numargs = 0, name = 'squarenorm', longname = 'squared normal distribution',
extradoc = '\ndistribution of the square of a normal random variable' +\
' y=x**2 with x N(0.0,1)')
#u_loc=l, u_scale=s)
squaretg = TransfTwo_gen(stats.t, sqfunc.squarefunc, sqfunc.inverseplus,
sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
shape='u', a=0.0, b=np.inf,
numargs = 1, name = 'squarenorm', longname = 'squared t distribution',
extradoc = '\ndistribution of the square of a t random variable' +\
' y=x**2 with x t(dof,0.0,1)')
def inverseplus(x):
return np.sqrt(-x)
def inverseminus(x):
return 0.0 - np.sqrt(-x)
def derivplus(x):
return 0.0 - 0.5/
|
np.sqrt(-x)
|
numpy.sqrt
|
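Editor's note: the `numpy.sqrt(-x)` completion above belongs to the inverse-branch helpers of what appears to be a hump-shaped transform y = -x**2. A quick numeric check of the two inverse branches and the plus-branch derivative at an assumed y = -4:

```python
import numpy as np

y = -4.0
x_plus, x_minus = np.sqrt(-y), -np.sqrt(-y)   # the two inverse branches: 2.0 and -2.0
deriv_plus = 0.0 - 0.5 / np.sqrt(-y)          # slope of the plus branch: -0.25
print(x_plus, x_minus, deriv_plus)
```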
"""Implements RFS tracking algorithms.
This module contains the classes and data structures
for RFS tracking related algorithms.
"""
import numpy as np
from numpy.linalg import cholesky, inv
import numpy.random as rnd
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.animation as animation
import abc
from copy import deepcopy
from warnings import warn
from gasur.utilities.distributions import GaussianMixture, StudentsTMixture
from gasur.utilities.graphs import k_shortest, murty_m_best
from gncpy.math import log_sum_exp, get_elem_sym_fnc
import gncpy.plotting as pltUtil
import gncpy.filters as gfilts
import gncpy.errors as gerr
class RandomFiniteSetBase(metaclass=abc.ABCMeta):
"""Generic base class for RFS based filters.
Attributes
----------
filter : gncpy.filters.BayesFilter
Filter handling dynamics
prob_detection : float
Modeled probability an object is detected
prob_survive : float
Modeled probability of object survival
birth_terms : list
List of terms in the birth model
clutter_rate : float
Rate of clutter
clutter_density : float
Density of clutter distribution
inv_chi2_gate : float
Chi squared threshold for gating the measurements
save_covs : bool
Save covariance matrix for each state during state extraction
debug_plots : bool
Saves data needed for extra debugging plots
ospa : numpy array
Calculated OSPA value for the given truth data. Must be manually updated
by a function call.
ospa_localization : numpy array
Calculated OSPA value for the given truth data. Must be manually updated
by a function call.
ospa_cardinality : numpy array
Calculated OSPA value for the given truth data. Must be manually updated
by a function call.
"""
def __init__(self, in_filter=None, prob_detection=1, prob_survive=1,
birth_terms=None, clutter_rate=0, clutter_den=0,
inv_chi2_gate=0, save_covs=False, debug_plots=False, **kwargs):
if birth_terms is None:
birth_terms = []
self.filter = deepcopy(in_filter)
self.prob_detection = prob_detection
self.prob_survive = prob_survive
self.birth_terms = deepcopy(birth_terms)
self.clutter_rate = clutter_rate
if isinstance(clutter_den, np.ndarray):
clutter_den = clutter_den.item()
self.clutter_den = clutter_den
self.inv_chi2_gate = inv_chi2_gate
self.save_covs = save_covs
self.debug_plots = debug_plots
self.ospa = None
self.ospa_localization = None
self.ospa_cardinality = None
self._states = [] # local copy for internal modification
self._meas_tab = [] # list of lists, one per timestep, inner is all meas at time
self._covs = [] # local copy for internal modification
super().__init__(**kwargs)
@property
def prob_miss_detection(self):
"""Compliment of :py:attr:`.swarm_estimator.RandomFiniteSetBase.prob_detection`."""
return 1 - self.prob_detection
@property
def prob_death(self):
"""Compliment of :attr:`gasur.swarm_estimator.RandomFinitSetBase.prob_survive`."""
return 1 - self.prob_survive
@property
def num_birth_terms(self):
"""Number of terms in the birth model."""
return len(self.birth_terms)
@abc.abstractmethod
def predict(self, t, **kwargs):
"""Abstract method for the prediction step.
This must be overridden in the inherited class. It is recommended to
keep the same structure/order for the arguments for consistency
between the inherited classes.
"""
pass
@abc.abstractmethod
def correct(self, t, m, **kwargs):
"""Abstract method for the correction step.
This must be overridden in the inherited class. It is recommended to
keep the same structure/order for the arguments for consistency
between the inherited classes.
"""
pass
@abc.abstractmethod
def extract_states(self, **kwargs):
"""Abstract method for extracting states."""
pass
@abc.abstractmethod
def cleanup(self, **kwargs):
"""Abstract method that performs the cleanup step of the filter.
This must be overridden in the inherited class. It is recommended to
keep the same structure/order for the arguments for consistency
between the inherited classes.
"""
pass
def _gate_meas(self, meas, means, covs, meas_mat_args={},
est_meas_args={}):
"""Gates measurements based on current estimates.
Notes
-----
Gating is performed based on a Gaussian noise model.
See :cite:`Cox1993_AReviewofStatisticalDataAssociationTechniquesforMotionCorrespondence`
for details on the chi squared test used.
Parameters
----------
meas : list
2d numpy arrays of each measurement.
means : list
2d numpy arrays of each mean.
covs : list
2d numpy array of each covariance.
meas_mat_args : dict, optional
keyword arguments to pass to the inner filters get measurement
matrix function. The default is {}.
est_meas_args : dict, optional
keyword arguments to pass to the inner filters get estimate
matrix function. The default is {}.
Returns
-------
list
2d numpy arrays of valid measurements.
"""
if len(meas) == 0:
return []
valid = []
for (m, p) in zip(means, covs):
meas_mat = self.filter.get_meas_mat(m, **meas_mat_args)
est = self.filter.get_est_meas(m, **est_meas_args)
meas_pred_cov = meas_mat @ p @ meas_mat.T + self.filter.meas_noise
meas_pred_cov = (meas_pred_cov + meas_pred_cov.T) / 2
v_s = cholesky(meas_pred_cov.T)
inv_sqrt_m_cov = inv(v_s)
for (ii, z) in enumerate(meas):
if ii in valid:
continue
inov = z - est
dist = np.sum((inv_sqrt_m_cov.T @ inov)**2)
if dist < self.inv_chi2_gate:
valid.append(ii)
valid.sort()
return [meas[ii] for ii in valid]
def calculate_ospa(self, truth, c, p):
"""Calculates the OSPA distance between the truth at all timesteps.
Notes
-----
This calculates the Optimal SubPattern Assignment metric for the
extracted states and the supplied truth point distributions. The
calculation is based on
:cite:`Schuhmacher2008_AConsistentMetricforPerformanceEvaluationofMultiObjectFilters`
with much of the math defined in
:cite:`Schuhmacher2008_ANewMetricbetweenDistributionsofPointProcesses`.
A value is calculated for each timestep available in the data.
Parameters
----------
truth : list
Each element represents a timestep and is a list of N x 1 numpy array,
one per true agent in the swarm.
c : float
Distance cutoff for considering a point properly assigned. This
influences how cardinality errors are penalized. For :math:`p = 1`
it is the penalty given false point estimate.
p : int
The power of the distance term. Higher values penalize outliers
more.
"""
num_timesteps = len(self._states)
self.ospa = np.nan * np.ones(num_timesteps)
self.ospa_localization = np.nan * np.ones(num_timesteps)
self.ospa_cardinality = np.nan * np.ones(num_timesteps)
for ii, (x_lst, y_lst) in enumerate(zip(self._states, truth)):
x_empty = x_lst is None or len(x_lst) == 0
y_empty = y_lst is None or len(y_lst) == 0
if x_empty and y_empty:
self.ospa[ii] = 0
self.ospa_localization[ii] = 0
self.ospa_cardinality[ii] = 0
continue
if x_empty or y_empty:
self.ospa[ii] = c
self.ospa_localization[ii] = 0
self.ospa_cardinality[ii] = c
continue
# create row matrices of data
x = np.stack([vec.flatten() for vec in x_lst])
y = np.stack([vec.flatten() for vec in y_lst])
n = x.shape[0]
m = y.shape[0]
x_mat = np.tile(x, (m, 1))
# set y_mat to repeat each value of y n times in a row
y_mat = np.tile(y, (1, x.shape[0])).reshape((n * m, y.shape[1]))
# get distances and set cutoff
dists = np.sqrt(np.sum((x_mat - y_mat)**2, axis=1)).reshape((m, n))
dists = np.minimum(dists, c)**p
# use hungarian to find minimum distances for getting total cost
row_ind, col_ind = linear_sum_assignment(dists)
cost = dists[row_ind, col_ind].sum()
inv_max_card = 1 / np.max([n, m])
card_diff = np.abs(n - m)
inv_p = 1 / p
c_p = c**p
self.ospa[ii] = (inv_max_card * (c_p * card_diff + cost))**inv_p
self.ospa_localization[ii] = (inv_max_card * cost)**inv_p
self.ospa_cardinality[ii] = (inv_max_card * c_p * card_diff)**inv_p
def plot_ospa_history(self, time_units='index', time=None, **kwargs):
"""Plots the OSPA history.
This requires that the OSPA has been calculated by the appropriate
function first.
Parameters
----------
time_units : string, optional
Text representing the units of time in the plot. The default is
'index'.
time : numpy array, optional
Vector to use for the x-axis of the plot. If none is given then
vector indices are used. The default is None.
**kwargs : dict
Additional plotting options for :meth:`gncpy.plotting.init_plotting_opts`
function. Values implemented here are `f_hndl`, and any values
relating to title/axis text formatting.
Returns
-------
fig : matplotlib figure
Figure object the data was plotted on.
"""
if self.ospa is None:
warn('OSPA must be calculated before plotting')
return
opts = pltUtil.init_plotting_opts(**kwargs)
fig = opts['f_hndl']
if fig is None:
fig = plt.figure()
fig.add_subplot(1, 1, 1)
if time is None:
time = np.arange(self.ospa.size, dtype=int)
fig.axes[0].grid(True)
fig.axes[0].plot(time, self.ospa)
pltUtil.set_title_label(fig, 0, opts, ttl="OSPA Metric",
x_lbl='Time ({})'.format(time_units),
y_lbl="OSPA")
fig.tight_layout()
return fig
class ProbabilityHypothesisDensity(RandomFiniteSetBase):
"""Implements the Probability Hypothesis Density filter.
The kwargs in the constructor are passed through to the parent constructor.
Notes
-----
The filter implementation is based on :cite:`Vo2006_TheGaussianMixtureProbabilityHypothesisDensityFilter`
Attributes
----------
gating_on : bool
flag indicating if measurement gating should be performed. The
default is False.
inv_chi2_gate : float
threshold for the chi squared test in the measurement gating. The
default is 0.
extract_threshold : float
threshold for extracting the state. The default is 0.5.
prune_threshold : float
threshold for removing hypotheses. The default is 10**-5.
merge_threshold : float
threshold for merging hypotheses. The default is 4.
max_gauss : int
max number of gaussians to use. The default is 100.
"""
def __init__(self, gating_on=False, inv_chi2_gate=0, extract_threshold=0.5,
prune_threshold=10**-5, merge_threshold=4, max_gauss=100,
**kwargs):
self.gating_on = gating_on
self.inv_chi2_gate = inv_chi2_gate
self.extract_threshold = extract_threshold
self.prune_threshold = prune_threshold
self.merge_threshold = merge_threshold
self.max_gauss = max_gauss
self._gaussMix = GaussianMixture()
super().__init__(**kwargs)
@property
def states(self):
"""Read only list of extracted states.
This is a list with 1 element per timestep, and each element is a list
of the best states extracted at that timestep. The order of each
element corresponds to the label order.
"""
if len(self._states) > 0:
return self._states[-1]
else:
return []
@property
def covariances(self):
"""Read only list of extracted covariances.
This is a list with 1 element per timestep, and each element is a list
of the best covariances extracted at that timestep. The order of each
element corresponds to the state order.
Warns
-----
RuntimeWarning
If the class is not saving the covariances, and returns an
empty list
"""
if not self.save_covs:
warn("Not saving covariances", RuntimeWarning)
return []
if len(self._covs) > 0:
return self._covs[-1]
else:
return []
@property
def cardinality(self):
"""Read only cardinality of the RFS."""
if len(self._states) == 0:
return 0
else:
return len(self._states[-1])
def predict(self, timestep, filt_args={}):
"""Prediction step of the PHD filter.
This predicts new hypotheses and propagates them to the next time
step. It also updates the cardinality distribution. Because this calls
the inner filter's predict function, the keyword arguments must contain
any information needed by that function.
Parameters
----------
timestep: float
current timestep
filt_args : dict, optional
Passed to the inner filter. The default is {}.
Returns
-------
None.
"""
self._gaussMix = self._predict_prob_density(timestep, self._gaussMix,
filt_args)
for gm in self.birth_terms:
self._gaussMix.weights.extend(gm.weights)
self._gaussMix.means.extend(gm.means)
self._gaussMix.covariances.extend(gm.covariances)
def _predict_prob_density(self, timestep, probDensity, filt_args):
"""Predicts the probability density.
Loops over all elements in a probability distribution and performs
the filter prediction.
Parameters
----------
timestep: float
current timestep
probDensity : :class:`gasur.utilities.distributions.GaussianMixture`
Probability density to perform prediction on.
filt_args : dict
Passed directly to the inner filter.
Returns
-------
gm : :class:`gasur.utilities.distributions.GaussianMixture`
predicted Gaussian mixture.
"""
gm_tup = zip(probDensity.means,
probDensity.covariances)
gm = GaussianMixture()
gm.weights = [self.prob_survive * x for x in probDensity.weights.copy()]
for ii, (m, P) in enumerate(gm_tup):
self.filter.cov = P
n_mean = self.filter.predict(timestep, m, **filt_args)
gm.covariances.append(self.filter.cov.copy())
gm.means.append(n_mean)
return gm
def correct(self, timestep, meas_in, meas_mat_args={}, est_meas_args={},
filt_args={}):
"""Correction step of the PHD filter.
This corrects the hypotheses based on the measurements and gates the
measurements according to the class settings. It also updates the
cardinality distribution.
Parameters
----------
timestep: float
current timestep
meas_in : list
2d numpy arrays representing a measurement.
meas_mat_args : dict, optional
keyword arguments to pass to the inner filters get measurement
matrix function. Only used if gating is on. The default is {}.
est_meas_args : dict, optional
keyword arguments to pass to the inner filters estimate
measurements function. Only used if gating is on. The default is {}.
filt_args : dict, optional
keyword arguments to pass to the inner filters correct function.
The default is {}.
.. todo::
Fix the measurement gating
Returns
-------
None.
"""
meas = deepcopy(meas_in)
if self.gating_on:
meas = self._gate_meas(meas, self._gaussMix.means,
self._gaussMix.covariances, meas_mat_args,
est_meas_args)
self._meas_tab.append(meas)
gmix = deepcopy(self._gaussMix)
gmix.weights = [self.prob_miss_detection * x for x in gmix.weights]
gm = self._correct_prob_density(timestep, meas, self._gaussMix, filt_args)
gm.weights.extend(gmix.weights)
self._gaussMix.weights = gm.weights.copy()
gm.means.extend(gmix.means)
self._gaussMix.means = gm.means.copy()
gm.covariances.extend(gmix.covariances)
self._gaussMix.covariances = gm.covariances.copy()
def _correct_prob_density(self, timestep, meas, probDensity, filt_args):
"""Corrects the probability densities.
Loops over all elements in a probability distribution and performs
the filter correction.
Parameters
----------
meas : list
2d numpy arrays of each measurement.
probDensity : :py:class:`gasur.utilities.distributions.GaussianMixture`
probability density to run correction on.
filt_args : dict
arguments to pass to the inner filter correct function.
Returns
-------
gm : :py:class:`gasur.utilities.distributions.GaussianMixture`
corrected probability density.
"""
gm = GaussianMixture()
det_weights = [self.prob_detection * x for x in probDensity.weights]
for z in meas:
w_lst = []
for jj in range(0, len(probDensity.means)):
self.filter.cov = probDensity.covariances[jj]
state = probDensity.means[jj]
(mean, qz) = self.filter.correct(timestep, z, state, **filt_args)
cov = self.filter.cov
w = qz * det_weights[jj]
gm.means.append(mean)
gm.covariances.append(cov)
w_lst.append(w)
gm.weights.extend([x / (self.clutter_rate * self.clutter_den
+ sum(w_lst)) for x in w_lst])
return gm
def _prune(self):
"""Removes hypotheses below a threshold.
This should be called once per time step after the correction and
before the state extraction.
"""
idx = np.where(np.asarray(self._gaussMix.weights)
< self.prune_threshold)
idx = np.ndarray.flatten(idx[0])
for index in sorted(idx, reverse=True):
del self._gaussMix.means[index]
del self._gaussMix.weights[index]
del self._gaussMix.covariances[index]
def _merge(self):
"""Merges nearby hypotheses."""
loop_inds = set(range(0, len(self._gaussMix.means)))
w_lst = []
m_lst = []
p_lst = []
while len(loop_inds) > 0:
jj = np.argmax(self._gaussMix.weights)
comp_inds = []
inv_cov = inv(self._gaussMix.covariances[jj])
for ii in loop_inds:
diff = self._gaussMix.means[ii] - self._gaussMix.means[jj]
val = diff.T @ inv_cov @ diff
if val <= self.merge_threshold:
comp_inds.append(ii)
w_new = sum([self._gaussMix.weights[ii] for ii in comp_inds])
m_new = sum([self._gaussMix.weights[ii] * self._gaussMix.means[ii]
for ii in comp_inds]) / w_new
p_new = sum([self._gaussMix.weights[ii]
* self._gaussMix.covariances[ii]
for ii in comp_inds]) / w_new
w_lst.append(w_new)
m_lst.append(m_new)
p_lst.append(p_new)
loop_inds = loop_inds.symmetric_difference(comp_inds)
for ii in comp_inds:
self._gaussMix.weights[ii] = -1
self._gaussMix.weights = w_lst
self._gaussMix.means = m_lst
self._gaussMix.covariances = p_lst
def _cap(self):
"""Removes least likely hypotheses until a maximum number is reached.
This should be called once per time step after pruning and
before the state extraction.
"""
if len(self._gaussMix.weights) > self.max_gauss:
idx = np.argsort(self._gaussMix.weights)
w = sum(self._gaussMix.weights)
for index in sorted(idx[0:-self.max_gauss], reverse=True):
del self._gaussMix.means[index]
del self._gaussMix.weights[index]
del self._gaussMix.covariances[index]
self._gaussMix.weights = [x * (w / sum(self._gaussMix.weights))
for x in self._gaussMix.weights]
def extract_states(self):
"""Extracts the best state estimates.
This extracts the best states from the distribution. It should be
called once per time step after the correction function.
"""
inds = np.where(np.asarray(self._gaussMix.weights)
>= self.extract_threshold)
inds = np.ndarray.flatten(inds[0])
s_lst = []
c_lst = []
for jj in inds:
num_reps = round(self._gaussMix.weights[jj])
s_lst.extend([self._gaussMix.means[jj]] * num_reps)
if self.save_covs:
c_lst.extend([self._gaussMix.covariances[jj]] * num_reps)
self._states.append(s_lst)
if self.save_covs:
self._covs.append(c_lst)
def cleanup(self, enable_prune=True, enable_cap=True, enable_merge=True,
enable_extract=True):
"""Performs the cleanup step of the filter.
This can prune, cap, and extract states. It must be called once per
timestep. If this is called with `enable_extract` set to true then
the extract states method does not need to be called separately. It is
recommended to call this function instead of
:meth:`gasur.swarm_estimator.tracker.ProbabilityHypothesisDensity.extract_states`
directly.
Parameters
----------
enable_prune : bool, optional
Flag indicating if pruning should be performed. The default is True.
enable_cap : bool, optional
Flag indicating if capping should be performed. The default is True.
enable_merge : bool, optional
Flag indicating if merging should be performed. The default is True.
enable_extract : bool, optional
Flag indicating if state extraction should be performed. The default is True.
Returns
-------
None.
"""
if enable_prune:
self._prune()
if enable_merge:
self._merge()
if enable_cap:
self._cap()
if enable_extract:
self.extract_states()
def __ani_state_plotting(self, f_hndl, tt, states, show_sig, plt_inds, sig_bnd,
color, marker, state_lbl, added_sig_lbl,
added_state_lbl, scat=None):
if scat is None:
if not added_state_lbl:
scat = f_hndl.axes[0].scatter([], [], color=color,
edgecolors=(0, 0, 0),
marker=marker)
else:
scat = f_hndl.axes[0].scatter([], [], color=color,
edgecolors=(0, 0, 0),
marker=marker, label=state_lbl)
if len(states) == 0:
return scat
x = np.concatenate(states, axis=1)
if show_sig:
sigs = [None] * len(states)
for ii, cov in enumerate(self._covs[tt]):
sig = np.zeros((2, 2))
sig[0, 0] = cov[plt_inds[0], plt_inds[0]]
sig[0, 1] = cov[plt_inds[0], plt_inds[1]]
sig[1, 0] = cov[plt_inds[1], plt_inds[0]]
sig[1, 1] = cov[plt_inds[1], plt_inds[1]]
sigs[ii] = sig
# plot
for ii, sig in enumerate(sigs):
if sig is None:
continue
w, h, a = pltUtil.calc_error_ellipse(sig, sig_bnd)
if not added_sig_lbl:
s = r'${}\sigma$ Error Ellipses'.format(sig_bnd)
e = Ellipse(xy=x[plt_inds, ii], width=w,
height=h, angle=a, zorder=-10000,
animated=True, label=s)
else:
e = Ellipse(xy=x[plt_inds, ii], width=w,
height=h, angle=a, zorder=-10000,
animated=True)
e.set_clip_box(f_hndl.axes[0].bbox)
e.set_alpha(0.15)
e.set_facecolor(color)
f_hndl.axes[0].add_patch(e)
scat.set_offsets(x[plt_inds[0:2], :].T)
return scat
def plot_states(self, plt_inds, state_lbl='States', state_color=None,
**kwargs):
"""Plots the best estimate for the states.
This assumes that the states have been extracted. It's designed to plot
two of the state variables (typically x/y position). The error ellipses
are calculated according to :cite:`Hoover1984_AlgorithmsforConfidenceCirclesandEllipses`
Keyword arguments are processed with
:meth:`gncpy.plotting.init_plotting_opts`. This function
implements
- f_hndl
- true_states
- sig_bnd
- rng
- meas_inds
- lgnd_loc
- marker
Parameters
----------
plt_inds : list
List of indices in the state vector to plot
state_lbl : string
Value to appear in legend for the states. Only appears if the
legend is shown
Returns
-------
Matplotlib figure
Instance of the matplotlib figure used
"""
opts = pltUtil.init_plotting_opts(**kwargs)
f_hndl = opts['f_hndl']
true_states = opts['true_states']
sig_bnd = opts['sig_bnd']
rng = opts['rng']
meas_inds = opts['meas_inds']
lgnd_loc = opts['lgnd_loc']
marker = opts['marker']
if rng is None:
rng = rnd.default_rng(1)
plt_meas = meas_inds is not None
show_sig = sig_bnd is not None and self.save_covs
s_lst = deepcopy(self._states)
x_dim = None
if f_hndl is None:
f_hndl = plt.figure()
f_hndl.add_subplot(1, 1, 1)
# get state dimension
for states in s_lst:
if len(states) > 0:
x_dim = states[0].size
break
# get array of all state values for each label
added_sig_lbl = False
added_true_lbl = False
added_state_lbl = False
added_meas_lbl = False
r = rng.random()
b = rng.random()
g = rng.random()
if state_color is None:
color = (r, g, b)
else:
color = state_color
for tt, states in enumerate(s_lst):
if len(states) == 0:
continue
x = np.concatenate(states, axis=1)
if show_sig:
sigs = [None] * len(states)
for ii, cov in enumerate(self._covs[tt]):
sig = np.zeros((2, 2))
sig[0, 0] = cov[plt_inds[0], plt_inds[0]]
sig[0, 1] = cov[plt_inds[0], plt_inds[1]]
sig[1, 0] = cov[plt_inds[1], plt_inds[0]]
sig[1, 1] = cov[plt_inds[1], plt_inds[1]]
sigs[ii] = sig
# plot
for ii, sig in enumerate(sigs):
if sig is None:
continue
w, h, a = pltUtil.calc_error_ellipse(sig, sig_bnd)
if not added_sig_lbl:
s = r'${}\sigma$ Error Ellipses'.format(sig_bnd)
e = Ellipse(xy=x[plt_inds, ii], width=w,
height=h, angle=a, zorder=-10000,
label=s)
added_sig_lbl = True
else:
e = Ellipse(xy=x[plt_inds, ii], width=w,
height=h, angle=a, zorder=-10000)
e.set_clip_box(f_hndl.axes[0].bbox)
e.set_alpha(0.15)
e.set_facecolor(color)
f_hndl.axes[0].add_patch(e)
if not added_state_lbl:
f_hndl.axes[0].scatter(x[plt_inds[0], :], x[plt_inds[1], :],
color=color, edgecolors=(0, 0, 0),
marker=marker, label=state_lbl)
added_state_lbl = True
else:
f_hndl.axes[0].scatter(x[plt_inds[0], :], x[plt_inds[1], :],
color=color, edgecolors=(0, 0, 0),
marker=marker)
# if true states are available then plot them
if true_states is not None:
if x_dim is None:
for states in true_states:
if len(states) > 0:
x_dim = states[0].size
break
max_true = max([len(x) for x in true_states])
x = np.nan * np.ones((x_dim, len(true_states), max_true))
for tt, states in enumerate(true_states):
for ii, state in enumerate(states):
x[:, [tt], ii] = state.copy()
for ii in range(0, max_true):
if not added_true_lbl:
f_hndl.axes[0].plot(x[plt_inds[0], :, ii],
x[plt_inds[1], :, ii],
color='k', marker='.',
label='True Trajectories')
added_true_lbl = True
else:
f_hndl.axes[0].plot(x[plt_inds[0], :, ii],
x[plt_inds[1], :, ii],
color='k', marker='.')
if plt_meas:
meas_x = []
meas_y = []
for meas_tt in self._meas_tab:
mx_ii = [m[meas_inds[0]].item() for m in meas_tt]
my_ii = [m[meas_inds[1]].item() for m in meas_tt]
meas_x.extend(mx_ii)
meas_y.extend(my_ii)
color = (128 / 255, 128 / 255, 128 / 255)
meas_x = np.asarray(meas_x)
meas_y = np.asarray(meas_y)
if not added_meas_lbl:
f_hndl.axes[0].scatter(meas_x, meas_y, zorder=-1, alpha=0.35,
color=color, marker='^',
edgecolors=(0, 0, 0),
label='Measurements')
else:
f_hndl.axes[0].scatter(meas_x, meas_y, zorder=-1, alpha=0.35,
color=color, marker='^',
edgecolors=(0, 0, 0))
f_hndl.axes[0].grid(True)
pltUtil.set_title_label(f_hndl, 0, opts, ttl="State Estimates",
x_lbl="x-position", y_lbl="y-position")
if lgnd_loc is not None:
plt.legend(loc=lgnd_loc)
plt.tight_layout()
return f_hndl
def animate_state_plot(self, plt_inds, state_lbl='States', state_color=None,
interval=250, repeat=True, repeat_delay=1000,
save_path=None, **kwargs):
"""Creates an animated plot of the states.
Parameters
----------
plt_inds : list
indices of the state vector to plot.
state_lbl : string, optional
label for the states. The default is 'States'.
state_color : tuple, optional
3-tuple for rgb value. The default is None.
interval : int, optional
interval of the animation in ms. The default is 250.
repeat : bool, optional
flag indicating if the animation loops. The default is True.
repeat_delay : int, optional
delay between loops in ms. The default is 1000.
save_path : string, optional
file path and name to save the gif, does not save if not given.
The default is None.
**kwargs : dict, optional
Standard plotting options for
:meth:`gncpy.plotting.init_plotting_opts`. This function
implements
- f_hndl
- sig_bnd
- rng
- meas_inds
- lgnd_loc
- marker
Returns
-------
anim :
handle to the animation.
"""
opts = pltUtil.init_plotting_opts(**kwargs)
f_hndl = opts['f_hndl']
sig_bnd = opts['sig_bnd']
rng = opts['rng']
meas_inds = opts['meas_inds']
lgnd_loc = opts['lgnd_loc']
marker = opts['marker']
plt_meas = meas_inds is not None
show_sig = sig_bnd is not None and self.save_covs
f_hndl.axes[0].grid(True)
pltUtil.set_title_label(f_hndl, 0, opts, ttl="State Estimates",
x_lbl="x-position", y_lbl="y-position")
fr_number = f_hndl.axes[0].annotate("0", (0, 1),
xycoords="axes fraction",
xytext=(10, -10),
textcoords="offset points",
ha="left", va="top",
animated=False)
added_sig_lbl = False
added_state_lbl = False
added_meas_lbl = False
r = rng.random()
b = rng.random()
g = rng.random()
if state_color is None:
s_color = (r, g, b)
else:
s_color = state_color
state_scat = f_hndl.axes[0].scatter([], [], color=s_color,
edgecolors=(0, 0, 0),
marker=marker, label=state_lbl)
meas_scat = None
if plt_meas:
m_color = (128 / 255, 128 / 255, 128 / 255)
if meas_scat is None:
if not added_meas_lbl:
lbl = 'Measurements'
meas_scat = f_hndl.axes[0].scatter([], [], zorder=-1,
alpha=0.35,
color=m_color,
marker='^',
edgecolors='k',
label=lbl)
added_meas_lbl = True
else:
meas_scat = f_hndl.axes[0].scatter([], [], zorder=-1,
alpha=0.35,
color=m_color,
marker='^',
edgecolors='k')
def update(tt, *fargs):
nonlocal added_sig_lbl
nonlocal added_state_lbl
nonlocal added_meas_lbl
nonlocal state_scat
nonlocal meas_scat
nonlocal fr_number
fr_number.set_text("Timestep: {j}".format(j=tt))
states = self._states[tt]
state_scat = self.__ani_state_plotting(f_hndl, tt, states,
show_sig, plt_inds,
sig_bnd, s_color, marker,
state_lbl, added_sig_lbl,
added_state_lbl,
scat=state_scat)
added_sig_lbl = True
added_state_lbl = True
if plt_meas:
meas_tt = self._meas_tab[tt]
meas_x = [m[meas_inds[0]].item() for m in meas_tt]
meas_y = [m[meas_inds[1]].item() for m in meas_tt]
meas_x = np.asarray(meas_x)
meas_y = np.asarray(meas_y)
meas_scat.set_offsets(np.array([meas_x, meas_y]).T)
# plt.figure(f_hndl.number)
anim = animation.FuncAnimation(f_hndl, update,
frames=len(self._states),
interval=interval,
repeat_delay=repeat_delay,
repeat=repeat)
if lgnd_loc is not None:
plt.legend(loc=lgnd_loc)
if save_path is not None:
writer = animation.PillowWriter(fps=30)
anim.save(save_path, writer=writer)
return anim
class CardinalizedPHD(ProbabilityHypothesisDensity):
"""Implements the Cardinalized Probability Hypothesis Density filter.
The kwargs in the constructor are passed through to the parent constructor.
Notes
-----
The filter implementation is based on
:cite:`Vo2006_TheCardinalizedProbabilityHypothesisDensityFilterforLinearGaussianMultiTargetModels`
and :cite:`Vo2007_AnalyticImplementationsoftheCardinalizedProbabilityHypothesisDensityFilter`.
Attributes
----------
agents_per_state : list, optional
number of agents per state. The default is [].
"""
def __init__(self, agents_per_state=None, max_expected_card=10, **kwargs):
if agents_per_state is None:
agents_per_state = []
self.agents_per_state = agents_per_state
self._max_expected_card = max_expected_card
self._card_dist = np.zeros(self.max_expected_card + 1) # local copy for internal modification
self._card_dist[0] = 1
self._card_time_hist = [] # local copy for internal modification
self._n_states_per_time = []
super().__init__(**kwargs)
@property
def max_expected_card(self):
"""Maximum expected cardinality. The default is 10."""
return self._max_expected_card
@max_expected_card.setter
def max_expected_card(self, x):
self._card_dist = np.zeros(x + 1)
self._card_dist[0] = 1
self._max_expected_card = x
@property
def cardinality(self):
"""Cardinality of the RFS."""
return np.argmax(self._card_dist)
def predict(self, timestep, **kwargs):
"""Prediction step of the CPHD filter.
This predicts new hypotheses and propagates them to the next time
step. It also updates the cardinality distribution.
Parameters
----------
timestep: float
current timestep
**kwargs : dict, optional
See :meth:`gasur.swarm_estimator.tracker.ProbabilityHypothesisDensity.predict`
for the available arguments.
Returns
-------
None.
"""
super().predict(timestep, **kwargs)
survive_cdn_predict = np.zeros(self.max_expected_card + 1)
for j in range(0, self.max_expected_card):
terms =
|
np.zeros((self.max_expected_card + 1, 1))
|
numpy.zeros
|
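Editor's note: the `numpy.zeros` completion above pre-allocates the per-cardinality terms used in the CPHD survival prediction. A minimal sketch of just the allocation:

```python
import numpy as np

max_expected_card = 10
terms = np.zeros((max_expected_card + 1, 1))  # column vector, one slot per cardinality 0..10
print(terms.shape)                            # (11, 1)
```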
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
from africanus.rime.fast_beam_cubes import beam_cube_dde, freq_grid_interp
def rf(*a, **kw):
return np.random.random(*a, **kw)
def rc(*a, **kw):
return rf(*a, **kw) + 1j*rf(*a, **kw)
@pytest.fixture
def beam_freq_map():
return np.array([.5, .56, .7, .91, 1.0])
@pytest.fixture
def beam_freq_map_montblanc():
""" Montblanc doesn't handle values outside the cube in the same way """
return np.array([.4, .56, .7, .91, 1.1])
@pytest.fixture
def freqs():
"""
Related to the beam_freq_map fixture.
Explanation of frequency test values:
1. One value (0.4) below the beam freq range
2. One value (0.5) on the first beam freq
3. One value (1.0) on the last beam freq
4. One value (1.1) above the beam freq range
"""
return
|
np.array([.4, .5, .6, .7, .8, .9, 1.0, 1.1])
|
numpy.array
|
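Editor's note: the `numpy.array` completion above simply wraps the hand-picked test frequencies in an ndarray. A short check, reusing the fixture values, that only 0.4 and 1.1 fall outside the beam frequency map:

```python
import numpy as np

freqs = np.array([.4, .5, .6, .7, .8, .9, 1.0, 1.1])
beam_freq_map = np.array([.5, .56, .7, .91, 1.0])
out_of_range = (freqs < beam_freq_map[0]) | (freqs > beam_freq_map[-1])
print(out_of_range)   # True only for 0.4 and 1.1
```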
#!/usr/bin/env python
from __future__ import division
from collections import OrderedDict
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn.apionly as sns
from dask import delayed, multiprocessing
from dask.diagnostics import ProgressBar
import comptools as comp
import comptools.analysis.plotting as plotting
def data_config_to_sim_config(data_config):
if not data_config in comp.datafunctions.get_data_configs():
raise ValueError('Invalid data config, {}, entered...'.format(data_config))
if 'IC86' in data_config:
sim_config = 'IC86.2012'
else:
sim_config = 'IC79.2010'
return sim_config
def get_config_flux(config):
sim_config = data_config_to_sim_config(config)
pipeline_str = 'BDT'
pipeline = comp.get_pipeline(pipeline_str)
energybins = comp.analysis.get_energybins()
# Load simulation and training features
df_sim_train, df_sim_test = comp.load_sim(config=sim_config, verbose=False)
feature_list, feature_labels = comp.analysis.get_training_features()
# Load data
df_data = comp.load_data(config=config)
X_data = comp.dataframe_functions.dataframe_to_array(df_data,
feature_list + ['lap_log_energy'])
log_energy = X_data[:,-1]
X_data = X_data[:,:-1]
pipeline.fit(df_sim_train[feature_list], df_sim_train['target'])
data_predictions = pipeline.predict(X_data)
# Get composition masks
data_labels = np.array([comp.dataframe_functions.label_to_comp(pred) for pred in data_predictions])
data_light_mask = data_labels == 'light'
data_heavy_mask = data_labels == 'heavy'
# Get number of identified comp in each energy bin
df_flux = {}
comp_list = ['light', 'heavy']
for composition in comp_list:
comp_mask = data_labels == composition
df_flux['counts_' + composition] = np.histogram(log_energy[comp_mask],
bins=energybins.log_energy_bins)[0]
df_flux['counts_' + composition + '_err'] = np.sqrt(df_flux['counts_' + composition])
df_flux['counts_total'] = np.histogram(log_energy, bins=energybins.log_energy_bins)[0]
df_flux['counts_total_err'] = np.sqrt(df_flux['counts_total'])
# Solid angle
max_zenith_rad = df_sim_train['lap_zenith'].max()
solid_angle = 2*np.pi*(1-np.cos(max_zenith_rad))
df_flux['solid_angle'] = solid_angle
# Livetime
livetime, livetime_err = comp.get_detector_livetime(config=config)
df_flux['livetime'] = livetime
df_flux['livetime_err'] = livetime_err
return df_flux
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculates and saves flux plot')
parser.add_argument('-c', '--config', dest='config', nargs='*',
choices=comp.datafunctions.get_data_configs(),
help='Detector configuration')
parser.add_argument('--correct_eff_area', dest='correct_eff_area',
default=False, action='store_true',
help='Option to normalize effective areas to IC86.2012 simulation')
args = parser.parse_args()
results = [delayed(get_config_flux)(config) for config in args.config]
df_flux = delayed(pd.DataFrame)(results, index=args.config)
with ProgressBar():
print('Computing flux for {}'.format(args.config))
df_flux = df_flux.compute(get=multiprocessing.get,
num_workers=len(results))
# df_flux.to_hdf('flux_dataframe.hdf', 'dataframe')
# df_flux = pd.read_hdf('flux_dataframe.hdf', mode='r')
energybins = comp.analysis.get_energybins()
# Effective area
eff_area = comp.get_effective_area_fit(config='IC86.2012',
energy_points=energybins.energy_midpoints)
print(df_flux)
# Flux vs energy
color_dict = comp.analysis.get_color_dict()
comp_list = ['light', 'heavy']
# Plot flux for each year separately
for config in args.config:
fig, ax = plt.subplots()
df_flux_config = df_flux.loc[config]
for composition in comp_list + ['total']:
flux, flux_err = comp.analysis.get_flux(
df_flux_config['counts_' + composition],
energybins=energybins.energy_bins,
eff_area=eff_area,
livetime=df_flux_config['livetime'],
livetime_err=df_flux_config['livetime_err'],
solid_angle=df_flux_config['solid_angle'])
plotting.plot_steps(energybins.log_energy_bins, flux, yerr=flux_err,
ax=ax, color=color_dict[composition], label=composition)
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel('$\mathrm{\log_{10}(E_{reco}/GeV)}$')
ax.set_ylabel('$\mathrm{ E^{2.7} \ J(E) \ [GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$')
ax.set_xlim([energybins.log_energy_min, energybins.log_energy_max])
ax.set_ylim([10**3, 10**5])
ax.grid(linestyle='dotted', which="both")
leg = plt.legend(loc='upper center', frameon=False,
bbox_to_anchor=(0.5, # horizontal
1.15),# vertical
ncol=len(comp_list)+1, fancybox=False)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
outfile = os.path.join(comp.paths.figures_dir, 'flux',
'flux-{}.png'.format(config))
comp.check_output_dir(outfile)
plt.savefig(outfile)
# Plot combined flux for all years
fig, ax = plt.subplots()
for composition in comp_list + ['total']:
livetime_err = comp.get_summation_error(df_flux['livetime_err'])
counts = df_flux['counts_' + composition].sum()
print('counts = {}'.format(counts))
counts_err = np.sqrt(np.sum(df_flux['counts_' + composition + '_err']**2, axis=0))
print('counts_err = {}'.format(counts_err))
flux, flux_err = comp.analysis.get_flux(
counts, counts_err=counts_err,
energybins=energybins.energy_bins,
eff_area=eff_area,
livetime=df_flux['livetime'].sum(),
livetime_err=livetime_err,
solid_angle=df_flux['solid_angle'].mean())
plotting.plot_steps(energybins.log_energy_bins, flux, yerr=flux_err,
ax=ax, color=color_dict[composition], label=composition)
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel('$\mathrm{\log_{10}(E_{reco}/GeV)}$')
ax.set_ylabel('$\mathrm{ E^{2.7} \ J(E) \ [GeV^{1.7} m^{-2} sr^{-1} s^{-1}]}$')
ax.set_xlim([energybins.log_energy_min, energybins.log_energy_max])
ax.set_ylim([10**3, 10**5])
ax.grid(linestyle='dotted', which="both")
leg = plt.legend(loc='upper center', frameon=False,
bbox_to_anchor=(0.5, # horizontal
1.15),# vertical
ncol=len(comp_list)+1, fancybox=False)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
config_str = '_'.join(args.config)
outfile = os.path.join(comp.paths.figures_dir, 'flux',
'flux-combined-{}.png'.format(config_str))
comp.check_output_dir(outfile)
plt.savefig(outfile)
# Individual years on single plot
if len(args.config) > 1:
# Get colors
df_flux['light_color'] = sns.color_palette('Blues', len(args.config)).as_hex()
df_flux['heavy_color'] = sns.color_palette('Oranges', len(args.config)).as_hex()
df_flux['total_color'] = sns.color_palette('Greens', len(args.config)).as_hex()
if args.correct_eff_area:
ratio = OrderedDict()
df_flux_2012 = df_flux.loc['IC86.2012']
for config in args.config:
df_flux_config = df_flux.loc[config]
rate = df_flux_config['counts_total'] / df_flux_config['livetime']
rate_2012 = df_flux_2012['counts_total'] / df_flux_2012['livetime']
ratio[config] = rate[6]/rate_2012[6]
else:
ratio = {config: 1.0 for config in args.config}
print(ratio)
# Plot rate for each year on single plot
fig, ax = plt.subplots()
for composition in comp_list + ['total']:
for config in args.config:
df_flux_config = df_flux.loc[config]
rate, rate_err = comp.ratio_error(
df_flux_config['counts_' + composition],
                np.sqrt(df_flux_config['counts_' + composition])
# Version 3.1; <NAME>; Polar Geospatial Center, University of Minnesota; 2019
from __future__ import division
import copy
import math
import operator
import os
import sys
import traceback
from collections import deque
from itertools import product
from PIL import Image
from warnings import warn
import cv2
import numpy as np
import osgeo
from osgeo import gdal_array, gdalconst
from osgeo import gdal, ogr, osr
import scipy
import shapely.geometry
import shapely.ops
from scipy import ndimage as sp_ndimage
from skimage.draw import polygon_perimeter
from skimage import morphology as sk_morphology
from skimage.filters.rank import entropy
from skimage.util import unique_rows
gdal.UseExceptions()
if sys.version_info[0] < 3:
from DecimatePoly import DecimatePoly
else:
from lib.DecimatePoly import DecimatePoly
_script_dir = os.path.dirname(os.path.realpath(__file__))
if sys.version_info[0] < 3:
_ext_fid = open(os.path.join(_script_dir, 'outline.c'), 'r')
_outline = _ext_fid.read()
_ext_fid.close()
_ext_fid = open(os.path.join(_script_dir, 'outline_every1.c'), 'r')
_outline_every1 = _ext_fid.read()
_ext_fid.close()
else:
_ext_fid = open(os.path.join(_script_dir, 'outline.c'), 'r', encoding='utf-8')
_outline = _ext_fid.read()
_ext_fid.close()
_ext_fid = open(os.path.join(_script_dir, 'outline_every1.c'), 'r', encoding='utf-8')
_outline_every1 = _ext_fid.read()
_ext_fid.close()
gdal.UseExceptions()
class RasterIOError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedDataTypeError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
class UnsupportedMethodError(Exception):
def __init__(self, msg=""):
super(Exception, self).__init__(msg)
#############
# Raster IO #
#############
# Legacy; Retained for quick instruction of useful GDAL raster information extraction methods.
def oneBandImageToArrayZXY_projRef(rasterFile):
"""
Opens a single-band raster image as a NumPy 2D array [Z] and returns it along
with [X, Y] coordinate ranges of pixels in the raster grid as NumPy 1D arrays
and the projection definition string for the raster dataset in OpenGIS WKT format.
"""
if not os.path.isfile(rasterFile):
raise RasterIOError("No such rasterFile: '{}'".format(rasterFile))
ds = gdal.Open(rasterFile, gdal.GA_ReadOnly)
proj_ref = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
xmin, ymax = gt[0], gt[3]
dx, dy = gt[1], gt[5]
X = xmin + np.arange(ds.RasterXSize) * dx
Y = ymax + np.arange(ds.RasterYSize) * dy
Z = ds.GetRasterBand(1).ReadAsArray()
return Z, X, Y, proj_ref
def openRaster(file_or_ds, target_srs=None, reproject_resample_method='nearest'):
"""
Open a raster image as a GDAL dataset object.
Parameters
----------
file_or_ds : str (file path) or osgeo.gdal.Dataset
File path of the raster image to open as a GDAL dataset object,
or the GDAL dataset itself.
Returns
-------
ds : osgeo.gdal.Dataset
The raster image as a GDAL dataset.
Notes
-----
If `rasterFile_or_ds` is a GDAL dataset,
it is returned without modification.
"""
ds = None
if type(file_or_ds) == gdal.Dataset:
ds = file_or_ds
elif isinstance(file_or_ds, str):
if not os.path.isfile(file_or_ds):
raise RasterIOError("No such rasterFile: '{}'".format(file_or_ds))
try:
ds = gdal.Open(file_or_ds, gdal.GA_ReadOnly)
except RuntimeError:
print("RuntimeError when opening file/dataset: {}".format(file_or_ds))
raise
else:
raise InvalidArgumentError("Invalid input type for `file_or_ds`: {}".format(
type(file_or_ds)))
if target_srs is not None:
ds = reprojectGDALDataset(ds, target_srs, reproject_resample_method)
return ds
def reprojectGDALDataset(ds_in, target_srs, interp_str):
input_srs_type = type(target_srs)
if input_srs_type is osgeo.osr.SpatialReference:
if not target_srs.IsProjected():
raise RasterIOError("`target_srs` is a osgeo.osr.SpatialReference object but is not projected")
elif input_srs_type is osgeo.gdal.Dataset or input_srs_type is str and os.path.isfile(target_srs):
target_srs = extractRasterData(target_srs, 'spat_ref')
else:
target_srs_in = target_srs
target_srs_out = osr.SpatialReference()
if input_srs_type is int:
target_srs_out.ImportFromEPSG(target_srs_in)
elif input_srs_type is str:
if input_srs_type.upper().startswith('EPSG:'):
target_srs_out.ImportFromEPSG(int(target_srs_in.upper().lstrip('EPSG:')))
elif '+proj' in target_srs_in:
target_srs_out.ImportFromProj4(target_srs_in)
else:
target_srs_out.ImportFromWkt(target_srs_in)
else:
raise RasterIOError("`target_srs` type is unsupported: {}".format(input_srs_type))
target_srs = target_srs_out
interp_gdal = interp_str2gdal(interp_str)
source_srs, dx, dy = extractRasterData(ds_in, 'spat_ref', 'dx', 'dy')
if source_srs.IsSame(target_srs) == 1:
return ds_in
temp_inmemory_file_path = '/vsimem/reproj.tif'
gdal.Warp(
temp_inmemory_file_path, ds_in,
dstSRS=target_srs, resampleAlg=interp_gdal,
xRes=dx, yRes=dy, targetAlignedPixels=True,
format="GTiff"
)
ds_out = gdal.Open(temp_inmemory_file_path, gdal.GA_ReadOnly)
gdal.Unlink(temp_inmemory_file_path)
return ds_out
def gdalReadAsArraySetsmSceneBand(raster_band, make_nodata_nan=False):
scale = raster_band.GetScale()
offset = raster_band.GetOffset()
if scale is None:
scale = 1.0
if offset is None:
offset = 0.0
if scale == 1.0 and offset == 0.0:
array_data = raster_band.ReadAsArray()
if make_nodata_nan:
nodata_val = raster_band.GetNoDataValue()
if nodata_val is not None:
array_data[array_data == nodata_val] = np.nan
else:
if raster_band.DataType != gdalconst.GDT_Int32:
raise RasterIOError(
"Expected GDAL raster band with scale!=1.0 or offset!=0.0 to be of Int32 data type"
" (scaled int LERC_ZSTD-compressed 50cm DEM), but data type is {}".format(
gdal.GetDataTypeName(raster_band.DataType)
)
)
if scale == 0.0:
raise RasterIOError(
"GDAL raster band has invalid parameters: scale={}, offset={}".format(scale, offset)
)
nodata_val = raster_band.GetNoDataValue()
array_data = raster_band.ReadAsArray(buf_type=gdalconst.GDT_Float32)
adjust_where = (array_data != nodata_val) if nodata_val is not None else True
if scale != 1.0:
np.multiply(array_data, scale, out=array_data, where=adjust_where)
if offset != 0.0:
np.add(array_data, offset, out=array_data, where=adjust_where)
if make_nodata_nan:
array_nodata = np.logical_not(adjust_where, out=adjust_where)
array_data[array_nodata] = np.nan
del adjust_where
if array_data is None:
raise RasterIOError("`raster_band.ReadAsArray()` returned None")
return array_data
def getCornerCoords(gt, shape):
"""
Retrieve the georeferenced corner coordinates of a raster image.
The corner coordinates of the raster are calculated from
the rasters's geometric transformation specifications and
the dimensions of the raster.
Parameters
----------
gt : numeric tuple `(top_left_x, dx_x, dx_y, top_left_y, dy_x, dy_y)`
The affine geometric transformation ("geotransform" or "geo_trans")
describing the relationship between pixel coordinates and
georeferenced coordinates.
Pixel coordinates start at `(0, 0)` [row, col] for the top left pixel
in the raster image, increasing down rows and right across columns.
Georeferenced coordinates `(x_geo, y_geo)` are calculated for pixels
in the image by the pixel coordinates `(pix_row, pix_col)` as follows:
        `x_geo = top_left_x + pix_col*dx_x + pix_row*dx_y`
        `y_geo = top_left_y + pix_col*dy_x + pix_row*dy_y`
shape : tuple of positive int, 2 elements
Dimensions of the raster image in (num_rows, num_cols) format.
Returns
-------
corner_coords : ndarray (5, 2)
Georeferenced corner coordinates of the raster image,
in (x, y) coordinate pairs, starting and ending at the
top left corner, clockwise.
"""
top_left_x = np.full((5, 1), gt[0])
top_left_y = np.full((5, 1), gt[3])
top_left_mat = np.concatenate((top_left_x, top_left_y), axis=1)
ysize, xsize = shape
raster_XY_size_mat = np.array([
[0, 0],
[xsize, 0],
[xsize, ysize],
[0, ysize],
[0, 0]
])
gt_mat = np.array([
[gt[1], gt[4]],
[gt[2], gt[5]]
])
return top_left_mat + np.dot(raster_XY_size_mat, gt_mat)
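# Hedged usage sketch (not part of the original module): corner coordinates of a
# hypothetical 100 x 200 pixel, 2 m resolution, north-up raster. The geotransform
# values below are made up for illustration only.
def _example_getCornerCoords():
    gt = (500000.0, 2.0, 0.0, 4100000.0, 0.0, -2.0)  # (x0, dx, 0, y0, 0, -dy)
    shape = (100, 200)                               # (nrows, ncols)
    corners = getCornerCoords(gt, shape)
    # corners[0] is the top-left (x, y) corner; corners[2] is the bottom-right.
    return corners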
def coordsToWkt(point_coords):
"""
Retrieve a WKT polygon representation of an ordered list of
point coordinates.
Parameters
----------
point_coords : 2D sequence of floats/ints like ndarray
of shape (npoints, ndim)
Ordered list of points, each represented by a list of
coordinates that define its position in space.
Returns
-------
wkt : str
WKT polygon representation of `point_coords`.
"""
return 'POLYGON (({}))'.format(
','.join([" ".join([str(c) for c in xy]) for xy in point_coords])
)
def wktToCoords(wkt):
"""
Create an array of point coordinates from a WKT polygon string.
Parameters
----------
wkt : str
WKT polygon representation of points with coordinate data
to be extracted.
Returns
-------
point_coords : ndarray of shape (npoints, ndim)
Ordered list of point coordinates extracted from `wkt`.
"""
coords_list = eval(
wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
)
return np.array(coords_list)
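# Hedged round-trip sketch (illustration only): converting a small ring of corner
# points to WKT with coordsToWkt and back with wktToCoords preserves the values.
def _example_wkt_roundtrip():
    pts = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 5.0], [0.0, 5.0], [0.0, 0.0]])
    wkt = coordsToWkt(pts)  # 'POLYGON ((0.0 0.0,10.0 0.0,...))'
    assert np.allclose(wktToCoords(wkt), pts)
    return wkt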
def extractRasterData(rasterFile_or_ds, *params):
"""
Extract information from a single-band raster image file.
Parameters
----------
rasterFile_or_ds : str (file path) or osgeo.gdal.Dataset
File path of the raster image to open as a GDAL dataset object,
or the GDAL dataset itself.
params : str
Names of parameters to be extracted from the raster dataset.
'array'/'z' ------ matrix of image pixel values as ndarray (2D)
'shape'----------- pixel shape of image as tuple (nrows, ncols)
'x' -------------- georeferenced grid coordinates corresponding to
each column of pixels in image as ndarray (1D)
'y' -------------- georeferenced grid coordinates corresponding to
each row of pixels in image as ndarray (1D)
'dx' ------------- x length of each pixel in georeferenced pixel-grid coordinates,
corresponding to x[1] - x[0] from 'x' param (dx may be negative)
'dy' ------------- y length of each pixel in georeferenced pixel-grid coordinates,
corresponding to y[1] - y[0] from 'y' param (dy may be negative)
'res' ------------ (absolute) resolution of square pixels in image
(NaN if pixels are not square)
'geo_trans' ------ affine geometric transformation
(see documentation for `getCornerCoords`)
'corner_coords' -- georeferenced corner coordinates of image extent
(see documentation for `getCornerCoords`)
'proj_ref' ------- projection definition string in OpenGIS WKT format
(None if projection definition is not available)
'spat_ref' ------- spatial reference as osgeo.osr.SpatialReference object
(None if spatial reference is not available)
'geom' ----------- polygon geometry of image extent as osgeo.ogr.Geometry object
'geom_sr' -------- polygon geometry of image extent as osgeo.ogr.Geometry object
with spatial reference assigned (if available)
'nodata_val' ----- pixel value that should be interpreted as "No Data"
'dtype_val' ------ GDAL type code for numeric data type of pixel values (integer)
'dtype_str' ------ GDAL type name for numeric data type of pixel values (string)
Returns
-------
value_list : list
List of parameter data with length equal to the number
of parameter name arguments given in the function call.
The order of returned parameter data corresponds directly to
the order of the parameter name arguments.
If only one parameter name argument is provided, the single
datum is returned itself, not in a list.
Examples
--------
>>> f = 'my_raster.tif'
>>> image_data, resolution = extractRasterData(f, 'array', 'res')
>>> resolution
2
>>> extractRasterData(f, 'dy')
-2
"""
ds = openRaster(rasterFile_or_ds)
pset = set(params)
invalid_pnames = pset.difference({'ds', 'shape', 'z', 'array', 'x', 'y',
'dx', 'dy', 'res', 'geo_trans', 'corner_coords',
'proj_ref', 'spat_ref', 'geom', 'geom_sr',
'nodata_val', 'dtype_val', 'dtype_str'})
if invalid_pnames:
raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
if pset.intersection({'z', 'array', 'nodata_val', 'dtype_val', 'dtype_str'}):
band = ds.GetRasterBand(1)
if pset.intersection({'z', 'array'}):
try:
array_data = gdalReadAsArraySetsmSceneBand(band)
except RasterIOError as e:
traceback.print_exc()
print("Error reading raster: {}".format(rasterFile_or_ds))
raise
if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
shape = (ds.RasterYSize, ds.RasterXSize) if 'array_data' not in vars() else array_data.shape
if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
geo_trans = ds.GetGeoTransform()
if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
proj_ref = ds.GetProjectionRef()
if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
corner_coords = getCornerCoords(geo_trans, shape)
if pset.intersection({'spat_ref', 'geom_sr'}):
spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
if pset.intersection({'geom', 'geom_sr'}):
geom = ogr.Geometry(wkt=coordsToWkt(corner_coords))
if pset.intersection({'nodata_val'}):
nodata_val = band.GetNoDataValue()
if pset.intersection({'dtype_val', 'dtype_str'}):
dtype_val = band.DataType
if pset.intersection({'dtype_str'}):
dtype_str = gdal.GetDataTypeName(dtype_val)
value_list = []
for pname in params:
pname = pname.lower()
value = None
if pname == 'ds':
value = ds
elif pname == 'shape':
value = shape
elif pname in ('z', 'array'):
value = array_data
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = corner_coords
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = spat_ref
elif pname == 'geom':
value = geom
elif pname == 'geom_sr':
value = geom.Clone() if 'geom' in params else geom
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
warn("Spatial reference could not be extracted from raster dataset, "
"so extracted geometry has not been assigned a spatial reference.")
elif pname == 'nodata_val':
value = nodata_val
elif pname == 'dtype_val':
value = dtype_val
elif pname == 'dtype_str':
value = dtype_str
value_list.append(value)
if len(value_list) == 1:
value_list = value_list[0]
return value_list
# Legacy; Retained for a visual aid of equivalences between NumPy and GDAL data types.
# Use gdal_array.NumericTypeCodeToGDALTypeCode to convert from NumPy to GDAL data type.
def dtype_np2gdal_old(dtype_in, form_out='gdal', force_conversion=False):
"""
Converts between input NumPy data type (dtype_in may be either
NumPy 'dtype' object or already a string) and output GDAL data type.
If form_out='numpy', the corresponding NumPy 'dtype' object will be
returned instead, allowing for quick lookup by string name.
If the third element of a dtype_dict conversion tuple is zero,
that conversion of NumPy to GDAL data type is not recommended. However,
the conversion may be forced with the argument force_conversion=True.
"""
dtype_dict = { # ---GDAL LIMITATIONS---
'bool' : (np.bool, gdal.GDT_Byte, 0), # GDAL no bool/logical/1-bit
'int8' : (np.int8, gdal.GDT_Byte, 1), # GDAL byte is unsigned
'int16' : (np.int16, gdal.GDT_Int16, 1),
'int32' : (np.int32, gdal.GDT_Int32, 1),
'intc' : (np.intc, gdal.GDT_Int32, 1), # np.intc ~= np.int32
'int64' : (np.int64, gdal.GDT_Int32, 0), # GDAL no int64
'intp' : (np.intp, gdal.GDT_Int32, 0), # intp ~= np.int64
'uint8' : (np.uint8, gdal.GDT_Byte, 1),
'uint16' : (np.uint16, gdal.GDT_UInt16, 1),
'uint32' : (np.uint32, gdal.GDT_UInt32, 1),
'uint64' : (np.uint64, gdal.GDT_UInt32, 0), # GDAL no uint64
'float16' : (np.float16, gdal.GDT_Float32, 1), # GDAL no float16
'float32' : (np.float32, gdal.GDT_Float32, 1),
'float64' : (np.float64, gdal.GDT_Float64, 1),
'complex64' : (np.complex64, gdal.GDT_CFloat32, 1),
'complex128': (np.complex128, gdal.GDT_CFloat64, 1),
}
errmsg_unsupported_dtype = "Conversion of NumPy data type '{}' to GDAL is not supported".format(dtype_in)
try:
dtype_tup = dtype_dict[str(dtype_in).lower()]
except KeyError:
raise UnsupportedDataTypeError("No such NumPy data type in lookup table: '{}'".format(dtype_in))
if form_out.lower() == 'gdal':
if dtype_tup[2] == 0:
if force_conversion:
print(errmsg_unsupported_dtype)
else:
raise UnsupportedDataTypeError(errmsg_unsupported_dtype)
dtype_out = dtype_tup[1]
elif form_out.lower() == 'numpy':
dtype_out = dtype_tup[0]
else:
raise UnsupportedDataTypeError("The following output data type format is not supported: '{}'".format(form_out))
return dtype_out
def dtype_np2gdal(dtype_np):
# TODO: Write docstring.
if dtype_np == np.bool:
promote_dtype = np.uint8
elif dtype_np == np.int8:
promote_dtype = np.int16
elif dtype_np == np.float16:
promote_dtype = np.float32
else:
promote_dtype = None
if promote_dtype is not None:
warn("NumPy array data type ({}) does not have equivalent GDAL data type and is not "
"supported, but can be safely promoted to {}".format(dtype_np, promote_dtype(1).dtype))
dtype_np = promote_dtype
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_np)
if dtype_gdal is None:
raise InvalidArgumentError("NumPy array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_np))
return dtype_gdal, promote_dtype
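# Hedged sketch (illustration only): float16 has no GDAL equivalent, so
# dtype_np2gdal promotes it to float32 before looking up the GDAL type code.
def _example_dtype_np2gdal():
    dtype_gdal, promote_dtype = dtype_np2gdal(np.float16)
    assert dtype_gdal == gdal.GDT_Float32
    assert promote_dtype is np.float32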
def interp_str2gdal(interp_str):
# TODO: Write docstring.
interp_choices = ('nearest', 'linear', 'cubic', 'spline', 'lanczos', 'average', 'mode')
interp_dict = {
'nearest' : gdal.GRA_NearestNeighbour,
'linear' : gdal.GRA_Bilinear,
'bilinear' : gdal.GRA_Bilinear,
'cubic' : gdal.GRA_Cubic,
'bicubic' : gdal.GRA_Cubic,
'spline' : gdal.GRA_CubicSpline,
'lanczos' : gdal.GRA_Lanczos,
'average' : gdal.GRA_Average,
'mode' : gdal.GRA_Mode,
}
if interp_str not in interp_dict:
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp_str))
return interp_dict[interp_str]
def saveArrayAsTiff(array, dest,
X=None, Y=None, proj_ref=None, geotrans_rot_tup=(0, 0),
nodata_val='like_raster', dtype_out=None, nbits=None, co_args='compress',
like_raster=None):
"""
Save a NumPy 2D array as a single-band raster image in GeoTiff format.
Parameters
----------
array : ndarray, 2D
Array containing the values of pixels to be saved in the image,
one value per pixel.
dest : str (file path)
File path where the raster image will be saved.
If a file already exists at this path, it will be overwritten.
X : None or (ndarray, 1D)
Grid coordinates corresponding to all columns in the raster image,
from left to right, such that `X[j]` specifies the x-coordinate for
all pixels in `array[:, j]`.
If None, `like_raster` must be provided.
Y : None or (ndarray, 1D)
Grid coordinates corresponding to all rows in the raster image,
from top to bottom, such that `Y[i]` specifies the y-coordinate for
all pixels in `array[i, :]`
If None, `like_raster` must be provided.
proj_ref : None, str (WKT or Proj4), or osr.SpatialReference
Projection reference of the raster image to be saved, specified as
either a WKT/Proj4 string or an osr.SpatialReference object.
If None, `like_raster` must be provided.
geotrans_rot_tup : None or tuple (2 floats)
The third and fifth elements of the geometric transformation tuple
that specify rotation from north-up of the raster image to be saved.
If a north-up output is desired, let both elements be zero.
See documentation for `getCornerCoords` for more information on the
geometric transformation tuple.
If None, `like_raster` must be provided.
nodata_val : 'like_raster', None, or int/float
Non-NaN value in `array` that will be classified as "no data" in the
output raster image.
If 'like_raster', allow this value to be set equal to the nodata value
of `like_raster`.
dtype_out : data type as str (e.g. 'uint16'), NumPy data type
(e.g. np.uint16), or numpy.dtype object (e.g. from arr.dtype)
Numeric type of values in the output raster image.
If 'n-bit', write output raster image in an unsigned integer GDAL
data type with ['NBITS=n'] option in driver, where n is set to `nbits`
if `nbits` is not None. If `nbits` is None, n is calculated to be only
as large as necessary to capture the maximum value of `array`, and the
output array data type is unsigned integer of minimal bitdepth.
nbits : None or 1 <= int <= 32
Only applies when `dtype_out='nbits'`.
co_args : None, 'compress', or list of '[ARG_NAME]=[ARG_VALUE]' strings
Creation Option arguments to pass to the `Create` method of the GDAL
Geotiff driver that instantiates the output raster dataset.
If 'compress', the following default arguments are used:
'BIGTIFF=IF_SAFER'
'COMPRESS=LZW'
'TILED=YES'
The 'NBITS=X' argument may not be used -- that is set by the `nbits`
argument for this function.
A list of Creation Option arguments may be found here: [1].
like_raster : None, str (file path), or osgeo.gdal.Dataset
File path or GDAL dataset for a raster image of identical dimensions,
geographic location/extent, spatial reference, and nodata value as
the raster image that will be saved.
If provided, `X`, `Y`, `proj_ref`, and `geotrans_rot_tup` should not
be provided, as these metrics will be taken from the like raster.
Returns
-------
None
Notes
-----
The OSGeo `gdal_translate` program [1] must be callable by name
from the current working directory at the time this function is called.
References
----------
.. [1] https://www.gdal.org/frmt_gtiff.html
"""
spat_ref = None
projstr_wkt = None
projstr_proj4 = None
if proj_ref is None:
pass
elif type(proj_ref) == osr.SpatialReference:
spat_ref = proj_ref
elif isinstance(proj_ref, str):
spat_ref = osr.SpatialReference()
if proj_ref.lstrip().startswith('PROJCS'):
projstr_wkt = proj_ref
spat_ref.ImportFromWkt(projstr_wkt)
elif proj_ref.lstrip().startswith('+proj='):
projstr_proj4 = proj_ref
spat_ref.ImportFromProj4(projstr_proj4)
else:
raise InvalidArgumentError("`proj_ref` of string type has unknown format: '{}'".format(proj_ref))
else:
raise InvalidArgumentError("`proj_ref` must be a string or osr.SpatialReference object, "
"but was of type {}".format(type(proj_ref)))
dtype_is_nbits = (dtype_out is not None and type(dtype_out) is str and dtype_out == 'nbits')
if co_args is not None and co_args != 'compress':
if type(co_args) != list:
raise InvalidArgumentError("`co_args` must be a list of strings, but was {}".format(co_args))
if dtype_is_nbits:
for arg in co_args:
if arg.startswith('NBITS='):
raise InvalidArgumentError("`co_args` cannot include 'NBITS=X' argument. "
"Please use this function's `nbits` argument.")
shape = array.shape
dtype_gdal = None
if like_raster is not None:
ds_like = openRaster(like_raster)
if shape[0] != ds_like.RasterYSize or shape[1] != ds_like.RasterXSize:
raise InvalidArgumentError("Shape of `like_rasterFile` '{}' ({}, {}) does not match "
"the shape of `array` {}".format(
like_raster, ds_like.RasterYSize, ds_like.RasterXSize, shape)
)
geo_trans = extractRasterData(ds_like, 'geo_trans')
if proj_ref is None:
spat_ref = extractRasterData(ds_like, 'spat_ref')
if nodata_val == 'like_raster':
nodata_val = extractRasterData(ds_like, 'nodata_val')
if dtype_out is None:
dtype_gdal = extractRasterData(ds_like, 'dtype_val')
else:
if shape[0] != Y.size or shape[1] != X.size:
raise InvalidArgumentError("Lengths of [`Y`, `X`] grid coordinates ({}, {}) do not match "
"the shape of `array` ({})".format(Y.size, X.size, shape))
geo_trans = (X[0], X[1]-X[0], geotrans_rot_tup[0],
Y[0], geotrans_rot_tup[1], Y[1]-Y[0])
if nodata_val == 'like_raster':
nodata_val = None
if dtype_out is not None:
if dtype_is_nbits:
if nbits is None:
nbits = int(math.floor(math.log(float(max(1, np.max(array))), 2)) + 1)
elif type(nbits) != int or nbits < 1:
raise InvalidArgumentError("`nbits` must be an integer in the range [1,32]")
if nbits <= 8:
dtype_gdal = gdal.GDT_Byte
elif nbits <= 16:
dtype_gdal = gdal.GDT_UInt16
elif nbits <= 32:
dtype_gdal = gdal.GDT_UInt32
else:
                raise InvalidArgumentError("Output array requires {} bits of precision, "
                                           "but GDAL supports a maximum of 32 bits".format(nbits))
else:
if type(dtype_out) is str:
dtype_out = eval('np.{}'.format(dtype_out.lower()))
dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_out)
if dtype_gdal is None:
raise InvalidArgumentError("Output array data type ({}) does not have equivalent "
"GDAL data type and is not supported".format(dtype_out))
dtype_in = array.dtype
dtype_in_gdal, promote_dtype = dtype_np2gdal(dtype_in)
if promote_dtype is not None:
array = array.astype(promote_dtype)
dtype_in = promote_dtype(1).dtype
if dtype_out is not None:
if dtype_is_nbits:
if not np.issubdtype(dtype_in, np.unsignedinteger):
warn("Input array data type ({}) is not unsigned and may be incorrectly saved "
"with n-bit precision".format(dtype_in))
elif dtype_in != dtype_out:
warn("Input array NumPy data type ({}) differs from output "
"NumPy data type ({})".format(dtype_in, dtype_out(1).dtype))
elif dtype_gdal is not None and dtype_gdal != dtype_in_gdal:
warn("Input array GDAL data type ({}) differs from output "
"GDAL data type ({})".format(gdal.GetDataTypeName(dtype_in_gdal),
gdal.GetDataTypeName(dtype_gdal)))
if dtype_gdal is None:
dtype_gdal = dtype_in_gdal
sys.stdout.write("Saving Geotiff {} ...".format(dest))
sys.stdout.flush()
# Create the output raster dataset in memory.
if co_args is None:
co_args = []
if co_args == 'compress':
co_args = []
co_args.extend(['BIGTIFF=IF_SAFER']) # Will create BigTIFF
# if the resulting file *might* exceed 4GB.
co_args.extend(['COMPRESS=LZW']) # Do LZW compression on output image.
co_args.extend(['TILED=YES']) # Force creation of tiled TIFF files.
if dtype_is_nbits:
co_args.extend(['NBITS={}'.format(nbits)])
if spat_ref is not None:
if projstr_wkt is None:
projstr_wkt = spat_ref.ExportToWkt()
if projstr_proj4 is None:
projstr_proj4 = spat_ref.ExportToProj4()
sys.stdout.write(" GDAL data type: {}, NoData value: {}, Creation Options: {}, Projection (Proj4): {} ...".format(
gdal.GetDataTypeName(dtype_gdal), nodata_val, ' '.join(co_args) if co_args else None, projstr_proj4.strip())
)
sys.stdout.flush()
sys.stdout.write(" creating file ...")
sys.stdout.flush()
driver = gdal.GetDriverByName('GTiff')
ds_out = driver.Create(dest, shape[1], shape[0], 1, dtype_gdal, co_args)
ds_out.SetGeoTransform(geo_trans)
if projstr_wkt is not None:
ds_out.SetProjection(projstr_wkt)
band = ds_out.GetRasterBand(1)
if nodata_val is not None:
band.SetNoDataValue(nodata_val)
sys.stdout.write(" writing array values ...")
sys.stdout.flush()
band.WriteArray(array)
# Write the output raster dataset to disk.
sys.stdout.write(" finishing file ...")
sys.stdout.flush()
ds_out = None # Dereference dataset to initiate write to disk of intermediate image.
sys.stdout.write(" done!\n")
sys.stdout.flush()
#######################
# Array Manipulations #
#######################
def getWindow(array, i, j, window_shape=(3, 3), output='array', bounds_check=True):
# TODO: Write docstring.
output_choices = ('array', 'indices')
if output not in output_choices:
raise InvalidArgumentError("`output` must be one of {}, "
"but was {}".format(output_choices, output))
win_nrows, win_ncols = window_shape
if bounds_check:
if win_nrows < 1 or win_ncols < 1:
raise InvalidArgumentError("`window_shape` must be a tuple of two positive ints")
arr_nrows, arr_ncols = array.shape
i_backup = i
j_backup = j
if i < 0:
i = arr_nrows + i
if j < 0:
j = arr_ncols + j
if i >= arr_nrows:
raise InvalidArgumentError("Index `i`={} is outside `array` bounds".format(i_backup))
if j >= arr_ncols:
raise InvalidArgumentError("Index `j`={} is outside `array` bounds".format(j_backup))
win_halfrowsz = (win_nrows-1) / 2
win_halfcolsz = (win_ncols-1) / 2
win_r0 = int(i - np.ceil(win_halfrowsz))
win_r1 = int(i + np.floor(win_halfrowsz) + 1)
win_c0 = int(j - np.ceil(win_halfcolsz))
win_c1 = int(j + np.floor(win_halfcolsz) + 1)
if not bounds_check:
if win_r1 == 0:
win_r1 = None
if win_c1 == 0:
win_c1 = None
return ( array[win_r0:win_r1, win_c0:win_c1] if output == 'array'
else (win_r0, win_r1, win_c0, win_c1))
if win_r0 < 0 or win_r1 > arr_nrows or win_c0 < 0 or win_c1 > arr_ncols:
raise InvalidArgumentError("Window falls outside `array` bounds")
return ( array[win_r0:win_r1, win_c0:win_c1] if output == 'array'
else (win_r0, win_r1, win_c0, win_c1))
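# Hedged usage sketch (illustration only): extracting the centered 3x3 window
# around pixel (2, 2) of a small test array.
def _example_getWindow():
    a = np.arange(25).reshape(5, 5)
    win = getWindow(a, 2, 2, window_shape=(3, 3), output='array')
    assert win.shape == (3, 3) and win[1, 1] == a[2, 2]
    return win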
def rotate_arrays_if_kernel_has_even_sidelength(array, kernel):
"""
Return 180-degree rotated views into the provided arrays
if `kernel` has an even side length.
Parameters
----------
array : ndarray, 2D
Primary array associated with `kernel`.
kernel : ndarray, 2D
Kernel array.
Returns
-------
array_out, kernel_out, rotation_flag : tuple
Tuple containing views into `array` and `kernel`,
and a flag that is True if the views of these two
arrays have been rotated by 180 degrees.
See Also
--------
fix_array_if_rotation_was_applied
Notes
-----
The sole purpose of this function is to assist other
functions in this array utility suite in their attempts
to mimic the behavior of corresponding MATLAB functions
at the pixel level when dealing with a kernel/structure
that has an even side length.
"""
for s in kernel.shape:
if s % 2 == 0:
return np.rot90(array, 2), np.rot90(kernel, 2), True
return array, kernel, False
def fix_array_if_rotation_was_applied(array, rotation_flag):
"""
Return 180-degree rotated view into the provided array
if `rotation_flag` is True.
Parameters
----------
array : ndarray, 2D
Array that may or may not need undoing of rotation.
rotation_flag : bool
True if `array` rotation should be undone.
False if `array` does not need undoing of rotation.
Returns
-------
array_out : ndarray, 2D
View into `array` that may or may not have had
rotation undone.
See Also
--------
rotate_arrays_if_kernel_has_even_sidelength
Notes
-----
The sole purpose of this function is to assist other
functions in this array utility suite in their attempts
to mimic the behavior of corresponding MATLAB functions
at the pixel level when dealing with a kernel/structure
that has an even side length.
"""
return np.rot90(array, 2) if rotation_flag else array
def rot90_pixcoords(coords, shape_in, k=1):
"""
Rotate 2D (row, col) pixel coordinates taken from an
array of a defined nrows x ncols shape by 90 degrees.
Rotation direction is counterclockwise.
Parameters
----------
coords : 2D ndarray or list/tuple of two 1D ndarrays
2D (row, col) pixel coordinates.
        May be in the format of the output of np.argwhere
        (2D ndarray, shape like (npoints, 2)) [2] or
        np.where (tuple of two 1D ndarrays, each of
        size npoints) [1].
shape_in : tuple of positive int
Shape of array that pixel coordinates came from
before the desired rotation has been applied,
like (nrows, ncols) output of `array.shape`.
k : int
Number of times the coordinates are rotated by
90 degrees.
Returns
-------
coords_out : same format, type, shape as `coords`
2D (row, col) pixel coordinates rotated from
the corresponding coordinates in `coords`.
See Also
--------
numpy.rot90 [3]
flip_pixcoords
Notes
-----
Say `coords` index into array 'a' to return values
of a set of pixels 'a_vals' as follows:
`a_vals = a[coords]`
Rotate both `a` and `coords` 90 degrees the same
number of times `k` to get array 'b' and pixel
coords 'coords_b' that index into 'b' to return
'b_vals'.
`b = numpy.rot90(a, k)`
`coords_b = rot90_pixcoords(coords, a.shape, k)`
`b_vals = b[coords_b]`
The values in 'a_vals' and 'b_vals' are identical.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
.. [2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.argwhere.html
.. [3] https://docs.scipy.org/doc/numpy/reference/generated/numpy.rot90.html
"""
if type(coords) == np.ndarray:
row_in, col_in = coords.T
else:
row_in, col_in = coords
k = k % 4
if k == 0:
row_out = row_in
col_out = col_in
elif k == 1:
row_out = (shape_in[1]-1) - col_in
col_out = row_in
elif k == 2:
row_out = (shape_in[0]-1) - row_in
col_out = (shape_in[1]-1) - col_in
elif k == 3:
row_out = col_in
col_out = (shape_in[0]-1) - row_in
if type(coords) == np.ndarray:
result = np.array([row_out, col_out]).T
else:
result = (row_out, col_out)
return result
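# Hedged sketch (illustration only) of the relationship described in the Notes
# above: values selected by `coords` in `a` equal the values selected by the
# rotated coordinates in numpy.rot90(a, k).
def _example_rot90_pixcoords():
    a = np.arange(12).reshape(3, 4)
    coords = np.argwhere(a % 5 == 0)               # (npoints, 2) of (row, col)
    b = np.rot90(a, 1)
    coords_b = rot90_pixcoords(coords, a.shape, 1)
    assert np.array_equal(a[tuple(coords.T)], b[tuple(coords_b.T)])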
def flip_pixcoords(coords, shape_in, axis=0):
"""
Flip 2D (row, col) pixel coordinates taken from an
array of a defined nrows x ncols shape across an axis.
Parameters
----------
coords : 2D ndarray or list/tuple of two 1D ndarrays
2D (row, col) pixel coordinates.
        May be in the format of the output of np.argwhere
        (2D ndarray, shape like (npoints, 2)) [2] or
        np.where (tuple of two 1D ndarrays, each of
        size npoints) [1].
shape_in : tuple of positive int
Shape of array that pixel coordinates came from,
like (nrows, ncols) output of `array.shape`.
axis : 0 or 1
If 0, flip coordinates vertically.
If 1, flip coordinates horizontally.
See Also
--------
numpy.rot90 [3]
rot90_pixcoords
Returns
-------
coords_out : same format, type, shape as `coords`
2D (row, col) pixel coordinates flipped from
the corresponding coordinates in `coords`.
Notes
-----
Say `coords` index into array 'a' to return values
of a set of pixels 'a_vals' as follows:
`a_vals = a[coords]`
Flip both `a` and `coords` over the same axis with
number `axis` to get array 'b' and pixel coords
'coords_b' that index into 'b' to return 'b_vals'.
`b = numpy.flip(a, axis)`
`coords_b = flip_pixcoords(coords, a.shape, axis)`
`b_vals = b[coords_b]`
The values in 'a_vals' and 'b_vals' are identical.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
.. [2] https://docs.scipy.org/doc/numpy/reference/generated/numpy.argwhere.html
.. [3] https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
"""
if type(coords) == np.ndarray:
row_in, col_in = coords.T
else:
row_in, col_in = coords
if axis == 0:
row_out = (shape_in[0]-1) - row_in
col_out = col_in
elif axis == 1:
row_out = row_in
col_out = (shape_in[1]-1) - col_in
else:
raise InvalidArgumentError("`axis` must be 0 or 1")
if type(coords) == np.ndarray:
result = np.array([row_out, col_out]).T
else:
result = (row_out, col_out)
return result
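# Hedged sketch (illustration only) of the analogous relationship for a
# vertical flip (axis=0).
def _example_flip_pixcoords():
    a = np.arange(12).reshape(3, 4)
    coords = np.argwhere(a % 5 == 0)
    b = np.flip(a, 0)
    coords_b = flip_pixcoords(coords, a.shape, 0)
    assert np.array_equal(a[tuple(coords.T)], b[tuple(coords_b.T)])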
def array_round_proper(array, in_place=False):
"""
Round data in a floating point array to the nearest integer,
rounding up for positive X.5 and down for negative X.5.
Parameters
----------
array : ndarray of floating dtype
Floating point array to round.
in_place : bool
If True, round array in place.
If False, copy array before rounding.
Returns
-------
array_round : ndarray of floating dtype
The rounded array.
"""
if not in_place:
array = np.copy(array)
array_gt_zero = array > 0
np.add(array, 0.5, out=array, where=array_gt_zero)
np.floor(array, out=array, where=array_gt_zero)
del array_gt_zero
array_lt_zero = array < 0
np.subtract(array, 0.5, out=array, where=array_lt_zero)
np.ceil(array, out=array, where=array_lt_zero)
del array_lt_zero
return array
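# Hedged sketch (illustration only): array_round_proper rounds halves away from
# zero (MATLAB-style), whereas numpy.around rounds halves to the nearest even integer.
def _example_array_round_proper():
    a = np.array([-1.5, -0.5, 0.5, 1.5])
    assert np.array_equal(array_round_proper(a), np.array([-2.0, -1.0, 1.0, 2.0]))
    assert np.array_equal(np.around(a), np.array([-2.0, -0.0, 0.0, 2.0]))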
def astype_round_and_crop(array, dtype_out, allow_modify_array=False):
"""
Cast a floating point array to an integer data type,
first rounding data values and cropping all values to
the minimum and maximum representable values for the
output data type.
Parameters
----------
array : ndarray
Array containing data to be cast.
dtype_out : numpy data type (e.g. numpy.int32) or numpy.dtype
The data type `array` is to be cast to.
allow_modify_array : bool
If True, values in input `array` may be modified.
Returns
-------
array_out : ndarray of type `dtype_out`
The new array that has been cast from `array`.
Notes
-----
This function is meant to replicate MATLAB array type casting.
"""
# The trivial case
if dtype_out == np.bool:
return array.astype(dtype_out)
array_dtype_np = array.dtype.type
dtype_out_np = dtype_out if type(dtype_out) != np.dtype else dtype_out.type
if np.issubdtype(array_dtype_np, np.floating) and np.issubdtype(dtype_out_np, np.integer):
# TODO: Consider replacing the following potentially costly call with
        #       np.around(array) if round-half-to-nearest-whole-even is acceptable.
array = array_round_proper(array, allow_modify_array)
return astype_cropped(array, dtype_out_np, allow_modify_array)
def astype_cropped(array, dtype_out, allow_modify_array=False):
"""
Cast an array to a new data type, first cropping all values
to the minimum and maximum representable values for the
output data type.
Parameters
----------
array : ndarray
Array containing data to be cast.
dtype_out : numpy data type (e.g. numpy.int32) or numpy.dtype
The data type `array` is to be cast to.
allow_modify_array : bool
If True, values in input `array` may be modified.
Returns
-------
array_cropped : ndarray of type `dtype_out`
The new array that has been cast from `array`.
Notes
-----
    The purpose of this function is to prevent overflow and
underflow during casting, something numpy.ndarray.astype
does not do. [1]
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.astype.html
"""
# The trivial case
if dtype_out == np.bool:
return array.astype(dtype_out)
dtype_out_np = dtype_out if type(dtype_out) != np.dtype else dtype_out.type
dtype_info_fn = np.finfo if np.issubdtype(dtype_out_np, np.floating) else np.iinfo
dtype_out_min = dtype_info_fn(dtype_out_np).min
dtype_out_max = dtype_info_fn(dtype_out_np).max
array_cropped = array if allow_modify_array else None
try:
array_cropped = np.clip(array, dtype_out_min, dtype_out_max, out=array_cropped)
except OverflowError:
dtype_out_min_float = float(dtype_out_min)
dtype_out_max_float = float(dtype_out_max)
warn("Integers for {} clip range [{}, {}] are too large for underlying C code of numpy.clip(). "
"Casting clip range to float: [{}, {}]".format(dtype_out_np(1).dtype,
dtype_out_min, dtype_out_max,
dtype_out_min_float, dtype_out_max_float))
array_cropped = np.clip(array, dtype_out_min_float, dtype_out_max_float, out=array_cropped)
return array_cropped.astype(dtype_out)
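# Hedged sketch (illustration only): astype_cropped clips values to the
# representable range of the output type instead of letting them wrap around.
def _example_astype_cropped():
    a = np.array([-1000.0, 100.0, 1000.0])
    cropped = astype_cropped(a, np.uint8)
    assert np.array_equal(cropped, np.array([0, 100, 255], dtype=np.uint8))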
def getDataArray(array, label=0, label_type='nodata'):
"""
Classify values in an array as "data" or non-"data".
Parameters
----------
array : ndarray
Array to be classified.
label : bool/int/float
Value of nodes in `array` that are classified as
"data" (if label_type='data')
or non-"data" (if label_type='nodata').
label_type : str; 'data' or 'nodata'
Whether `label` is a classification for "data"
or non-"data" nodes.
Returns
-------
data_array : ndarray of bool, same shape as `array`
Binary mask of `array` where "data" nodes are one
and non-"data" nodes are zero.
"""
label_type_choices = ('data', 'nodata')
if label_type not in label_type_choices:
raise InvalidArgumentError("`label_type` must be one of {}, "
"but was {}".format(label_type_choices, label_type))
if (array.dtype == np.bool
and ((label_type == 'nodata' and label == 0)
or (label_type == 'data' and label == 1))):
data_array = array
elif np.isnan(label):
data_array = np.isnan(array) if label_type == 'data' else ~np.isnan(array)
else:
data_array = (array == label) if label_type == 'data' else (array != label)
return data_array
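# Hedged sketch (illustration only): building a boolean "data" mask from an
# array whose nodata value is NaN.
def _example_getDataArray():
    a = np.array([[1.0, np.nan], [3.0, 4.0]])
    mask = getDataArray(a, label=np.nan, label_type='nodata')
    assert np.array_equal(mask, np.array([[True, False], [True, True]]))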
######################
# Array Calculations #
######################
def interp2_fill_oob(X, Y, Zi, Xi, Yi, fillval=np.nan, coord_grace=True):
# Rows and columns of Zi outside the domain of Z are made NaN.
# Assume X and Y coordinates are monotonically increasing/decreasing
# so hopefully we only need to work a short way inwards from the edges.
Xi_size = Xi.size
Yi_size = Yi.size
Xmin = min(X[0], X[-1])
Ymax = max(Y[0], Y[-1])
x_lfttest_val = X[0]
x_rgttest_val = X[-1]
y_toptest_val = Y[0]
y_bottest_val = Y[-1]
if x_lfttest_val == Xmin:
# X-coords increase from left to right.
x_lfttest_op = operator.lt
x_rgttest_op = operator.gt
else:
# X-coords decrease from left to right.
x_lfttest_op = operator.gt
x_rgttest_op = operator.lt
if y_toptest_val == Ymax:
# Y-coords decrease from top to bottom.
y_toptest_op = operator.gt
y_bottest_op = operator.lt
else:
# Y-coords increase from top to bottom.
y_toptest_op = operator.lt
y_bottest_op = operator.gt
if coord_grace:
x_grace = (X[1] - X[0]) / 64
y_grace = (Y[1] - Y[0]) / 16
x_lfttest_val -= x_grace
x_rgttest_val += x_grace
y_toptest_val -= y_grace
y_bottest_val += y_grace
x_lfttest_op = operator.le if x_lfttest_op(0, 1) else operator.ge
x_rgttest_op = operator.le if x_rgttest_op(0, 1) else operator.ge
y_toptest_op = operator.le if y_toptest_op(0, 1) else operator.ge
y_bottest_op = operator.le if y_bottest_op(0, 1) else operator.ge
i = 0
while x_lfttest_op(Xi[i], x_lfttest_val) and i < Xi_size:
Zi[:, i] = fillval
i += 1
i = -1
while x_rgttest_op(Xi[i], x_rgttest_val) and i >= -Xi_size:
Zi[:, i] = fillval
i -= 1
j = 0
while y_toptest_op(Yi[j], y_toptest_val) and j < Yi_size:
Zi[j, :] = fillval
j += 1
j = -1
while y_bottest_op(Yi[j], y_bottest_val) and j >= -Yi_size:
Zi[j, :] = fillval
j -= 1
return Zi
# def interp2_cv2(X, Y, Z, Xi, Yi, interp_str, extrapolate=False, oob_val=np.nan):
# xx = np.repeat(np.reshape((Xi-X[0]/2, (1, X.size)), Y.size, axis=0)
# yy = np.repeat(np.reshape((Yi-Y[0])/-2, (Y.size, 1)), X.size, axis=1)
# cv2.remap(Z, xx.astype(np.float32), yy.astype(np.float32), cv2.INTER_LINEAR)
# pass
def interp2_gdal(X, Y, Z, Xi, Yi, interp_str, extrapolate=False, oob_val=np.nan):
"""
Resample array data from one set of x-y grid coordinates to another.
Parameters
----------
X : ndarray, 1D
Grid coordinates corresponding to all columns in the raster image,
from left to right, such that `X[j]` specifies the x-coordinate for
all pixels in `Z[:, j]`.
Y : ndarray, 1D
Grid coordinates corresponding to all rows in the raster image,
from top to bottom, such that `Y[i]` specifies the y-coordinate for
all pixels in `Z[i, :]`.
Z : ndarray, 2D
Array containing values to be resampled.
Xi : ndarray, 1D
New grid x-coordinates, like `X` array.
Yi : ndarray, 1D
New grid y-coordinates, like `Y` array.
interp_str : str
Interpolation/resampling method, must be one of the following:
'nearest', 'linear', 'cubic', 'spline', 'lanczos', 'average', 'mode'
extrapolate : bool
Whether or not to interpolate values for pixels with new grid coords
`Xi` and `Yi` that fall outside the range of old grid coords `X` and `Y`.
If True, allow the interpolation method to set the values of these pixels.
If False, set the values of these pixels to `oob_val`.
oob_val : int/float
(Option only applies when `extrapolate=True`.)
Value to fill any regions of the output array where new grid coords
`Xi` and `Yi` fall outside the range of old grid coords `X` and `Y`.
Returns
-------
Zi : ndarray, 2D, same shape and type as `Z`
The resampled array.
"""
dtype_gdal, promote_dtype = dtype_np2gdal(Z.dtype)
if promote_dtype is not None:
Z = Z.astype(promote_dtype)
interp_gdal = interp_str2gdal(interp_str)
mem_drv = gdal.GetDriverByName('MEM')
ds_in = mem_drv.Create('', X.size, Y.size, 1, dtype_gdal)
ds_in.SetGeoTransform((X[0], X[1]-X[0], 0,
Y[0], 0, Y[1]-Y[0]))
ds_in.GetRasterBand(1).WriteArray(Z)
ds_out = mem_drv.Create('', Xi.size, Yi.size, 1, dtype_gdal)
ds_out.SetGeoTransform((Xi[0], Xi[1]-Xi[0], 0,
Yi[0], 0, Yi[1]-Yi[0]))
gdal.ReprojectImage(ds_in, ds_out, '', '', interp_gdal)
Zi = ds_out.GetRasterBand(1).ReadAsArray()
if not extrapolate:
interp2_fill_oob(X, Y, Zi, Xi, Yi, oob_val)
return Zi
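# Hedged usage sketch (illustration only): bilinear resampling of a small grid
# onto a grid of twice the density; new-grid cells that fall outside the old
# grid are filled with NaN because extrapolate=False. All values are made up.
def _example_interp2_gdal():
    X = np.arange(0.0, 10.0, 2.0)            # old x-coords, increasing
    Y = np.arange(10.0, 0.0, -2.0)           # old y-coords, decreasing top-down
    Z = np.random.rand(Y.size, X.size).astype(np.float32)
    Xi = np.arange(0.0, 10.0, 1.0)
    Yi = np.arange(10.0, 0.0, -1.0)
    return interp2_gdal(X, Y, Z, Xi, Yi, 'linear', extrapolate=False, oob_val=np.nan)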
def interp2_scipy(X, Y, Z, Xi, Yi, interp, extrapolate=False, oob_val=np.nan,
griddata=False,
SBS=False,
RGI=False, RGI_extrap=True, RGI_fillVal=None,
CLT=False, CLT_fillVal=np.nan,
RBS=False):
# TODO: Rewrite docstring in new standard.
"""
Aims to provide similar functionality to interp2_gdal using SciPy's
interpolation library. However, initial tests show that interp2_gdal
both runs more quickly and produces output more similar to MATLAB's
interp2 function for every method required by Ian's mosaicking script.
griddata, SBS, and CLT interpolation methods are not meant to be used
for the resampling of a large grid as is done here.
"""
order_dict = {
'nearest' : 0,
'linear' : 1,
'bilinear' : 1,
'quadratic': 2,
'cubic' : 3,
'bicubic' : 3,
'quartic' : 4,
'quintic' : 5,
}
order = order_dict[interp]
method_set = True in (griddata, SBS, RGI, CLT, RBS)
if griddata:
# Supports nearest, linear, and cubic interpolation methods.
# Has errored out with "QH7074 qhull warning: more than 16777215 ridges.
# ID field overflows and two ridges may have the same identifier."
# when used on large arrays. Fails to draw a convex hull of input points.
# Needs more testing, but seems to handle NaN input. Output for linear and
# cubic methods shows NaN borders when interpolating out of input domain.
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
Zi = scipy.interpolate.griddata((xx.flatten(), yy.flatten()), Z.flatten(),
(xxi.flatten(), yyi.flatten()), interp)
Zi.resize((Yi.size, Xi.size))
elif SBS:
# Supports all 5 orders of spline interpolation.
# Can't handle NaN input; results in all NaN output.
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
fn = scipy.interpolate.SmoothBivariateSpline(xx.flatten(), yy.flatten(), Z.flatten(),
kx=order, ky=order)
Zi = fn.ev(xxi, yyi)
Zi.resize((Yi.size, Xi.size))
elif RGI or (not method_set and (order == 0 or (order == 1 and np.any(np.isnan(Z))))):
# Supports nearest and linear interpolation methods.
xxi, yyi = np.meshgrid(Xi, Yi[::-1])
pi = np.column_stack((yyi.flatten(), xxi.flatten()))
fn = scipy.interpolate.RegularGridInterpolator((Y[::-1], X), Z, method=interp,
bounds_error=(not RGI_extrap), fill_value=RGI_fillVal)
Zi = fn(pi, method=interp)
Zi.resize((Yi.size, Xi.size))
elif CLT or (not method_set and (order == 3 and np.any(np.isnan(Z)))):
# Performs cubic interpolation of data,
# but includes logic to first perform a nearest resampling of input NaNs.
# Produces the same error as scipy.interpolate.griddata when used on large arrays.
if np.any(np.isnan(Z)):
Zi = interp2_scipy(X, Y, Z, Xi, Yi, 'nearest')
Zi_data = np.where(~np.isnan(Zi))
Z_data = np.where(~np.isnan(Z))
p = np.column_stack((Z_data[0], Z_data[1]))
pi = np.column_stack((Zi_data[0], Zi_data[1]))
fn = scipy.interpolate.CloughTocher2DInterpolator(p, Z[Z_data], fill_value=CLT_fillVal)
Zi[Zi_data] = fn(pi)
else:
xx, yy = np.meshgrid(X, Y)
xxi, yyi = np.meshgrid(Xi, Yi)
p = np.column_stack((xx.flatten(), yy.flatten()))
pi = np.column_stack((xxi.flatten(), yyi.flatten()))
fn = scipy.interpolate.CloughTocher2DInterpolator(p, Z.flatten(), fill_value=CLT_fillVal)
Zi = fn(pi)
Zi.resize((Yi.size, Xi.size))
elif RBS or (not method_set and (order in (2, 4))):
# Supports all 5 orders of spline interpolation.
# Can't handle NaN input; results in all NaN output.
fn = scipy.interpolate.RectBivariateSpline(Y[::-1], X, Z,
kx=order, ky=order)
Zi = fn(Yi[::-1], Xi, grid=True)
else:
# Supports linear, cubic, and quintic interpolation methods.
# Can't handle NaN input; results in all NaN output.
# Default interpolator for its presumed efficiency.
fn = scipy.interpolate.interp2d(X, Y[::-1], Z, kind=interp)
Zi = fn(Xi, Yi)
if not extrapolate:
interp2_fill_oob(X, Y, Zi, Xi, Yi, oob_val)
return Zi
def imresize(array, size, interp='bicubic', dtype_out='input',
method='cv2', float_resize=True, round_proper=True,
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str; 'nearest', 'area', 'bilinear', 'bicubic', or 'lanczos'
Interpolation method to use during resizing.
dtype_out : str; 'input' or 'float'
If 'input', data type of the returned array is
the same as `array`.
If 'float' and `array` data type is of floating type,
data type of the returned array is the same.
If 'float' and `array` data type is of integer type,
data type of the returned array is float32.
method : str; 'cv2', 'pil', 'gdal', or 'scipy'
Specifies which method used to perform resizing.
'cv2' ------ cv2.resize [1]
'pil' ------ PIL.Image.resize [2]
'scipy' ---- scipy.misc.imresize (WILL BE RETIRED SOON) [3]
'gdal' ----- interp2_gdal (local, utilizes gdal.ReprojectImage [4])
float_resize : bool
If True, convert integer arrays to float32 before resizing.
round_proper : bool
If the resized array is converted from floating
to an integer data type (such as when `float_resize=True`
and `dtype_out='input'`)...
- If True, round X.5 values up to (X + 1).
        - If False, round X.5 values to the nearest even integer.
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize_pil
imresize_old
Notes
-----
This function is meant to replicate MATLAB's `imresize` function [5].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void resize(InputArray src, OutputArray dst, Size dsize, double fx, double fy, int interpolation)
.. [2] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [3] https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html
.. [4] http://gdal.org/java/org/gdal/gdal/gdal.html#ReprojectImage-org.gdal.gdal.Dataset-org.gdal.gdal.Dataset-java.lang.String-java.lang.String-int-double-double-org.gdal.gdal.ProgressCallback-java.util.Vector-
https://svn.osgeo.org/gdal/trunk/autotest/alg/reproject.py
.. [5] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
dtype_in = array.dtype
method_choices = ('cv2', 'pil', 'scipy', 'gdal')
if method not in method_choices:
raise InvalidArgumentError("`method` must be one of {}, but was '{}'".format(method_choices, method))
dtype_out_choices = ('input', 'float')
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, but was '{}'".format(dtype_out_choices, dtype_out))
# Handle interpolation method lookups.
interp_dict = None
if method == 'cv2':
interp_dict = {
'nearest' : cv2.INTER_NEAREST,
'area' : cv2.INTER_AREA,
'bilinear' : cv2.INTER_LINEAR,
'bicubic' : cv2.INTER_CUBIC,
'lanczos' : cv2.INTER_LANCZOS4,
}
elif method == 'pil':
interp_dict = {
'nearest' : Image.NEAREST,
'box' : Image.BOX,
'linear' : Image.BILINEAR,
'bilinear' : Image.BILINEAR,
'hamming' : Image.HAMMING,
'cubic' : Image.BICUBIC,
'bicubic' : Image.BICUBIC,
'lanczos' : Image.LANCZOS,
}
if interp_dict is not None:
if interp not in interp_dict.keys():
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_dict.keys(), interp))
interp_code = interp_dict[interp]
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup.copy()
# Handle input data type and conversions.
promote_dtype = None
promote_is_demote = False
if float_resize:
if np.issubdtype(dtype_in, np.floating):
pass
else:
array = array.astype(np.float32)
elif method == 'cv2':
if dtype_in == np.bool:
promote_dtype = np.uint8
elif dtype_in == np.int8:
promote_dtype = np.int16
elif dtype_in == np.float16:
promote_dtype = np.float32
elif dtype_in in (np.int32, np.uint32, np.int64, np.uint64):
raise InvalidArgumentError("`array` data type cannot be of 32/64-bit int/uint "
"when method='{}', but was {}; consider setting "
"`float_resize=True`".format(method, dtype_in))
elif method == 'pil':
if dtype_in == np.uint16:
promote_dtype = np.int32
elif dtype_in in (np.uint32, np.int64, np.uint64):
if np.any(array > np.iinfo(np.int32).max) or np.any(array < np.iinfo(np.int32).min):
                raise InvalidArgumentError("`array` data type ({}) is not supported by method='{}' "
                                           "and values cannot fit in int32; consider setting "
                                           "`float_resize=True`".format(dtype_in, method))
promote_dtype = np.int32
promote_is_demote = True
elif dtype_in == np.float16:
promote_dtype = np.float32
if promote_dtype is not None:
warn("`array` data type ({}) is not supported by '{}' resizing method, "
"but can safely be {}{} to {} for processing".format(dtype_in, method,
'promoted'*(not promote_is_demote), 'demoted'*promote_is_demote, promote_dtype(1).dtype))
array = array.astype(promote_dtype)
# Resize array.
if method == 'cv2':
array_r = cv2.resize(array, tuple(list(new_shape)[::-1]), interpolation=interp_code)
elif method == 'pil':
image = (Image.frombytes(mode='1', size=array.shape[::-1], data=np.packbits(array, axis=1))
if array.dtype == np.bool else Image.fromarray(array))
image = image.resize(tuple(list(new_shape)[::-1]), interp_code)
# Set "default" data type for reading data into NumPy array.
if image.mode == '1':
dtype_out_pil = np.bool
image = image.convert('L')
elif image.mode == 'L':
dtype_out_pil = np.uint8
elif image.mode == 'I':
dtype_out_pil = np.int32
elif image.mode == 'F':
dtype_out_pil = np.float32
# Convert Pillow Image to NumPy array.
array_r = np.fromstring(image.tobytes(), dtype=dtype_out_pil)
array_r = array_r.reshape((image.size[1], image.size[0]))
elif method == 'gdal':
# Set up grid coordinate arrays, then run interp2_gdal.
X = np.arange(array.shape[1]) + 1
Y = np.arange(array.shape[0]) + 1
Xi = np.linspace(X[0], X[-1] + (X[1]-X[0]), num=(new_shape[1] + 1))[0:-1]
Yi = np.linspace(Y[0], Y[-1] + (Y[1]-Y[0]), num=(new_shape[0] + 1))[0:-1]
array_r = interp2_gdal(X, Y, array, Xi, Yi, interp, extrapolate=False)
elif method == 'scipy':
PILmode = 'L' if array.dtype in (np.bool, np.uint8) else 'F'
if PILmode == 'L' and array.dtype != np.uint8:
array = array.astype(np.uint8)
array_r = scipy.misc.imresize(array, new_shape, interp, PILmode)
# Handle output data type and conversions.
if dtype_out == 'input' and array_r.dtype != dtype_in:
if round_proper:
array_r = astype_round_and_crop(array_r, dtype_in, allow_modify_array=True)
else:
array_r = astype_cropped(array_r, dtype_in, allow_modify_array=True)
elif dtype_out == 'float' and not np.issubdtype(array_r.dtype, np.floating):
array_r = array_r.astype(np.float32)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
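# Example usage (illustrative sketch): this assumes the resize routine ending above
# is the module's `imresize` (it is listed under "See Also" by `imresize_pil` and
# `imresize_old` below) with an (array, size, ...) signature; the array values here
# are arbitrary test data.
def _example_imresize():
    import numpy as np  # local import so the sketch stands alone
    a = np.linspace(0.0, 1.0, 36, dtype=np.float32).reshape(6, 6)
    # A scalar `size` acts as a resize factor; the output shape is ceil(size * shape),
    # so a 6x6 array scaled by 0.5 becomes 3x3.
    a_small = imresize(a, 0.5)
    assert a_small.shape == (3, 3)
    return a_small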
def imresize_pil(array, size, interp='bicubic', dtype_out='input',
float_resize=True, round_proper=True,
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str; 'nearest', 'box', 'bilinear', 'hamming',
'bicubic', or 'lanczos'
Interpolation method to use during resizing.
dtype_out : str; 'default' or 'input'
If 'default' and `float_resize=True`, the returned
array data type will be float32.
If 'default' and `float_resize=False`, the returned
array data type will be...
- bool if `array` is bool
- uint8 if `array` is uint8
- int32 if `array` is integer other than uint8
- float32 if `array` is floating
If 'input', the returned array data type will be
the same as `array` data type.
float_resize : bool
If True, convert the Pillow image of `array`
to PIL mode 'F' before resizing.
If False, allow the Pillow image to stay in its
default PIL mode for resizing.
The rounding scheme of resized integer images with
integer PIL modes (e.g. 'L' or 'I') is unclear when
compared with the same integer images in the 'F' PIL mode.
This option has no effect when `array` dtype is floating.
round_proper : bool
If the resized array is converted from floating
to an integer data type (such as when `float_resize=True`
and `dtype_out='input'`)...
- If True, round X.5 values up to (X + 1).
- If False, round X.5 values to the nearest even integer.
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize
imresize_old
Notes
-----
This function is a wrapper for Pillow's `PIL.Image.resize` function [1]
meant to replicate MATLAB's `imresize` function [2].
References
----------
.. [1] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [2] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
array_dtype_in = array.dtype
interp_choices = ('nearest', 'box', 'bilinear', 'hamming', 'bicubic', 'lanczos')
interp_dict = {
'nearest' : Image.NEAREST,
'box' : Image.BOX,
'linear' : Image.BILINEAR,
'bilinear' : Image.BILINEAR,
'hamming' : Image.HAMMING,
'cubic' : Image.BICUBIC,
'bicubic' : Image.BICUBIC,
'lanczos' : Image.LANCZOS,
}
try:
interp_pil = interp_dict[interp]
except KeyError:
raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp))
dtype_out_choices = ('default', 'input')
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, but was '{}'".format(dtype_out_choices, dtype_out))
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup
# Convert NumPy array to Pillow Image.
image = None
if array_dtype_in == np.bool:
if float_resize:
image = Image.fromarray(array, 'L')
else:
image = Image.frombytes(mode='1', size=array.shape[::-1], data=np.packbits(array, axis=1))
else:
if array_dtype_in == np.float16:
array = array.astype(np.float32)
if not float_resize:
if array_dtype_in == np.uint16:
array = array.astype(np.int32)
elif array_dtype_in == np.uint32:
if np.any(array > np.iinfo(np.int32).max):
raise InvalidArgumentError("`array` of uint32 cannot be converted to int32")
array = array.astype(np.int32)
image = Image.fromarray(array)
if float_resize and image.mode != 'F':
image = image.convert('F')
# Resize array.
image = image.resize(tuple(list(new_shape)[::-1]), interp_pil)
# Set "default" data type for reading data into NumPy array.
if image.mode == '1':
dtype_out_np = np.bool
image = image.convert("L")
elif image.mode == 'L':
dtype_out_np = np.uint8
elif image.mode == 'I':
dtype_out_np = np.int32
elif image.mode == 'F':
dtype_out_np = np.float32
# Convert Pillow Image to NumPy array.
array_r = np.fromstring(image.tobytes(), dtype=dtype_out_np)
array_r = array_r.reshape((image.size[1], image.size[0]))
# Clean up resized array.
if dtype_out == 'input' and array_r.dtype != array_dtype_in:
if round_proper:
array_r = astype_round_and_crop(array_r, array_dtype_in, allow_modify_array=True)
else:
array_r = astype_cropped(array_r, array_dtype_in, allow_modify_array=True)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
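# Example usage (illustrative sketch) for `imresize_pil`; the array values are
# arbitrary test data.
def _example_imresize_pil():
    import numpy as np  # local import so the sketch stands alone
    a = np.arange(16, dtype=np.float32).reshape(4, 4)
    # Halve both dimensions with the default bicubic interpolation; with the
    # default dtype_out='input', the result comes back as float32.
    a_half = imresize_pil(a, 0.5)
    assert a_half.shape == (2, 2) and a_half.dtype == np.float32
    return a_half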
def imresize_old(array, size, interp='bicubic', dtype_out='input',
method='pil',
one_dim_axis=1):
"""
Resize an array.
Parameters
----------
array : ndarray, 2D
The array to resize.
size : shape tuple (2D) or scalar value
If shape tuple, returns an array of this size.
If scalar value, returns an array of shape
that is `size` times the shape of `array`.
interp : str
Interpolation method to use during resizing.
See documentation for a particular `method`.
dtype_out : str; 'input' or 'float'
If 'input', data type of the returned array is
the same as `array`.
If 'float' and `array` data type is of floating type,
data type of the returned array is the same.
If 'float' and `array` data type is of integer type,
data type of the returned array is float32.
method : str; 'cv2', 'pil', 'gdal', or 'scipy'
Specifies which method used to perform resizing.
'cv2' ------ cv2.resize [1]
'pil' ------ PIL.Image.resize [2]
'scipy' ---- scipy.misc.imresize (WILL BE RETIRED SOON) [3]
'gdal' ----- interp2_gdal (local, utilizes gdal.ReprojectImage [4])
one_dim_axis : int, 0 or 1
Which directional layout to give to a one-dimensional
`array` before resizing.
If 0, array runs vertically downwards across rows.
If 1, array runs horizontally rightwards across columns.
Returns
-------
array_r : ndarray, 2D, same type as `array`
The resized array.
See Also
--------
imresize
imresize_pil
Notes
-----
This function is meant to replicate MATLAB's `imresize` function [5].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#void resize(InputArray src, OutputArray dst, Size dsize, double fx, double fy, int interpolation)
.. [2] http://pillow.readthedocs.io/en/3.1.x/reference/Image.html
.. [3] https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html
.. [4] http://gdal.org/java/org/gdal/gdal/gdal.html#ReprojectImage-org.gdal.gdal.Dataset-org.gdal.gdal.Dataset-java.lang.String-java.lang.String-int-double-double-org.gdal.gdal.ProgressCallback-java.util.Vector-
https://svn.osgeo.org/gdal/trunk/autotest/alg/reproject.py
.. [5] https://www.mathworks.com/help/images/ref/imresize.html
"""
array_backup = array
method_choices = ('cv2', 'gdal', 'pil', 'scipy')
dtype_out_choices = ('float', 'input')
if method not in method_choices:
raise UnsupportedMethodError("`method` must be one of {}, "
"but was '{}'".format(method_choices, method))
if dtype_out not in dtype_out_choices:
raise InvalidArgumentError("`dtype_out` must be one of {}, "
"but was '{}'".format(dtype_out_choices, dtype_out))
# Handle 1D array input.
one_dim_flag = False
if array.ndim == 1:
one_dim_flag = True
if one_dim_axis == 0:
array_shape_1d = (array.size, 1)
elif one_dim_axis == 1:
array_shape_1d = (1, array.size)
else:
raise InvalidArgumentError("`one_dim_axis` must be either 0 or 1")
array = np.reshape(array, array_shape_1d)
# If a resize factor is provided for size, round up the x, y pixel
# sizes for the output array to match MATLAB's imresize function.
new_shape = size if type(size) == tuple else tuple(np.ceil(np.dot(size, array.shape)).astype(int))
if one_dim_flag and type(size) != tuple:
new_shape = (new_shape[0], 1) if one_dim_axis == 0 else (1, new_shape[1])
# The trivial case
if new_shape == array.shape:
return array_backup
array_dtype_in = array.dtype
dtype_out_np = None
if dtype_out == 'float':
dtype_out_np = array_dtype_in if np.issubdtype(array_dtype_in, np.floating) else np.float32
elif dtype_out == 'input':
dtype_out_np = array_dtype_in
if method == 'cv2':
interp_dict = {
'nearest' : cv2.INTER_NEAREST,
'area' : cv2.INTER_AREA,
'bilinear' : cv2.INTER_LINEAR,
'bicubic' : cv2.INTER_CUBIC,
'lanczos' : cv2.INTER_LANCZOS4,
}
try:
interp_cv2 = interp_dict[interp]
except KeyError:
raise InvalidArgumentError("For `method=cv2`, `interp` must be one of {}, "
"but was '{}'".format(interp_dict.keys(), interp))
if array_dtype_in == np.bool:
array = array.astype(np.uint8)
array_r = cv2.resize(array, tuple(list(new_shape)[::-1]), interpolation=interp_cv2)
elif method == 'gdal':
# Set up grid coordinate arrays, then run interp2_gdal.
X = np.arange(array.shape[1]) + 1
Y = np.arange(array.shape[0]) + 1
Xi = np.linspace(X[0], X[-1] + (X[1]-X[0]), num=(new_shape[1] + 1))[0:-1]
Yi = np.linspace(Y[0], Y[-1] + (Y[1]-Y[0]), num=(new_shape[0] + 1))[0:-1]
array_r = interp2_gdal(X, Y, array, Xi, Yi, interp, extrapolate=False)
elif method == 'pil':
return imresize_pil(array, new_shape, interp)
elif method == 'scipy':
PILmode = 'L' if array.dtype in (np.bool, np.uint8) else 'F'
if PILmode == 'L' and array.dtype != np.uint8:
array = array.astype(np.uint8)
array_r = scipy.misc.imresize(array, new_shape, interp, PILmode)
# Clean up resized array.
if array_r.dtype != dtype_out_np:
array_r = astype_round_and_crop(array_r, dtype_out_np, allow_modify_array=True)
if one_dim_flag:
result_size_1d = new_shape[0] if one_dim_axis == 0 else new_shape[1]
array_r = np.reshape(array_r, result_size_1d)
return array_r
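# Example usage (illustrative sketch) for `imresize_old`; the array values are
# arbitrary test data.
def _example_imresize_old():
    import numpy as np  # local import so the sketch stands alone
    a = np.arange(64, dtype=np.float32).reshape(8, 8)
    # An explicit target shape may be given instead of a scalar factor;
    # the 'pil' method delegates to imresize_pil.
    return imresize_old(a, (4, 4), interp='bilinear', method='pil')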
def conv2_slow(array, kernel, shape='full', default_double_out=True, zero_border=True,
fix_float_zeros=True, nan_over_zero=True, allow_flipped_processing=True):
"""
Convolve two 2D arrays.
Parameters
----------
array : ndarray, 2D
Primary array to convolve.
kernel : ndarray, 2D, smaller shape than `array`
Secondary, smaller array to convolve with `array`.
shape : str; 'full', 'same', or 'valid'
See documentation for `scipy.signal.convolve` [1].
default_double_out : bool
If True and `array` is not of floating data type,
casts the result to float64 before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
zero_border : bool
When `kernel` hangs off the edges of `array`
during convolution calculations...
If True, pixels beyond the edges of `array`
are extrapolated as zeros.
If False, pixels beyond the edges of `array`
are extrapolated as the value of the closest edge pixel.
This option only applies when `shape='same'`,
since a zero border is required when `shape='full'`
and does not make sense when `shape='valid'`.
fix_float_zeros : bool
To correct for FLOP error in convolution where the result
should be zero but isn't, immediately following convolution
map array values between -1.0e-12 and +1.0e-11 to zero.
nan_over_zero : bool
If True, let NaN x 0 = NaN in convolution computation.
If False, let NaN x 0 = 0 in convolution computation.
allow_flipped_processing : bool
If True and at least one of `kernel`'s side lengths is even,
rotate both `array` and `kernel` 180 degrees before performing convolution,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
Returns
-------
array_c : ndarray, 2D
A 2D array containing the convolution of the input array and kernel.
See Also
--------
conv2
Notes
-----
This function is meant to replicate MATLAB's conv2 function [2].
Scipy's convolution function cannot handle NaN input as it results in all NaN output.
In comparison, MATLAB's conv2 function takes a sensible approach by letting NaN win out
in all calculations involving pixels with NaN values in the input array.
To replicate this, we set all NaN values to zero before performing convolution,
then mask our result array with NaNs according to a binary dilation of ALL NaN locations
in the input array, dilating using a structure of ones with same shape as the provided kernel.
For large arrays, this function will use an FFT method for convolution that results in
FLOP errors on the order of 10^-12. For this reason, a floating result array will have
all resulting pixel values between -1.0e-12 and 10.0e-12 set to zero.
References
----------
.. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve.html
.. [2] https://www.mathworks.com/help/matlab/ref/conv2.html
"""
shape_choices = ('full', 'same', 'valid')
if shape not in shape_choices:
raise InvalidArgumentError("`shape` must be one of {}, but was '{}'".format(shape_choices, shape))
if default_double_out:
dtype_out = None
if np.issubdtype(array.dtype, np.floating):
dtype_out = array.dtype
if (np.issubdtype(kernel.dtype, np.floating)
and int(str(kernel.dtype).replace('float', '')) > int(str(dtype_out).replace('float', ''))):
warn("Since default_double_out=True, kernel with floating dtype ({}) at greater precision than "
"array floating dtype ({}) is cast to array dtype".format(kernel.dtype, dtype_out))
kernel = kernel.astype(dtype_out)
else:
dtype_out = np.float64
if kernel.dtype == np.bool:
warn("Boolean data type for kernel is not supported, casting to float32")
kernel = kernel.astype(np.float32)
rotation_flag = False
if allow_flipped_processing:
array, kernel, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, kernel)
# Take a record of where all NaN values are located
# before setting the values of those pixels to zero.
fixnans_flag = False
if np.issubdtype(array.dtype, np.floating):
array_nans = np.isnan(array)
if np.any(array_nans):
fixnans_flag = True
array[array_nans] = 0
else:
del array_nans
# Edge settings
array_backup = array
if (fixnans_flag and shape != 'same') or (shape == 'same' and not zero_border):
if shape in ('full', 'same'):
pady_top, padx_lft = (np.array(kernel.shape) - 1) / 2
pady_bot, padx_rht = np.array(kernel.shape) / 2
elif shape == 'valid':
pady_top, padx_lft = np.array(kernel.shape) / 2
pady_bot, padx_rht = (np.array(kernel.shape) - 1) / 2
pady_top, padx_lft = int(pady_top), int(padx_lft)
pady_bot, padx_rht = int(pady_bot), int(padx_rht)
if shape == 'same': # and not zero_border
array = np.pad(array, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
# Perform convolution.
method = scipy.signal.choose_conv_method(array, kernel, shape)
array_c = scipy.signal.convolve(array, kernel, shape, method)
if method != 'direct' and fix_float_zeros and np.issubdtype(array_c.dtype, np.floating):
# Fix FLOP error from FFT method where we assume zero was the desired result.
array_c[(-1.0e-12 < array_c) & (array_c < 10.0e-12)] = 0
# Apply dilation of original NaN pixels to result.
if fixnans_flag:
array_nans_backup = array_nans
if shape != 'same' or not zero_border:
if shape == 'full':
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
elif shape == 'same': # and not zero_border
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
dilate_structure = np.ones(kernel.shape, dtype=np.uint8)
if not nan_over_zero:
dilate_structure[kernel == 0] = 0
array_nans_dilate = imdilate(array_nans, dilate_structure)
if shape == 'valid':
pady_bot = -pady_bot if pady_bot > 0 else None
padx_rht = -padx_rht if padx_rht > 0 else None
array_nans_dilate = array_nans_dilate[pady_top:pady_bot, padx_lft:padx_rht]
array_c[array_nans_dilate] = np.nan
# Return the input array to its original state.
array_backup[array_nans_backup] = np.nan
# Clean up result array.
if shape == 'same' and not zero_border:
pady_bot = -pady_bot if pady_bot > 0 else None
padx_rht = -padx_rht if padx_rht > 0 else None
array_c = array_c[pady_top:pady_bot, padx_lft:padx_rht]
# FIXME: Make returned data type function like conv2.
if default_double_out and array_c.dtype != dtype_out:
array_c = array_c.astype(dtype_out)
return fix_array_if_rotation_was_applied(array_c, rotation_flag)
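# Example usage (illustrative sketch) for `conv2_slow`, showing the MATLAB-style
# NaN handling described in the docstring; values are arbitrary test data.
def _example_conv2_slow():
    import numpy as np  # local import so the sketch stands alone
    a = np.ones((5, 5), dtype=np.float64)
    a[2, 2] = np.nan
    k = np.ones((3, 3), dtype=np.float64)
    c = conv2_slow(a, k, shape='same')
    # NaN wins out over every output pixel whose 3x3 kernel footprint touched
    # the NaN input pixel.
    assert np.isnan(c[1:4, 1:4]).all()
    return c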
def conv2(array, kernel, shape='full', conv_depth='default',
zero_border=True, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Convolve two 2D arrays.
Parameters
----------
array : ndarray, 2D
Primary array to convolve.
kernel : ndarray, 2D, smaller shape than `array`
Secondary, smaller array to convolve with `array`.
shape : str; 'full', 'same', or 'valid'
See documentation for MATLAB's `conv2` function [2].
conv_depth : str; 'default', 'input', 'int16', 'single'/'float32', or 'double'/'float64'
Sets the data type depth of the convolution function filter2D,
and correspondingly sets the data type of the returned array.
'default': If `array` is of floating data type,
returns an array of that data type, otherwise returns
an array of float64.
'input': Returns an array of the same data type as `array`.
'int16': Returns an array of int16.
'single'/'float32': Returns an array of float32.
'double'/'float64': Returns an array of float64.
BEWARE: the 'single'/'float32' option results in noticeably larger
floating point error in the convolution than 'double'/'float64'
(see `fix_float_zeros`).
zero_border : bool
When `kernel` hangs off the edges of `array`
during convolution calculations...
If True, pixels beyond the edges of `array`
are extrapolated as zeros.
If False, pixels beyond the edges of `array`
are extrapolated as the value of the closest edge pixel.
This option only applies when `shape='same'`,
since a zero border is required when `shape='full'`
and does not make sense when `shape='valid'`.
fix_float_zeros : bool
To correct for FLOP error in convolution where the result
should be zero but isn't, immediately following convolution
map array values between...
- float32 (single):
-1.0e-6 and +1.0e-6 to zero.
- float64 (double):
-1.0e-15 and +1.0e-15 to zero.
nan_same : bool
NaN values are treated as 0 in convolution computation,
but NaN pixels are retained from input to output.
nan_over_zero : bool
If True, let NaN x 0 = NaN in convolution computation.
If False, let NaN x 0 = 0 in convolution computation.
allow_flipped_processing : bool
If True and at least one of `kernel`'s side lengths is even,
rotate both `array` and `kernel` 180 degrees before performing convolution,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [2].
Returns
-------
array_c : ndarray, 2D
Array containing the convolution of input array and kernel.
See Also
--------
conv2_slow
Notes
-----
This function utilizes a fast OpenCV function `filter2D` [1]
as a means to replicate MATLAB's `conv2` function [2].
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#filter2d
.. [2] https://www.mathworks.com/help/matlab/ref/conv2.html
"""
shape_choices = ('full', 'same', 'valid')
if shape not in shape_choices:
raise InvalidArgumentError("`shape` must be one of {}, but was '{}'".format(shape_choices, shape))
conv_depth_choices = ('default', 'input', 'int16', 'single', 'float32', 'double', 'float64')
if conv_depth not in conv_depth_choices:
raise InvalidArgumentError("`conv_depth` must be one of {}, but was '{}'".format(conv_depth_choices, conv_depth))
cv2_array_dtypes = [np.uint8, np.int16, np.uint16, np.float32, np.float64]
cv2_kernel_dtypes = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.int64, np.uint64, np.float32, np.float64]
# Check array data type.
array_error = False
array_dtype_errmsg = ''
array_dtype_in = array.dtype
if array_dtype_in not in cv2_array_dtypes:
array_dtype_errmsg = ("Fast convolution method only allows array dtypes {}, "
"but was {}".format([str(d(1).dtype) for d in cv2_array_dtypes], array.dtype))
# Only cast to a higher data type for safety.
array_dtype_cast = None
if array_dtype_in == np.bool:
array_dtype_cast = np.uint8
elif array_dtype_in == np.int8:
array_dtype_cast = np.int16
elif array_dtype_in == np.float16:
array_dtype_cast = np.float32
if array_dtype_cast is None:
array_error = True
# Check kernel data type.
kernel_error = False
kernel_dtype_errmsg = ''
kernel_dtype_in = kernel.dtype
if kernel_dtype_in not in cv2_kernel_dtypes:
kernel_dtype_errmsg = ("Fast convolution method only allows kernel dtypes {} "
"but was {}".format([str(d(1).dtype) for d in cv2_kernel_dtypes], kernel.dtype))
# Only cast to a higher data type for safety.
kernel_dtype_cast = None
if kernel_dtype_in == np.bool:
kernel_dtype_cast = np.uint8
elif kernel_dtype_in == np.uint32:
kernel_dtype_cast = np.uint64
elif kernel_dtype_in == np.float16:
kernel_dtype_cast = np.float32
if kernel_dtype_cast is None:
kernel_error = True
# Fall back to old (slower) conv2 function
# if array or kernel data type is unsupported.
if array_error or kernel_error:
dtype_errmsg = "{}{}{}".format(array_dtype_errmsg * array_error,
"\n" * (array_error * kernel_error),
kernel_dtype_errmsg * kernel_error)
if conv_depth != 'default':
raise UnsupportedDataTypeError(dtype_errmsg + "\nSince conv_depth ('{}') != 'default', "
"cannot fall back to other method".format(conv_depth))
warn(dtype_errmsg + "\n-> Falling back to slower, less exact method")
return conv2_slow(array, kernel, shape, True, zero_border,
fix_float_zeros, nan_over_zero, allow_flipped_processing)
# Promote array or kernel to higher data type if necessary
# to continue with faster and more reliable convolution method.
array_casted = False
if 'array_dtype_cast' in vars():
if array_dtype_in != np.bool:
warn(array_dtype_errmsg + "\n-> Casting array from {} to {} for processing".format(
array_dtype_in, array_dtype_cast(1).dtype))
array = array.astype(array_dtype_cast)
array_casted = True
if 'kernel_dtype_cast' in vars():
if kernel_dtype_in != np.bool:
warn(kernel_dtype_errmsg + "\n-> Casting kernel from {} to {} for processing".format(
kernel_dtype_in, kernel_dtype_cast(1).dtype))
kernel = kernel.astype(kernel_dtype_cast)
# Set convolution depth and output data type.
ddepth = None
dtype_out = None
conv_dtype_error = False
if conv_depth == 'default':
if np.issubdtype(array_dtype_in, np.floating):
ddepth = -1
dtype_out = array_dtype_in
else:
ddepth = cv2.CV_64F
dtype_out = np.float64
elif conv_depth == 'input':
ddepth = -1
dtype_out = array_dtype_in
elif conv_depth == 'int16':
ddepth = cv2.CV_16S
dtype_out = np.int16
if array.dtype != np.uint8:
conv_dtype_error = True
conv_dtype_errmsg = "conv_depth can only be 'int16' if array dtype is uint8"
elif conv_depth in ('single', 'float32'):
ddepth = cv2.CV_32F
dtype_out = np.float32
if array.dtype == np.float64:
conv_dtype_error = True
conv_dtype_errmsg = "conv_depth can only be 'single'/'float32' if array dtype is not float64"
elif conv_depth in ('double', 'float64'):
ddepth = cv2.CV_64F
dtype_out = np.float64
if array.dtype == np.float32:
conv_dtype_errmsg = "conv_depth can only be 'double'/'float64' if array dtype is not float32"
warn(conv_dtype_errmsg + "\n-> Casting array from float32 to float64 for processing")
array = array.astype(np.float64)
array_casted = True
if conv_dtype_error:
raise UnsupportedDataTypeError(conv_dtype_errmsg)
rotation_flag = False
if allow_flipped_processing:
array, kernel, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, kernel)
# Take a record of where all NaN values are located
# before setting the values of those pixels to zero.
fixnans_flag = False
if np.issubdtype(array.dtype, np.floating):
array_nans = np.isnan(array)
if np.any(array_nans):
fixnans_flag = True
if not array_casted:
array_backup = array
array[array_nans] = 0
else:
del array_nans
# Edge settings
if shape != 'same' or not zero_border:
# The following differences in where to split
# an even side length for a kernel is purely
# to mimic MATLAB's conv2 function.
if shape == 'full' or (shape == 'same' and not zero_border):
pady_top, padx_lft = (np.array(kernel.shape) - 1) / 2
pady_bot, padx_rht = np.array(kernel.shape) / 2
elif shape == 'valid':
pady_top, padx_lft = np.array(kernel.shape) / 2
pady_bot, padx_rht = (np.array(kernel.shape) - 1) / 2
pady_top, padx_lft = int(pady_top), int(padx_lft)
pady_bot, padx_rht = int(pady_bot), int(padx_rht)
if shape == 'full':
array = np.pad(array, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
# Perform convolution.
array_c = cv2.filter2D(array, ddepth, np.rot90(kernel, 2),
borderType=(cv2.BORDER_CONSTANT if zero_border else cv2.BORDER_REPLICATE))
if fix_float_zeros and np.issubdtype(array_c.dtype, np.floating):
# Fix FLOP error where we assume zero was the desired result.
if array_c.dtype == np.float32:
array_c[(-1.0e-6 < array_c) & (array_c < 1.0e-6)] = 0
elif array_c.dtype == np.float64:
array_c[(-1.0e-15 < array_c) & (array_c < 1.0e-15)] = 0
if array_c.dtype != dtype_out:
array_c = astype_round_and_crop(array_c, dtype_out, allow_modify_array=True)
# Crop result if necessary.
if shape == 'valid':
if pady_bot >= 0:
pady_bot = -pady_bot if pady_bot > 0 else None
if padx_rht >= 0:
padx_rht = -padx_rht if padx_rht > 0 else None
array_c = array_c[pady_top:pady_bot, padx_lft:padx_rht]
# Apply dilation of original NaN pixels to result.
if fixnans_flag:
array_nans_backup = array_nans
if shape == 'full':
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'constant', constant_values=0)
elif shape == 'same' and not zero_border:
array_nans = np.pad(array_nans, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'edge')
if nan_same:
array_nans_dilate = array_nans
else:
dilate_structure = np.ones(kernel.shape, dtype=np.uint8)
if not nan_over_zero:
dilate_structure[kernel == 0] = 0
array_nans_dilate = imdilate(array_nans, dilate_structure)
if shape == 'valid' or (shape == 'same' and not zero_border):
if pady_bot >= 0:
pady_bot = -pady_bot if pady_bot > 0 else None
if padx_rht >= 0:
padx_rht = -padx_rht if padx_rht > 0 else None
array_nans_dilate = array_nans_dilate[pady_top:pady_bot, padx_lft:padx_rht]
array_c[array_nans_dilate] = np.nan
# Restore the input array to its original state.
if not array_casted:
array_backup[array_nans_backup] = np.nan
return fix_array_if_rotation_was_applied(array_c, rotation_flag)
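# Example usage (illustrative sketch) for `conv2`: a 'same'-shape smoothing
# convolution on float32 data; values are arbitrary test data.
def _example_conv2():
    import numpy as np  # local import so the sketch stands alone
    a = np.random.rand(100, 100).astype(np.float32)
    k = np.ones((5, 5), dtype=np.float32) / 25.0
    # conv_depth='default' keeps the float32 depth of the floating input array,
    # and zero_border=True extrapolates zeros beyond the array edges.
    c = conv2(a, k, shape='same')
    assert c.shape == a.shape and c.dtype == np.float32
    return c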
def filt2(array, kernel, shape='same', conv_depth='default',
zero_border=False, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Apply the (convolution) filter kernel to an array in 2D.
See documentation for `conv2`, but replace the word "convolve" with "filter".
Notes
-----
The mathematical convolution function (as implemented in conv2)
rotates the kernel 180 degrees before sliding it over the array
and performing the multiplications/additions.
"""
return conv2(array, np.rot90(kernel, 2), shape, conv_depth,
zero_border, fix_float_zeros,
nan_same, nan_over_zero,
allow_flipped_processing)
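# Example usage (illustrative sketch) for `filt2`: because filt2 pre-rotates the
# kernel, the kernel is effectively applied as a correlation filter; values are
# arbitrary test data.
def _example_filt2():
    import numpy as np  # local import so the sketch stands alone
    a = np.random.rand(50, 50).astype(np.float32)
    k = np.full((3, 3), 1.0 / 9.0, dtype=np.float32)  # 3x3 averaging filter
    return filt2(a, k, shape='same')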
def moving_average(array, nhood, shape='same', conv_depth='default',
zero_border=True, fix_float_zeros=True,
nan_same=False, nan_over_zero=True,
allow_flipped_processing=True):
"""
Calculate the moving average over an array.
Parameters
----------
array : ndarray, 2D
Array for which to calculate the moving average.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
of structure (of ones) to be used as structure for moving window.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for moving window.
shape : str; 'full', 'same', or 'valid'
See documentation for `conv2`.
conv_depth : str; 'default', 'single', or 'double'
Specifies the floating data type of the convolution kernel.
See documentation for `conv2`.
zero_border : bool
See documentation for `conv2`.
fix_float_zeros : bool
See documentation for `conv2`.
nan_same : bool
See documentation for `conv2`.
nan_over_zero : bool
See documentation for `conv2`.
allow_flipped_processing : bool
See documentation for `conv2` function.
See Also
--------
conv2
conv2_slow
Returns
-------
moving_average : ndarray, 2D
Array containing the moving average of the input array.
"""
conv_dtype_choices = ('default', 'single', 'double')
structure = None
if type(nhood) in (int, tuple):
size = nhood
elif type(nhood) == np.ndarray:
structure = nhood
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if conv_depth not in conv_dtype_choices:
raise UnsupportedDataTypeError("float_dtype must be one of {}, "
"but was {}".format(conv_dtype_choices, conv_depth))
if conv_depth == 'default':
float_dtype = np.float32 if array.dtype == np.float32 else np.float64
else:
float_dtype = np.float32 if conv_depth == 'single' else np.float64
if structure is not None:
if not np.any(structure):
# The trivial case,
# must be handled to prevent divide by zero error.
return np.zeros_like(array, float_dtype)
if np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`structure` may only contain zeros and ones")
else:
if type(size) == int:
structure = np.ones((size, size), dtype=float_dtype)
elif type(size) == tuple:
structure = np.ones(size, dtype=float_dtype)
conv_kernel = np.rot90(np.divide(structure, np.sum(structure), dtype=float_dtype), 2)
return conv2(array, conv_kernel, shape, conv_depth,
zero_border, fix_float_zeros,
nan_same, nan_over_zero,
allow_flipped_processing)
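# Example usage (illustrative sketch) for `moving_average`: a 5x5 boxcar mean;
# values are arbitrary test data.
def _example_moving_average():
    import numpy as np  # local import so the sketch stands alone
    a = np.random.rand(20, 20)
    # An integer nhood builds a square structure of ones; an ndarray of 0s/1s
    # could be passed instead for a custom window shape.
    m = moving_average(a, 5)
    assert m.shape == a.shape
    return m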
def conv_binary_prevent_overflow(array, structure):
"""
Make sure structure array has great enough positive bitdepth
to be convolved with binary primary array.
Parameters
----------
array : ndarray of bool or int, 2D
Primary integer array to convolve.
Must be a binary array of only zero/False and one/True.
structure : ndarray of bool or int, 2D
Secondary, smaller integer array to convolve with `array`.
Must be a binary array of only zero/False and one/True.
Returns
-------
structure : ndarray, possible uint cast of `structure`
Either the same `structure` array or a cast or `structure`
to a uint data type with more positive bitdepth than the
input array.
"""
# Get upper bound on minimum positive bitdepth for convolution.
conv_bitdepth_pos = math.log(np.prod(structure.shape)+1, 2)
dtype_bitdepths_pos = (1, 7, 8, 15, 16, 31, 32, 63, 64)
for b in dtype_bitdepths_pos:
if conv_bitdepth_pos <= b:
conv_bitdepth_pos = b
break
# Parse input array and structure data type for bitdepth.
input_bitdepth_pos = 0
for arr in (array, structure):
arr_dtype = arr.dtype
if arr.dtype == np.bool:
arr_posbits = 1
elif np.issubdtype(arr_dtype, np.int):
arr_posbits = int(str(arr.dtype).replace('int', '')) - 1
elif np.issubdtype(arr_dtype, np.uint):
arr_posbits = int(str(arr.dtype).replace('uint', ''))
elif np.issubdtype(arr_dtype, np.floating):
arr_posbits = np.inf
else:
arr_posbits = 0
input_bitdepth_pos = max(input_bitdepth_pos, arr_posbits)
if input_bitdepth_pos == 0:
# Handle unknown data type by casting structure to
# maximum possible bitdepth.
structure = structure.astype(np.uint64)
else:
# If maximum positive bitdepth from inputs is too low,
# cast structure to minimum positive bitdepth for convolution.
if input_bitdepth_pos < conv_bitdepth_pos:
if (conv_bitdepth_pos % 8) != 0:
conv_bitdepth_pos += 8 - (conv_bitdepth_pos % 8)
structure = structure.astype(eval('np.uint{}'.format(conv_bitdepth_pos)))
return structure
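# Example usage (illustrative sketch) for `conv_binary_prevent_overflow`:
# a 20x20 structure can overlap up to 400 ones, which does not fit in the
# 8 positive bits of uint8, so the structure is cast up (to uint16 here).
def _example_conv_binary_prevent_overflow():
    import numpy as np  # local import so the sketch stands alone
    mask = np.zeros((50, 50), dtype=bool)
    structure = np.ones((20, 20), dtype=np.uint8)
    structure = conv_binary_prevent_overflow(mask, structure)
    assert structure.dtype == np.uint16
    return structure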
def imerode_slow(array, nhood, iterations=1, mode='auto',
cast_structure_for_speed=True, allow_flipped_processing=True):
"""
Erode an array with the provided binary structure.
Parameters
----------
array : ndarray, 2D
Array to erode.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
of structure (of ones) to be used as structure for erosion.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for erosion.
iterations : positive int
Number of times to perform the erosion.
mode : str; 'auto', 'conv', 'skimage', 'scipy', or 'scipy_grey'
Specifies which method will be used to perform erosion.
'auto' -------- use the fastest of ('conv', 'scipy') given array, structure sizes
'conv' -------- `conv2`
'skimage' ----- `skimage.morphology.binary_erosion` [1]
'scipy' ------- `scipy.ndimage.binary_erosion` [2]
'scipy_grey' -- `scipy.ndimage.grey_erosion` [3]
cast_structure_for_speed : bool
If True and `structure` is not float32 data type, cast it to float32.
This produces the fastest results overall for all methods,
and for 'conv' method this prevents a potential fallback call to
`conv2_slow` if input structure has an unsupported data type for
fast OpenCV method used in `conv2`.
allow_flipped_processing : bool
If True and at least one of `structure`'s side lengths is even,
rotate both `array` and `structure` 180 degrees before performing erosion,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [4].
Returns
-------
array_e : ndarray, same shape and type as `array`
Array containing the erosion of the input array by the structure.
See Also
--------
imdilate_slow
Notes
-----
This function is meant to replicate MATLAB's `imerode` function [4].
Strictly binary erosion will be performed if and only if `array.dtype` is `np.bool`,
otherwise greyscale erosion will be performed. However, greyscale erosion on a
binary array containing only values X and Y produces the same result as if the
values [min(X, Y), max(X, Y)] were mapped to [0, 1] and cast to a boolean array,
passed into this function, then mapped values in the result array back to their
original values (for floating `array`, note `-inf < 0 < inf < NaN`).
All modes will handle greyscale erosion when `array` is not boolean.
For `array` of feasibly large sizes containing more than two values,
'scipy_grey' is the fastest method for performing greyscale erosion,
but since the method may interpolate on the boundaries between regions
of differing values (which the MATLAB function does not do), it is not
an acceptable default method and is not considered when `mode=='auto'`.
In preliminary testing, all three methods 'conv', 'scipy', and 'skimage'
are able to reproduce the results of the MATLAB function for both binary
and greyscale erosion (with the exception of some edge pixels when
`structure` with a False/zero center element is used in grey erosion,
which produces nonsensical values where proper erosion cannot be detected
by these three methods as well as MATLAB's function -- only the 'scipy_grey'
method handles this case properly).
References
----------
.. [1] http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion
.. [2] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.binary_erosion.html
.. [3] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.grey_erosion.html
.. [4] https://www.mathworks.com/help/images/ref/imerode.html
"""
mode_choices = ('auto', 'conv', 'skimage', 'scipy', 'scipy_grey')
structure = None
if type(nhood) == int:
structure = np.ones((nhood, nhood), dtype=np.float32)
elif type(nhood) == tuple:
structure = np.ones(nhood, dtype=np.float32)
elif type(nhood) == np.ndarray:
structure = nhood
if structure.dtype != np.bool and np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`nhood` structure contains values other than 0 and 1")
if cast_structure_for_speed and structure.dtype != np.float32:
structure = structure.astype(np.float32)
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if mode not in mode_choices:
raise UnsupportedMethodError("'mode' must be one of {}, but was '{}'".format(mode_choices, mode))
if mode == 'auto':
# FIXME: Get new time coefficients for faster conv2 function now being used.
# Make an estimate of the runtime for 'conv' and 'scipy' methods,
# then choose the faster method.
array_elements = np.prod(array.shape)
struc_elements = np.prod(structure.shape)
time_conv = 1.25e-07 * array_elements - 7.61e-02
time_scipy = ( (1.56e-10 * array_elements - 2.66e-04) * struc_elements
+ (1.34e-08 * array_elements - 1.42e-02) )
mode = 'conv' if time_conv < time_scipy else 'scipy'
if mode == 'conv':
if ( not np.issubdtype(array.dtype, np.floating)
and not np.issubdtype(structure.dtype, np.floating) ):
# Make sure one of the input integer arrays has great enough
# positive bitdepth to prevent overflow during convolution.
if array.dtype != np.bool and np.any(~np.logical_or(array == 0, array == 1)):
structure = structure.astype(np.uint64)
else:
structure = conv_binary_prevent_overflow(array, structure)
structure = np.rot90(structure, 2)
rotation_flag = False
if allow_flipped_processing:
array, structure, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, structure)
if mode == 'skimage':
pady, padx = np.array(structure.shape) / 2
pady, padx = int(pady), int(padx)
if array.dtype == np.bool:
padval = 1
else:
padval = np.inf if np.issubdtype(array.dtype, np.floating) else np.iinfo(array.dtype).max
array = np.pad(array, ((pady, pady), (padx, padx)), 'constant', constant_values=padval)
for i in range(iterations):
if array.dtype == np.bool:
# Binary erosion
if mode == 'conv':
array_e = (conv2(~array, structure, shape='same', allow_flipped_processing=False) == 0)
elif mode in ('scipy', 'scipy_grey'):
array_e = sp_ndimage.binary_erosion(array, structure, border_value=1)
elif mode == 'skimage':
array_e = sk_morphology.binary_erosion(array, structure)
elif mode == 'scipy_grey':
# Greyscale erosion
if np.any(structure != 1):
if not np.issubdtype(structure.dtype, np.floating):
structure = structure.astype(np.float32)
array_e = sp_ndimage.grey_erosion(array, structure=(structure - 1))
else:
array_e = sp_ndimage.grey_erosion(array, size=structure.shape)
else:
# Greyscale erosion
array_vals = np.unique(array)
if np.issubdtype(array.dtype, np.floating):
array_vals_nans = np.isnan(array_vals)
has_nans = np.any(array_vals_nans)
if has_nans:
array_nans = np.isnan(array)
# Remove possible multiple occurrences of "nan" in results of np.unique().
array_vals = np.delete(array_vals, np.where(np.isnan(array_vals)))
array_vals = np.append(array_vals, np.nan)
else:
has_nans = False
# Start with an array full of the lowest value from the input array.
# Overlay the erosion of all higher-value layers (combined)
# as the second-lowest value. Call this the new lowest value,
# and repeat until all layers have been added up through the highest value.
array_e = np.full_like(array, array_vals[0])
for val in array_vals[1:]:
if not np.isnan(val):
mask_val = (array >= val) if not has_nans else np.logical_or(array >= val, array_nans)
else:
mask_val = array_nans if mode != 'skimage' else np.logical_or(array_nans, array == np.inf)
if mode == 'conv':
result_val = (conv2(~mask_val, structure, shape='same', allow_flipped_processing=False) == 0)
elif mode == 'scipy':
result_val = sp_ndimage.binary_erosion(mask_val, structure, border_value=1)
elif mode == 'skimage':
result_val = sk_morphology.binary_erosion(mask_val, structure)
array_e[result_val] = val
array = array_e
if mode == 'skimage':
array_e = array_e[pady:-pady, padx:-padx]
return fix_array_if_rotation_was_applied(array_e, rotation_flag)
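# Example usage (illustrative sketch) for `imerode_slow`: binary erosion of a
# 5x5 block with a 3x3 neighborhood; values are arbitrary test data.
def _example_imerode_slow():
    import numpy as np  # local import so the sketch stands alone
    mask = np.zeros((9, 9), dtype=bool)
    mask[2:7, 2:7] = True
    eroded = imerode_slow(mask, 3)
    # A 1-pixel rim is stripped off, leaving the inner 3x3 block.
    assert eroded.sum() == 9
    return eroded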
def imdilate_slow(array, nhood, iterations=1, mode='auto',
cast_structure_for_speed=True, allow_flipped_processing=True):
"""
Dilate an array with the provided binary structure.
Parameters
----------
array : ndarray, 2D
Array to dilate.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
of structure (of ones) to be used as structure for dilation.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for dilation.
iterations : positive int
Number of times to perform the dilation.
mode : str; 'auto', 'conv', 'skimage', 'scipy', or 'scipy_grey'
Specifies which method will be used to perform dilation.
'auto' -------- use the fastest of ('conv', 'scipy') given array, structure sizes
'conv' -------- `conv2`
'skimage' ----- `skimage.morphology.binary_dilation` [1]
'scipy' ------- `scipy.ndimage.binary_dilation` [2]
'scipy_grey' -- `scipy.ndimage.grey_dilation` [3]
cast_structure_for_speed : bool
If True and `structure` is not float32 data type, cast it to float32.
This produces the fastest results overall for all methods,
and for 'conv' method this prevents a potential fallback call to
`conv2_slow` if input structure has an unsupported data type for
fast OpenCV method used in `conv2`.
allow_flipped_processing : bool
If True and at least one of `structure`'s side lengths is even,
rotate both `array` and `structure` 180 degrees before performing dilation,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [4].
Returns
-------
array_d : ndarray, same shape and type as `array`
Array containing the dilation of the input array by the structure.
See Also
--------
imerode_slow
Notes
-----
This function is meant to replicate MATLAB's `imdilate` function [4].
Strictly binary dilation will be performed if and only if `array.dtype` is `np.bool`,
otherwise greyscale dilation will be performed. However, greyscale dilation on a
binary array containing only values X and Y produces the same result as if the
values [min(X, Y), max(X, Y)] were mapped to [0, 1] and cast to a boolean array,
passed into this function, then mapped values in the result array back to their
original values (for floating `array`, note `-inf < 0 < inf < NaN`).
All modes will handle greyscale dilation when `array` is not boolean.
For `array` of feasibly large sizes containing more than two values,
'scipy_grey' is the fastest method for performing greyscale dilation,
but since the method may interpolate on the boundaries between regions
of differing values (which the MATLAB function does not do), it is not
an acceptable default method and is not considered when `mode=='auto'`.
In preliminary testing, all three methods 'conv', 'scipy', and 'skimage'
are able to reproduce the results of the MATLAB function for both binary
and greyscale dilation (with the exception of some edge pixels when
`structure` with a False/zero center element is used in grey dilation,
which produces nonsensical values where proper dilation cannot be detected
by these three methods as well as MATLAB's function -- only the 'scipy_grey'
method handles this case properly).
References
----------
.. [1] http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation
.. [2] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.binary_dilation.html
.. [3] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.grey_dilation.html
.. [4] https://www.mathworks.com/help/images/ref/imdilate.html
"""
mode_choices = ('auto', 'conv', 'skimage', 'scipy', 'scipy_grey')
structure = None
if type(nhood) == int:
structure = np.ones((nhood, nhood), dtype=np.float32)
elif type(nhood) == tuple:
structure = np.ones(nhood, dtype=np.float32)
elif type(nhood) == np.ndarray:
structure = nhood
if structure.dtype != np.bool and np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`nhood` structure contains values other than 0 and 1")
if cast_structure_for_speed and structure.dtype != np.float32:
structure = structure.astype(np.float32)
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if mode not in mode_choices:
raise UnsupportedMethodError("'mode' must be one of {}, but was '{}'".format(mode_choices, mode))
if mode == 'auto':
# FIXME: Get new time coefficients for faster conv2 function now being used.
# Make an estimate of the runtime for 'conv' and 'scipy' methods,
# then choose the faster method.
array_elements = np.prod(array.shape)
struc_elements = np.prod(structure.shape)
time_conv = 1.23e-07 * array_elements - 4.62e-02
time_scipy = ( (6.60e-10 * array_elements - 3.59e-04) * struc_elements
+ (2.43e-08 * array_elements + 4.05e-02) )
mode = 'conv' if time_conv < time_scipy else 'scipy'
if mode == 'conv':
if ( not np.issubdtype(array.dtype, np.floating)
and not np.issubdtype(structure.dtype, np.floating) ):
# Make sure one of the input integer arrays has great enough
# positive bitdepth to prevent overflow during convolution.
if array.dtype != np.bool and np.any(~np.logical_or(array == 0, array == 1)):
structure = structure.astype(np.uint64)
else:
structure = conv_binary_prevent_overflow(array, structure)
rotation_flag = False
if mode in ('scipy', 'scipy_grey', 'skimage') and allow_flipped_processing:
array, structure, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, structure)
for i in range(iterations):
if array.dtype == np.bool:
# Binary dilation
if mode == 'conv':
array_d = (conv2(array, structure, shape='same', allow_flipped_processing=False) > 0)
elif mode in ('scipy', 'scipy_grey'):
array_d = sp_ndimage.binary_dilation(array, structure, border_value=0)
elif mode == 'skimage':
array_d = sk_morphology.binary_dilation(array, structure)
elif mode == 'scipy_grey':
# Greyscale dilation
if np.any(structure != 1):
if not np.issubdtype(structure.dtype, np.floating):
structure = structure.astype(np.float32)
array_d = sp_ndimage.grey_dilation(array, structure=(structure - 1))
else:
array_d = sp_ndimage.grey_dilation(array, size=structure.shape)
else:
# Greyscale dilation
array_vals = np.unique(array)
if np.issubdtype(array.dtype, np.floating):
array_vals_nans = np.isnan(array_vals)
has_nans = np.any(array_vals_nans)
if has_nans:
# Remove possible multiple occurrences of "nan" in results of np.unique().
array_vals = np.delete(array_vals, np.where(np.isnan(array_vals)))
array_vals = np.append(array_vals, np.nan)
# Start with an array full of the lowest value from the input array,
# then overlay the dilation of each higher-value layer,
# one at a time, until all layers have been added.
array_d = np.full_like(array, array_vals[0])
for val in array_vals[1:]:
mask_val = (array == val) if not np.isnan(val) else np.isnan(array)
if mode == 'conv':
result_val = (conv2(mask_val, structure, shape='same', allow_flipped_processing=False) > 0)
elif mode == 'scipy':
result_val = sp_ndimage.binary_dilation(mask_val, structure, border_value=0)
elif mode == 'skimage':
result_val = sk_morphology.binary_dilation(mask_val, structure)
array_d[result_val] = val
array = array_d
return fix_array_if_rotation_was_applied(array_d, rotation_flag)
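# Example usage (illustrative sketch) for `imdilate_slow`: one binary dilation
# of a single seed pixel with a 3x3 neighborhood; values are arbitrary test data.
def _example_imdilate_slow():
    import numpy as np  # local import so the sketch stands alone
    mask = np.zeros((9, 9), dtype=bool)
    mask[4, 4] = True
    dilated = imdilate_slow(mask, 3)
    # The seed grows into a 3x3 block.
    assert dilated.sum() == 9
    return dilated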
def imerode(array, nhood, iterations=1, allow_flipped_processing=True):
"""
Erode an array with the provided binary structure.
See documentation for `imerode_imdilate_cv2`.
"""
return imerode_imdilate_cv2(array, nhood, iterations, allow_flipped_processing, erode=True)
def imdilate(array, nhood, iterations=1, allow_flipped_processing=True):
"""
Dilate an array with the provided binary structure.
See documentation for `imerode_imdilate_cv2`.
"""
return imerode_imdilate_cv2(array, nhood, iterations, allow_flipped_processing, erode=False)
def imerode_imdilate_cv2(array, nhood, iterations=1,
allow_flipped_processing=True, erode=True):
"""
Erode/Dilate an array with the provided binary structure.
Parameters
----------
array : ndarray, 2D
Array to erode/dilate.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
of structure (of ones) to be used as structure for erosion/dilation.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for erosion/dilation.
iterations : positive int
Number of times to perform the erosion/dilation.
allow_flipped_processing : bool
If True and at least one of `structure`'s side lengths is even,
rotate both `array` and `structure` 180 degrees before performing erosion/dilation,
then rotate the result array 180 degrees before returning.
The sole purpose of this option is to allow this function
to most closely replicate the corresponding MATLAB array method [3,4].
erode : bool
If True, perform erosion.
If False, perform dilation.
Returns
-------
array_ed : ndarray, same shape and type as `array`
Array containing the erosion/dilation of the input array by the structure.
Notes
-----
This wrapper function for OpenCV's `erode`/`dilate` function [1,2] is meant to replicate
MATLAB's `imerode`/`imdilate` function [3,4].
In preliminary testing, this method reproduces results of the MATLAB function
for both binary and greyscale erosion/dilation, with the exception of some edge pixels
when `structure` with a False/zero center element is used in grey erosion/dilation,
which produces nonsensical values where proper erosion/dilation cannot be detected
by this method as well as MATLAB's function.
References
----------
.. [1] https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#erode
.. [2] https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#dilate
.. [3] https://www.mathworks.com/help/images/ref/imerode.html
.. [4] https://www.mathworks.com/help/images/ref/imdilate.html
"""
structure = None
if type(nhood) == int:
structure = np.ones((nhood, nhood), dtype=np.uint8)
elif type(nhood) == tuple:
structure = np.ones(nhood, dtype=np.uint8)
elif type(nhood) == np.ndarray:
structure = nhood
if structure.dtype != np.bool and np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`nhood` structure contains values other than 0 and 1")
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
cv2_dtypes = [np.uint8, np.int16, np.uint16, np.float32, np.float64]
# Check array data type.
array_dtype_in = array.dtype
if array_dtype_in not in cv2_dtypes:
dtype_errmsg = ("Fast erosion/dilation method only allows array dtypes {}, "
"but was {}".format([str(d(1).dtype) for d in cv2_dtypes], array_dtype_in))
# Only cast to a higher data type for safety.
array_dtype_cast = None
if array_dtype_in == np.bool:
array_dtype_cast = np.uint8
elif array_dtype_in == np.int8:
array_dtype_cast = np.int16
elif array_dtype_in == np.float16:
array_dtype_cast = np.float32
if array_dtype_cast is not None:
# warn(dtype_errmsg + "\n-> Casting array from {} to {} for processing".format(
# array_dtype_in, array_dtype_cast(1).dtype))
array = array.astype(array_dtype_cast)
if array_dtype_cast is None:
# Fall back to old (slower) imdilate/imerode functions.
warn(dtype_errmsg + "\n-> Falling back to slower methods")
fn = imerode_slow if erode else imdilate_slow
return fn(array, nhood, iterations, allow_flipped_processing=allow_flipped_processing)
# Check structure data type.
if structure.dtype != np.uint8:
warn("Fast erosion/dilation method only allows structure dtype np.uint8, but was {}".format(structure.dtype)
+ "\n-> Casting structure from {} to uint8".format(structure.dtype))
structure = structure.astype(np.uint8)
rotation_flag = False
if erode:
# Erosion settings
fn = cv2.erode
if allow_flipped_processing:
array, structure, rotation_flag = rotate_arrays_if_kernel_has_even_sidelength(array, structure)
else:
# Dilation settings
fn = cv2.dilate
structure = np.rot90(structure, 2)
# Perform erosion/dilation.
array_ed = fn(array, structure, iterations=iterations, borderType=cv2.BORDER_REPLICATE)
if array_ed.dtype != array_dtype_in:
array_ed = array_ed.astype(array_dtype_in)
return fix_array_if_rotation_was_applied(array_ed, rotation_flag)
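# Example usage (illustrative sketch) for the fast `imerode`/`imdilate` wrappers
# built on imerode_imdilate_cv2; values are arbitrary test data.
def _example_imerode_imdilate():
    import numpy as np  # local import so the sketch stands alone
    mask = np.zeros((9, 9), dtype=bool)
    mask[3:6, 3:6] = True
    centre = imerode(mask, 3)    # the 3x3 block erodes down to its centre pixel
    grown = imdilate(centre, 3)  # ...and dilates back to a 3x3 block
    assert centre.sum() == 1 and grown.sum() == 9
    return grown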
def bwareaopen(array, size_tolerance, connectivity=8, in_place=False):
"""
Remove connected components smaller than the specified size.
This is a wrapper function for Scikit-Image's `morphology.remove_small_objects` [1]
meant to replicate MATLAB's `bwareaopen` function [2] for boolean input `array`.
Parameters
----------
See documentation for `skimage.morphology.remove_small_objects` [1], where...
array : ndarray, 2D
Equivalent to `ar`.
size_tolerance : positive int
Equivalent to `min_size`.
connectivity : int, 4 or 8
For grouping pixels into connected components...
If 4, only pixels with touching edges are considered connected.
If 8, pixels with touching edges and corners are considered connected.
in_place : bool
Equivalent to `in_place`.
Returns
-------
bwareaopen : ndarray, same shape and type as `array`
The input array with small connected components removed.
References
----------
.. [1] http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.remove_small_objects
.. [2] https://www.mathworks.com/help/images/ref/bwareaopen.html
"""
return sk_morphology.remove_small_objects(array, size_tolerance, connectivity//4, in_place)
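# Example usage (illustrative sketch) for `bwareaopen`: removing a 1-pixel
# speckle while keeping a larger component; values are arbitrary test data.
def _example_bwareaopen():
    import numpy as np  # local import so the sketch stands alone
    mask = np.zeros((10, 10), dtype=bool)
    mask[1, 1] = True        # 1-pixel speckle
    mask[5:9, 5:9] = True    # 16-pixel component
    cleaned = bwareaopen(mask, 10)
    assert cleaned.sum() == 16
    return cleaned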
def bwboundaries_array(array, side='inner', connectivity=8, noholes=False,
grey_boundaries=False, edge_boundaries=True):
"""
Return an array with 1-pixel-wide lines (borders) highlighting
boundaries between areas of differing values in the input array.
Parameters
----------
array : ndarray, 2D
Array from which to extract data value boundaries.
side : str, 'inner' or 'outer'
Between areas of different values in `array`...
If 'inner', boundaries are drawn on the side of the higher value.
If 'outer', boundaries are drawn on the side of the lower value.
connectivity : int, 4 or 8
For drawing boundaries...
If 4, only pixels with touching edges are considered connected.
If 8, pixels with touching edges and corners are considered connected.
noholes : bool
(Option only applies for boolean `array`.)
If True, do not draw boundaries of zero clusters surrounded by ones.
grey_boundaries : bool
If True and a non-boolean array is provided,
boundary pixels in the result array are assigned the same value
as their location counterparts in `array`.
Thus, the value a particular section of boundary border takes on
is determined by `side`. Additionally, if `side='inner'/'outer'`,
the fill value between boundaries is the minimum/maximum value of
`array`, respectively.
If False, return a boolean array with True-valued pixels
highlighting only the location of all boundaries.
edge_boundaries : bool
If True, copy the values of all edge pixels in `array` to the
result array.
Returns
-------
array_b : ndarray, same shape as `array`
Either a binary array with 1-px borders of ones highlighting
boundaries between areas of differing values in the input array,
or an array of the same type as the input array with the same
1-px borders taking on the values of the input array at those
locations if grey_boundaries=True.
See Also
--------
imerode
imdilate
Notes
-----
This function utilizes local `imerode` and `imdilate` functions
as a means to replicate MATLAB's `bwboundaries` function [1].
References
----------
.. [1] https://www.mathworks.com/help/images/ref/bwboundaries.html
"""
side_choices = ('inner', 'outer')
conn_choices = (4, 8)
if side not in side_choices:
raise InvalidArgumentError("`side` must be one of {}, but was '{}'".format(side_choices, side))
if connectivity not in conn_choices:
raise InvalidArgumentError("`connectivity` must be one of {}, but was {}".format(conn_choices, connectivity))
structure = np.zeros((3, 3), dtype=np.uint8)
if connectivity == 8:
structure[:, 1] = 1
structure[1, :] = 1
elif connectivity == 4:
structure[:, :] = 1
if noholes:
array = sp_ndimage.binary_fill_holes(array)
fn = imerode if side == 'inner' else imdilate
# Find boundaries.
array_b_bw = (array != fn(array, structure))
if grey_boundaries and array.dtype != np.bool_:
fillval = np.nanmin(array) if side == 'inner' else np.nanmax(array)
array_b = np.full_like(array, fillval)
array_b[array_b_bw] = array[array_b_bw]
else:
array_b = array_b_bw
# Edge pixels may not be marked as boundary pixels
# by erosion or dilation, and must be added manually.
if edge_boundaries:
array_b[ 0, :] = array[ 0, :]
array_b[-1, :] = array[-1, :]
array_b[ :, 0] = array[ :, 0]
array_b[ :, -1] = array[ :, -1]
return array_b
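# Illustrative usage sketch (hypothetical data, not part of the original module): trace the
# inner boundary of a filled disk; the result is a boolean, one-pixel-wide ring (the image
# border rows/columns are copied from the input, which is False there).
def _example_bwboundaries_array():
    import numpy as np
    yy, xx = np.mgrid[:64, :64]
    disk = (yy - 32) ** 2 + (xx - 32) ** 2 <= 20 ** 2
    return bwboundaries_array(disk, side='inner', connectivity=8, noholes=True)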
def entropyfilt(array, nhood=np.ones((9,9),dtype=np.uint8), bin_bitdepth=8, nbins=None,
scale_from='dtype_max', symmetric_border=True, allow_modify_array=False):
"""
Calculate local entropy of a grayscale image.
If the numerical range of data in `array` is greater than the
provided maximum number of bins (through either `nbins` or
`bin_bitdepth`), data values are scaled down to fit within the
number of bins (as a range of continuous integers) and cast to an
integer data type before entropy calculation.
If `array` data type is floating, values are rounded and cast to
an integer data type regardless, but (if `scale_from='array_range'`)
no pre-scaling is applied if the input data range is within the
maximum number of bins.
Parameters
----------
array : ndarray, 2D
Array for which to calculate local entropy.
nhood : positive int, tuple like `array.shape`, or (ndarray, 2D)
If an integer / tuple, specifies the side length / shape
of structure (of ones) to be used as structure for filter.
If ndarray, must be a binary array with True/1-valued elements
specifying the structure for filter.
bin_bitdepth : None or `1 <= int <= 16`
Scale `array` data to fit in `2^bin_bitdepth` bins for
entropy calculation if range of values is greater than
number of bins.
If None, `nbins` must be provided and this is set by
`bin_bitdepth = math.log(nbins, 2)`.
nbins : None or `2 <= int <= 2^16`
(If not None, overrides `bin_bitdepth`)
Scale `array` data to fit in `nbins` bins for entropy
calculation if the range of values is greater than the
number of bins.
If None, `bin_bitdepth` must be provided.
scale_from : str; 'dtype_max' or 'array_range'
If 'dtype_max' and bitdepth of `array` data type is
greater than `bin_bitdepth`, scale array data to fit
in `nbins` bins by first dividing array values by the
maximum possible value for the input array data type
before multiplying by `nbins`.
If 'array_range' and the range of values in `array` is
greater than `nbins`, scale array data by translating
the minimum array value to zero then dividing values
by the maximum array value before multiplying by `nbins`.
symmetric_border : bool
If True, pads `array` edges with the reflections of
each edge so that the filter structure (`nhood`) picks up these values when
it hangs off the edges of `array` during entropy
calculations. Mimics MATLAB's `entropyfilt` function [2].
If False, only values within the bounds of `array` are
considered during entropy calculations.
allow_modify_array : bool
(Option only applies for floating `array`.)
Allow modifying values in `array` to save some memory
allocation in the case that rounding of data values is
performed on the input array itself.
Returns
-------
array_filt : ndarray of float64, same shape as `array`
Array containing the entropy-filtered image.
Notes
-----
This function utilizes Scikit-Image's `filters.rank.entropy`
function [1] as a means to replicate MATLAB's `entropyfilt`
function [2].
Kernel-wise entropy calculations are done as described in
MATLAB's documentation for its `entropy` function [3].
Scikit-Image's entropy function accepts only uint8 and uint16
arrays, but since it appears uint16 processes faster than
uint8, array copy is cast to uint16 before being sent in.
References
----------
.. [1] http://scikit-image.org/docs/dev/api/skimage.filters.rank.html?highlight=entropy#skimage.filters.rank.entropy
http://scikit-image.org/docs/dev/auto_examples/filters/plot_entropy.html
.. [2] https://www.mathworks.com/help/images/ref/entropyfilt.html
.. [3] https://www.mathworks.com/help/images/ref/entropy.html
"""
structure = None
if type(nhood) == int:
structure = np.ones((nhood, nhood), dtype=np.uint8)
elif type(nhood) == tuple:
structure = np.ones(nhood, dtype=np.uint8)
elif type(nhood) == np.ndarray:
structure = nhood
if structure.dtype != np.bool_ and np.any(~np.logical_or(structure == 0, structure == 1)):
raise InvalidArgumentError("`nhood` structure contains values other than 0 and 1")
else:
raise InvalidArgumentError("`nhood` type may only be int, tuple, or ndarray, "
"but was {} (nhood={})".format(type(nhood), nhood))
if bin_bitdepth is None and nbins is None:
raise InvalidArgumentError("Either `bin_bitdepth` or `nbins` must be provided")
if nbins is None:
if type(bin_bitdepth) == int and 1 <= bin_bitdepth <= 16:
nbins = 2**bin_bitdepth
else:
raise InvalidArgumentError("`bin_bitdepth` must be an integer between 1 and 16, inclusive, "
"but was {}".format(bin_bitdepth))
else:
if type(nbins) == int and 2 <= nbins <= 2**16:
bin_bitdepth = math.log(nbins, 2)
else:
raise InvalidArgumentError("`nbins` must be an integer between 2 and 2**16, inclusive, "
"but was {}".format(nbins))
# Check array data type.
array_backup = array
array_dtype_in = array.dtype
array_dtype_bitdepth = None
array_dtype_max = None
array_dtype_unsigned = False
if array_dtype_in == np.bool_:
array_dtype_bitdepth = 1
array_dtype_max = 1
elif np.issubdtype(array_dtype_in, np.integer):
array_dtype_bitdepth = int(str(array_dtype_in).split('int')[-1])
array_dtype_max = np.iinfo(array_dtype_in).max
if array_dtype_in.kind == 'u':
array_dtype_unsigned = True
elif np.issubdtype(array_dtype_in, np.floating):
array_dtype_bitdepth = np.inf
array_dtype_max = np.finfo(array_dtype_in).max
else:
raise UnsupportedDataTypeError("array dtype {} is not supported".format(array_dtype_in))
# Create scaled-down version of array according
# to input bin_bitdepth or number of bins nbins.
if nbins is None:
nbins = 2**bin_bitdepth
if scale_from == 'dtype_max' and not array_dtype_unsigned:
# For signed array data types, bin_array_max is a one-sided limit.
# For even values of nbins, let nbins be decreased by one to accommodate.
if nbins == 2:
raise InvalidArgumentError("`nbins` must be >= 3 for signed `array` data type "
"when scale_from='dtype_max'")
bin_array_max = int(np.ceil(nbins/2) - 1)
else:
bin_array_max = nbins - 1
bin_array = None
if array_dtype_bitdepth <= bin_bitdepth:
bin_array = array
elif scale_from == 'dtype_max':
if not np.issubdtype(array_dtype_in, np.floating):
array = array.astype(np.float32) if array_dtype_bitdepth <= 16 else array.astype(np.float64)
bin_array = array / array_dtype_max * bin_array_max
elif scale_from == 'array_range':
array_min = np.nanmin(array)
array_max = np.nanmax(array)
array_range = array_max - array_min
if array_range < nbins:
if array_min >= 0 and array_max <= np.iinfo(np.uint16).max:
bin_array = array
else:
# Since only value *counts*, not numerical values themselves,
# matter to entropy filter, shift array values so that minimum
# is set to zero.
if np.issubdtype(array_dtype_in, np.floating):
array = array_round_proper(array, allow_modify_array)
bin_array = np.empty_like(array, np.uint16)
np.subtract(array, array_min, out=bin_array, casting='unsafe')
else:
# Shift array values so that minimum is set to zero,
# then scale to maximum number of bins.
if not np.issubdtype(array_dtype_in, np.floating):
array = array.astype(np.float32) if array_dtype_bitdepth <= 16 else array.astype(np.float64)
bin_array = (array - array_min) / array_range * bin_array_max
# Convert bin array to uint16.
# This is to both catch integer/floating arrays and
# cast them to an acceptable data type for `entropy`
# function, and because it appears uint16 processes
# faster than uint8.
if bin_array.dtype != np.uint16:
if np.issubdtype(bin_array.dtype, np.floating):
if bin_array is not array_backup:
allow_modify_array = True
bin_array = array_round_proper(bin_array, allow_modify_array)
bin_array = bin_array.astype(np.uint16)
# Edge settings
if symmetric_border:
pady_top, padx_lft = (np.array(structure.shape) - 1) / 2
pady_bot, padx_rht = np.array(structure.shape) / 2
pady_top, padx_lft = int(pady_top), int(padx_lft)
pady_bot, padx_rht = int(pady_bot), int(padx_rht)
bin_array = np.pad(bin_array, ((pady_top, pady_bot), (padx_lft, padx_rht)), 'symmetric')
# Perform entropy filter.
array_filt = entropy(bin_array, structure)
# Crop result if necessary.
if symmetric_border:
pady_bot = -pady_bot if pady_bot != 0 else None
padx_rht = -padx_rht if padx_rht != 0 else None
array_filt = array_filt[pady_top:pady_bot, padx_lft:padx_rht]
return array_filt
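# Illustrative usage sketch (hypothetical data, not part of the original module): local entropy
# of a random uint8 image over the default 9x9 neighborhood; uint8 values already fit within
# the default 8-bit bin range, so no rescaling is triggered before the rank filter runs.
def _example_entropyfilt():
    import numpy as np
    img = np.random.randint(0, 256, size=(128, 128), dtype=np.uint8)
    return entropyfilt(img, nhood=9, bin_bitdepth=8)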
def convex_hull_image_offsets_diamond(ndim):
# TODO: Continue to update this fork of skimage function until
# -t the skimage version includes fast polygon_perimeter function.
offsets = np.zeros((2 * ndim, ndim))
for vertex, (axis, offset) in enumerate(product(range(ndim), (-0.5, 0.5))):
offsets[vertex, axis] = offset
return offsets
def convex_hull_image(image, offset_coordinates=True, tolerance=1e-10):
# TODO: Continue to update this fork of skimage function until
# -t the skimage version includes fast polygon_perimeter function.
"""Compute the convex hull image of a binary image.
The convex hull is the set of pixels included in the smallest convex
polygon that surrounds all white pixels in the input image.
Parameters
----------
image : array
Binary input image. This array is cast to bool before processing.
offset_coordinates : bool, optional
If ``True``, a pixel at coordinate, e.g., (4, 7) will be represented
by coordinates (3.5, 7), (4.5, 7), (4, 6.5), and (4, 7.5). This adds
some "extent" to a pixel when computing the hull.
tolerance : float, optional
Tolerance when determining whether a point is inside the hull. Due
to numerical floating point errors, a tolerance of 0 can result in
some points erroneously being classified as being outside the hull.
Returns
-------
hull : (M, N) array of bool
Binary image with pixels in convex hull set to True.
References
----------
.. [1] http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/
"""
ndim = image.ndim
# In 2D, we do an optimisation by choosing only pixels that are
# the starting or ending pixel of a row or column. This vastly
# limits the number of coordinates to examine for the virtual hull.
if ndim == 2:
coords = sk_morphology._convex_hull.possible_hull(image.astype(np.uint8))
else:
coords = np.transpose(np.nonzero(image))
if offset_coordinates:
# when offsetting, we multiply number of vertices by 2 * ndim.
# therefore, we reduce the number of coordinates by using a
# convex hull on the original set, before offsetting.
hull0 = scipy.spatial.ConvexHull(coords)
coords = hull0.points[hull0.vertices]
# Add a vertex for the middle of each pixel edge
if offset_coordinates:
offsets = convex_hull_image_offsets_diamond(image.ndim)
coords = (coords[:, np.newaxis, :] + offsets).reshape(-1, ndim)
# ERIK'S NOTE: Added the following conditional barrier for speed.
if offset_coordinates or ndim != 2:
# repeated coordinates can *sometimes* cause problems in
# scipy.spatial.ConvexHull, so we remove them.
coords = unique_rows(coords)
# Find the convex hull
hull = scipy.spatial.ConvexHull(coords)
vertices = hull.points[hull.vertices]
# If 2D, use fast Cython function to locate convex hull pixels
if ndim == 2:
# ERIK'S NOTE: Substituted grid_points_in_poly() for the following for speed.
# mask = grid_points_in_poly(image.shape, vertices)
hull_perim_r, hull_perim_c = polygon_perimeter(vertices[:, 0], vertices[:, 1])
mask = np.zeros(image.shape, dtype=np.bool_)
mask[hull_perim_r, hull_perim_c] = True
mask = sp_ndimage.morphology.binary_fill_holes(mask)
else:
gridcoords = np.reshape(np.mgrid[tuple(map(slice, image.shape))],
(ndim, -1))
# A point is in the hull if it satisfies all of the hull's inequalities
coords_in_hull = np.all(hull.equations[:, :ndim].dot(gridcoords) +
hull.equations[:, ndim:] < tolerance, axis=0)
mask = np.reshape(coords_in_hull, image.shape)
return mask
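# Illustrative usage sketch (hypothetical data, not part of the original module): the convex
# hull image of two separated blobs also covers the gap between them, which is what the
# concave hull routines below are designed to erode away.
def _example_convex_hull_image():
    import numpy as np
    img = np.zeros((100, 100), dtype=bool)
    img[10:30, 10:30] = True
    img[70:90, 60:95] = True
    return convex_hull_image(img)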
def concave_hull_traverse_delaunay(boundary_points, convex_hull, vertex_neighbor_vertices,
boundary_res=0):
"""
Traverse paths for convex hull edge erosion to obtain
information necessary for computing the concave hull image.
Triangle edges in the input Delaunay triangulation that are
considered for erosion are cataloged with their edge length
and critical value of erosion tolerance, information to be
used by `concave_hull_image`.
Parameters
----------
boundary_points : ndarray of float, shape (npoints, 2)
Coordinates of all data (nonzero) pixels in the original
image from which the concave hull image is being extracted.
This must be identical to the source coordinates for the
Delaunay triangulation from which `convex_hull` and
`vertex_neighbor_vertices` are derived [1].
convex_hull : ndarray of int, shape (nedges, 2) [2]
Vertices of facets forming the convex hull of the point set.
Each element contains a set of indices into `boundary_points`
used to retrieve coordinates for convex hull edge endpoints.
This must be derived from the same Delaunay triangulation
as `vertex_neighbor_vertices`.
vertex_neighbor_vertices : tuple of two ndarrays of int (indices, indptr) [3]
Used to determine neighboring vertices of vertices.
The indices of neighboring vertices of vertex k are
`indptr[indices[k]:indices[k+1]]`.
This must be derived from the same Delaunay triangulation
as `convex_hull`.
boundary_res : positive int
Minimum x or y *coordinate-wise* distance between two points
in a triangle for their edge to be traversed, thereby allowing
the triangle on the other side of that edge to be considered
for erosion.
If there are regions in the triangulation associated with
a particular minimum point density whose boundaries should
not be breached by erosion (such as regions of "good data"
points taken from an image that have a regular pixel spacing
of 1 coordinate unit), set this parameter to the smallest
value that keeps these areas from being eroded.
The purpose of this is to prevent unnecessary computation.
Returns
-------
alpha_min, alpha_max, edge_info, amin_edges : tuple
Maximum and minimum erosion tolerance, information on
edges considered for erosion (endpoint indices, edge length,
critical value of erosion tolerance, index of third point in
triangle considered for erosion), and a list of edges that
play a direct role in determining the minimum erosion tolerance.
Notes
-----
Edges in the triangulation are considered for erosion based
on side length, and it is from side length that critical values
of erosion tolerance are determined. In code, side length is
referred to as "alpha", with global maximum and minimum lengths
considered for erosion `alpha_max` and `alpha_min`. An edge
that is considered for erosion has a particular local minimum
erosion tolerance `local_mam` ("local max alpha min"), which is
the critical value: an alpha cutoff value (see doc for
`concave_hull_image`::`alpha_cutoff_mode`) less than or equal to
`local_mam` results in this edge being eroded.
It is called local *max* alpha min because the local minimum
erosion tolerance for an edge down one path (from a convex hull
edge) may be less than the local minimum erosion tolerance for
the same edge down a different path (from either the same or a
different convex hull edge). All paths are traversed iteratively
but in a recursive fashion to catalog the maximum of local
minimum erosion tolerance values for each edge considered for
erosion, along with the correct third point in the triangle
that should be eroded if the edge is eroded.
References
----------
.. [1] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.Delaunay.html
.. [2] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.Delaunay.convex_hull.html#scipy.spatial.Delaunay.convex_hull
.. [3] https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.Delaunay.vertex_neighbor_vertices.html#scipy.spatial.Delaunay.vertex_neighbor_vertices
"""
indices, indptr = vertex_neighbor_vertices
alpha_min = boundary_res
alpha_max = boundary_res
edge_info = {}
revisit_edges = deque()
amin_edges = set()
for k1, k2 in convex_hull:
next_edge = (k1, k2) if k1 < k2 else (k2, k1)
p1, p2 = boundary_points[[k1, k2]]
p1_p2 = p2 - p1
next_alpha = np.sqrt(np.sum(np.square(p2 - p1)))
alpha_max = max(alpha_max, next_alpha)
if abs(p1_p2[0]) > boundary_res or abs(p1_p2[1]) > boundary_res:
# Start traversing triangulation
# from this convex hull edge.
k3 = set(indptr[indices[k1]:indices[k1+1]]).intersection(
set(indptr[indices[k2]:indices[k2+1]])).pop()
edge_info[next_edge] = [next_alpha, next_alpha, k3]
revisit_edges.append((next_edge, k3))
while len(revisit_edges) > 0:
# Resume traversal from the "other" edge
# in a traversed triangle.
next_edge, revisit_k3 = revisit_edges.pop()
k1, k2 = next_edge
k3 = revisit_k3
revisit_edge_info = edge_info[next_edge]
local_mam = revisit_edge_info[1]
p1, p2, p3 = boundary_points[[k1, k2, k3]]
while True:
forward_edges = []
edge_1_3 = None
p1_p3 = p3 - p1
edge_2_3 = None
p2_p3 = p3 - p2
# Limit edges traversed, filtering by
# edge lengths longer than boundary_res.
if abs(p1_p3[0]) > boundary_res or abs(p1_p3[1]) > boundary_res:
edge_1_3 = (k1, k3) if k1 < k3 else (k3, k1)
forward_edges.append(edge_1_3)
if abs(p2_p3[0]) > boundary_res or abs(p2_p3[1]) > boundary_res:
edge_2_3 = (k2, k3) if k2 < k3 else (k3, k2)
forward_edges.append(edge_2_3)
next_edge = None
for fedge in forward_edges:
ka, kb = fedge
# Determine the third point in the forward triangle.
kc = set(indptr[indices[ka]:indices[ka+1]]).intersection(
set(indptr[indices[kb]:indices[kb+1]])).difference({k1, k2})
if len(kc) == 0:
# We've arrived at a convex hull edge.
if fedge == edge_1_3:
edge_1_3 = None
else:
edge_2_3 = None
continue
elif len(kc) == 1:
# Only one forward triangle exists.
kc = kc.pop()
else:
# More than one forward triangle exists.
# Choose the triangle with lesser area.
kc_list = list(kc)
kc_temp = None
kc_area_min = float('inf')
pa, pb = boundary_points[[ka, kb]]
for kc in kc_list:
pc = boundary_points[kc]
kc_area = abs(pa[0]*(pb[1]-pc[1]) + pb[0]*(pc[1]-pa[1]) + pc[0]*(pa[1]-pb[1])) / 2.0
if kc_area < kc_area_min:
kc_area_min = kc_area
kc_temp = kc
kc = kc_temp
if fedge not in edge_info:
# Catalog this edge.
fedge_alpha = np.sqrt(np.sum(np.square(p1_p3 if fedge == edge_1_3 else p2_p3)))
fedge_mam = min(local_mam, fedge_alpha)
edge_info[fedge] = [fedge_alpha, fedge_mam, kc]
else:
# Update max alpha min for this edge.
fedge_info = edge_info[fedge]
fedge_alpha, fedge_mam_old, _ = fedge_info
fedge_mam = min(local_mam, fedge_alpha)
if fedge_mam > fedge_mam_old:
# Update third point in this edge's triangle
# with that of the forward triangle.
fedge_info[1] = fedge_mam
fedge_info[2] = kc
else:
# Raise global alpha min to this edge's
# max alpha min value if it is lower,
# and halt traversal on this path.
if fedge_mam > alpha_min:
alpha_min = fedge_mam
amin_edges.add(fedge)
if fedge == edge_1_3:
edge_1_3 = None
else:
edge_2_3 = None
continue
if next_edge is None:
# Traverse forward on this edge.
next_edge = fedge
next_mam = fedge_mam
next_k3 = kc
if next_edge is not None:
# Continue forward traversal on the
# first of two possible forward edges.
# edge_1_3, if passed boundary_res check,
# takes priority over edge_2_3.
if edge_1_3 is not None:
if next_edge[0] == k1:
# p1 = p1
p2 = p3
else:
p2 = p1
p1 = p3
if edge_2_3 is not None:
# Save edge_2_3 along with the third
# point in its forward triangle,
# to be traversed once the current
# traversal reaches its end.
revisit_edges.append((edge_2_3, kc))
else:
if next_edge[0] == k2:
p1 = p2
p2 = p3
else:
p1 = p3
# p2 = p2
k1, k2 = next_edge
k3 = next_k3
p3 = boundary_points[next_k3]
local_mam = next_mam
if revisit_k3:
# The revisited edge was successfully
# traversed, so make sure the third point
# in its forward triangle is set accordingly.
revisit_edge_info[2] = revisit_k3
revisit_k3 = None
else:
break
return alpha_min, alpha_max, edge_info, amin_edges
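# Illustrative usage sketch (not part of the original module): derive the required inputs from
# a scipy Delaunay triangulation of boundary pixel coordinates, exactly as `concave_hull_image`
# does below, then inspect the reported erosion-tolerance range.
def _example_concave_hull_traverse_delaunay(boundary_points):
    tri = scipy.spatial.Delaunay(boundary_points)
    alpha_min, alpha_max, edge_info, amin_edges = concave_hull_traverse_delaunay(
        boundary_points, tri.convex_hull, tri.vertex_neighbor_vertices, boundary_res=2)
    # Each edge_info value is [edge_length, max_alpha_min, third_point_index_of_triangle].
    return alpha_min, alpha_max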
def concave_hull_image(image, concavity,
fill=True, alpha_cutoff_mode='unique',
boundary_res=2, debug=False):
"""
Compute the concave hull image of a binary image.
The concave hull image is the convex hull image with edges
of the hull eroded where the hull can have a tighter fit
to the data without losing any coverage of data pixels
(here, "data" refers to pixels with nonzero value).
Parameters
----------
image : ndarray, 2D
Binary array from which to extract the concave hull image.
concavity : 0 <= float <= 1
How much to erode the edges of the convex hull.
If 0, does not erode the edges of the convex hull,
so what is returned is the convex hull image.
If 1, erodes the edges of the convex hull with the
smallest possible erosion tolerance ("alpha length")
that keeps the concave hull from splitting into
multiple polygons.
fill : bool
Whether or not to fill the concave hull in the returned image.
If True, fill the concave hull.
If False, let the concave hull have a 1-px-wide border.
alpha_cutoff_mode : str; 'mean', 'median', or 'unique'
The method used to determine the erosion threshold.
If 'mean', `alpha_cut = (alpha_min + alpha_max) / 2`.
If 'median', set `alpha_cut` to the median value from
the set of all max alpha min values of edges.
If 'unique', set `alpha_cut` to the median value from
the set of all unique max alpha min (mam) values of edges.
See docs for `concave_hull_traverse_delaunay` for
details on the relationship between "alpha" values
and erosion tolerance.
boundary_res : positive int (2 appears safe for ~10k x ~10k pixel images)
Minimum x or y *coordinate-wise* distance between two points
in a triangle for their edge to be traversed, thereby allowing
the triangle on the other side of that edge to be considered
for erosion.
If there are regions in the triangulation associated with
a particular minimum point density whose boundaries should
not be breached by erosion (such as regions of "good data"
points taken from an image that have a regular pixel spacing
of 1 coordinate unit), set this parameter to the smallest
value that keeps these areas from being eroded.
The purpose of this is to prevent unnecessary computation.
debug : bool or 1 <= int <= 4
(Non-False value requires `matplotlib` and `tifffile` packages.)
Whether or not to interrupt the run of this function
with plots displaying the Delaunay triangulation of
the image as the function progresses through stages:
1 -- Initial triangulation plot
2 -- Triangulation traversal
Plot shows eroded triangles shaded with their
max alpha min (mam) values as calculated by
`concave_hull_traverse_delaunay`. Darker shade
corresponds to a lower mam value; these triangles
require a higher `concavity` value to erode.
Keyboard interaction allows changing `concavity`,
which decides cutoff mam value `alpha_cut`, or
set `alpha_cut` directly to see how erosion changes.
Set `alpha_cut` to 0 to see all triangles considered
for erosion (which is not all triangles in the
triangulation, due to nonzero `boundary_res` argument
to `concave_hull_traverse_delaunay`) shaded.
Red edges show critical edges which, if eroded,
would cause the concave hull to split into multiple
polygons.
3 -- Concave hull boundary points
4 -- Save an image that shows the concave hull with
1-px-wide boundaries around data clusters (following
the inside edge) overlaid.
If True, perform all debugging steps.
Returns
-------
image_cchull : ndarray, 2D, same shape as `image`
Binary image with pixels in the concave hull set to True.
"""
if 0 <= concavity <= 1:
pass
else:
raise InvalidArgumentError("`concavity` must be between 0 and 1, inclusive, "
"but was {}".format(concavity))
if alpha_cutoff_mode not in ('mean', 'median', 'unique'):
raise UnsupportedMethodError("alpha_cutoff_mode='{}'".format(alpha_cutoff_mode))
# Find data coverage boundaries.
data_boundary = bwboundaries_array(image, connectivity=8, noholes=True, side='inner')
boundary_points = np.argwhere(data_boundary)
if debug:
try:
import matplotlib.pyplot as plt
except ImportError as e:
print("matplotlib package is necessary for `debug={}`".format(debug))
raise e
else:
del data_boundary
# Create the Delaunay triangulation.
tri = scipy.spatial.Delaunay(boundary_points)
if debug in (True, 1):
print("[DEBUG] concave_hull_image (1): Initial triangulation plot")
plt.triplot(boundary_points[:, 1], -boundary_points[:, 0], tri.simplices.copy(), lw=1)
plt.plot(boundary_points[:, 1], -boundary_points[:, 0], 'o', ms=1)
plt.show()
# Extract information from triangulation.
hull_convex = tri.convex_hull
vertex_neighbor_vertices = tri.vertex_neighbor_vertices
indices, indptr = vertex_neighbor_vertices
# Retrieve edge information for erosion from triangulation.
alpha_min, alpha_max, edge_info, amin_edges = concave_hull_traverse_delaunay(
boundary_points, hull_convex, vertex_neighbor_vertices, boundary_res
)
# Determine alpha cutoff value.
alpha_cut = None
if concavity == 0 or alpha_min == alpha_max:
alpha_cut = np.inf
elif alpha_cutoff_mode == 'mean':
alpha_cut = (alpha_min + alpha_max) / 2
elif alpha_cutoff_mode in ('median', 'unique'):
mam_allowed = [einfo[1] for einfo in edge_info.values() if einfo[1] > alpha_min]
if not mam_allowed:
warn("Of {} total edges in edge_info, none have mam > alpha_min={}".format(len(edge_info), alpha_min))
alpha_cut = np.inf
else:
if alpha_cutoff_mode == 'unique':
mam_allowed = list(set(mam_allowed))
mam_allowed.sort()
alpha_cut = mam_allowed[-int(np.ceil(len(mam_allowed) * concavity))]
del mam_allowed
# Show triangulation traversal and allow modifying concavity parameter,
# setting alpha_cut based on alpha_cutoff_mode, or modify alpha_cut itself.
if debug in (True, 2):
print("[DEBUG] concave_hull_image (2): Triangulation traversal")
print("alpha_min = {}".format(alpha_min))
print("alpha_max = {}".format(alpha_max))
print("concavity = {}".format(concavity))
print("alpha_cut = {}".format(alpha_cut))
while True:
erode_simplices = []
erode_tris_mam = []
amin_instances = {}
for edge in edge_info:
einfo = edge_info[edge]
if einfo[1] >= alpha_cut:
erode_simplices.append([edge[0], edge[1], einfo[2]])
erode_tris_mam.append(einfo[1])
if einfo[1] == alpha_min:
amin_tris = []
amin_instances[edge] = amin_tris
for k1 in edge:
amin_neighbors = indptr[indices[k1]:indices[k1+1]]
for k2 in amin_neighbors:
possible_k3 = set(indptr[indices[k1]:indices[k1+1]]).intersection(set(indptr[indices[k2]:indices[k2+1]]))
for k3 in possible_k3:
amin_tris.append([k1, k2, k3])
plt.triplot(boundary_points[:, 1], -boundary_points[:, 0], tri.simplices.copy(), lw=1)
if erode_simplices:
plt.triplot(boundary_points[:, 1], -boundary_points[:, 0], erode_simplices, color='black', lw=1)
plt.tripcolor(boundary_points[:, 1], -boundary_points[:, 0], erode_simplices, facecolors=np.array(erode_tris_mam), lw=1)
for amin_edge in amin_edges:
plt.plot(boundary_points[amin_edge, 1], -boundary_points[amin_edge, 0], 'r--', lw=1)
for amin_edge in amin_instances:
amin_tris = amin_instances[amin_edge]
plt.triplot(boundary_points[:, 1], -boundary_points[:, 0], amin_tris, color='red', lw=1)
plt.plot(boundary_points[:, 1], -boundary_points[:, 0], 'o', ms=1)
for hull_edge in hull_convex:
plt.plot(boundary_points[hull_edge, 1], -boundary_points[hull_edge, 0], 'yo', lw=1.5)
for amin_edge in amin_instances:
plt.plot(boundary_points[amin_edge, 1], -boundary_points[amin_edge, 0], 'ro', lw=1.5)
plt.show()
user_input = input("Modify params? (y/n): ")
if user_input.lower() != "y":
break
validInput = False
while not validInput:
try:
user_input = input("concavity = ")
if user_input == "":
break
else:
user_input_num = float(user_input)
if 0 <= user_input_num <= 1:
pass
else:
raise ValueError
concavity = user_input_num
alpha_cut = None
if concavity == 0 or alpha_min == alpha_max:
alpha_cut = np.inf
elif alpha_cutoff_mode == 'mean':
alpha_cut = (alpha_min + alpha_max) / 2
elif alpha_cutoff_mode in ('median', 'unique'):
mam_allowed = [einfo[1] for einfo in edge_info.values() if einfo[1] > alpha_min]
if not mam_allowed:
warn("Of {} total edges in edge_info, none have mam > alpha_min={}".format(len(edge_info), alpha_min))
alpha_cut = np.inf
else:
if alpha_cutoff_mode == 'unique':
mam_allowed = list(set(mam_allowed))
mam_allowed.sort()
alpha_cut = mam_allowed[-int(np.ceil(len(mam_allowed) * concavity))]
del mam_allowed
validInput = True
print("alpha_cut = {}".format(alpha_cut))
except ValueError:
print("concavity must be an int or float between 0 and 1")
while not validInput:
try:
user_input = input("alpha_cut = ")
if user_input == "":
break
else:
user_input_num = float(user_input)
alpha_cut = user_input_num
validInput = True
except ValueError:
print("alpha_cut must be an int or float")
# Gather eroded triangles and triangles containing edges
# with length equal to alpha_min.
erode_tris = []
amin_tris = []
for edge in edge_info:
einfo = edge_info[edge]
if einfo[1] >= alpha_cut:
erode_tri = shapely.geometry.Polygon(boundary_points[[edge[0], edge[1], einfo[2]]])
if not erode_tri.is_valid:
# Assume the three points in the triangle are all on the same line.
print("Erosion triangle is invalid (likely points lie on a line), skipping: {}".format(list(zip(*erode_tri.exterior.coords.xy))))
pass
else:
erode_tris.append(erode_tri)
if einfo[1] == alpha_min:
amin_indices = []
for k1 in edge:
amin_neighbors = indptr[indices[k1]:indices[k1+1]]
for k2 in amin_neighbors:
possible_k3 = set(indptr[indices[k1]:indices[k1+1]]).intersection(set(indptr[indices[k2]:indices[k2+1]]))
for k3 in possible_k3:
amin_indices.extend([k1, k2, k3])
amin_shape = shapely.geometry.MultiPoint(boundary_points[
|
np.unique(amin_indices)
|
numpy.unique
|
'''
Hypothesis 1: ensemble learning with BCC improves performance with no training data.
*Plot* the F1 score with increasing amounts of training data for BCC. Tested methods:
(BASELINE x) majority vote;
(BASELINE y) best individual source model performance
on the target domain;
(BASELINE z) model trained on data from all source domains;
(a) a DNN model trained on the gold standard for the target domain (a ceiling rather than a baseline);
(b) IBCC over the models from source domains, tested on the target domain;
(c) Like (b) but with BSC-seq (note that BSC includes a linear model);
Other things we can consider including later if there is space:
(d) an ensemble containing the DNN from target domain and DNNs from source domains
(do other domain models provide complementary information? is a model trained on
just a few labels still useful, does it add noise, or does it get ignored?).
(e) a shallow linear model trained on the target domain (does a simpler model work better with very
small data?)
(f) linear model added to the ensemble.
'''
import os
import sys
import numpy as np
import json
from baselines.dawid_and_skene import ibccvb
from evaluation.experiment import output_root_dir
from experiments.AAAI2020.base_models import run_base_models
from bayesian_combination import bayesian_combination
from experiments.AAAI2020.helpers import evaluate, Dataset, get_anno_matrix, get_root_dir
reload = True
rerun_aggregators = True
verbose = False
if len(sys.argv) > 1:
uberdomain = sys.argv[1]
else:
uberdomain = 'TEd'
datadir = os.path.join(get_root_dir(), 'data/famulus_%s' % uberdomain)
nclasses = 9 # four types means I and B tags for each type + 1 O tag gives 9 different tags or class labels
base_models = ['bilstm-crf', 'crf']#'crf', 'flair-pos']#, 'flair-ner'] # 'flair' -- not implemented yet, do this if we can't get lample to work
print('Using base models: ' + str(base_models))
#iterate through the types of span we want to predict
for classid in [0, 1, 2, 3]:
resdir = os.path.join(output_root_dir, 'famulus_%s_task1_type%i_basemodels%s' % (uberdomain, classid, '--'.join(base_models)) )
if not os.path.exists(resdir):
os.mkdir(resdir)
predfile = os.path.join(resdir, 'preds.json')
trpredfile = os.path.join(resdir, 'trpreds.json')
resfile = os.path.join(resdir, 'res.json')
if reload and not rerun_aggregators and os.path.exists(predfile):
with open(predfile, 'r') as fh:
preds = json.load(fh)
with open(trpredfile, 'r') as fh:
trpreds = json.load(fh)
with open(resfile, 'r') as fh:
res = json.load(fh)
else:
preds = {}
trpreds = {}
res = {}
for base_model_str in base_models:
for classid2 in [0, 1, 2, 3]:
dataset2 = Dataset(datadir, classid2)
if classid2 == classid:
dataset = dataset2
basepreds, basetrpreds, baseres = run_base_models(dataset2, classid2, uberdomain, base_model_str, reload)
for key in basepreds:
if key == 'a' or key == 'MV' or key == 'baseline_every' or 'ibcc' in key or 'bayesian_combination-seq' in key:
continue # 'a' is the in-domain performance. We don't use the in-domain model as part of an ensemble.
# The others are crap that shouldn't be in there.
if len(basepreds[key]) == 0:
continue # skip any entries that don't really have predictions. Why are they there?
ntest_domains = len(basepreds[key])
if verbose:
print('Processing model type %s, base labeller %s we found %i sets of test results' %
(base_model_str, key, ntest_domains))
new_key = base_model_str + '_' + str(classid2) + '_' + key
preds[new_key] = basepreds[key]
trpreds[new_key] = basetrpreds[key]
# BASELINE X -- majority vote from labellers trained on other domains
preds['agg_MV'] = []
trpreds['agg_MV'] = []
res['agg_MV'] = []
allgold = []
alldocstart = []
for didx, tedomain in enumerate(dataset.domains):
votes, _ = get_anno_matrix(classid, preds, didx)
counts = np.zeros((votes.shape[0], 3))
for c in range(3): # iterate through IOB
counts[:, c] = np.sum(votes==c, axis=1)
mv = np.argmax(counts, axis=1)
preds['agg_MV'].append(mv.tolist())
res_s = evaluate(mv, dataset.tegold[tedomain], dataset.tedocstart[tedomain], f1type='all')
res['agg_MV'].append(res_s)
print(' Spantype %i: %i base labellers, F1 score=%s for majority vote tested on %s' %
(classid, votes.shape[1], str(np.around(res_s, 2)), tedomain))
allgold.append(dataset.tegold[tedomain])
alldocstart.append(dataset.tedocstart[tedomain])
cross_f1 = evaluate(np.concatenate(preds['agg_MV']),
np.concatenate(allgold),
np.concatenate(alldocstart),
f1type='all')
print('*** Spantype %i, F1 score=%s for MV (micro average over domains) ' % (classid, str(cross_f1)) )
print('*** Spantype %i, F1 score=%s for MV (macro average over domains) ' % (classid, str(np.mean(res['agg_MV'], axis=0))) )
# Combination methods: IBCC and BSC-seq. Run the aggregation methods separately for each test domain. ------------------
max_iter = 100
alpha0_factor = 0.1
alpha0_diags = 0.1
beta0_factor = 0.1
if rerun_aggregators or 'agg_ibcc' not in res or not len(res['agg_ibcc']):
preds['agg_ibcc'] = []
res['agg_ibcc'] = []
for didx, tedomain in enumerate(dataset.domains):
N = len(dataset.tetext[tedomain]) # number of test points
# First, put the base labellers into a table.
annos, uniform_priors = get_anno_matrix(classid, preds, didx, include_all=False)
K = annos.shape[1] # number of annotators
# now run IBCC
probs, _ = ibccvb(annos, 3, beta0_factor, alpha0_factor, alpha0_diags, max_iter=max_iter,
uniform_priors=uniform_priors, verbose=True)
agg = np.argmax(probs, axis=1)
aggibcc = agg
preds['agg_ibcc'].append(agg.flatten().tolist())
res_s = evaluate(agg, dataset.tegold[tedomain], dataset.tedocstart[tedomain], f1type='all')
res['agg_ibcc'].append(res_s)
print(' Spantype %i: F1 score=%s for IBCC, tested on %s' % (classid, str(np.around(res_s, 2)), tedomain))
with open(predfile, 'w') as fh:
json.dump(preds, fh)
with open(resfile, 'w') as fh:
json.dump(res, fh)
else:
for didx, tedomain in enumerate(dataset.domains):
print(' Spantype %i: F1 score=%f for IBCC, tested on %s' % (classid, res['agg_ibcc'][didx], tedomain))
cross_f1 = evaluate(np.concatenate(preds['agg_ibcc']),
|
np.concatenate(allgold)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
"""Copy of test_correctness.py
Created on Wed Sep 23 21:15:18 2015
@author: akusok
"""
from unittest import TestCase
import numpy as np
import tempfile
import os
from hpelm import HPELM
from hpelm.modules.hdf5_tools import make_hdf5
# noinspection PyArgumentList
class TestCorrectness(TestCase):
tfiles = None
def makeh5(self, data):
f, fname = tempfile.mkstemp()
os.close(f)
self.tfiles.append(fname)
make_hdf5(data, fname)
return fname
def makefile(self):
f, fname = tempfile.mkstemp()
os.close(f)
os.remove(fname)
self.tfiles.append(fname)
return fname
def setUp(self):
self.tfiles = []
def tearDown(self):
for fname in self.tfiles:
os.remove(fname)
def test_NonNumpyInputs_RaiseError(self):
X = np.array([['1', '2'], ['3', '4'], ['5', '6']])
T = self.makeh5(np.array([[1], [2], [3]]))
hpelm = HPELM(2, 1)
hpelm.add_neurons(1, "lin")
self.assertRaises(AssertionError, hpelm.train, X, T)
def test_NonNumpyTargets_RaiseError(self):
X = self.makeh5(np.array([[1, 2], [3, 4], [5, 6]]))
T = np.array([['a'], ['b'], ['c']])
hpelm = HPELM(2, 1)
hpelm.add_neurons(1, "lin")
self.assertRaises(AssertionError, hpelm.train, X, T)
def test_OneDimensionInputs_RunsCorrectly(self):
X = self.makeh5(np.array([1, 2, 3]))
T = self.makeh5(np.array([[1], [2], [3]]))
hpelm = HPELM(1, 1)
hpelm.add_neurons(1, "lin")
hpelm.train(X, T)
def test_OneDimensionTargets_RunsCorrectly(self):
X = self.makeh5(np.array([1, 2, 3]))
T = self.makeh5(np.array([1, 2, 3]))
hpelm = HPELM(1, 1)
hpelm.add_neurons(1, "lin")
hpelm.train(X, T)
def test_WrongDimensionalityInputs_RaiseError(self):
X = self.makeh5(np.array([[1, 2], [3, 4], [5, 6]]))
T = self.makeh5(np.array([[1], [2], [3]]))
hpelm = HPELM(1, 1)
hpelm.add_neurons(1, "lin")
self.assertRaises(AssertionError, hpelm.train, X, T)
def test_WrongDimensionalityTargets_RaiseError(self):
X = self.makeh5(np.array([[1, 2], [3, 4], [5, 6]]))
T = self.makeh5(np.array([[1], [2], [3]]))
hpelm = HPELM(1, 2)
hpelm.add_neurons(1, "lin")
self.assertRaises(AssertionError, hpelm.train, X, T)
def test_ZeroInputs_RunsCorrectly(self):
X = self.makeh5(np.array([[0, 0], [0, 0], [0, 0]]))
T = self.makeh5(np.array([1, 2, 3]))
hpelm = HPELM(2, 1)
hpelm.add_neurons(1, "lin")
hpelm.train(X, T)
def test_OneDimensionTargets2_RunsCorrectly(self):
X = self.makeh5(
|
np.array([[1, 2], [3, 4], [5, 6]])
|
numpy.array
|
"""
@date: 2021/06/19
@description:
"""
import matplotlib.pyplot as plt
import cv2
import numpy as np
from utils.conversion import uv2pixel
from utils.boundary import corners2boundary, corners2boundaries, find_peaks, connect_corners_uv, get_object_cor, \
visibility_corners
def draw_boundary(pano_img, corners: np.ndarray = None, boundary: np.ndarray = None, draw_corners=True, show=False,
step=0.01, length=None, boundary_color=None, marker_color=None, title=None, visible=True):
if marker_color is None:
marker_color = [0, 0, 1]
if boundary_color is None:
boundary_color = [0, 1, 0]
assert corners is not None or boundary is not None, "corners or boundary error"
shape = sorted(pano_img.shape)
assert len(shape) > 1, "pano_img shape error"
w = shape[-1]
h = shape[-2]
pano_img = pano_img.copy()
if (corners is not None and len(corners) > 2) or \
(boundary is not None and len(boundary) > 2):
if isinstance(boundary_color, (list, np.ndarray)):
if boundary is None:
boundary = corners2boundary(corners, step, length, visible)
boundary = uv2pixel(boundary, w, h)
pano_img[boundary[:, 1], boundary[:, 0]] = boundary_color
pano_img[np.clip(boundary[:, 1] + 1, 0, h - 1), boundary[:, 0]] = boundary_color
pano_img[np.clip(boundary[:, 1] - 1, 0, h - 1), boundary[:, 0]] = boundary_color
if pano_img.shape[1] > 512:
pano_img[np.clip(boundary[:, 1] + 1, 0, h - 1), np.clip(boundary[:, 0] + 1, 0, w - 1)] = boundary_color
pano_img[np.clip(boundary[:, 1] + 1, 0, h - 1), np.clip(boundary[:, 0] - 1, 0, w - 1)] = boundary_color
pano_img[np.clip(boundary[:, 1] - 1, 0, h - 1), np.clip(boundary[:, 0] + 1, 0, w - 1)] = boundary_color
pano_img[np.clip(boundary[:, 1] - 1, 0, h - 1), np.clip(boundary[:, 0] - 1, 0, w - 1)] = boundary_color
pano_img[boundary[:, 1], np.clip(boundary[:, 0] + 1, 0, w - 1)] = boundary_color
pano_img[boundary[:, 1],
|
np.clip(boundary[:, 0] - 1, 0, w - 1)
|
numpy.clip
|
"""Utility functions."""
import base64
import os
import subprocess
import cv2
import numpy as np
import torch
from models import MODEL_ZOO
from models import build_generator
from models import parse_gan_type
__all__ = ['postprocess', 'load_generator', 'factorize_weight',
'HtmlPageVisualizer']
CHECKPOINT_DIR = 'checkpoints'
def to_tensor(array):
"""Converts a `numpy.ndarray` to `torch.Tensor`.
Args:
array: The input array to convert.
Returns:
A `torch.Tensor` with dtype `torch.FloatTensor` on cuda device.
"""
assert isinstance(array, np.ndarray)
return torch.from_numpy(array).type(torch.FloatTensor).cuda()
def postprocess(images, min_val=-1.0, max_val=1.0):
"""Post-processes images from `torch.Tensor` to `numpy.ndarray`.
Args:
images: A `torch.Tensor` with shape `NCHW` to process.
min_val: The minimum value of the input tensor. (default: -1.0)
max_val: The maximum value of the input tensor. (default: 1.0)
Returns:
A `numpy.ndarray` with shape `NHWC` and pixel range [0, 255].
"""
assert isinstance(images, torch.Tensor)
images = images.detach().cpu().numpy()
images = (images - min_val) * 255 / (max_val - min_val)
images = np.clip(images + 0.5, 0, 255).astype(np.uint8)
images = images.transpose(0, 2, 3, 1)
return images
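# Illustrative usage sketch (hypothetical tensor, not part of the original module): map a batch
# of generator outputs in [-1, 1] from NCHW float tensors to uint8 NHWC arrays ready for saving.
def _example_postprocess():
    fake = torch.randn(2, 3, 64, 64).clamp_(-1, 1)
    return postprocess(fake, min_val=-1.0, max_val=1.0)  # shape (2, 64, 64, 3), dtype uint8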
def load_generator(model_name):
"""Loads pre-trained generator.
Args:
model_name: Name of the model. Should be a key in `models.MODEL_ZOO`.
Returns:
A generator, which is a `torch.nn.Module`, with pre-trained weights
loaded.
Raises:
KeyError: If the input `model_name` is not in `models.MODEL_ZOO`.
"""
if model_name not in MODEL_ZOO:
raise KeyError(f'Unknown model name `{model_name}`!')
model_config = MODEL_ZOO[model_name].copy()
url = model_config.pop('url') # URL to download model if needed.
# Build generator.
print(f'Building generator for model `{model_name}` ...')
generator = build_generator(**model_config)
print(f'Finish building generator.')
# Load pre-trained weights.
os.makedirs(CHECKPOINT_DIR, exist_ok=True)
checkpoint_path = os.path.join(CHECKPOINT_DIR, model_name + '.pth')
print(f'Loading checkpoint from `{checkpoint_path}` ...')
if not os.path.exists(checkpoint_path):
print(f' Downloading checkpoint from `{url}` ...')
subprocess.call(['wget', '--quiet', '-O', checkpoint_path, url])
print(f' Finish downloading checkpoint.')
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if 'generator_smooth' in checkpoint:
generator.load_state_dict(checkpoint['generator_smooth'])
else:
generator.load_state_dict(checkpoint['generator'])
generator = generator.cuda()
generator.eval()
print(f'Finish loading checkpoint.')
return generator
def parse_indices(obj, min_val=None, max_val=None):
"""Parses indices.
The input can be a list or a tuple or a string, which is either a comma
separated list of numbers 'a, b, c', or a dash separated range 'a - c'.
Space in the string will be ignored.
Args:
obj: The input object to parse indices from.
min_val: If not `None`, this function will check that all indices are
equal to or larger than this value. (default: None)
max_val: If not `None`, this function will check that all indices are
equal to or smaller than this value. (default: None)
Returns:
A list of integers.
Raises:
ValueError: If the input is invalid, i.e., neither a list, a tuple, nor a string.
"""
if obj is None or obj == '':
indices = []
elif isinstance(obj, int):
indices = [obj]
elif isinstance(obj, (list, tuple, np.ndarray)):
indices = list(obj)
elif isinstance(obj, str):
indices = []
splits = obj.replace(' ', '').split(',')
for split in splits:
numbers = list(map(int, split.split('-')))
if len(numbers) == 1:
indices.append(numbers[0])
elif len(numbers) == 2:
indices.extend(list(range(numbers[0], numbers[1] + 1)))
else:
raise ValueError(f'Unable to parse the input!')
else:
raise ValueError(f'Invalid type of input: `{type(obj)}`!')
assert isinstance(indices, list)
indices = sorted(list(set(indices)))
for idx in indices:
assert isinstance(idx, int)
if min_val is not None:
assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
if max_val is not None:
assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'
return indices
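# Illustrative usage sketch (not part of the original module): comma lists and dash ranges both
# collapse to a sorted, de-duplicated list of ints.
def _example_parse_indices():
    assert parse_indices('0, 2, 4-6') == [0, 2, 4, 5, 6]
    assert parse_indices([3, 1, 1], max_val=5) == [1, 3]
    assert parse_indices(None) == []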
def get_weights(generator, layer_idx='all', apply_norm=True):
"""Obtains weight matrix from specified generator and layer selection. Adapted from `factorize_weights`
Args:
generator: Generator to get.
layer_idx: Indices of layers to interpret, especially for StyleGAN and
StyleGAN2. (default: `all`)
Returns:
A weight matrix.
Raises:
ValueError: If the generator type is not supported.
"""
# Get GAN type.
gan_type = parse_gan_type(generator)
# Get layers.
if gan_type in ['stylegan', 'stylegan2']:
if layer_idx == 'all':
layers = list(range(generator.num_layers))
else:
layers = parse_indices(layer_idx,
min_val=0,
max_val=generator.num_layers - 1)
# Factorize semantics from weight.
weights = []
for idx in layers:
layer_name = f'layer{idx}'
if gan_type == 'stylegan2' and idx == generator.num_layers - 1:
layer_name = f'output{idx // 2}'
if gan_type in ['stylegan', 'stylegan2']:
weight = generator.synthesis.__getattr__(layer_name).style.weight.T
weights.append(weight.cpu().detach().numpy())
weight = np.concatenate(weights, axis=1).astype(np.float32)
if apply_norm:
weight = weight / np.linalg.norm(weight, axis=0, keepdims=True) # Q: is normalizing the weight values here necessary?
return weight
def factorize_weight(generator, layer_idx='all'):
"""Factorizes the generator weight to get semantics boundaries.
Args:
generator: Generator to factorize.
layer_idx: Indices of layers to interpret, especially for StyleGAN and
StyleGAN2. (default: `all`)
Returns:
A tuple of (layers_to_interpret, semantic_boundaries, eigen_values).
Raises:
ValueError: If the generator type is not supported.
"""
# Get GAN type.
gan_type = parse_gan_type(generator)
# Get layers.
if gan_type == 'pggan':
layers = [0]
elif gan_type in ['stylegan', 'stylegan2']:
if layer_idx == 'all':
layers = list(range(generator.num_layers))
else:
layers = parse_indices(layer_idx,
min_val=0,
max_val=generator.num_layers - 1)
# Factorize semantics from weight.
weights = []
for idx in layers:
layer_name = f'layer{idx}'
if gan_type == 'stylegan2' and idx == generator.num_layers - 1:
layer_name = f'output{idx // 2}'
if gan_type == 'pggan':
weight = generator.__getattr__(layer_name).weight
weight = weight.flip(2, 3).permute(1, 0, 2, 3).flatten(1)
elif gan_type in ['stylegan', 'stylegan2']:
weight = generator.synthesis.__getattr__(layer_name).style.weight.T
weights.append(weight.cpu().detach().numpy())
weight =
|
np.concatenate(weights, axis=1)
|
numpy.concatenate
|
import numpy as np
import gym
import keras
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
import scipy.io as sio
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
import time
import os
'''
policy part
'''
policy_list = ['maxG','minSNR','random','cline']
def policy(env, policy, now):
dx = env.SPplacex
dy = env.SPplacey
selected = np.where(env.G != 0)[0]
if policy == 'maxG':
num = np.argmax(env.G)
aimx, aimy = dx[num] - env.placex, dy[num] - env.placey
elif policy == 'minSNR':
num = now
if env.G[num] == 0:
tnum =
|
np.argmin(env.SNR[selected] + 10000)
|
numpy.argmin
|
import random
import numpy
from math import sqrt
# -- The Point class represents points in n-dimensional space
class Point:
# Instance variables
# self.coords is a list of coordinates for this Point
# self.n is the number of dimensions this Point lives in (ie, its space)
# self.reference is an object bound to this Point
# Initialize new Points
def __init__(self, coords, reference=None):
self.coords = coords
self.n = len(coords)
self.reference = reference
# Return a string representation of this Point
def __repr__(self):
return str(self.coords)
# -- The Cluster class represents clusters of points in n-dimensional space
class Cluster:
# Instance variables
# self.points is a list of Points associated with this Cluster
# self.n is the number of dimensions this Cluster's Points live in
# self.centroid is the sample mean Point of this Cluster
def __init__(self, points):
# We forbid empty Clusters (they don't make mathematical sense!)
if len(points) == 0:
raise Exception("ILLEGAL: EMPTY CLUSTER")
self.points = points
self.n = points[0].n
# We also forbid Clusters containing Points in different spaces
# Ie, no Clusters with 2D Points and 3D Points
for p in points:
if p.n != self.n:
raise Exception("ILLEGAL: MULTISPACE CLUSTER")
# Figure out what the centroid of this Cluster should be
self.centroid = self.calculateCentroid()
# Return a string representation of this Cluster
def __repr__(self):
return str(self.points)
# Update function for the K-means algorithm
# Assigns a new list of Points to this Cluster, returns centroid difference
def update(self, points):
old_centroid = self.centroid
self.points = points
self.centroid = self.calculateCentroid()
x1, y1, z1 = old_centroid.coords
x2, y2, z2 = self.centroid.coords
return sqrt(
(x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2)
)
# Calculates the centroid Point - the centroid is the sample mean Point
# (in plain English, the average of all the Points in the Cluster)
def calculateCentroid(self):
centroid_coords = []
# For each coordinate:
for i in range(self.n):
# Take the average across all Points
centroid_coords.append(0.0)
for p in self.points:
centroid_coords[i] = centroid_coords[i] + p.coords[i]
centroid_coords[i] = centroid_coords[i] / len(self.points)
# Return a Point object using the average coordinates
return Point(centroid_coords)
def radiusOfGyration(self):
ptCoords = [x.coords for x in self.points]
delta = numpy.array(ptCoords) - self.centroid.coords
rg = sqrt(sum(numpy.sum(delta * delta, 1)) / float(len(ptCoords)))
return rg
def encapsualtingRadius(self):
ptCoords = [x.coords for x in self.points]
delta = numpy.array(ptCoords) - self.centroid.coords
rM = sqrt(max(
|
numpy.sum(delta * delta, 1)
|
numpy.sum
|
# imports
import numpy as np
import pandas as pd
from scipy.interpolate import griddata, Akima1DInterpolator
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from sklearn.utils.fixes import parse_version
from utils import fit, modify
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import collections, colors, transforms
# formatting
plt.rcParams['legend.title_fontsize'] = 'large'
plt.rcParams['legend.fontsize'] = 'medium'
fontP = FontProperties()
fontP.set_size('medium')
plt.style.use(['science', 'ieee', 'std-colors'])
# plt.style.use(['science', 'scatter'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
def plot_scatter(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False,
figsize=(6, 4), scattersize=2):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
fig, ax = plt.subplots(figsize=figsize)
#cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
for name, df in dficts.items():
# filter dataframe
if min_cm:
df = df[df['cm'] > min_cm]
# sort by x-parameter and get x- and y-arrays for plotting
if xparameter is None or xparameter == 'index':
x = df.index
else:
df = df.sort_values(by=xparameter)
x = df[xparameter]
y = df[yparameter]
if z0:
y = y - z0
# take absolute value
if take_abs:
y = np.abs(y)
# plot
#cs = next(cscatter)
ax.scatter(x, y, s=scattersize)
# ax.set_xlabel(xparameter, fontsize=18)
# ax.set_ylabel(yparameter, fontsize=18)
# ax.grid(alpha=0.125)
# ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper right', fancybox=True, shadow=False)
return fig, ax
def plot_mean(dficts, xparameter='y', yparameter='z', min_cm=0.5, z0=0, take_abs=False, fit_function=None):
"""
Plot all data (xparameter, yparameter) as scatter points with different colors.
:param dficts:
:param xparameter:
:param yparameter:
:param min_cm:
:param z0:
:return:
"""
cscatter = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
cerror = iter(cm.Spectral(np.linspace(0.95, 0.2, len(dficts.keys()))))
fig, ax = plt.subplots(figsize=(7.25, 4.25))
means = []
for name, df in dficts.items():
# filter dataframe
df = df[df['cm'] > min_cm]
y = df[yparameter] - z0
# take absolute value
if take_abs:
y = np.abs(y)
yerr = np.std(y)
y = np.mean(y)
means.append(y)
# plot
cs = next(cscatter)
ax.errorbar(name, y, yerr=yerr * 2, fmt='o', color=cs, ecolor=next(cerror), elinewidth=3, capsize=4, alpha=0.75)
ax.scatter(name, y, color=cs)
ax.set_xlabel(xparameter, fontsize=18)
ax.set_ylabel(yparameter, fontsize=18)
ax.grid(alpha=0.125)
ax.legend(dficts.keys(), prop=fontP, title=r'$dz$ (mm)', loc='upper left', fancybox=True, shadow=False)
# fit the function
if fit_function is not None:
names = list(dficts.keys())
popt, pcov, fit_func = fit.fit(names, means, fit_function=fit_function)
# plot fitted function
xfit = np.linspace(0,
|
np.max(names)
|
numpy.max
|
import pyvista as pv
import sympy as sp
from sympy import Matrix, lambdify
import numpy as np
from PyQt5 import Qt, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from pyvistaqt import QtInteractor, MultiPlotter
import sys, os, time, glob
import trimesh
import pymeshfix as mf
# initiate stored mesh
mesh = pv.PolyData()
class MainWindow(Qt.QMainWindow):
def __init__(self, parent=None, show=True):
Qt.QMainWindow.__init__(self, parent)
# create the frame
self.frame = Qt.QFrame()
vlayout = Qt.QVBoxLayout()
# add the pyvista interactor object
self.plotter = QtInteractor(self.frame)
# self.plotter = MultiPlotter(nrows = 2, ncols = 2)
vlayout.addWidget(self.plotter.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
# simple menu
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('File')
editMenu = mainMenu.addMenu('Edit')
# opening a mesh file
self.open_mesh_action = Qt.QAction('Open Mesh...', self)
self.open_mesh_action.triggered.connect(self.open_mesh)
fileMenu.addAction(self.open_mesh_action)
# set centroid
self.skewed_centroid_action = Qt.QAction('Skewed Centroid', self)
self.skewed_centroid_action.triggered.connect(self.skewed_centroid_check)
fileMenu.addAction(self.skewed_centroid_action)
# save screenshot
self.save_screenshot_action = Qt.QAction('Save Screenshot', self)
self.save_screenshot_action.triggered.connect(self.save_screenshot)
fileMenu.addAction(self.save_screenshot_action)
# exit button
exitButton = Qt.QAction('Exit', self)
exitButton.setShortcut('Ctrl+Q')
exitButton.triggered.connect(self.close)
fileMenu.addAction(exitButton)
# create cubic skeleton (neighbor-selection criteria: min - min)
self.cubic_skeleton_min_min_action = Qt.QAction('Cubic Skeleton (min - min)', self)
self.cubic_skeleton_min_min_action.triggered.connect(self.cubic_skeleton_min_min)
editMenu.addAction(self.cubic_skeleton_min_min_action)
# create cubic skeleton (neighbor-selection criteria: min - max)
self.cubic_skeleton_min_max_action = Qt.QAction('Cubic Skeleton (min - max)', self)
self.cubic_skeleton_min_max_action.triggered.connect(self.cubic_skeleton_min_max)
editMenu.addAction(self.cubic_skeleton_min_max_action)
# create cubic skeleton (neighbor-selection criteria: max - max)
self.cubic_skeleton_max_max_action = Qt.QAction('Cubic Skeleton (max - max)', self)
self.cubic_skeleton_max_max_action.triggered.connect(self.cubic_skeleton_max_max)
editMenu.addAction(self.cubic_skeleton_max_max_action)
# create cubic skeleton (neighbor-selection criteria: max - min)
self.cubic_skeleton_max_min_action = Qt.QAction('Cubic Skeleton (max - min)', self)
self.cubic_skeleton_max_min_action.triggered.connect(self.cubic_skeleton_max_min)
editMenu.addAction(self.cubic_skeleton_max_min_action)
# create cuboid skeleton (neighbor-selection criteria: min - min)
self.cuboid_skeleton_min_min_action = Qt.QAction('Cuboid Skeleton (min - min)', self)
self.cuboid_skeleton_min_min_action.triggered.connect(self.cuboid_skeleton_min_min)
editMenu.addAction(self.cuboid_skeleton_min_min_action)
# create cuboid skeleton (neighbor-selection criteria: min - max)
self.cuboid_skeleton_min_max_action = Qt.QAction('Cuboid Skeleton (min - max)', self)
self.cuboid_skeleton_min_max_action.triggered.connect(self.cuboid_skeleton_min_max)
editMenu.addAction(self.cuboid_skeleton_min_max_action)
# create cuboid skeleton (neighbor-selection criteria: max - max)
self.cuboid_skeleton_max_max_action = Qt.QAction('Cuboid Skeleton (max - max)', self)
self.cuboid_skeleton_max_max_action.triggered.connect(self.cuboid_skeleton_max_max)
editMenu.addAction(self.cuboid_skeleton_max_max_action)
# create cuboid skeleton (neighbor-selection criteria: max - min)
self.cuboid_skeleton_max_min_action = Qt.QAction('Cuboid Skeleton (max - min)', self)
self.cuboid_skeleton_max_min_action.triggered.connect(self.cuboid_skeleton_max_min)
editMenu.addAction(self.cuboid_skeleton_max_min_action)
# Show max cube & raytracing process
self.max_cube_action = Qt.QAction('Max Cube', self)
self.max_cube_action.triggered.connect(self.show_max_cube)
editMenu.addAction(self.max_cube_action)
if show:
self.show()
# # turning plotter background white and axes labels black
# self.plotter.set_background(color = 'w')
# self.plotter.add_axes(interactive=None, line_width=2, color="k", x_color=None, y_color=None, z_color=None, xlabel='X', ylabel='Y', zlabel='Z', labels_off=False, box= None, box_args=None)
self.plotter.add_axes(interactive=None, line_width=2, x_color=None, y_color=None, z_color=None, xlabel='X', ylabel='Y', zlabel='Z', labels_off=False, box= None, box_args=None)
def open_mesh(self):
""" add a mesh to the pyqt frame """
global int_surface, ext_surface, mesh_vol, mesh
global x_range, y_range, z_range, Vol_centroid
global open_mesh_run
global mesh_name
# track pre-processing starting time
open_mesh_start = time.time()
# open file
file_info = QtWidgets.QFileDialog.getOpenFileName()
file_path = file_info[0]
# determine file type and if conversion needed
_, file_name = os.path.split(file_path)
mesh_name, mesh_type = os.path.splitext(file_name)
# read mesh & transform according to principal axes
print(file_path)
pre = trimesh.load(file_path)
orient = pre.principal_inertia_transform
pre = pre.apply_transform(orient)
# a, b = pre.symmetry_section
# print(pre.symmetry_section)
# y = [0,-1,0]
# sym_orient = trimesh.geometry.align_vectors(y, a)
# pre = pre.apply_transform(sym_orient)
post_file_path = 'data/'+ mesh_name + '_oriented.stl'
pre.export(post_file_path)
ext_surface = pv.read(post_file_path)
# scale meshes accordingly
if mesh_name == 'elephant':
ext_surface.points *= 12 # Elephant
elif mesh_name == 'Bracket S24D1':
ext_surface.points /= 10 # Bracket
elif mesh_name == 'knight':
ext_surface.points /= 2 # Knight
# create internal offset
thickness = 0.1 # inches
grid = pv.create_grid(ext_surface).triangulate()
solid = grid.clip_surface(ext_surface)
solid.compute_implicit_distance(ext_surface, inplace=True)
imp_dis_max = max(solid['implicit_distance'])
shell_threshold = imp_dis_max - thickness
shell = solid.clip_scalar('implicit_distance', value = shell_threshold)
int_surface = shell.extract_geometry().triangulate()
meshfix = mf.MeshFix(int_surface)
meshfix.repair(verbose=True)
mesh = solid.clip_surface(int_surface, invert=False)
# print mesh info
print("Mesh Name:", mesh_name)
print("Mesh Type:", mesh_type[1:])
# find mesh centroid and translate the mesh so that's the origin
Vol_centroid = np.array([0,0,0])
self.skewed_centroid_action.setCheckable(True)
# reset plotter
self.reset_plotter(Vol_centroid)
# find the max and min of x,y,z axes of mesh
ranges = mesh.bounds
x_range = abs(ranges[0] - ranges[1])
y_range = abs(ranges[2] - ranges[3])
z_range = abs(ranges[4] - ranges[5])
print("x:", float(format(x_range, ".2f")), "in")
print("y:", float(format(y_range, ".2f")), "in")
print("z:", float(format(z_range, ".2f")), "in")
# mesh volume
mesh_vol = float(format(int_surface.volume, ".2f"))
print("Mesh Volume:", mesh_vol, "in^3")
# track pre-processing ending time & duration
open_mesh_end = time.time()
open_mesh_run = open_mesh_end - open_mesh_start
print("Pre-Processing run time: %g seconds" % (open_mesh_run))
print("Mesh Cells:", mesh.n_cells)
def save_screenshot(self):
''' saves screenshot of current render window'''
screenshot_path = 'screenshot/' + mesh_name + '.png'
self.plotter.screenshot(screenshot_path)
def skewed_centroid_check(self):
''' depending on whether the menu item is checked, the centroid is either skewed
by the 2nd moment of inertia or taken as the origin of the principal axes '''
if self.skewed_centroid_action.isChecked():
Vol_centroid = self.centroid(ext_surface)
else:
Vol_centroid = np.array([0,0,0])
self.reset_plotter(Vol_centroid)
return Vol_centroid
def reset_plotter(self, Vol_centroid):
""" clear plotter of mesh or interactive options """
# clear plotter
self.plotter.clear()
# callback opened mesh
# self.plotter.add_mesh(ext_surface, show_edges = True, color="w", opacity=0.6)
self.plotter.add_mesh(mesh, show_edges = True, color="w", opacity=0.3)
# show origin
self.plotter.add_axes_at_origin(xlabel='X', ylabel='Y', zlabel='Z', line_width=6, labels_off=True)
self.plotter.add_mesh(pv.PolyData(Vol_centroid), color='r', point_size=40, render_points_as_spheres=True)
def centroid(self, mesh):
""" find centroid volumetrically and indicate on graph """
global V
# find the vertices & the vertex indices of each triangular face
V = np.array(mesh.points)
col = len(V)
f_ind = np.array(mesh.faces.reshape((-1,4))[:, 1:4])
# define an arbitrary start point from middle of max and min of X,Y,Z of
# all points: in a convex manifold it falls inside the volume (requires
# segmentation for general application)
start = np.array(mesh.center)
X_start = start[0]
Y_start = start[1]
Z_start = start[2]
# initialize variables
centroids = []
Vol_total = 0
Sum_vol_x = 0
Sum_vol_y = 0
Sum_vol_z = 0
# find centroid from all tetrahedra made with arbitrary center and triangular faces
for i in range(0, col-1, 3):
# find the center of each tetrahedron (average of X,Y,Z of
# 4 vertices, 3 from the triangle, and one arbitrary start point)
X_cent = (X_start + V[f_ind[i,0],0] + V[f_ind[i+1,0],0] + V[f_ind[i+2,0],0]) / 4
Y_cent = (Y_start + V[f_ind[i,1],1] + V[f_ind[i+1,1],1] + V[f_ind[i+2,1],1]) / 4
Z_cent = (Z_start + V[f_ind[i,2],2] + V[f_ind[i+1,2],2] + V[f_ind[i+2,2],2]) / 4
# compute the volume of each tetrahedron
V1 = np.array([V[f_ind[i,0],0], V[f_ind[i,1],1], V[f_ind[i,2],2]])**2 - np.array([X_start, Y_start, Z_start])**2
V2 = np.array([V[f_ind[i+1,0],0], V[f_ind[i+1,1],1], V[f_ind[i+1,2],2]])**2 - np.array([V[f_ind[i,0],0], V[f_ind[i,1],1], V[f_ind[i,2],2]])**2
V3 = np.array([V[f_ind[i+2,0],0], V[f_ind[i+2,1],1], V[f_ind[i+2,2],2]])**2 - np.array([V[f_ind[i+1,0],0], V[f_ind[i+1,1],1], V[f_ind[i+1,2],2]])**2
V1 = V1.reshape((-1,1))
V2 = V2.reshape((-1,1))
V3 = V3.reshape((-1,1))
Vol = abs(np.linalg.det(np.hstack([V1, V2, V3]))) / 6
# tally up each cycle
Vol_total = Vol_total + Vol
Sum_vol_x = Sum_vol_x + Vol * X_cent
Sum_vol_y = Sum_vol_y + Vol * Y_cent
Sum_vol_z = Sum_vol_z + Vol * Z_cent
centroids.append([X_cent,Y_cent,Z_cent])
# find & show centroid
centroids = np.asarray(centroids)
Vol_centroid = [Sum_vol_x, Sum_vol_y, Sum_vol_z] / Vol_total
return Vol_centroid
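# Reference note (a sketch, not the code above): the standard signed-volume formula
# for a tetrahedron with vertices P0..P3 given as length-3 NumPy arrays is
#   vol = abs(np.linalg.det(np.column_stack([P1 - P0, P2 - P0, P3 - P0]))) / 6.0
# and the solid's centroid is the volume-weighted average of the per-tetrahedron
# centroids, which is what the accumulation loop above builds up.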
def cubic_skeleton_min_min(self):
''' fill mesh with cubic skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'min'
second_combining_order = 'min'
# track algorithm starting time
cubic_skeleton_start = time.time()
_, max_normal, _ = self.max_cube_ray(int_surface, Vol_centroid)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
self.next_cubes_ray(int_surface, max_normal)
# track algorithm ending time & show duration
cubic_skeleton_end = time.time()
cubic_skeleton_run = cubic_skeleton_end - cubic_skeleton_start
print("Partition run time: %g seconds" % (cubic_skeleton_run))
# show total run time
total_run = open_mesh_run + cubic_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cubic_skeleton_min_max(self):
''' fill mesh with cubic skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'min'
second_combining_order = 'max'
# track algorithm starting time
cubic_skeleton_start = time.time()
_, max_normal, _ = self.max_cube_ray(int_surface, Vol_centroid)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# self.next_cubes_ray(int_surface, max_normal)
# track algorithm ending time & show duration
cubic_skeleton_end = time.time()
cubic_skeleton_run = cubic_skeleton_end - cubic_skeleton_start
print("Partition run time: %g seconds" % (cubic_skeleton_run))
# show total run time
total_run = open_mesh_run + cubic_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cubic_skeleton_max_max(self):
''' fill mesh with cubic skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'max'
second_combining_order = 'max'
# track algorithm starting time
cubic_skeleton_start = time.time()
_, max_normal, _ = self.max_cube_ray(int_surface, Vol_centroid)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# self.next_cubes_ray(int_surface, max_normal)
# track algorithm ending time & show duration
cubic_skeleton_end = time.time()
cubic_skeleton_run = cubic_skeleton_end - cubic_skeleton_start
print("Partition run time: %g seconds" % (cubic_skeleton_run))
# show total run time
total_run = open_mesh_run + cubic_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cubic_skeleton_max_min(self):
''' fill mesh with cubic skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'max'
second_combining_order = 'min'
# track algorithm starting time
cubic_skeleton_start = time.time()
_, max_normal, _ = self.max_cube_ray(int_surface, Vol_centroid)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# self.next_cubes_ray(int_surface, max_normal)
# track algorithm ending time & show duration
cubic_skeleton_end = time.time()
cubic_skeleton_run = cubic_skeleton_end - cubic_skeleton_start
print("Partition run time: %g seconds" % (cubic_skeleton_run))
# show total run time
total_run = open_mesh_run + cubic_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cuboid_skeleton_min_min(self):
''' fill mesh with cuboid skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'min'
second_combining_order = 'min'
# track algorithm starting time
cuboid_skeleton_start = time.time()
_, max_normal, intxn = self.max_cube_ray(int_surface, Vol_centroid, ext = True)
self.max_cuboid(int_surface, intxn, Vol_centroid, max_normal)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# track algorithm ending time & show duration
cuboid_skeleton_end = time.time()
cuboid_skeleton_run = cuboid_skeleton_end - cuboid_skeleton_start
print("Partition run time: %g seconds" % (cuboid_skeleton_run))
# show total run time
total_run = open_mesh_run + cuboid_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cuboid_skeleton_min_max(self):
''' fill mesh with cuboid skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'min'
second_combining_order = 'max'
# track algorithm starting time
cuboid_skeleton_start = time.time()
_, max_normal, intxn = self.max_cube_ray(int_surface, Vol_centroid, ext = True)
self.max_cuboid(int_surface, intxn, Vol_centroid, max_normal)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# track algorithm ending time & show duration
cuboid_skeleton_end = time.time()
cuboid_skeleton_run = cuboid_skeleton_end - cuboid_skeleton_start
print("Partition run time: %g seconds" % (cuboid_skeleton_run))
# show total run time
total_run = open_mesh_run + cuboid_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cuboid_skeleton_max_max(self):
''' fill mesh with cuboid skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'max'
second_combining_order = 'max'
# track algorithm starting time
cuboid_skeleton_start = time.time()
_, max_normal, intxn = self.max_cube_ray(int_surface, Vol_centroid, ext = True)
self.max_cuboid(int_surface, intxn, Vol_centroid, max_normal)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# track algorithm ending time & show duration
cuboid_skeleton_end = time.time()
cuboid_skeleton_run = cuboid_skeleton_end - cuboid_skeleton_start
print("Partition run time: %g seconds" % (cuboid_skeleton_run))
# show total run time
total_run = open_mesh_run + cuboid_skeleton_run
print("Total run time: %g seconds" % (total_run))
def cuboid_skeleton_max_min(self):
''' fill mesh with cuboid skeleton'''
global first_combining_order, second_combining_order
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
# set combining orders
first_combining_order = 'max'
second_combining_order = 'min'
# track algorithm starting time
cuboid_skeleton_start = time.time()
_, max_normal, intxn = self.max_cube_ray(int_surface, Vol_centroid, ext = True)
self.max_cuboid(int_surface, intxn, Vol_centroid, max_normal)
cube, ranked_cube, size_error = self.max_cube_slice(mesh)
# if (size_error == False):
# self.combine_pair_partitions(cube, ranked_cube)
cube = self.combine_pair_partitions(cube, ranked_cube)
self.output(cube)
# track algorithm ending time & show duration
cuboid_skeleton_end = time.time()
cuboid_skeleton_run = cuboid_skeleton_end - cuboid_skeleton_start
print("Partition run time: %g seconds" % (cuboid_skeleton_run))
# show total run time
total_run = open_mesh_run + cuboid_skeleton_run
print("Total run time: %g seconds" % (total_run))
def show_max_cube(self):
# check which centroid is used
Vol_centroid = self.skewed_centroid_check()
_, max_normal, intxn = self.max_cube_ray(int_surface, Vol_centroid, ext = True)
self.max_cuboid(int_surface, intxn, Vol_centroid, max_normal)
def max_cube_ray(self, mesh, Vol_centroid, ext = False):
""" add a maximally inscribed cube within the opened mesh (via ray tracing) """
global r_len
global face_center, max_cube_vol, max_cube, max_cuboid
global max_cube_start, max_cube_end, max_cube_run
global max_cube_V, max_cube_F
# initiate variables
max_cube = 0
max_cuboid = 0
# find mesh vertices
V = np.array(mesh.points)
# find the nearest possible cube vertex from top rays & mesh intersection
top_vert = self.cube_center_ray(mesh, Vol_centroid, 'z')
top = self.furthest_pt(top_vert, Vol_centroid)
# find the nearest possible cube vertex from bottom rays & mesh intersection
bottom_vert = self.cube_center_ray(mesh, Vol_centroid, '-z')
bottom = self.furthest_pt(bottom_vert, Vol_centroid)
# find the nearest possible cube vertex between the two
if top[0] < bottom[0]:
p = top[1]
V = top[2]
else:
p = bottom[1]
V = bottom[2]
# set the furthest ray intersection of the mesh as starting vertex of the cube
intxn = V[p,:]
# create and show max cube
max_cube_V, max_cube_F, max_cube_vol = self.create_cube(intxn, Vol_centroid, np.array([0,0,Vol_centroid[2]]))
max_cube = pv.PolyData(max_cube_V, max_cube_F)
self.plotter.add_mesh(max_cube, show_edges=True, line_width=5, color="orange", opacity = 0.8)
# find & show max cube face centers
cell_center = pv.PolyData(max_cube_V, max_cube_F).cell_centers()
face_center = np.array(cell_center.points)
# self.plotter.add_mesh(cell_center, color="r", point_size=8, render_points_as_spheres=True)
# find max cube face normals
max_normal = pv.PolyData(max_cube_V, max_cube_F).cell_normals
# max cube volume
if (ext == False):
max_cube_vol = float(format(max_cube_vol, ".2f"))
print("Cube Center Volume:", max_cube_vol, "in^3")
return face_center, max_normal, intxn
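# Summary of the routine above: rays are shot from the centroid toward +z and -z,
# the nearer of the two furthest mesh intersections is taken as the seed vertex,
# and the cube is then constructed between that vertex and the centroid.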
def cube_center_ray(self, mesh, start, dir):
''' from starting point shoot out n rays to find vertices of possible cubes '''
global r_num, r_rot, r_dec, r_len
# initialize variables
r_num = 1
r_rot = np.pi/2
r_dec = -2*np.pi/r_num
# use independent arrays here; binding all three names to one shared buffer would
# let each assignment silently overwrite the others
r_dir = np.zeros((4, 3))
r_dir_norm = np.zeros((4, 3))
r_end = np.zeros((4, 3))
r_int = []
ori_r_int = []
l_wid = 10
pt_size = 25
rays = [0] * 4
ints = [0] * 4
# set ray length
r_len = np.sqrt((x_range/2)**2 + (y_range/2)**2 + (z_range/2)**2)
# create rays by rotating the first, which creates the cube with xyz axes as its face normals
for i in range(0, r_num):
for j in range(0, 4):
if (j == 0) and (dir == 'z'):
r_dir[0] = np.array([np.sqrt(2)/2 * np.cos(np.pi/4 + r_dec * i), np.sqrt(2)/2 * np.sin(np.pi/4 + r_dec * i), 0.5])
r_dir_norm[0] = r_dir[0] / np.linalg.norm(r_dir[0])
r_end[0] = start + r_dir_norm[0] * r_len
# set rotation matrix about 'z'
R = self.rot_axis(np.array([0,0,1]))
elif (j == 0) and (dir == '-z'):
r_dir[0] = np.array([np.sqrt(2)/2 * np.cos(np.pi/4 + r_dec * i), np.sqrt(2)/2 * np.sin(np.pi/4 + r_dec * i), -0.5])
r_dir_norm[0] = r_dir[0] / np.linalg.norm(r_dir[0])
r_end[0] = start + r_dir_norm[0] * r_len
# set rotation matrix about '-z'
R = self.rot_axis(np.array([0,0,-1]))
else:
r_end[j] = np.dot(R(j*r_rot), (r_end[0] - start).T).T
r_end[j] = r_end[j] + start
# perform ray trace
r_pts, _ = mesh.ray_trace(start, r_end[j])
# show rays
# rays[j] = self.plotter.add_mesh(pv.Line(Vol_centroid, r_end[j]), color='w', line_width=l_wid)
# ints[j] = self.plotter.add_mesh(pv.PolyData(r_pts[0]), color='w', point_size=pt_size)
# create an array of ray intersections
r_int = np.append(r_int, r_pts[0])
r_int = np.reshape(r_int, (4,3))
_, ori_p, ori_V = self.nearest_pt(r_int, start)
r_int = []
ori_r_int = np.append(ori_r_int, ori_V[ori_p,:])
ori_r_int = np.reshape(ori_r_int, (r_num,3))
return ori_r_int
def nearest_pt(self, vert, starting_pt):
""" find nearest vertex: for segmented convex manifold, a cube with volume centroid as
center and nearest vertex as cube vertex, it falls inside the volume """
# find nearest point from the list of points
c = len(vert)
dist = np.zeros(c)
for i in range(0, c):
dist[i] = np.sqrt((vert[i,0] - starting_pt[0])**2 + (vert[i,1] - starting_pt[1])**2
+ (vert[i,2] - starting_pt[2])**2)
# find index of the nearest point
nearest = min(dist)
p = np.where(dist == nearest)
p = p[0].item()
return nearest, p, vert
def furthest_pt(self, vert, starting_pt):
""" find furthest vertex among the list of nearest vertices """
global p, furthest, dist
# find furthest point from the list of points
c = len(vert)
dist = np.zeros(c)
for i in range(0, c):
dist[i] = np.sqrt((vert[i,0] - starting_pt[0])**2 + (vert[i,1] - starting_pt[1])**2
+ (vert[i,2] - starting_pt[2])**2)
# find index of the furthest point
furthest = max(dist)
p = np.where(dist == furthest)
p = p[0][0]
return furthest, p, vert
def create_cube(self, vertex, starting_pt, axis):
''' create cube from the nearest pt & centroid '''
global edge_length
if (axis[0] == 0) and (axis[1] == 0) and (axis[2] == 0):
axis[2] = 1
vert_trans = np.array([0,0,0])
elif (starting_pt[0] == 0) and (starting_pt[1] == 0) and (starting_pt[2] == 0):
vert_trans = np.array([0,0,0])
else:
# copy so that zeroing components below does not mutate the caller's starting_pt
vert_trans = np.array(starting_pt, dtype=float)
for i in range(0,3):
if round(axis[i]) == 1 or round(axis[i]) == -1:
vert_trans[i] = 0  # assignment; the original '==' comparison had no effect
# find the other 7 vertices
# 3 vertices can be found by rotating the first point 90 degrees 3 times around Z axis of centroid
# 4 vertices can be found by translating the first four vertices twice the half edge
# found from the distance times sin(pi/4)
R = self.rot_axis(axis / np.linalg.norm(axis))
# construct the array of the first 4 vertices
V_1 = np.array(vertex - vert_trans)
V_2 = np.dot(R(np.pi/2), V_1.T).T
V_3 = np.dot(R(np.pi), V_1.T).T
V_4 = np.dot(R(3*np.pi/2), V_1.T).T
# cube_V_start = np.array([V_1, V_2, V_3, V_4])
cube_V_start = np.array([V_1, V_2, V_3, V_4]) + np.ones((4,1)) * [vert_trans]
cube_V_start_center = np.array(pv.PolyData(cube_V_start).center)
# show nearest vertex of cube
V_1 = np.array(vertex)
self.plotter.add_mesh(pv.PolyData(V_1), color="y", point_size=30.0, render_points_as_spheres=True)
# find the translation distance
trans_dis = starting_pt - cube_V_start_center
trans_dir = trans_dis / np.linalg.norm(trans_dis)
dia_dis = np.sqrt((V_1[0]-cube_V_start_center[0])**2 + (V_1[1]-cube_V_start_center[1])**2 + (V_1[2]-cube_V_start_center[2])**2)
half_edge = np.ones((4,1)) * [trans_dir] * dia_dis * np.sin(np.pi/4)
edge_length = dia_dis * np.sin(np.pi/4) * 2
cube_trans = np.asarray(2*half_edge, dtype=np.float64)
# construct the cube
cube_V_end = np.add(cube_V_start, cube_trans)
cube_V = np.vstack((cube_V_start, cube_V_end))
cube_F = np.hstack([[4,0,1,2,3],
[4,0,3,7,4],
[4,0,1,5,4],
[4,1,2,6,5],
[4,2,3,7,6],
[4,4,5,6,7]])
# cube volume
cube_vol = (2 * np.linalg.norm(half_edge[0,:]))**3
return cube_V, cube_F, cube_vol
def rot_axis(self, axis):
''' create a rotational matrix about an arbitrary axis '''
t = sp.Symbol('t')
R_t = Matrix([[sp.cos(t)+axis[0]**2*(1-sp.cos(t)), axis[0]*axis[1]*(1-sp.cos(t))-axis[2]*sp.sin(t), axis[0]*axis[2]*(1-sp.cos(t))+axis[1]*sp.sin(t)],
[axis[1]*axis[0]*(1-sp.cos(t))+axis[2]*sp.sin(t), sp.cos(t)+axis[1]**2*(1-sp.cos(t)), axis[1]*axis[2]*(1-sp.cos(t))-axis[0]*sp.sin(t)],
[axis[2]*axis[0]*(1-sp.cos(t))-axis[1]*sp.sin(t), axis[2]*axis[1]*(1-sp.cos(t))+axis[0]*sp.sin(t), sp.cos(t)+axis[2]**2*(1-sp.cos(t))]])
R = lambdify(t, R_t)
return R
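# The symbolic matrix above is the Rodrigues rotation matrix. An equivalent
# numeric-only sketch (an illustration, not used by this class) for a unit axis k
# and angle t:
#   K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
#   R_num = np.eye(3) + np.sin(t) * K + (1 - np.cos(t)) * (K @ K)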
def max_cuboid(self, mesh, nearest_pt, Vol_centroid, max_normal):
''' extend max cube into maximally inscribed cuboid '''
global face_center, max_cuboid, max_cuboid_vol
# fix max_normals
dir_check = (face_center - Vol_centroid) * 2 / edge_length
x_check = np.abs(np.around(max_normal[0,0] - dir_check[0,0]))
y_check = np.abs(np.around(max_normal[0,1] - dir_check[0,1]))
z_check = np.abs(np.around(max_normal[0,2] - dir_check[0,2]))
print(x_check)
print(y_check)
print(z_check)
print(max_normal)
print(dir_check)
if (x_check == 2) or (y_check == 2) or (z_check == 2):
max_normal = -max_normal
# find the 3 out of 6 normal directions the max cube can be extended towards
ext_dir = np.empty(shape=(3,3))
main_dir = nearest_pt - Vol_centroid
ind = 0
for i in range(0, 6):
if np.dot(main_dir, max_normal[i]) < 0:
ext_dir[ind] = max_normal[i]
ind += 1
# extend faces by shooting a ray from the 4 vertices on each extendable face
# in the direction of its face normal. Find the nearest intersection and
# it would be the limit of extension for that face
for i in range(0, 3):
F_ind = np.where((np.around(max_normal) == np.around(ext_dir[i])).all(axis=1))
F_ind = F_ind[0][0]
faces = np.reshape(max_cube_F, (6,5))
print(faces)
V_ind = faces[F_ind, 1:5]
print(V_ind)
current_V = np.vstack([max_cube_V[V_ind[0]], max_cube_V[V_ind[1]], max_cube_V[V_ind[2]], max_cube_V[V_ind[3]]])
print(current_V)
ext_V = self.ext_ray(mesh, current_V, ext_dir[i])
max_cube_V[V_ind] = ext_V
# create & show extended max cube
max_cuboid = pv.PolyData(max_cube_V, max_cube_F)
self.plotter.add_mesh(max_cuboid, show_edges=True, line_width=5, color="y", opacity = 0.4)
# find face centers of extended max cube
cell_center = max_cuboid.cell_centers()
face_center = np.array(cell_center.points)
# find face normals of the extended max cube
max_normal = max_cuboid.cell_normals
# extended max cube volume
max_cuboid_vol = float(format(max_cuboid.volume, ".2f"))
print("Extended Max Cube Volume:", max_cuboid_vol)
def ext_ray(self, mesh, current_V, ext_dir):
''' shoot rays from vertices of a cube face towards face normal & obtain intersections with mesh '''
# initialize variables
ext_end = current_V + ext_dir * np.ones((4,1)) * r_len
ext_dis = np.zeros(4)
ext_rays = [0] * 6 * r_num
ext_ints = [0] * 6 * r_num
# set raytracing parameters
l_wid = 3
pt_size = 10
# perform ray tracing per extending face vertex
for i in range(0,4):
ext_int, _ = mesh.ray_trace(current_V[i], ext_end[i])
ext_dis[i] = np.sqrt((ext_int[0][0] - current_V[i][0])**2 + (ext_int[0][1] - current_V[i][1])**2
+ (ext_int[0][2] - current_V[i][2])**2)
# show rays
# ext_rays[i] = self.plotter.add_mesh(pv.Line(current_V[i], ext_end[i]), color='w', line_width=l_wid)
# ext_ints[i] = self.plotter.add_mesh(pv.PolyData(ext_int[0]), color='w', point_size=pt_size)
# extend vertices by the shortest intersection distance
ext_V = current_V + ext_dir * np.ones((4,1)) * min(ext_dis)
return ext_V
def search_string_in_file(self, read_obj, file_name, string_to_search, search_start_line = None):
''' search the file for the given string (starting only after search_start_line
is seen, if one is given) and return the i, j, k indexes parsed from the first
matching line '''
# initiate variables
loc = 0
search_start = False
line_content = []
a = []
b = []
c = []
# close to change file access option
read_obj.close()
# Open the file in read only mode
with open(file_name, 'r') as read_obj:
# Read all lines in the file one by one
for line in read_obj:
loc += len(line)
# check if this is the line indicated in search_start_line
# if yes then change search_start to true
if search_start_line != None:
if search_start_line in line:
search_start = True
else:
search_start = True
# if search_start is true and string_to_search is found,
# return the i,j,k indexes as a,b,c
if (search_start == True) and (string_to_search in line):
line_content.append(line.rstrip())
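# note: the parsing below assumes a fixed-width line layout in which the i, j, k
# digits sit at character positions 6, 8 and 10 of the first matching line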
a = int(line_content[0][6])
b = int(line_content[0][8])
c = int(line_content[0][10])
return a, b, c
def max_cube_slice(self, mesh):
''' splitting the mesh into 26 regions (surrounding the cube center) with the 6 faces of cube center '''
global face_center
# creating a 3x3x3 matrix representing the 27 regions
height = np.zeros(3, dtype=object)
side =
completion: np.zeros((3,3), dtype=object)
api: numpy.zeros
import logging
import numpy
from cqcpy import ft_utils, utils
from pyscf.lib import einsum
from . import cc_utils
from . import ft_cc_energy
from . import ft_cc_equations
from . import ft_mp
from . import quadrature
class neq_ccsd(object):
"""Non-equilibrium coupled cluster singles and doubles"""
def __init__(self, sys, T, mu=0.0, tmax=0.0, econv=1e-8,
max_iter=40, damp=0.0, ngr=100, ngi=10, iprint=1):
self.T = T
self.mu = mu
self.econv = econv
self.max_iter = max_iter
self.alpha = damp
self.tmax = tmax
self.ngr = ngr
self.ngi = ngi
self.iprint = iprint
if T > 0.0:
self.beta = 1.0/T
else:
self.beta = 80
if not sys.verify(self.T, self.mu):
raise Exception("Sytem temperature inconsistent with CC temp")
self.sys = sys
self.dia = None
self.dba = None
self.dji = None
self.dai = None
def run(self, T1=None, T2=None):
"""Run CCSD calculation."""
return self._neq_ccsd(T1in=T1, T2in=T2)
def _neq_ccsd(self, T1in=None, T2in=None):
beta = self.beta
tmax = self.tmax
mu = self.mu
# get time-grid
ngr = self.ngr
ngi = self.ngi
tii, gi, Gi = quadrature.simpsons(self.ngi, beta)
tir, gr, Gr = quadrature.midpoint(ngr, tmax)
self.gr = gr
self.Gr = Gr
self.gi = gi
self.Gi = Gi
self.tir = tir
self.tii = tii
# get orbital energies
en = self.sys.g_energies_tot()
# get 0th and 1st order contributions
En = self.sys.const_energy()
g0 = ft_utils.GP0(beta, en, mu)
E0 = ft_mp.mp0(g0) + En
E1 = self.sys.get_mp1()
E01 = E0 + E1
# get scaled integrals
F, Ff, Fb, I = cc_utils.get_ft_integrals_neq(self.sys, en, beta, mu)
# get energy differences
D1 = utils.D1(en, en)
D2 = utils.D2(en, en)
#D1 = en[:,None] - en[None,:]
#D2 = en[:,None,None,None] + en[None,:,None,None] \
# - en[None,None,:,None] - en[None,None,None,:]
# get MP2 T-amplitudes
if T1in is not None and T2in is not None:
T1oldf = T1in[0:ngr]
T1oldb = T1in[ngr:ngr+ngi]
T1oldi = T1in[ngr+ngi:]
T2oldf = T2in[0:ngr]
T2oldb = T2in[ngr:ngr+ngi]
T2oldi = T2in[ngr+ngi:]
else:
T1oldf = -Ff.vo.copy()
T1oldb = -Fb.vo.copy()
Idr = numpy.ones((ngr))
Idi = numpy.ones((ngi))
T1oldi = -numpy.einsum('v,ai->vai', Idi, F.vo)
T2oldb = -numpy.einsum('v,abij->vabij', Idr, I.vvoo)
T2oldf = T2oldb.copy()
T2oldi = -numpy.einsum('v,abij->vabij', Idi, I.vvoo)
T1oldf, T1oldb, T1oldi = quadrature.int_tbar1_keldysh(
ngr, ngi, T1oldf, T1oldb, T1oldi, tir, tii, D1, Gr, Gi)
T2oldf, T2oldb, T2oldi = quadrature.int_tbar2_keldysh(
ngr, ngi, T2oldf, T2oldb, T2oldi, tir, tii, D2, Gr, Gi)
Ei = ft_cc_energy.ft_cc_energy_neq(
T1oldf, T1oldb, T1oldi, T2oldf, T2oldb, T2oldi,
Ff.ov, Fb.ov, F.ov, I.oovv, gr, gi, beta, Qterm=False)
logging.info("MP2 Energy {:.10f}".format(Ei))
converged = False
thresh = self.econv
max_iter = self.max_iter
alpha = self.alpha
i = 0
Eold = 888888888.888888888
nl1 = numpy.linalg.norm(T1oldf) + 0.0001
nl1 += numpy.linalg.norm(T1oldb)
nl1 += numpy.linalg.norm(T1oldi)
nl2 = numpy.linalg.norm(T2oldf) + 0.0001
nl2 += numpy.linalg.norm(T2oldb)
nl2 += numpy.linalg.norm(T2oldi)
while i < max_iter and not converged:
# form new T1 and T2
T1f, T1b, T1i, T2f, T2b, T2i = \
ft_cc_equations.neq_ccsd_stanton(
Ff, Fb, F, I, T1oldf, T1oldb, T1oldi, T2oldf, T2oldb,
T2oldi, D1, D2, tir, tii, ngr, ngi, Gr, Gi)
res1 = numpy.linalg.norm(T1f - T1oldf) / nl1
res1 += numpy.linalg.norm(T1b - T1oldb) / nl1
res1 += numpy.linalg.norm(T1i - T1oldi) / nl1
res2 = numpy.linalg.norm(T2f - T2oldf) / nl2
res2 += numpy.linalg.norm(T2b - T2oldb) / nl2
res2 += numpy.linalg.norm(T2i - T2oldi) / nl2
# damp new T-amplitudes
T1oldf = alpha*T1oldf + (1.0 - alpha)*T1f
T1oldb = alpha*T1oldb + (1.0 - alpha)*T1b
T1oldi = alpha*T1oldi + (1.0 - alpha)*T1i
T2oldf = alpha*T2oldf + (1.0 - alpha)*T2f
T2oldb = alpha*T2oldb + (1.0 - alpha)*T2b
T2oldi = alpha*T2oldi + (1.0 - alpha)*T2i
nl1 = numpy.linalg.norm(T1oldf) + 0.0001
nl1 += numpy.linalg.norm(T1oldb)
nl1 += numpy.linalg.norm(T1oldi)
nl2 = numpy.linalg.norm(T2oldf) + 0.0001
nl2 += numpy.linalg.norm(T2oldb)
nl2 += numpy.linalg.norm(T2oldi)
# compute energy
E = ft_cc_energy.ft_cc_energy_neq(
T1oldf, T1oldb, T1oldi, T2oldf, T2oldb, T2oldi,
Ff.ov, Fb.ov, F.ov, I.oovv, gr, gi, beta)
# determine convergence
logging.info(' %2d (%.8f,%.8f) %.4E' % (i + 1, E.real, E.imag, res1 + res2))
i = i + 1
if numpy.abs(E - Eold) < thresh:
converged = True
Eold = E
if not converged:
logging.warning("NEQ-CCSD did not converge!")
self.T1f = T1oldf
self.T1b = T1oldb
self.T1i = T1oldi
self.T2f = T2oldf
self.T2b = T2oldb
self.T2i = T2oldi
return (Eold + E01, Eold)
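# Note on the damping used above: each iteration linearly mixes the previous and
# the newly computed amplitudes, T_old <- alpha*T_old + (1 - alpha)*T_new. A generic
# sketch of such a damped fixed-point iteration (illustrative only):
#   x = x0
#   for _ in range(max_iter):
#       x_new = g(x)                       # fixed-point map
#       x = alpha*x + (1.0 - alpha)*x_new  # damped update, 0 <= alpha < 1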
def _neq_ccsd_lambda(self, L1=None, L2=None):
"""Solve FT-CCSD Lambda equations."""
beta = self.beta
mu = self.mu
# get time-grid
ngr = self.ngr
ngi = self.ngi
tir = self.tir
tii = self.tii
Gi = self.Gi
gi = self.gi
Gr = self.Gr
gr = self.gr
# get energies and occupation numbers
en = self.sys.g_energies_tot()
#En = self.sys.const_energy()
#g0 = ft_utils.GP0(beta, en, mu)
#E0 = ft_mp.mp0(g0) + En
#E1 = self.sys.get_mp1()
#E01 = E0 + E1
# get scaled integrals
F, Ff, Fb, I = cc_utils.get_ft_integrals_neq(self.sys, en, beta, mu)
# get energy differences
D1 = utils.D1(en, en)
D2 = utils.D2(en, en)
#D1 = en[:,None] - en[None,:]
#D2 = en[:,None,None,None] + en[None,:,None,None] \
# - en[None,None,:,None] - en[None,None,None,:]
if L2 is None:
# Use T^{\dagger} as a guess for Lambda
L1oldf = numpy.transpose(self.T1f, (0, 2, 1))
L1oldb = numpy.transpose(self.T1b, (0, 2, 1))
L1oldi = numpy.transpose(self.T1i, (0, 2, 1))
L2oldf = numpy.transpose(self.T2f, (0, 3, 4, 1, 2))
L2oldb = numpy.transpose(self.T2b, (0, 3, 4, 1, 2))
L2oldi = numpy.transpose(self.T2i, (0, 3, 4, 1, 2))
else:
L2oldf = L2[0]
L2oldb = L2[1]
L2oldi = L2[2]
if L1 is None:
L1oldf = numpy.zeros(self.T1f.shape)
L1oldb = numpy.zeros(self.T1b.shape)
L1oldi = numpy.zeros(self.T1i.shape)
else:
L1oldf = L1[0]
L1oldb = L1[1]
L1oldi = L1[2]
# run lambda iterations
thresh = self.econv
max_iter = self.max_iter
alpha = self.alpha
i = 0
nl1 = numpy.linalg.norm(L1oldf) + 0.0001
nl1 +=
completion: numpy.linalg.norm(L1oldb)
api: numpy.linalg.norm
"""
BootstrapChainLadder implementation.
"""
import functools
import warnings
import numpy as np
import pandas as pd
from numpy.random import RandomState
from scipy import stats
from .base import BaseRangeEstimator, BaseRangeEstimatorResult
class BootstrapChainLadder(BaseRangeEstimator):
"""
The purpose of the bootstrap technique is to estimate the prediction
error of the total reserve estimate and to approximate the predictive
distribution. It is often impractical to obtain the prediction error
using an analytical approach due to the complexity of reserve estimators.
Prediction error comprises two components: process error
and estimation error (Prediction Error = Estimation Error + Process Error).
The estimation error (parameter error) represents the uncertainty in the
parameter estimates given that the model is correctly specified. The
process error is analogous to the variance of a random variable,
representing the uncertainty in future outcomes.
The procedure used to generate the predictive distribution of reserve
estimates is based on Leong et al. Appendix A, assuming the starting point
is a triangle of cumulative losses:
1. Calculate the all-year volume-weighted age-to-age factors.
2. Estimate the fitted historical cumulative paid loss and ALAE
using the latest diagonal of the original triangle and the
age-to-age factors from [1] to un-develop the losses.
3. Calculate the unscaled Pearson residuals, degrees of freedom
and scale parameter.
4. Calculate the adjusted Pearson residuals.
5. Sample with replacement from the adjusted Pearson residuals.
6. Calculate the triangle of sampled incremental losses
(I^ = m + r_adj * sqrt(m)), where I^ = Resampled incremental loss,
m = Incremental fitted loss (from [2]) and r_adj = Adjusted Pearson
residuals.
7. Using the triangle from [6], project future losses using the
Chain Ladder method.
8. Include Process variance by simulating each incremental future
loss from a Gamma distribution with mean = I^ and
variance = I^ * scale parameter.
9. Estimate unpaid losses using the Chain Ladder technique.
10. Repeat for the number of cycles specified.
The collection of projected ultimates for each origin year over all
bootstrap cycles comprises the predictive distribution of reserve
estimates.
Note that the estimate of the distribution of losses assumes
development is complete by the final development period. This is
to avoid the complication associated with modeling a tail factor.
References
----------
1. <NAME>., and <NAME>, (2002), *Stochastic Claims Reserving in General
Insurance*, British Actuarial Journal 8(3): 443-518.
2. CAS Working Party on Quantifying Variability in Reserve Estimates,
*The Analysis and Estimation of Loss & ALAE Variability: A Summary Report*,
Casualty Actuarial Society Forum, Fall 2005.
3. Leong et al., (2012), *Back-Testing the ODP Bootstrap of the Paid
Chain-Ladder Model with Actual Historical Claims Data*, Casualty Actuarial
Society E-Forum.
4. Kirschner, et al., *Two Approaches to Calculating Correlated Reserve
Indications Across Multiple Lines of Business* Appendix III, Variance
Journal, Volume 2/Issue 1.
5. <NAME>., (2016), *Using the ODP Bootstrap Model: A
Practitioner's Guide*, CAS Monograph Series Number 4: Casualty Actuarial
Society, 2016.
"""
def __init__(self, cumtri):
"""
The BootstrapChainLadder class definition.
Parameters
----------
cumtri: triangle.CumTriangle
A cumulative triangle instance.
"""
super().__init__(cumtri=cumtri)
self._dfrlvi = None
self._dof = None
def __call__(self, sims=1000, q=[.75, .95], procdist="gamma", parametric=False,
two_sided=False, interpolation="linear", random_state=None):
"""
``BootstrapChainLadder`` simulation initializer. Generates predictive
distribution of reserve outcomes by origin and in total.
The estimated distribution of losses assumes development is complete
by the final development period in order to avoid the complication of
modeling a tail factor.
Parameters
----------
sims: int
The number of bootstrap simulations to perform. Defaults to 1000.
q: array_like of float or float
Quantile or sequence of quantiles to compute, which must be
between 0 and 1 inclusive.
procdist: str
The distribution used to incorporate process variance. Currently,
this can only be set to "gamma".
two_sided: bool
Whether the two_sided interval should be included in summary
output. For example, if ``two_sided==True`` and ``q=.95``, then
the 2.5th and 97.5th quantiles of the bootstrapped reserve
distribution will be returned [(1 - .95) / 2, (1 + .95) / 2]. When
False, only the specified quantile(s) will be computed. Defaults
to False.
parametric: bool
If True, fit standardized residuals to a normal distribution, and
sample from this parameterized distribution. Otherwise, bootstrap
procedure samples with replacement from the collection of
standardized residuals. Defaults to False.
interpolation: {"linear", "lower", "higher", "midpoint", "nearest"}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j. See
``numpy.quantile`` for more information. Default value is "linear".
random_state: np.random.RandomState
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random.
Returns
-------
BootstrapChainLadderResult
"""
ldfs = self._ldfs(sel="all-weighted")
cldfs = self._cldfs(ldfs=ldfs)
maturity = self.tri.maturity.astype(str)
latest = self.tri.latest_by_origin
trisqrd = self._trisqrd(ldfs=ldfs)
# Obtain reference to BootstrapChainLadder estimates.
tri_fit_cum = self._tri_fit_cum(ldfs=ldfs)
tri_fit_incr = self._tri_fit_incr(fitted_tri_cum=tri_fit_cum)
unscld_residuals = self._resid_us(fitted_tri_incr=tri_fit_incr)
adjust_residuals = self._resid_adj(resid_us=unscld_residuals)
scale_param = self._scale_param(resid_us=unscld_residuals)
sampling_dist = self._sampling_dist(resid_adj=adjust_residuals)
dfsamples = self._bs_samples(
sampling_dist=sampling_dist, fitted_tri_incr=tri_fit_incr,
sims=sims, parametric=parametric,
random_state=random_state
)
dfldfs = self._bs_ldfs(dfsamples=dfsamples)
dfforecasts = self._bs_forecasts(dfsamples, dfldfs, scale_param)
dfprocerror = self._bs_process_error(
dfforecasts=dfforecasts, scale_param=scale_param, procdist=procdist,
random_state=random_state
)
dfreserves = self._bs_reserves(dfprocerror=dfprocerror)
ultimates = dfreserves.groupby(["origin"])["ultimate"].mean()
ultimates[latest.index.min()] = latest[latest.index.min()]
reserves = pd.Series(ultimates - latest, name="reserve")
std_error = self._bs_std_error(dfreserves)
cv = pd.Series(std_error / reserves, name="cv")
qtls, qtlhdrs = self._qtls_formatter(q=q, two_sided=two_sided)
# Compile Chain Ladder point estimate summary.
dfmatur = maturity.to_frame().reset_index(drop=False).rename({"index": "origin"}, axis=1)
dfcldfs = cldfs.to_frame().reset_index(drop=False).rename({"index": "maturity"}, axis=1)
dfcldfs["maturity"] = dfcldfs["maturity"].astype(str)
dfcldfs["emergence"] = 1 / dfcldfs["cldf"]
dfsumm = dfmatur.merge(dfcldfs, on=["maturity"], how="left").set_index("origin")
dfsumm.index.name = None
dflatest = latest.to_frame().rename({"latest_by_origin": "latest"}, axis=1)
dfsumm = functools.reduce(
lambda df1, df2: df1.join(df2),
(dflatest, ultimates.to_frame(), reserves.to_frame(), std_error.to_frame(), cv.to_frame()),
dfsumm
)
# Add "Total" index and set to NaN fields that shouldn't be aggregated.
dfsumm.loc["total"] = dfsumm.sum()
dfsumm.loc["total", "maturity"] = ""
dfsumm.loc["total", ["cldf", "emergence"]] = np.NaN
dfsumm.loc["total", "std_error"] = std_error["total"]
dfsumm.loc["total", "cv"] = std_error["total"] / dfsumm.loc["total", "reserve"]
# Attach quantiles.
dftotal_res = dfreserves.groupby(["sim"], as_index=False).sum()
dftotal_res["origin"] = "total"
dfreserves = pd.concat([dfreserves, dftotal_res])
for ii, jj in zip(qtls, qtlhdrs):
dfsumm[jj] = dfsumm.index.map(
lambda v: np.percentile(
dfreserves[dfreserves.origin == v]["reserve"].values,
100 * ii, interpolation=interpolation
)
)
bcl_result = BootstrapChainLadderResult(
summary=dfsumm, tri=self.tri, ldfs=ldfs, tail=1.0, trisqrd=trisqrd,
reserve_dist=dfreserves, sims_data=dfprocerror, scale_param=scale_param,
dof=self.dof, unscaled_residuals=unscld_residuals,
adjusted_residuals=adjust_residuals,
sampling_dist=None if parametric else sampling_dist,
fitted_tri_cum=tri_fit_cum, fitted_tri_incr=tri_fit_incr, sims=sims,
procdist=procdist, parametric=parametric, q=q, interpolation=interpolation
)
return(bcl_result)
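# A usage sketch of the call interface documented above, assuming `tri` is a
# triangle.CumTriangle instance and the argument values are hypothetical:
#   bcl = BootstrapChainLadder(tri)
#   result = bcl(sims=1000, q=[.75, .95], two_sided=True, random_state=516)
# The returned BootstrapChainLadderResult wraps the summary DataFrame assembled
# above along with the simulated reserve distribution.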
@property
def dfrlvi(self):
"""
Transform triangle's last valid origin index into DataFrame format.
Returns
-------
pd.DataFrame
"""
if self._dfrlvi is None:
df = self.tri.rlvi.reset_index(drop=False)
df = df.rename({"index": "origin", "dev": "l_act_dev"}, axis=1)
self._dfrlvi = df.drop("col_offset", axis=1)
return(self._dfrlvi)
def _get_dfcombined(self, dfsamples, dfldfs):
"""
Merge output of ``self._bs_samples`` and ``self._bs_ldfs``.
Parameters
----------
dfsamples: pd.DataFrame
Output from ``self._bs_samples``.
dfldfs: pd.DataFrame
Output from ``self._bs_ldfs``.
Returns
-------
pd.DataFrame
"""
dfcombined = dfsamples.merge(dfldfs, on=["sim", "dev"], how="left")
dfcombined = dfcombined.merge(self.dfrlvi, on=["origin"], how="left")
return(dfcombined.reset_index(drop=True).sort_values(by=["sim", "origin", "dev"]))
@property
def dof(self):
"""
Return the degrees of freedom.
Returns
-------
int
"""
if self._dof is None:
self._dof = self.tri.nbr_cells - (self.tri.columns.size - 1) + self.tri.index.size
return(self._dof)
def _scale_param(self, resid_us):
"""
Return the scale parameter, which is the sum of the squared unscaled
Pearson residuals over the degrees of freedom. This method is intended
for internal use only.
Parameters
----------
resid_us: pd.DataFrame
Unscaled Pearson residuals, typically output by
``self._resid_us``.
Returns
-------
float
"""
return((resid_us**2).sum().sum() / self.dof)
def _tri_fit_cum(self, ldfs):
"""
Return the cumulative fitted triangle using backwards recursion,
starting with the observed cumulative paid/incurred-to-date along the
latest diagonal.
Parameters
----------
ldfs: pd.Series
Selected ldfs, typically the output of calling ``self._ldfs``.
Returns
-------
pd.DataFrame
"""
fitted_tri_cum = self.tri.copy(deep=True)
for ii in range(fitted_tri_cum.shape[0]):
iterrow = fitted_tri_cum.iloc[ii, :]
if iterrow.isnull().any():
# Find first NaN element in iterrow.
nan_hdr = iterrow.isnull()[iterrow.isnull() == True].index[0]
nan_idx = fitted_tri_cum.columns.tolist().index(nan_hdr)
init_idx = nan_idx - 1
else:
# If here, iterrow is the most mature exposure period.
init_idx = fitted_tri_cum.shape[1] - 1
# Set to NaN any development periods earlier than init_idx.
fitted_tri_cum.iloc[ii, :init_idx] = np.NaN
# Iterate over rows, undeveloping triangle from latest diagonal.
for jj in range(fitted_tri_cum.iloc[ii, :init_idx].size, 0, -1):
prev_col_idx, curr_col_idx, curr_ldf_idx = jj, jj - 1, jj - 1
prev_col_val = fitted_tri_cum.iloc[ii, prev_col_idx]
curr_ldf_val = ldfs.iloc[curr_ldf_idx]
fitted_tri_cum.iloc[ii, curr_col_idx] = (prev_col_val / curr_ldf_val)
return(fitted_tri_cum)
@staticmethod
def _tri_fit_incr(fitted_tri_cum):
"""
Return a fitted incremental triangle.
Parameters
----------
fitted_tri_cum: pd.DataFrame
Typically the output from ``self._tri_fit_cum``.
Returns
-------
pd.DataFrame
"""
tri = fitted_tri_cum.diff(axis=1)
tri.iloc[:, 0] = fitted_tri_cum.iloc[:, 0]
return(tri)
def _resid_us(self, fitted_tri_incr):
"""
Return unscaled Pearson residuals, given by
:math:`r_{us} = \\frac{I - m}{\\sqrt{|m|}}`, where :math:`r_{us}` represents the
unscaled Pearson residuals, :math:`I` the actual incremental losses and :math:`m`
fitted incremental losses.
Parameters
----------
fitted_tri_incr: pd.DataFrame
Typically the output from ``self._tri_fit_incr``.
Returns
-------
pd.DataFrame
"""
# I represents actual incremental losses, m fitted incremental losses.
I = pd.DataFrame(self.tri.to_incr())
m = fitted_tri_incr
return((I - m) / np.sqrt(m.abs()))
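# Worked example of the formula above (illustrative numbers only): for an actual
# incremental loss I = 100 and a fitted value m = 90, the unscaled residual is
# r_us = (100 - 90) / sqrt(90) ~= 1.054.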
def _resid_adj(self, resid_us):
"""
Compute and return the adjusted Pearson residuals, given by
:math:`r_{adj} = \\sqrt{\\frac{N}{dof}} * r_{us}`, where *r_adj*
represents the adjusted Pearson residuals, *N* the number of triangle cells,
*dof* the degrees of freedom and *r_us* the unscaled Pearson residuals.
Parameters
----------
resid_us: pd.DataFrame
Unscaled Pearson residuals, typically output by ``self._resid_us``.
Returns
-------
pd.DataFrame
"""
return(np.sqrt(self.tri.nbr_cells / self.dof) * resid_us)
@staticmethod
def _sampling_dist(resid_adj):
"""
Return ``resid_adj`` as a 1-dimensional array, which will be sampled
from with replacement in order to produce synthetic triangles for
bootstrapping. Any NaN's and 0's present in ``resid_adj`` will not be
present in the returned array.
Parameters
----------
resid_adj: pd.DataFrame
Adjusted Pearson residuals, typically output by ``self._resid_adj``.
Returns
-------
np.ndarray
"""
resid_ = resid_adj.iloc[:-1, :-1].values.ravel()
return(resid_[np.logical_and(~np.isnan(resid_), resid_ != 0)])
def _bs_samples(self, sampling_dist, fitted_tri_incr, sims=1000, parametric=False,
random_state=None):
"""
Return DataFrame containing sims resampled-with-replacement
incremental loss triangles if ``parametric=False``, otherwise
random variates from a normal distribution with mean zero and
variance derived from ``resid_adj``. Randomly generated incremental
data gets cumulated in preparation for ldf calculation in next
step.
Parameters
----------
sampling_dist: np.ndarray
The residuals from the fitted incremental triangle coerced
into a one-dimensional numpy array.
fitted_tri_incr: pd.DataFrame
The incremental triangle fitted using backwards recursion.
Typically the output of ``self._tri_fit_incr``.
sims: int
The number of bootstrap simulations to run. Defaults to 1000.
parametric: bool
If True, fit standardized residuals to a normal distribution, and
sample from the parameterized distribution. Otherwise, bootstrap
procedure proceeds by sampling with replacement from the array
of standardized residuals. Defaults to False.
random_state: np.random.RandomState
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by np.random.
Returns
-------
pd.DataFrame
"""
if random_state is not None:
if isinstance(random_state, int):
prng = RandomState(random_state)
elif isinstance(random_state, RandomState):
prng = random_state
else:
prng = RandomState()
sampling_dist = sampling_dist.flatten()
fti = fitted_tri_incr.reset_index(drop=False).rename({"index": "origin"}, axis=1)
dfm = pd.melt(fti, id_vars=["origin"], var_name="dev", value_name="value")
dfm = dfm[~np.isnan(dfm["value"])].astype({"origin": int, "dev": int, "value": float})
# Make positive any first development period negative values.
min_devp = dfm["dev"].min()
dfm["value"] = np.where(
np.logical_and(dfm["dev"].values == min_devp, dfm["value"].values < 0),
1., dfm["value"].values
)
dfi = self.tri.to_tbl(dropna=False).drop("value", axis=1)
dfp = dfi.merge(dfm, how="outer", on=["origin", "dev"])
dfp["rectype"] = np.where(np.isnan(dfp["value"].values), "forecast", "actual")
dfp = dfp.rename({"value": "incr"}, axis=1)
dfp["incr_sqrt"] = np.sqrt(dfp["incr"].values)
dfrtypes = {"origin": int, "dev": int, "incr": float,
"incr_sqrt": float, "rectype": str}
dfrcols = ["origin", "dev", "incr", "rectype", "incr_sqrt"]
# Replicate dfp sims times then redefine datatypes.
dfr = pd.DataFrame(np.tile(dfp, (sims, 1)), columns=dfrcols).astype(dfrtypes)
# Assign simulation identifier to each record in dfr.
dfr["sim"] = np.divmod(dfr.index, self.tri.shape[0] * self.tri.shape[1])[0]
sample_size = dfr.shape[0]
if parametric:
# Sample random standard normal residuals.
dfr["resid"] = prng.normal(loc=0, scale=sampling_dist.std(ddof=1), size=sample_size)
else:
# Randomly sample residuals from sampling_dist.
dfr["resid"] = prng.choice(sampling_dist, sample_size, replace=True)
# Calculate resampled incremental and cumulative losses.
dfr["resid"] = np.where(dfr["rectype"].values == "forecast", np.NaN, dfr["resid"].values)
dfr = dfr.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True)
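# step 6 of the class docstring: resampled incremental loss I^ = m + r_adj * sqrt(m)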
dfr["samp_incr"] = dfr["incr"].values + dfr["resid"].values * dfr["incr_sqrt"].values
dfr["samp_cum"] = dfr.groupby(["sim", "origin"], as_index=False)["samp_incr"].cumsum()
return(dfr.reset_index(drop=True))
def _bs_ldfs(self, dfsamples):
"""
Compute and return loss development factors for each set of synthetic
loss data.
Parameters
----------
dfsamples: pd.DataFrame
Output from ``self._bs_samples``.
Returns
-------
pd.DataFrame
"""
keepcols = ["sim", "origin", "dev", "samp_cum", "last_origin"]
new_col_names = {"index": "dev", "origin": "last_origin", "row_offset": "origin_offset"}
dflvi = self.tri.clvi.reset_index(drop=False).rename(new_col_names, axis=1)
dfinit = dfsamples.merge(dflvi, how="left", on=["dev"])
dfinit = dfinit[keepcols].sort_values(by=["sim", "dev", "origin"])
df = dfinit[~
completion: np.isnan(dfinit["samp_cum"])
api: numpy.isnan
"""
Test functions for models.formula
"""
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import *
import sys, nose
#automatic conversion with 2to3 makes mistakes in formula, changes
#"if type(self.name) is types.StringType" to "if type(self.name) is bytes"
try:
from scikits.statsmodels.sandbox import formula #, contrast #, utils
from scikits.statsmodels.sandbox import contrast_old as contrast
except:
if sys.version_info[0] >= 3:
raise nose.SkipTest('No tests here')
else:
raise
def setup():
if sys.version_info[0] >= 3:
raise nose.SkipTest('No tests here')
class TestTerm(TestCase):
def test_init(self):
t1 = formula.Term("trivial")
sqr = lambda x: x*x
t2 = formula.Term("not_so_trivial", sqr, "sqr")
self.assertRaises(ValueError, formula.Term, "name", termname=0)
def test_str(self):
t = formula.Term("name")
s = str(t)
def test_add(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 + t2
self.assert_(isinstance(f, formula.Formula))
self.assert_(f.hasterm(t1))
self.assert_(f.hasterm(t2))
def test_mul(self):
t1 = formula.Term("t1")
t2 = formula.Term("t2")
f = t1 * t2
self.assert_(isinstance(f, formula.Formula))
intercept = formula.Term("intercept")
f = t1 * intercept
self.assertEqual(str(f), str(formula.Formula(t1)))
f = intercept * t1
self.assertEqual(str(f), str(formula.Formula(t1)))
class TestFormula(TestCase):
def setUp(self):
self.X = R.standard_normal((40,10))
self.namespace = {}
self.terms = []
for i in range(10):
name = '%s' % string.uppercase[i]
self.namespace[name] = self.X[:,i]
self.terms.append(formula.Term(name))
self.formula = self.terms[0]
for i in range(1, 10):
self.formula += self.terms[i]
self.formula.namespace = self.namespace
def test_namespace(self):
space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
X = formula.Term('X')
Y = formula.Term('Y')
X.namespace = space1
assert_almost_equal(X(), np.arange(50))
Y.namespace = space2
assert_almost_equal(Y(), np.arange(20)*2)
f = X + Y
f.namespace = space1
self.assertEqual(f().shape, (2,50))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space2
self.assertEqual(f().shape, (2,20))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
f.namespace = space3
self.assertEqual(f().shape, (2,30))
assert_almost_equal(Y(), np.arange(20)*2)
assert_almost_equal(X(), np.arange(50))
xx = X**2
self.assertEqual(xx().shape, (50,))
xx.namespace = space3
self.assertEqual(xx().shape, (30,))
xx = X * formula.I
self.assertEqual(xx().shape, (50,))
xx.namespace = space3
self.assertEqual(xx().shape, (30,))
xx = X * X
self.assertEqual(xx.namespace, X.namespace)
xx = X + Y
self.assertEqual(xx.namespace, {})
Y.namespace = {'X':np.arange(50), 'Y':
completion: np.arange(50)
api: numpy.arange
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
from scipy.linalg import inv, sqrtm
import copy
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import RegressorMixin,BaseEstimator,TransformerMixin, defaultdict
from ..preprocessing.robcent import VersatileScaler
from ._sudire_utils import *
from scipy.linalg import orth
import warnings
import statsmodels.robust.scale as srs
from scipy.stats import trim_mean
import dcor as dc
#import Ball
import statsmodels.api as sm
import inspect
from ..ipopt_temp.ipopt_wrapper import minimize_ipopt
from ..ipopt_temp.jacobian import FunctionWithApproxJacobianCentral,FunctionWithApproxJacobian
from ..dicomo._dicomo_utils import *
from ..utils.utils import *
class sudire(_BaseComposition,BaseEstimator,TransformerMixin,RegressorMixin):
"""SUDIRE Sufficient Dimension Reduction
The class allows for Sufficient Dimension Reduction using a variety of
methods. If the method requires optimization of an objective function,
the optimization is done through the Interior Point Optimizer (IPOPT)
algorithm.
Parameters
----------
sudiremeth : str or callable. Any of the built-in methods in this package can be used, and user-defined functions can also be processed. Built-in options are:
save : Sliced Average Variance Estimation
sir : Slices Inverse Regression
dr : Directional Regression
iht : Iterative Hessian Transformations
dcov-sdr : SDR via Distance Covariance
mdd-sdr : SDR via Martingale Difference Divergence.
bcov-sdr : SDR via ball covariance
n_components : int
dimension of the central subspace.
trimming : float
trimming percentage to be entered as pct/100
optimizer_options : dict
with options to pass on to the optimizer.Includes:
max_iter : int
Maximal number of iterations.
tol: float
relative convergence tolerance
constr_viol_tol : float
Desired threshold for the constraint violation.
optimizer_constraints : dict or list of dicts
further constraints to be passed on to the optimizer function.
optimizer_arguments: dict
extra arguments to be passed to the sudiremeth function during optimization.
optimizer_start : numpy array
starting value for the optimization.
center : str
how to center the data; accepted options are those from sprm.preprocessing
center_data : bool
If True, the data will be centered before the dimension reduction
scale_data : bool
if set to False, convergence to correct optimum is not a given. Will throw a warning.
compression : bool
Use internal data compression step for flat data.
n_slices : int
The number of slices for SAVE, SIR, DR
is_distance_mat : bool
if the inputed matrices for x and y are distance matrices.
dmetric : str
distance metric used internally. Defaults to 'euclidean'
fit_ols : bool
if True, an OLS model is fitted after the dimension reduction.
copy : bool
Whether to make a deep copy of the input data or not.
verbose : bool
Set to True prints the iteration number.
return_scaling_object: bool.
If True, the scaling object will be returned after the dimension reduction.
Attributes
----------
Attributes always provided
- `x_loadings_`: Estimated basis of the central subspace
- `x_scores_`: The projected X data.
- `x_loc_`: location estimate for X
- `x_sca_`: scale estimate for X
- `ols_obj`: fitted OLS object
- `y_loc_`: y location estimate
- `y_sca_`: y scale estimate
Attributes created only when corresponding input flags are `True`:
- `whitening_`: whitened data matrix (usually denoted K)
- `scaling_object_`: scaling object from `VersatileScaler`
"""
def __init__(self,
sudiremeth = 'dcov-sdr',
n_components = 2,
trimming = 0,
optimizer_options = {'max_iter': 1000},
optimizer_constraints = None,
optimizer_arguments =None,
optimizer_start = None,
center_data=True,
center='mean',
scale_data=True,
whiten_data=False,
compression = False,
n_slices = 6,
dmetric = 'euclidean',
fit_ols = True,
copy=True,
verbose=True,
return_scaling_object=True):
# Called arguments
self.sudiremeth = sudiremeth
self.n_components = n_components
self.trimming = trimming
self.optimizer_options = optimizer_options
self.optimizer_constraints = optimizer_constraints
self.optimizer_arguments = optimizer_arguments
self.optimizer_start = optimizer_start
self.center = center
self.center_data = center_data
self.scale_data = scale_data
self.whiten_data = whiten_data
self.compression = compression
self.n_slices = n_slices
self.dmetric = dmetric
self.fit_ols = fit_ols
self.copy = copy
self.verbose = verbose
self.return_scaling_object = return_scaling_object
# Other global parameters
self.licenter = ['mean','median']
if not(self.center in self.licenter):
raise(ValueError('Only location estimator classes allowed are: "mean", "median"'))
self.limeths =['sir', 'save', 'dr','dcov-sdr','bcov-sdr','mdd-sdr','phd','iht']
if (not(self.sudiremeth in self.limeths) and not callable(self.sudiremeth)):
raise(ValueError('Only SDR methods allowed are : "sir", "save", "dr", "dcov-sdr","bcov-sdr", "mdd-sdr", "iht","phd"'))
def fit(self,X,y,*args,**kwargs):
"""
Fit a Sufficient Dimension Reduction Model.
Parameters
----------
X : matrix or data frame
Input data of predictors
y : vector or 1D matrix
Response data
args or kwargs :
Further parameters to a user-defined sudiremeth can be passed here.
Returns
-------
self
"""
# Collect optional fit arguments
if 'dmetric' not in kwargs:
dmetric = 'euclidean'
else:
dmetric = kwargs.get('dmetric')
if 'biascorr' not in kwargs:
biascorr = False
else:
biascorr = kwargs.get('biascorr')
if 'flag' not in kwargs:
flag = 'two-block'
else :
flag = kwargs.get('flag')
if 'is_distance_mat' not in kwargs :
is_distance_mat =False
else :
is_distance_mat = kwargs.pop('is_distance_mat')
# Initiate some parameters and data frames
if self.copy:
X0 = copy.deepcopy(X)
self.X0 = X0
else:
X0 = X
X = convert_X_input(X0)
n,p = X0.shape
trimming = self.trimming
# Check dimensions
if self.n_components > min(n,p):
raise(MyException('number of components cannot exceed number of variables or sample size'))
# Pre-processing adjustment if whitening
if self.whiten_data:
self.center_data = True
self.scale_data = False
self.compression = False
print('All results produced are for whitened data')
#Store original X data mean and varcov matrix.
varmatx = np.cov(X,rowvar=0)
meanx = X.mean(axis=0)
N2 = inv(sqrtm(varmatx))
# Data Compression for flat tables if required
if ((p>n) and self.compression):
V,S,U = np.linalg.svd(X.T,full_matrices=False)
X = np.matmul(U.T,np.diag(S))
n,p = X.shape
if (srs.mad(X)==0).any():
warnings.warn('Due to low scales in data, compression would induce zero scales.'
+ '\n' + 'Proceeding without compression.')
dimensions = False
if self.copy:
X = copy.deepcopy(X0)
else:
X = X0
else:
dimensions = True
else:
dimensions = False
# Centring and scaling
# centering :
if self.center_data:
if self.center != 'mean':
centring = VersatileScaler(center=self.center, scale ='None',trimming=self.trimming)
Xs = centring.fit_transform(X0)
mX = centring.col_loc_
sX = centring.col_sca_
else :
Xs = X -trim_mean(X, self.trimming,axis=0)
mX = trim_mean(X, self.trimming,axis=0)
sX = np.sqrt(np.diag(varmatx))
else:
Xs = X0
mX = np.zeros((1,p))
sX = np.ones((1,p))
if self.scale_data:
if self.center=='mean':
scale = 'std'
N2 = inv(sqrtm(varmatx))
Xs = np.matmul(Xs,N2)
elif((self.center=='median') or (self.center=='l1median')):
scale = 'mad'
centring = VersatileScaler(center=self.center,scale=scale,trimming=trimming)
Xs = centring.fit_transform(X0)
mX = centring.col_loc_
sX = centring.col_sca_
else :
raise(MyException('centering options have to be either "mean", "median", or "l1median"'))
else:
scale = 'None'
warnings.warn('Without scaling, convergence to optima is not given')
# Initiate centring object and scale X data
if self.whiten_data:
V,S,U = np.linalg.svd(Xs.T,full_matrices=False)
del U
K = (V/S)[:,:p]
del S
Xs = np.matmul(Xs, K)
Xs *= np.sqrt(p)
# Pre-process y data
ny = y.shape[0]
y = convert_y_input(y)
if len(y.shape) < 2:
y = np.matrix(y).reshape((ny,1))
if ny != n:
raise(MyException('X and y number of rows must agree'))
# Pre-process y data when available
if flag != 'one-block':
ny = y.shape[0]
y = convert_y_input(y)
if len(y.shape) < 2:
y = np.matrix(y).reshape((ny,1))
if ny != n:
raise(MyException('X and y number of rows must agree'))
if self.copy:
y0 = copy.deepcopy(y)
self.y0 = y0
if self.center_data:
ys = y  # don't center y
my = np.mean(y, axis=0)
sy = np.sqrt(np.var(y,axis=0))
else:
ys = y
my = 0
sy = 1
ys = ys.astype('float64')
else:
ys = None
if(self.sudiremeth == 'sir'):
P = SIR(Xs, ys, self.n_slices,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
P = np.matmul(N2,P)
projMat = np.matmul(np.matmul(P,inv(np.matmul(P.T,P))),P.T)
T = np.matmul(self.X0, P)
elif(self.sudiremeth =='save'):
P = SAVE(Xs, ys, self.n_slices,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
P = np.matmul(N2,P)
projMat = np.matmul(np.matmul(P,inv(np.matmul(P.T,P))),P.T)
T = np.matmul(self.X0, P)
elif(self.sudiremeth == 'dr') :
P = DR(Xs, ys, self.n_slices,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
P = np.matmul(N2,P)
projMat = np.matmul(np.matmul(P,inv(np.matmul(P.T,P))),P.T)
T = np.matmul(self.X0, P)
elif(self.sudiremeth == 'phd'):
P = PHD(Xs, ys,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
P = np.matmul(N2,P)
projMat = np.matmul(np.matmul(P,inv(np.matmul(P.T,P))),P.T)
T = np.matmul(self.X0, P)
elif(self.sudiremeth == 'iht'):
P = IHT(Xs, ys,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
P = np.matmul(N2,P)
projMat = np.matmul(np.matmul(P,inv(np.matmul(P.T,P))),P.T)
T = np.matmul(self.X0, P)
else : #SDR obtained through optimization of some function
## choose starting value for DCOV-SDR
if self.optimizer_start is None :
save_start=SAVE(Xs,ys,3,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
save_start = np.matmul(N2,save_start)
sir_start=SIR(Xs, ys,6,self.n_components,self.center_data, self.scale_data)
if self.scale_data:
sir_start = np.matmul(N2,sir_start)
beta_save= orth(save_start)
dc_save = dc.distance_covariance_sqr(np.matmul(Xs,beta_save),y)
beta_sir = orth(sir_start)
dc_sir = dc.distance_covariance_sqr(np.matmul(Xs,beta_sir),y)
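# A minimal usage sketch for the SUDIRE estimator documented above, assuming it
# is importable as `sudire` from the direpack package; the import path, class
# name and toy data are illustrative assumptions rather than a definitive recipe.
def _sudire_usage_sketch():
    import numpy as np
    from direpack import sudire  # assumed import path

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 6))
    # response driven by a two-dimensional projection of X
    y = np.sin(X[:, 0] + X[:, 1]) + 0.1 * rng.normal(size=200)

    sdr = sudire(sudiremeth='dcov-sdr', n_components=2)  # parameters as documented above
    sdr.fit(X, y)
    return sdr.x_loadings_, sdr.x_scores_  # basis of the central subspace, projected data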
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attributions metrics."""
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import attributions
from tensorflow_model_analysis.metrics import metric_specs
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
class AttributionsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
def testHasAttributionsMetrics(self):
specs_with_attributions = metric_specs.specs_from_metrics({
'output_name': [
tf.keras.metrics.MeanSquaredError('mse'),
attributions.TotalAttributions()
]
})
self.assertTrue(
attributions.has_attributions_metrics(specs_with_attributions))
specs_without_attributions = metric_specs.specs_from_metrics([
tf.keras.metrics.MeanSquaredError('mse'),
])
self.assertFalse(
attributions.has_attributions_metrics(specs_without_attributions))
def testMeanAttributions(self):
computation = attributions.MeanAttributions().computations()[-1]
total_attributions_key = metric_types.AttributionsKey(
name='_total_attributions')
example_count_key = metric_types.MetricKey(name='example_count')
metrics = {
total_attributions_key: {
'feature1': 1.0,
'feature2': -2.0
},
example_count_key: 0.5
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([metrics])
| 'ComputeMetric' >> beam.Map(lambda x: ((), computation.result(x))))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_attributions = got[0]
self.assertEqual(got_slice_key, ())
mean_attributions_key = metric_types.AttributionsKey(
name='mean_attributions')
self.assertIn(mean_attributions_key, got_attributions)
self.assertDictElementsAlmostEqual(
got_attributions[mean_attributions_key], {
'feature1': 1.0 / 0.5,
'feature2': -2.0 / 0.5,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testMeanAbsoluteAttributions(self):
computation = attributions.MeanAbsoluteAttributions().computations()[-1]
total_absolute_attributions_key = metric_types.AttributionsKey(
name='_total_absolute_attributions')
example_count_key = metric_types.MetricKey(name='example_count')
metrics = {
total_absolute_attributions_key: {
'feature1': 1.0,
'feature2': 2.0
},
example_count_key: 0.5
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([metrics])
| 'ComputeMetric' >> beam.Map(lambda x: ((), computation.result(x))))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_attributions = got[0]
self.assertEqual(got_slice_key, ())
mean_attributions_key = metric_types.AttributionsKey(
name='mean_absolute_attributions')
self.assertIn(mean_attributions_key, got_attributions)
self.assertDictElementsAlmostEqual(
got_attributions[mean_attributions_key], {
'feature1': 1.0 / 0.5,
'feature2': 2.0 / 0.5,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
{
'testcase_name': 'basic',
'model_name': '',
'output_name': '',
'examples': [{
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'feature1': 1.1,
'feature2': -1.2,
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'feature1': -2.1,
'feature2': 2.2
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'feature1': 3.1,
'feature2': -3.2
}
}],
'expected_values': {
'feature1': (1.1 - 2.1 + 3.1),
'feature2': (-1.2 + 2.2 - 3.2),
},
},
{
'testcase_name': 'multi-model',
'model_name': 'model',
'output_name': '',
'examples': [{
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'feature1': 11.1,
'feature2': -11.2
},
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'feature1': -22.1,
'feature2': 22.2
},
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'feature1': 33.1,
'feature2': -33.2
},
}
}],
'expected_values': {
'feature1': (11.1 - 22.1 + 33.1),
'feature2': (-11.2 + 22.2 - 33.2),
},
},
{
'testcase_name': 'multi-model-multi-output',
'model_name': 'model',
'output_name': 'output',
'examples': [{
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'output': {
'feature1': 111.1,
'feature2': -111.2
},
},
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'output': {
'feature1': -222.1,
'feature2': 222.2
},
},
}
}, {
'labels': None,
'predictions': None,
'example_weights': np.array(1.0),
'attributions': {
'model': {
'output': {
'feature1': 333.1,
'feature2': -333.2
},
},
}
}],
'expected_values': {
'feature1': (111.1 - 222.1 + 333.1),
'feature2': (-111.2 + 222.2 - 333.2),
},
},
)
def testTotalAttributionsWithMultiModelsAndOutputs(self, model_name,
output_name, examples,
expected_values):
computations = attributions.TotalAttributions().computations(
model_names=[model_name], output_names=[output_name])
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create(examples)
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
|
'CombineAttributions' >> beam.CombinePerKey(computations[0].combiner)
'ComputeResult' >> beam.Map(  # comment to add lambda on own line
lambda x: (x[0], computations[1].result(x[1]))))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_attributions = got[0]
self.assertEqual(got_slice_key, ())
total_attributions_key = metric_types.AttributionsKey(
name='total_attributions',
model_name=model_name,
output_name=output_name)
self.assertIn(total_attributions_key, got_attributions)
self.assertDictElementsAlmostEqual(
got_attributions[total_attributions_key], expected_values)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(('empty', None, {
'feature1': np.array([6.33, 6.39, 6.36]),
'feature2': np.array([6.63, 6.69, 6.66]),
}), ('class_id', metric_types.SubKey(class_id=0), {
'feature1': 6.33,
'feature2': 6.63,
}), ('k', metric_types.SubKey(k=2), {
'feature1': 6.36,
'feature2': 6.66,
}), ('top_k', metric_types.SubKey(top_k=2), {
'feature1': np.array([6.39, 6.36]),
|
import numpy as np
# Gotran generated code for the "grandi" model
def init_state_values_single(**values):
"""
Initialize state values
"""
# Init values
# m=0.003793087414436, h=0.626221949492493, j=0.624553572490432,
# x_kr=0.0210022533039071, x_ks=0.00428016666258923,
# x_to_s=0.000440445885642567, y_to_s=0.785115828275182,
# x_to_f=0.000440438103758954, y_to_f=0.999995844038706,
# d=2.92407183949469e-06, f=0.995135796703515,
# f_Ca_Bj=0.0246760872105795, f_Ca_Bsl=0.0152723084239416,
# Ry_Rr=0.890806040818203, Ry_Ro=7.40481128853622e-07,
# Ry_Ri=9.07666168960848e-08, Na_Bj=3.4543773303328,
# Na_Bsl=0.753740951477775, Tn_CL=0.00893455096919132,
# Tn_CHc=0.117412025936615, Tn_CHm=0.0106160166692932,
# CaM=0.000295573424135051, Myo_c=0.00192322252438022,
# Myo_m=0.137560495022823, SRB=0.00217360235649355,
# SLL_j=0.00740524521680039, SLL_sl=0.00990339304377132,
# SLH_j=0.0735890020284214, SLH_sl=0.114583623436917,
# Csqn_b=1.19723145924432, Ca_sr=0.554760499828172,
# Na_j=8.40537012592918, Na_sl=8.40491910001025, Na_i=8.40513364344858,
# K_i=120, Ca_j=0.000175882395147342, Ca_sl=0.000106779509977354,
# Ca_i=8.72509677797499e-05, V_m=-81.4552030512661
init_values = np.array(
[
0.003793087414436,
0.626221949492493,
0.624553572490432,
0.0210022533039071,
0.00428016666258923,
0.000440445885642567,
0.785115828275182,
0.000440438103758954,
0.999995844038706,
2.92407183949469e-06,
0.995135796703515,
0.0246760872105795,
0.0152723084239416,
0.890806040818203,
7.40481128853622e-07,
9.07666168960848e-08,
3.4543773303328,
0.753740951477775,
0.00893455096919132,
0.117412025936615,
0.0106160166692932,
0.000295573424135051,
0.00192322252438022,
0.137560495022823,
0.00217360235649355,
0.00740524521680039,
0.00990339304377132,
0.0735890020284214,
0.114583623436917,
1.19723145924432,
0.554760499828172,
8.40537012592918,
8.40491910001025,
8.40513364344858,
120,
0.000175882395147342,
0.000106779509977354,
8.72509677797499e-05,
-81.4552030512661,
],
dtype=np.float_,
)
# State indices and limit checker
state_ind = dict(
[
("m", 0),
("h", 1),
("j", 2),
("x_kr", 3),
("x_ks", 4),
("x_to_s", 5),
("y_to_s", 6),
("x_to_f", 7),
("y_to_f", 8),
("d", 9),
("f", 10),
("f_Ca_Bj", 11),
("f_Ca_Bsl", 12),
("Ry_Rr", 13),
("Ry_Ro", 14),
("Ry_Ri", 15),
("Na_Bj", 16),
("Na_Bsl", 17),
("Tn_CL", 18),
("Tn_CHc", 19),
("Tn_CHm", 20),
("CaM", 21),
("Myo_c", 22),
("Myo_m", 23),
("SRB", 24),
("SLL_j", 25),
("SLL_sl", 26),
("SLH_j", 27),
("SLH_sl", 28),
("Csqn_b", 29),
("Ca_sr", 30),
("Na_j", 31),
("Na_sl", 32),
("Na_i", 33),
("K_i", 34),
("Ca_j", 35),
("Ca_sl", 36),
("Ca_i", 37),
("V_m", 38),
]
)
for state_name, value in values.items():
if state_name not in state_ind:
raise ValueError("{0} is not a state.".format(state_name))
ind = state_ind[state_name]
# Assign value
init_values[ind] = value
return init_values
def init_parameter_values_single(**values):
"""
Initialize parameter values
"""
# Param values
# Fjunc=0.11, Fjunc_CaL=0.9, cellLength=100, cellRadius=10.25,
# distJuncSL=0.5, distSLcyto=0.45, junctionLength=0.16,
# junctionRadius=0.015, GNa=23, GNaB=0.000597, IbarNaK=1.8,
# KmKo=1.5, KmNaip=11, Q10KmNai=1.39, Q10NaK=1.63, GKr=0.035,
# GKp=0.002, GKs=0.0035, pNaK=0.01833, GK1=0.35, Gto=0.13, epi=1,
# GClB=0.009, GClCa=0.0548125, KdClCa=0.1, GCaL=0.5, Q10CaL=1.8,
# pCa=0.00054, pK=2.7e-07, pNa=1.5e-08, IbarNCX=4.5, Kdact=0.00015,
# KmCai=0.00359, KmCao=1.3, KmNai=12.29, KmNao=87.5, Q10NCX=1.57,
# ksat=0.32, nu=0.27, IbarSLCaP=0.0673, KmPCa=0.0005,
# Q10SLCaP=2.35, GCaB=0.0005513, Kmf=0.000246, Kmr=1.7, MaxSR=15,
# MinSR=1, Q10SRCaP=2.6, Vmax_SRCaP=0.0053114, ec50SR=0.45,
# hillSRCaP=1.787, kiCa=0.5, kim=0.005, koCa=10, kom=0.06, ks=25,
# Bmax_Naj=7.561, Bmax_Nasl=1.65, koff_na=0.001, kon_na=0.0001,
# Bmax_CaM=0.024, Bmax_SR=0.0171, Bmax_TnChigh=0.14,
# Bmax_TnClow=0.07, Bmax_myosin=0.14, koff_cam=0.238,
# koff_myoca=0.00046, koff_myomg=5.7e-05, koff_sr=0.06,
# koff_tnchca=3.2e-05, koff_tnchmg=0.00333, koff_tncl=0.0196,
# kon_cam=34, kon_myoca=13.8, kon_myomg=0.0157, kon_sr=100,
# kon_tnchca=2.37, kon_tnchmg=0.003, kon_tncl=32.7,
# Bmax_SLhighj0=0.000165, Bmax_SLhighsl0=0.0134, Bmax_SLlowj0=0.00046,
# Bmax_SLlowsl0=0.0374, koff_slh=0.03, koff_sll=1.3, kon_slh=100,
# kon_sll=100, Bmax_Csqn0=0.14, DcaJuncSL=1.64e-06,
# DcaSLcyto=1.22e-06, J_ca_juncsl=8.2413e-13, J_ca_slmyo=3.7243e-12,
# koff_csqn=65, kon_csqn=100, DnaJuncSL=1.09e-05, DnaSLcyto=1.79e-05,
# J_na_juncsl=1.8313e-14, J_na_slmyo=1.6386e-12, Nao=140, Ko=5.4,
# Cao=1.8, Cli=15, Clo=150, Mgi=1, Cmem=1.381e-10, Frdy=96485,
# R=8314, Temp=310, stim_amplitude=40.0, stim_duration=1.0,
# stim_period=1000.0, stim_start=0.0
init_values = np.array(
[
0.11,
0.9,
100,
10.25,
0.5,
0.45,
0.16,
0.015,
23,
0.000597,
1.8,
1.5,
11,
1.39,
1.63,
0.035,
0.002,
0.0035,
0.01833,
0.35,
0.13,
1,
0.009,
0.0548125,
0.1,
0.5,
1.8,
0.00054,
2.7e-07,
1.5e-08,
4.5,
0.00015,
0.00359,
1.3,
12.29,
87.5,
1.57,
0.32,
0.27,
0.0673,
0.0005,
2.35,
0.0005513,
0.000246,
1.7,
15,
1,
2.6,
0.0053114,
0.45,
1.787,
0.5,
0.005,
10,
0.06,
25,
7.561,
1.65,
0.001,
0.0001,
0.024,
0.0171,
0.14,
0.07,
0.14,
0.238,
0.00046,
5.7e-05,
0.06,
3.2e-05,
0.00333,
0.0196,
34,
13.8,
0.0157,
100,
2.37,
0.003,
32.7,
0.000165,
0.0134,
0.00046,
0.0374,
0.03,
1.3,
100,
100,
0.14,
1.64e-06,
1.22e-06,
8.2413e-13,
3.7243e-12,
65,
100,
1.09e-05,
1.79e-05,
1.8313e-14,
1.6386e-12,
140,
5.4,
1.8,
15,
150,
1,
1.381e-10,
96485,
8314,
310,
40.0,
1.0,
1000.0,
0.0,
],
dtype=np.float_,
)
# Parameter indices and limit checker
param_ind = dict(
[
("Fjunc", 0),
("Fjunc_CaL", 1),
("cellLength", 2),
("cellRadius", 3),
("distJuncSL", 4),
("distSLcyto", 5),
("junctionLength", 6),
("junctionRadius", 7),
("GNa", 8),
("GNaB", 9),
("IbarNaK", 10),
("KmKo", 11),
("KmNaip", 12),
("Q10KmNai", 13),
("Q10NaK", 14),
("GKr", 15),
("GKp", 16),
("GKs", 17),
("pNaK", 18),
("GK1", 19),
("Gto", 20),
("epi", 21),
("GClB", 22),
("GClCa", 23),
("KdClCa", 24),
("GCaL", 25),
("Q10CaL", 26),
("pCa", 27),
("pK", 28),
("pNa", 29),
("IbarNCX", 30),
("Kdact", 31),
("KmCai", 32),
("KmCao", 33),
("KmNai", 34),
("KmNao", 35),
("Q10NCX", 36),
("ksat", 37),
("nu", 38),
("IbarSLCaP", 39),
("KmPCa", 40),
("Q10SLCaP", 41),
("GCaB", 42),
("Kmf", 43),
("Kmr", 44),
("MaxSR", 45),
("MinSR", 46),
("Q10SRCaP", 47),
("Vmax_SRCaP", 48),
("ec50SR", 49),
("hillSRCaP", 50),
("kiCa", 51),
("kim", 52),
("koCa", 53),
("kom", 54),
("ks", 55),
("Bmax_Naj", 56),
("Bmax_Nasl", 57),
("koff_na", 58),
("kon_na", 59),
("Bmax_CaM", 60),
("Bmax_SR", 61),
("Bmax_TnChigh", 62),
("Bmax_TnClow", 63),
("Bmax_myosin", 64),
("koff_cam", 65),
("koff_myoca", 66),
("koff_myomg", 67),
("koff_sr", 68),
("koff_tnchca", 69),
("koff_tnchmg", 70),
("koff_tncl", 71),
("kon_cam", 72),
("kon_myoca", 73),
("kon_myomg", 74),
("kon_sr", 75),
("kon_tnchca", 76),
("kon_tnchmg", 77),
("kon_tncl", 78),
("Bmax_SLhighj0", 79),
("Bmax_SLhighsl0", 80),
("Bmax_SLlowj0", 81),
("Bmax_SLlowsl0", 82),
("koff_slh", 83),
("koff_sll", 84),
("kon_slh", 85),
("kon_sll", 86),
("Bmax_Csqn0", 87),
("DcaJuncSL", 88),
("DcaSLcyto", 89),
("J_ca_juncsl", 90),
("J_ca_slmyo", 91),
("koff_csqn", 92),
("kon_csqn", 93),
("DnaJuncSL", 94),
("DnaSLcyto", 95),
("J_na_juncsl", 96),
("J_na_slmyo", 97),
("Nao", 98),
("Ko", 99),
("Cao", 100),
("Cli", 101),
("Clo", 102),
("Mgi", 103),
("Cmem", 104),
("Frdy", 105),
("R", 106),
("Temp", 107),
("stim_amplitude", 108),
("stim_duration", 109),
("stim_period", 110),
("stim_start", 111),
]
)
for param_name, value in values.items():
if param_name not in param_ind:
raise ValueError("{0} is not a parameter.".format(param_name))
ind = param_ind[param_name]
# Assign value
init_values[ind] = value
return init_values
def state_indices(*states):
"""
State indices
"""
state_inds = dict(
[
("m", 0),
("h", 1),
("j", 2),
("x_kr", 3),
("x_ks", 4),
("x_to_s", 5),
("y_to_s", 6),
("x_to_f", 7),
("y_to_f", 8),
("d", 9),
("f", 10),
("f_Ca_Bj", 11),
("f_Ca_Bsl", 12),
("Ry_Rr", 13),
("Ry_Ro", 14),
("Ry_Ri", 15),
("Na_Bj", 16),
("Na_Bsl", 17),
("Tn_CL", 18),
("Tn_CHc", 19),
("Tn_CHm", 20),
("CaM", 21),
("Myo_c", 22),
("Myo_m", 23),
("SRB", 24),
("SLL_j", 25),
("SLL_sl", 26),
("SLH_j", 27),
("SLH_sl", 28),
("Csqn_b", 29),
("Ca_sr", 30),
("Na_j", 31),
("Na_sl", 32),
("Na_i", 33),
("K_i", 34),
("Ca_j", 35),
("Ca_sl", 36),
("Ca_i", 37),
("V_m", 38),
]
)
indices = []
for state in states:
if state not in state_inds:
raise ValueError("Unknown state: '{0}'".format(state))
indices.append(state_inds[state])
if len(indices) > 1:
return indices
else:
return indices[0]
def parameter_indices(*params):
"""
Parameter indices
"""
param_inds = dict(
[
("Fjunc", 0),
("Fjunc_CaL", 1),
("cellLength", 2),
("cellRadius", 3),
("distJuncSL", 4),
("distSLcyto", 5),
("junctionLength", 6),
("junctionRadius", 7),
("GNa", 8),
("GNaB", 9),
("IbarNaK", 10),
("KmKo", 11),
("KmNaip", 12),
("Q10KmNai", 13),
("Q10NaK", 14),
("GKr", 15),
("GKp", 16),
("GKs", 17),
("pNaK", 18),
("GK1", 19),
("Gto", 20),
("epi", 21),
("GClB", 22),
("GClCa", 23),
("KdClCa", 24),
("GCaL", 25),
("Q10CaL", 26),
("pCa", 27),
("pK", 28),
("pNa", 29),
("IbarNCX", 30),
("Kdact", 31),
("KmCai", 32),
("KmCao", 33),
("KmNai", 34),
("KmNao", 35),
("Q10NCX", 36),
("ksat", 37),
("nu", 38),
("IbarSLCaP", 39),
("KmPCa", 40),
("Q10SLCaP", 41),
("GCaB", 42),
("Kmf", 43),
("Kmr", 44),
("MaxSR", 45),
("MinSR", 46),
("Q10SRCaP", 47),
("Vmax_SRCaP", 48),
("ec50SR", 49),
("hillSRCaP", 50),
("kiCa", 51),
("kim", 52),
("koCa", 53),
("kom", 54),
("ks", 55),
("Bmax_Naj", 56),
("Bmax_Nasl", 57),
("koff_na", 58),
("kon_na", 59),
("Bmax_CaM", 60),
("Bmax_SR", 61),
("Bmax_TnChigh", 62),
("Bmax_TnClow", 63),
("Bmax_myosin", 64),
("koff_cam", 65),
("koff_myoca", 66),
("koff_myomg", 67),
("koff_sr", 68),
("koff_tnchca", 69),
("koff_tnchmg", 70),
("koff_tncl", 71),
("kon_cam", 72),
("kon_myoca", 73),
("kon_myomg", 74),
("kon_sr", 75),
("kon_tnchca", 76),
("kon_tnchmg", 77),
("kon_tncl", 78),
("Bmax_SLhighj0", 79),
("Bmax_SLhighsl0", 80),
("Bmax_SLlowj0", 81),
("Bmax_SLlowsl0", 82),
("koff_slh", 83),
("koff_sll", 84),
("kon_slh", 85),
("kon_sll", 86),
("Bmax_Csqn0", 87),
("DcaJuncSL", 88),
("DcaSLcyto", 89),
("J_ca_juncsl", 90),
("J_ca_slmyo", 91),
("koff_csqn", 92),
("kon_csqn", 93),
("DnaJuncSL", 94),
("DnaSLcyto", 95),
("J_na_juncsl", 96),
("J_na_slmyo", 97),
("Nao", 98),
("Ko", 99),
("Cao", 100),
("Cli", 101),
("Clo", 102),
("Mgi", 103),
("Cmem", 104),
("Frdy", 105),
("R", 106),
("Temp", 107),
("stim_amplitude", 108),
("stim_duration", 109),
("stim_period", 110),
("stim_start", 111),
]
)
indices = []
for param in params:
if param not in param_inds:
raise ValueError("Unknown param: '{0}'".format(param))
indices.append(param_inds[param])
if len(indices) > 1:
return indices
else:
return indices[0]
def monitor_indices(*monitored):
"""
Monitor indices
"""
monitor_inds = dict(
[
("Vcell", 0),
("Vmyo", 1),
("Vsr", 2),
("Vsl", 3),
("Vjunc", 4),
("SAjunc", 5),
("SAsl", 6),
("Fsl", 7),
("Fsl_CaL", 8),
("mss", 9),
("taum", 10),
("ah", 11),
("bh", 12),
("tauh", 13),
("hss", 14),
("aj", 15),
("bj", 16),
("tauj", 17),
("jss", 18),
("I_Na_junc", 19),
("I_Na_sl", 20),
("I_Na", 21),
("I_nabk_junc", 22),
("I_nabk_sl", 23),
("I_nabk", 24),
("sigma", 25),
("fnak", 26),
("I_nak_junc", 27),
("I_nak_sl", 28),
("I_nak", 29),
("gkr", 30),
("xrss", 31),
("tauxr", 32),
("rkr", 33),
("I_kr", 34),
("kp_kp", 35),
("I_kp_junc", 36),
("I_kp_sl", 37),
("I_kp", 38),
("eks", 39),
("gks_junc", 40),
("gks_sl", 41),
("xsss", 42),
("tauxs", 43),
("I_ks_junc", 44),
("I_ks_sl", 45),
("I_ks", 46),
("GtoSlow", 47),
("GtoFast", 48),
("xtoss", 49),
("ytoss", 50),
("tauxtos", 51),
("tauytos", 52),
("I_tos", 53),
("tauxtof", 54),
("tauytof", 55),
("I_tof", 56),
("I_to", 57),
("I_ClCa_junc", 58),
("I_ClCa_sl", 59),
("I_ClCa", 60),
("I_Clbk", 61),
("fss", 62),
("dss", 63),
("taud", 64),
("tauf", 65),
("fcaCaMSL", 66),
("fcaCaj", 67),
("ibarca_j", 68),
("ibarca_sl", 69),
("ibark", 70),
("ibarna_j", 71),
("ibarna_sl", 72),
("I_Ca_junc", 73),
("I_Ca_sl", 74),
("I_Ca", 75),
("I_CaK", 76),
("I_CaNa_junc", 77),
("I_CaNa_sl", 78),
("I_CaNa", 79),
("I_Catot", 80),
("Ka_junc", 81),
("Ka_sl", 82),
("s1_junc", 83),
("s1_sl", 84),
("s2_junc", 85),
("s3_junc", 86),
("s2_sl", 87),
("s3_sl", 88),
("I_ncx_junc", 89),
("I_ncx_sl", 90),
("I_ncx", 91),
("I_pca_junc", 92),
("I_pca_sl", 93),
("I_pca", 94),
("I_cabk_junc", 95),
("I_cabk_sl", 96),
("I_cabk", 97),
("kCaSR", 98),
("koSRCa", 99),
("kiSRCa", 100),
("RI", 101),
("J_SRCarel", 102),
("J_serca", 103),
("J_SRleak", 104),
("J_CaB_cytosol", 105),
("Bmax_SLlowsl", 106),
("Bmax_SLlowj", 107),
("Bmax_SLhighsl", 108),
("Bmax_SLhighj", 109),
("J_CaB_junction", 110),
("J_CaB_sl", 111),
("Bmax_Csqn", 112),
("I_Na_tot_junc", 113),
("I_Na_tot_sl", 114),
("I_Na_tot_sl2", 115),
("I_Na_tot_junc2", 116),
("I_K_tot", 117),
("I_Ca_tot_junc", 118),
("I_Ca_tot_sl", 119),
("i_Stim", 120),
("I_Na_tot", 121),
("I_Cl_tot", 122),
("I_Ca_tot", 123),
("I_tot", 124),
("FoRT", 125),
("ena_junc", 126),
("ena_sl", 127),
("ek", 128),
("eca_junc", 129),
("eca_sl", 130),
("ecl", 131),
("Qpow", 132),
("aki", 133),
("bki", 134),
("kiss", 135),
("I_K1", 136),
("dm_dt", 137),
("dh_dt", 138),
("dj_dt", 139),
("dx_kr_dt", 140),
("dx_ks_dt", 141),
("dx_to_s_dt", 142),
("dy_to_s_dt", 143),
("dx_to_f_dt", 144),
("dy_to_f_dt", 145),
("dd_dt", 146),
("df_dt", 147),
("df_Ca_Bj_dt", 148),
("df_Ca_Bsl_dt", 149),
("dRy_Rr_dt", 150),
("dRy_Ro_dt", 151),
("dRy_Ri_dt", 152),
("dNa_Bj_dt", 153),
("dNa_Bsl_dt", 154),
("dTn_CL_dt", 155),
("dTn_CHc_dt", 156),
("dTn_CHm_dt", 157),
("dCaM_dt", 158),
("dMyo_c_dt", 159),
("dMyo_m_dt", 160),
("dSRB_dt", 161),
("dSLL_j_dt", 162),
("dSLL_sl_dt", 163),
("dSLH_j_dt", 164),
("dSLH_sl_dt", 165),
("dCsqn_b_dt", 166),
("dCa_sr_dt", 167),
("dNa_j_dt", 168),
("dNa_sl_dt", 169),
("dNa_i_dt", 170),
("dK_i_dt", 171),
("dCa_j_dt", 172),
("dCa_sl_dt", 173),
("dCa_i_dt", 174),
("dV_m_dt", 175),
]
)
indices = []
for monitor in monitored:
if monitor not in monitor_inds:
raise ValueError("Unknown monitored: '{0}'".format(monitor))
indices.append(monitor_inds[monitor])
if len(indices) > 1:
return indices
else:
return indices[0]
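# A small illustrative sketch (an assumed helper, not part of the generated model)
# showing how the name-to-index helpers above pair with the init_* builders:
# indices returned by state_indices/parameter_indices address the vectors
# returned by init_state_values_single/init_parameter_values_single.
def _example_index_lookup():
    y0 = init_state_values_single(V_m=-80.0)  # override one state by name
    params = init_parameter_values_single()
    v_idx = state_indices("V_m")  # single name -> single index
    gna_idx, period_idx = parameter_indices("GNa", "stim_period")  # several names -> list
    return y0[v_idx], params[gna_idx], params[period_idx]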
def rhs(states, t, parameters, values=None):
"""
Compute the right hand side of the grandi ODE
"""
# Assign states
assert len(states) == 39
(
m,
h,
j,
x_kr,
x_ks,
x_to_s,
y_to_s,
x_to_f,
y_to_f,
d,
f,
f_Ca_Bj,
f_Ca_Bsl,
Ry_Rr,
Ry_Ro,
Ry_Ri,
Na_Bj,
Na_Bsl,
Tn_CL,
Tn_CHc,
Tn_CHm,
CaM,
Myo_c,
Myo_m,
SRB,
SLL_j,
SLL_sl,
SLH_j,
SLH_sl,
Csqn_b,
Ca_sr,
Na_j,
Na_sl,
Na_i,
K_i,
Ca_j,
Ca_sl,
Ca_i,
V_m,
) = states
# Assign parameters
assert len(parameters) == 112
Fjunc = parameters[0]
Fjunc_CaL = parameters[1]
cellLength = parameters[2]
cellRadius = parameters[3]
GNa = parameters[8]
GNaB = parameters[9]
IbarNaK = parameters[10]
KmKo = parameters[11]
KmNaip = parameters[12]
GKr = parameters[15]
GKp = parameters[16]
GKs = parameters[17]
pNaK = parameters[18]
GK1 = parameters[19]
Gto = parameters[20]
epi = parameters[21]
GClB = parameters[22]
GClCa = parameters[23]
KdClCa = parameters[24]
GCaL = parameters[25]
Q10CaL = parameters[26]
pCa = parameters[27]
pK = parameters[28]
pNa = parameters[29]
IbarNCX = parameters[30]
Kdact = parameters[31]
KmCai = parameters[32]
KmCao = parameters[33]
KmNai = parameters[34]
KmNao = parameters[35]
Q10NCX = parameters[36]
ksat = parameters[37]
nu = parameters[38]
IbarSLCaP = parameters[39]
KmPCa = parameters[40]
Q10SLCaP = parameters[41]
GCaB = parameters[42]
Kmf = parameters[43]
Kmr = parameters[44]
MaxSR = parameters[45]
MinSR = parameters[46]
Q10SRCaP = parameters[47]
Vmax_SRCaP = parameters[48]
ec50SR = parameters[49]
hillSRCaP = parameters[50]
kiCa = parameters[51]
kim = parameters[52]
koCa = parameters[53]
kom = parameters[54]
ks = parameters[55]
Bmax_Naj = parameters[56]
Bmax_Nasl = parameters[57]
koff_na = parameters[58]
kon_na = parameters[59]
Bmax_CaM = parameters[60]
Bmax_SR = parameters[61]
Bmax_TnChigh = parameters[62]
Bmax_TnClow = parameters[63]
Bmax_myosin = parameters[64]
koff_cam = parameters[65]
koff_myoca = parameters[66]
koff_myomg = parameters[67]
koff_sr = parameters[68]
koff_tnchca = parameters[69]
koff_tnchmg = parameters[70]
koff_tncl = parameters[71]
kon_cam = parameters[72]
kon_myoca = parameters[73]
kon_myomg = parameters[74]
kon_sr = parameters[75]
kon_tnchca = parameters[76]
kon_tnchmg = parameters[77]
kon_tncl = parameters[78]
Bmax_SLhighj0 = parameters[79]
Bmax_SLhighsl0 = parameters[80]
Bmax_SLlowj0 = parameters[81]
Bmax_SLlowsl0 = parameters[82]
koff_slh = parameters[83]
koff_sll = parameters[84]
kon_slh = parameters[85]
kon_sll = parameters[86]
Bmax_Csqn0 = parameters[87]
J_ca_juncsl = parameters[90]
J_ca_slmyo = parameters[91]
koff_csqn = parameters[92]
kon_csqn = parameters[93]
J_na_juncsl = parameters[96]
J_na_slmyo = parameters[97]
Nao = parameters[98]
Ko = parameters[99]
Cao = parameters[100]
Cli = parameters[101]
Clo = parameters[102]
Mgi = parameters[103]
Cmem = parameters[104]
Frdy = parameters[105]
R = parameters[106]
Temp = parameters[107]
stim_amplitude = parameters[108]
stim_duration = parameters[109]
stim_period = parameters[110]
stim_start = parameters[111]
# Init return args
if values is None:
values = np.zeros_like(states)
else:
assert isinstance(values, np.ndarray) and values.shape == states.shape
# Expressions for the Geometry component
Vcell = 1e-15 * np.pi * cellLength * (cellRadius * cellRadius)
Vmyo = 0.65 * Vcell
Vsr = 0.035 * Vcell
Vsl = 0.02 * Vcell
Vjunc = 0.0005390000000000001 * Vcell
Fsl = 1 - Fjunc
Fsl_CaL = 1 - Fjunc_CaL
# Expressions for the Reversal potentials component
FoRT = Frdy / (R * Temp)
ena_junc = np.log(Nao / Na_j) / FoRT
ena_sl = np.log(Nao / Na_sl) / FoRT
ek = np.log(Ko / K_i) / FoRT
eca_junc = np.log(Cao / Ca_j) / (2 * FoRT)
eca_sl = np.log(Cao / Ca_sl) / (2 * FoRT)
ecl = np.log(Cli / Clo) / FoRT
Qpow = -31 + Temp / 10
# Expressions for the I_Na component
mss = 1.0 / (
(1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
* (1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
)
taum = 0.1292 * np.exp(
-(
(2.9465894465894467 + 0.06435006435006435 * V_m)
* (2.9465894465894467 + 0.06435006435006435 * V_m)
)
) + 0.06487 * np.exp(
-(
(-0.09434663536776214 + 0.019561815336463225 * V_m)
* (-0.09434663536776214 + 0.019561815336463225 * V_m)
)
)
ah = np.where(
V_m >= -40, 0, 4.4312679295805147e-07 * np.exp(-0.14705882352941177 * V_m)
)
bh = np.where(
V_m >= -40,
0.77 / (0.13 + 0.049758141083938695 * np.exp(-0.0900900900900901 * V_m)),
310000.0 * np.exp(0.3485 * V_m) + 2.7 * np.exp(0.079 * V_m),
)
tauh = 1.0 / (ah + bh)
hss = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
aj = np.where(
V_m >= -40,
0,
(37.78 + V_m)
* (-25428.0 * np.exp(0.2444 * V_m) - 6.948e-06 * np.exp(-0.04391 * V_m))
/ (1 + 50262745825.95399 * np.exp(0.311 * V_m)),
)
bj = np.where(
V_m >= -40,
0.6 * np.exp(0.057 * V_m) / (1 + 0.040762203978366204 * np.exp(-0.1 * V_m)),
0.02424
* np.exp(-0.01052 * V_m)
/ (1 + 0.003960868339904256 * np.exp(-0.1378 * V_m)),
)
tauj = 1.0 / (aj + bj)
jss = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
values[0] = (-m + mss) / taum
values[1] = (-h + hss) / tauh
values[2] = (-j + jss) / tauj
I_Na_junc = Fjunc * GNa * (m * m * m) * (-ena_junc + V_m) * h * j
I_Na_sl = GNa * (m * m * m) * (-ena_sl + V_m) * Fsl * h * j
# Expressions for the I_NaBK component
I_nabk_junc = Fjunc * GNaB * (-ena_junc + V_m)
I_nabk_sl = GNaB * (-ena_sl + V_m) * Fsl
# Expressions for the I_NaK component
sigma = -1 / 7 + np.exp(0.01485884101040119 * Nao) / 7
fnak = 1.0 / (
1 + 0.1245 * np.exp(-0.1 * FoRT * V_m) + 0.0365 * np.exp(-FoRT * V_m) * sigma
)
I_nak_junc = (
Fjunc
* IbarNaK
* Ko
* fnak
/ ((1 + np.power(KmNaip, 4) / np.power(Na_j, 4)) * (KmKo + Ko))
)
I_nak_sl = (
IbarNaK
* Ko
* Fsl
* fnak
/ ((1 + np.power(KmNaip, 4) / np.power(Na_sl, 4)) * (KmKo + Ko))
)
I_nak = I_nak_junc + I_nak_sl
# Expressions for the I_Kr component
gkr = 0.4303314829119352 * GKr * np.sqrt(Ko)
xrss = 1.0 / (1 + np.exp(-2 - V_m / 5))
tauxr = 230 / (1 + np.exp(2 + V_m / 20)) + 3300 / (
(1 + np.exp(-22 / 9 - V_m / 9)) * (1 + np.exp(11 / 9 + V_m / 9))
)
values[3] = (-x_kr + xrss) / tauxr
rkr = 1.0 / (1 + np.exp(37 / 12 + V_m / 24))
I_kr = (-ek + V_m) * gkr * rkr * x_kr
# Expressions for the I_Kp component
kp_kp = 1.0 / (1 + 1786.4755653786237 * np.exp(-0.16722408026755853 * V_m))
I_kp_junc = Fjunc * GKp * (-ek + V_m) * kp_kp
I_kp_sl = GKp * (-ek + V_m) * Fsl * kp_kp
I_kp = I_kp_junc + I_kp_sl
# Expressions for the I_Ks component
eks = np.log((Ko + Nao * pNaK) / (pNaK * Na_i + K_i)) / FoRT
gks_junc = GKs
gks_sl = GKs
xsss = 1.0 / (1 + 0.7659283383646487 * np.exp(-0.07017543859649122 * V_m))
tauxs = 990.1 / (1 + 0.8415404088681017 * np.exp(-0.0708215297450425 * V_m))
values[4] = (-x_ks + xsss) / tauxs
I_ks_junc = Fjunc * (x_ks * x_ks) * (-eks + V_m) * gks_junc
I_ks_sl = (x_ks * x_ks) * (-eks + V_m) * Fsl * gks_sl
I_ks = I_ks_junc + I_ks_sl
# Expressions for the I_to component
GtoSlow = np.where(epi == 1, 0.12 * Gto, 0.28919999999999996 * Gto)
GtoFast = np.where(epi == 1, 0.88 * Gto, 0.010799999999999999 * Gto)
xtoss = 1.0 / (1 + np.exp(19 / 13 - V_m / 13))
ytoss = 1.0 / (1 + 49.40244910553019 * np.exp(V_m / 5))
tauxtos = 0.5 + 9 / (1 + np.exp(1 / 5 + V_m / 15))
tauytos = 30 + 800 / (1 + np.exp(6 + V_m / 10))
values[5] = (-x_to_s + xtoss) / tauxtos
values[6] = (-y_to_s + ytoss) / tauytos
I_tos = (-ek + V_m) * GtoSlow * x_to_s * y_to_s
tauxtof = 0.5 + 8.5 * np.exp(-((9 / 10 + V_m / 50) * (9 / 10 + V_m / 50)))
tauytof = 7 + 85 * np.exp(-((40 + V_m) * (40 + V_m)) / 220)
values[7] = (-x_to_f + xtoss) / tauxtof
values[8] = (-y_to_f + ytoss) / tauytof
I_tof = (-ek + V_m) * GtoFast * x_to_f * y_to_f
I_to = I_tof + I_tos
# Expressions for the I_K1 component
aki = 1.02 / (1 + 7.35454251046446e-07 * np.exp(0.2385 * V_m - 0.2385 * ek))
bki = (
0.7626240065063081 * np.exp(0.08032 * V_m - 0.08032 * ek)
+ 1.1534056351865558e-16 * np.exp(0.06175 * V_m - 0.06175 * ek)
) / (1 + 0.08677229415769332 * np.exp(0.5143 * ek - 0.5143 * V_m))
kiss = aki / (aki + bki)
I_K1 = 0.4303314829119352 * GK1 * np.sqrt(Ko) * (-ek + V_m) * kiss
# Expressions for the I_ClCa component
I_ClCa_junc = Fjunc * GClCa * (-ecl + V_m) / (1 + KdClCa / Ca_j)
I_ClCa_sl = GClCa * (-ecl + V_m) * Fsl / (1 + KdClCa / Ca_sl)
I_ClCa = I_ClCa_junc + I_ClCa_sl
I_Clbk = GClB * (-ecl + V_m)
# Expressions for the I_Ca component
fss = 1.0 / (1 + np.exp(35 / 9 + V_m / 9)) + 0.6 / (1 + np.exp(5 / 2 - V_m / 20))
dss = 1.0 / (1 + np.exp(-5 / 6 - V_m / 6))
taud = (1 - np.exp(-5 / 6 - V_m / 6)) * dss / (0.17500000000000002 + 0.035 * V_m)
tauf = 1.0 / (
0.02
+ 0.0197
* np.exp(
-(
(0.48865000000000003 + 0.0337 * V_m)
* (0.48865000000000003 + 0.0337 * V_m)
)
)
)
values[9] = (-d + dss) / taud
values[10] = (-f + fss) / tauf
values[11] = -0.0119 * f_Ca_Bj + 1.7 * (1 - f_Ca_Bj) * Ca_j
values[12] = -0.0119 * f_Ca_Bsl + 1.7 * (1 - f_Ca_Bsl) * Ca_sl
fcaCaMSL = 0
fcaCaj = 0
ibarca_j = (
4
* Frdy
* GCaL
* pCa
* (-0.341 * Cao + 0.341 * Ca_j * np.exp(2 * FoRT * V_m))
* FoRT
* V_m
/ (-1 + np.exp(2 * FoRT * V_m))
)
ibarca_sl = (
4
* Frdy
* GCaL
* pCa
* (-0.341 * Cao + 0.341 * Ca_sl * np.exp(2 * FoRT * V_m))
* FoRT
* V_m
/ (-1 + np.exp(2 * FoRT * V_m))
)
ibark = (
Frdy
* GCaL
* pK
* (-0.75 * Ko + 0.75 * K_i * np.exp(FoRT * V_m))
* FoRT
* V_m
/ (-1 + np.exp(FoRT * V_m))
)
ibarna_j = (
Frdy
* GCaL
* pNa
* (-0.75 * Nao + 0.75 * Na_j * np.exp(FoRT * V_m))
* FoRT
* V_m
/ (-1 + np.exp(FoRT * V_m))
)
ibarna_sl = (
Frdy
* GCaL
* pNa
* (-0.75 * Nao + 0.75 * Na_sl * np.exp(FoRT * V_m))
* FoRT
* V_m
/ (-1 + np.exp(FoRT * V_m))
)
I_Ca_junc = (
0.45
* Fjunc_CaL
* np.power(Q10CaL, Qpow)
* (1 + fcaCaj - f_Ca_Bj)
* d
* f
* ibarca_j
)
I_Ca_sl = (
0.45
* np.power(Q10CaL, Qpow)
* (1 + fcaCaMSL - f_Ca_Bsl)
* Fsl_CaL
* d
* f
* ibarca_sl
)
I_CaK = (
0.45
* np.power(Q10CaL, Qpow)
* (Fjunc_CaL * (1 + fcaCaj - f_Ca_Bj) + (1 + fcaCaMSL - f_Ca_Bsl) * Fsl_CaL)
* d
* f
* ibark
)
I_CaNa_junc = (
0.45
* Fjunc_CaL
* np.power(Q10CaL, Qpow)
* (1 + fcaCaj - f_Ca_Bj)
* d
* f
* ibarna_j
)
I_CaNa_sl = (
0.45
* np.power(Q10CaL, Qpow)
* (1 + fcaCaMSL - f_Ca_Bsl)
* Fsl_CaL
* d
* f
* ibarna_sl
)
# Expressions for the I_NCX component
Ka_junc = 1.0 / (1 + (Kdact * Kdact) / (Ca_j * Ca_j))
Ka_sl = 1.0 / (1 + (Kdact * Kdact) / (Ca_sl * Ca_sl))
s1_junc = Cao * (Na_j * Na_j * Na_j) * np.exp(nu * FoRT * V_m)
s1_sl = Cao * (Na_sl * Na_sl * Na_sl) * np.exp(nu * FoRT * V_m)
s2_junc = (Nao * Nao * Nao) * Ca_j * np.exp((-1 + nu) * FoRT * V_m)
s3_junc = (
Cao * (Na_j * Na_j * Na_j)
+ KmCao * (Na_j * Na_j * Na_j)
+ (Nao * Nao * Nao) * Ca_j
+ KmCai
* (Nao * Nao * Nao)
* (1 + (Na_j * Na_j * Na_j) / (KmNai * KmNai * KmNai))
+ (KmNao * KmNao * KmNao) * (1 + Ca_j / KmCai) * Ca_j
)
s2_sl = (Nao * Nao * Nao) * Ca_sl * np.exp((-1 + nu) * FoRT * V_m)
s3_sl = (
Cao * (Na_sl * Na_sl * Na_sl)
+ KmCao * (Na_sl * Na_sl * Na_sl)
+ (Nao * Nao * Nao) * Ca_sl
+ KmCai
* (Nao * Nao * Nao)
* (1 + (Na_sl * Na_sl * Na_sl) / (KmNai * KmNai * KmNai))
+ (KmNao * KmNao * KmNao) * (1 + Ca_sl / KmCai) * Ca_sl
)
I_ncx_junc = (
Fjunc
* IbarNCX
* np.power(Q10NCX, Qpow)
* (-s2_junc + s1_junc)
* Ka_junc
/ ((1 + ksat * np.exp((-1 + nu) * FoRT * V_m)) * s3_junc)
)
I_ncx_sl = (
IbarNCX
* np.power(Q10NCX, Qpow)
* (-s2_sl + s1_sl)
* Fsl
* Ka_sl
/ ((1 + ksat * np.exp((-1 + nu) * FoRT * V_m)) * s3_sl)
)
# Expressions for the I_PCa component
I_pca_junc = (
Fjunc
* IbarSLCaP
* np.power(Q10SLCaP, Qpow)
* np.power(Ca_j, 1.6)
/ (np.power(KmPCa, 1.6) + np.power(Ca_j, 1.6))
)
I_pca_sl = (
IbarSLCaP
* np.power(Q10SLCaP, Qpow)
* np.power(Ca_sl, 1.6)
* Fsl
/ (np.power(KmPCa, 1.6) + np.power(Ca_sl, 1.6))
)
# Expressions for the I_CaBK component
I_cabk_junc = Fjunc * GCaB * (-eca_junc + V_m)
I_cabk_sl = GCaB * (-eca_sl + V_m) * Fsl
# Expressions for the SR Fluxes component
kCaSR = MaxSR - (MaxSR - MinSR) / (1 + np.power(ec50SR / Ca_sr, 2.5))
koSRCa = koCa / kCaSR
kiSRCa = kiCa * kCaSR
RI = 1 - Ry_Ri - Ry_Ro - Ry_Rr
values[13] = (
kim * RI + kom * Ry_Ro - (Ca_j * Ca_j) * Ry_Rr * koSRCa - Ca_j * Ry_Rr * kiSRCa
)
values[14] = (
kim * Ry_Ri
- kom * Ry_Ro
+ (Ca_j * Ca_j) * Ry_Rr * koSRCa
- Ca_j * Ry_Ro * kiSRCa
)
values[15] = (
-kim * Ry_Ri - kom * Ry_Ri + (Ca_j * Ca_j) * RI * koSRCa + Ca_j * Ry_Ro * kiSRCa
)
J_SRCarel = ks * (-Ca_j + Ca_sr) * Ry_Ro
J_serca = (
Vmax_SRCaP
* np.power(Q10SRCaP, Qpow)
* (np.power(Ca_i / Kmf, hillSRCaP) - np.power(Ca_sr / Kmr, hillSRCaP))
/ (1 + np.power(Ca_i / Kmf, hillSRCaP) + np.power(Ca_sr / Kmr, hillSRCaP))
)
J_SRleak = 5.348e-06 * Ca_sr - 5.348e-06 * Ca_j
# Expressions for the Na Buffers component
values[16] = -koff_na * Na_Bj + kon_na * (Bmax_Naj - Na_Bj) * Na_j
values[17] = -koff_na * Na_Bsl + kon_na * (Bmax_Nasl - Na_Bsl) * Na_sl
# Expressions for the Cytosolic Ca Buffers component
values[18] = -koff_tncl * Tn_CL + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i
values[19] = (
-koff_tnchca * Tn_CHc + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i
)
values[20] = -koff_tnchmg * Tn_CHm + Mgi * kon_tnchmg * (
Bmax_TnChigh - Tn_CHc - Tn_CHm
)
values[21] = -koff_cam * CaM + kon_cam * (Bmax_CaM - CaM) * Ca_i
values[22] = -koff_myoca * Myo_c + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i
values[23] = -koff_myomg * Myo_m + Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m)
values[24] = -koff_sr * SRB + kon_sr * (Bmax_SR - SRB) * Ca_i
J_CaB_cytosol = (
-koff_cam * CaM
- koff_myoca * Myo_c
- koff_myomg * Myo_m
- koff_sr * SRB
- koff_tnchca * Tn_CHc
- koff_tnchmg * Tn_CHm
- koff_tncl * Tn_CL
+ Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m)
+ Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm)
+ kon_cam * (Bmax_CaM - CaM) * Ca_i
+ kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i
+ kon_sr * (Bmax_SR - SRB) * Ca_i
+ kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i
+ kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i
)
# Expressions for the Junctional and SL Ca Buffers component
Bmax_SLlowsl = Bmax_SLlowsl0 * Vmyo / Vsl
Bmax_SLlowj = Bmax_SLlowj0 * Vmyo / Vjunc
Bmax_SLhighsl = Bmax_SLhighsl0 * Vmyo / Vsl
Bmax_SLhighj = Bmax_SLhighj0 * Vmyo / Vjunc
values[25] = -koff_sll * SLL_j + kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j
values[26] = -koff_sll * SLL_sl + kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl
values[27] = -koff_slh * SLH_j + kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j
values[28] = -koff_slh * SLH_sl + kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl
J_CaB_junction = (
-koff_slh * SLH_j
- koff_sll * SLL_j
+ kon_slh * (-SLH_j + Bmax_SLhighj) * Ca_j
+ kon_sll * (-SLL_j + Bmax_SLlowj) * Ca_j
)
J_CaB_sl = (
-koff_slh * SLH_sl
- koff_sll * SLL_sl
+ kon_slh * (-SLH_sl + Bmax_SLhighsl) * Ca_sl
+ kon_sll * (-SLL_sl + Bmax_SLlowsl) * Ca_sl
)
# Expressions for the SR Ca Concentrations component
Bmax_Csqn = Bmax_Csqn0 * Vmyo / Vsr
values[29] = -koff_csqn * Csqn_b + kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr
values[30] = (
-J_SRCarel
+ koff_csqn * Csqn_b
- kon_csqn * (-Csqn_b + Bmax_Csqn) * Ca_sr
- J_SRleak * Vmyo / Vsr
+ J_serca
)
# Expressions for the Na Concentrations component
I_Na_tot_junc = (
3 * I_nak_junc + 3 * I_ncx_junc + I_CaNa_junc + I_Na_junc + I_nabk_junc
)
I_Na_tot_sl = 3 * I_nak_sl + 3 * I_ncx_sl + I_CaNa_sl + I_Na_sl + I_nabk_sl
values[31] = (
-values[16]
+ J_na_juncsl * (-Na_j + Na_sl) / Vjunc
- Cmem * I_Na_tot_junc / (Frdy * Vjunc)
)
values[32] = (
-values[17]
+ J_na_juncsl * (-Na_sl + Na_j) / Vsl
+ J_na_slmyo * (-Na_sl + Na_i) / Vsl
- Cmem * I_Na_tot_sl / (Frdy * Vsl)
)
values[33] = J_na_slmyo * (-Na_i + Na_sl) / Vmyo
# Expressions for the K Concentration component
I_K_tot = -2 * I_nak + I_CaK + I_K1 + I_kp + I_kr + I_ks + I_to
values[34] = 0
# Expressions for the Ca Concentrations component
I_Ca_tot_junc = -2 * I_ncx_junc + I_Ca_junc + I_cabk_junc + I_pca_junc
I_Ca_tot_sl = -2 * I_ncx_sl + I_Ca_sl + I_cabk_sl + I_pca_sl
values[35] = (
-J_CaB_junction
+ J_ca_juncsl * (-Ca_j + Ca_sl) / Vjunc
+ J_SRCarel * Vsr / Vjunc
+ J_SRleak * Vmyo / Vjunc
- Cmem * I_Ca_tot_junc / (2 * Frdy * Vjunc)
)
values[36] = (
-J_CaB_sl
+ J_ca_juncsl * (-Ca_sl + Ca_j) / Vsl
+ J_ca_slmyo * (-Ca_sl + Ca_i) / Vsl
- Cmem * I_Ca_tot_sl / (2 * Frdy * Vsl)
)
values[37] = (
-J_CaB_cytosol + J_ca_slmyo * (-Ca_i + Ca_sl) / Vmyo - J_serca * Vsr / Vmyo
)
# Expressions for the Membrane potential component
i_Stim = np.where(
np.logical_and(
t - stim_period * np.floor(t / stim_period) <= stim_duration + stim_start,
t - stim_period * np.floor(t / stim_period) >= stim_start,
),
-stim_amplitude,
0,
)
I_Na_tot = I_Na_tot_junc + I_Na_tot_sl
I_Cl_tot = I_ClCa + I_Clbk
I_Ca_tot = I_Ca_tot_junc + I_Ca_tot_sl
I_tot = I_Ca_tot + I_Cl_tot + I_K_tot + I_Na_tot
values[38] = -I_tot - i_Stim
# Return results
return values
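# A minimal sketch of driving the right-hand side above with SciPy's ODE
# integrator; SciPy is not used by this generated module, so the import and
# the chosen time grid are illustrative assumptions.
def _example_integration():
    from scipy.integrate import odeint  # assumed available in the environment

    y0 = init_state_values_single()
    params = init_parameter_values_single(stim_period=1000.0)
    t = np.linspace(0.0, 1000.0, 2001)        # one stimulation period (model time units)
    sol = odeint(rhs, y0, t, args=(params,))  # odeint calls rhs(states, t, parameters)
    v_m = sol[:, state_indices("V_m")]        # membrane potential trace
    return t, v_m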
def monitor(states, t, parameters, monitored=None):
"""
Computes monitored expressions of the grandi ODE
"""
# Assign states
assert len(states) == 39
(
m,
h,
j,
x_kr,
x_ks,
x_to_s,
y_to_s,
x_to_f,
y_to_f,
d,
f,
f_Ca_Bj,
f_Ca_Bsl,
Ry_Rr,
Ry_Ro,
Ry_Ri,
Na_Bj,
Na_Bsl,
Tn_CL,
Tn_CHc,
Tn_CHm,
CaM,
Myo_c,
Myo_m,
SRB,
SLL_j,
SLL_sl,
SLH_j,
SLH_sl,
Csqn_b,
Ca_sr,
Na_j,
Na_sl,
Na_i,
K_i,
Ca_j,
Ca_sl,
Ca_i,
V_m,
) = states
# Assign parameters
assert len(parameters) == 112
Fjunc = parameters[0]
Fjunc_CaL = parameters[1]
cellLength = parameters[2]
cellRadius = parameters[3]
junctionLength = parameters[6]
junctionRadius = parameters[7]
GNa = parameters[8]
GNaB = parameters[9]
IbarNaK = parameters[10]
KmKo = parameters[11]
KmNaip = parameters[12]
GKr = parameters[15]
GKp = parameters[16]
GKs = parameters[17]
pNaK = parameters[18]
GK1 = parameters[19]
Gto = parameters[20]
epi = parameters[21]
GClB = parameters[22]
GClCa = parameters[23]
KdClCa = parameters[24]
GCaL = parameters[25]
Q10CaL = parameters[26]
pCa = parameters[27]
pK = parameters[28]
pNa = parameters[29]
IbarNCX = parameters[30]
Kdact = parameters[31]
KmCai = parameters[32]
KmCao = parameters[33]
KmNai = parameters[34]
KmNao = parameters[35]
Q10NCX = parameters[36]
ksat = parameters[37]
nu = parameters[38]
IbarSLCaP = parameters[39]
KmPCa = parameters[40]
Q10SLCaP = parameters[41]
GCaB = parameters[42]
Kmf = parameters[43]
Kmr = parameters[44]
MaxSR = parameters[45]
MinSR = parameters[46]
Q10SRCaP = parameters[47]
Vmax_SRCaP = parameters[48]
ec50SR = parameters[49]
hillSRCaP = parameters[50]
kiCa = parameters[51]
kim = parameters[52]
koCa = parameters[53]
kom = parameters[54]
ks = parameters[55]
Bmax_Naj = parameters[56]
Bmax_Nasl = parameters[57]
koff_na = parameters[58]
kon_na = parameters[59]
Bmax_CaM = parameters[60]
Bmax_SR = parameters[61]
Bmax_TnChigh = parameters[62]
Bmax_TnClow = parameters[63]
Bmax_myosin = parameters[64]
koff_cam = parameters[65]
koff_myoca = parameters[66]
koff_myomg = parameters[67]
koff_sr = parameters[68]
koff_tnchca = parameters[69]
koff_tnchmg = parameters[70]
koff_tncl = parameters[71]
kon_cam = parameters[72]
kon_myoca = parameters[73]
kon_myomg = parameters[74]
kon_sr = parameters[75]
kon_tnchca = parameters[76]
kon_tnchmg = parameters[77]
kon_tncl = parameters[78]
Bmax_SLhighj0 = parameters[79]
Bmax_SLhighsl0 = parameters[80]
Bmax_SLlowj0 = parameters[81]
Bmax_SLlowsl0 = parameters[82]
koff_slh = parameters[83]
koff_sll = parameters[84]
kon_slh = parameters[85]
kon_sll = parameters[86]
Bmax_Csqn0 = parameters[87]
J_ca_juncsl = parameters[90]
J_ca_slmyo = parameters[91]
koff_csqn = parameters[92]
kon_csqn = parameters[93]
J_na_juncsl = parameters[96]
J_na_slmyo = parameters[97]
Nao = parameters[98]
Ko = parameters[99]
Cao = parameters[100]
Cli = parameters[101]
Clo = parameters[102]
Mgi = parameters[103]
Cmem = parameters[104]
Frdy = parameters[105]
R = parameters[106]
Temp = parameters[107]
stim_amplitude = parameters[108]
stim_duration = parameters[109]
stim_period = parameters[110]
stim_start = parameters[111]
# Init return args
if monitored is None:
monitored = np.zeros((176,), dtype=np.float_)
else:
assert isinstance(monitored, np.ndarray) and monitored.shape == (176,)
# Expressions for the Geometry component
monitored[0] = 1e-15 * np.pi * cellLength * (cellRadius * cellRadius)
monitored[1] = 0.65 * monitored[0]
monitored[2] = 0.035 * monitored[0]
monitored[3] = 0.02 * monitored[0]
monitored[4] = 0.0005390000000000001 * monitored[0]
monitored[5] = 40300 * np.pi * junctionLength * junctionRadius
monitored[6] = 2 * np.pi * cellLength * cellRadius
monitored[7] = 1 - Fjunc
monitored[8] = 1 - Fjunc_CaL
# Expressions for the Reversal potentials component
monitored[125] = Frdy / (R * Temp)
monitored[126] = np.log(Nao / Na_j) / monitored[125]
monitored[127] = np.log(Nao / Na_sl) / monitored[125]
monitored[128] = np.log(Ko / K_i) / monitored[125]
monitored[129] = np.log(Cao / Ca_j) / (2 * monitored[125])
monitored[130] = np.log(Cao / Ca_sl) / (2 * monitored[125])
monitored[131] = np.log(Cli / Clo) / monitored[125]
monitored[132] = -31 + Temp / 10
# Expressions for the I_Na component
monitored[9] = 1.0 / (
(1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
* (1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
)
monitored[10] = 0.1292 * np.exp(
-(
(2.9465894465894467 + 0.06435006435006435 * V_m)
* (2.9465894465894467 + 0.06435006435006435 * V_m)
)
) + 0.06487 * np.exp(
-(
(-0.09434663536776214 + 0.019561815336463225 * V_m)
* (-0.09434663536776214 + 0.019561815336463225 * V_m)
)
)
monitored[11] = np.where(
V_m >= -40, 0, 4.4312679295805147e-07 * np.exp(-0.14705882352941177 * V_m)
)
monitored[12] = np.where(
V_m >= -40,
0.77 / (0.13 + 0.049758141083938695 * np.exp(-0.0900900900900901 * V_m)),
310000.0 * np.exp(0.3485 * V_m) + 2.7 * np.exp(0.079 * V_m),
)
monitored[13] = 1.0 / (monitored[11] + monitored[12])
monitored[14] = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
monitored[15] = np.where(
V_m >= -40,
0,
(37.78 + V_m)
* (-25428.0 * np.exp(0.2444 * V_m) - 6.948e-06 * np.exp(-0.04391 * V_m))
/ (1 + 50262745825.95399 * np.exp(0.311 * V_m)),
)
monitored[16] = np.where(
V_m >= -40,
0.6 * np.exp(0.057 * V_m) / (1 + 0.040762203978366204 * np.exp(-0.1 * V_m)),
0.02424
* np.exp(-0.01052 * V_m)
/ (1 + 0.003960868339904256 * np.exp(-0.1378 * V_m)),
)
monitored[17] = 1.0 / (monitored[15] + monitored[16])
monitored[18] = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
monitored[137] = (-m + monitored[9]) / monitored[10]
monitored[138] = (-h + monitored[14]) / monitored[13]
monitored[139] = (-j + monitored[18]) / monitored[17]
monitored[19] = Fjunc * GNa * (m * m * m) * (-monitored[126] + V_m) * h * j
monitored[20] = GNa * (m * m * m) * (-monitored[127] + V_m) * h * j * monitored[7]
monitored[21] = monitored[19] + monitored[20]
# Expressions for the I_NaBK component
monitored[22] = Fjunc * GNaB * (-monitored[126] + V_m)
monitored[23] = GNaB * (-monitored[127] + V_m) * monitored[7]
monitored[24] = monitored[22] + monitored[23]
# Expressions for the I_NaK component
monitored[25] = -1 / 7 + np.exp(0.01485884101040119 * Nao) / 7
monitored[26] = 1.0 / (
1
+ 0.1245 * np.exp(-0.1 * V_m * monitored[125])
+ 0.0365 * np.exp(-V_m * monitored[125]) * monitored[25]
)
monitored[27] = (
Fjunc
* IbarNaK
* Ko
* monitored[26]
/ ((1 + np.power(KmNaip, 4) / np.power(Na_j, 4)) * (KmKo + Ko))
)
monitored[28] = (
IbarNaK
* Ko
* monitored[26]
* monitored[7]
/ ((1 + np.power(KmNaip, 4) / np.power(Na_sl, 4)) * (KmKo + Ko))
)
monitored[29] = monitored[27] + monitored[28]
# Expressions for the I_Kr component
monitored[30] = 0.4303314829119352 * GKr * np.sqrt(Ko)
monitored[31] = 1.0 / (1 + np.exp(-2 - V_m / 5))
monitored[32] = 230 / (1 + np.exp(2 + V_m / 20)) + 3300 / (
(1 + np.exp(-22 / 9 - V_m / 9)) * (1 + np.exp(11 / 9 + V_m / 9))
)
monitored[140] = (-x_kr + monitored[31]) / monitored[32]
monitored[33] = 1.0 / (1 + np.exp(37 / 12 + V_m / 24))
monitored[34] = (-monitored[128] + V_m) * monitored[30] * monitored[33] * x_kr
# Expressions for the I_Kp component
monitored[35] = 1.0 / (1 + 1786.4755653786237 * np.exp(-0.16722408026755853 * V_m))
monitored[36] = Fjunc * GKp * (-monitored[128] + V_m) * monitored[35]
monitored[37] = GKp * (-monitored[128] + V_m) * monitored[35] * monitored[7]
monitored[38] = monitored[36] + monitored[37]
# Expressions for the I_Ks component
monitored[39] = np.log((Ko + Nao * pNaK) / (pNaK * Na_i + K_i)) / monitored[125]
monitored[40] = GKs
monitored[41] = GKs
monitored[42] = 1.0 / (1 + 0.7659283383646487 * np.exp(-0.07017543859649122 * V_m))
monitored[43] = 990.1 / (1 + 0.8415404088681017 * np.exp(-0.0708215297450425 * V_m))
monitored[141] = (-x_ks + monitored[42]) / monitored[43]
monitored[44] = Fjunc * (x_ks * x_ks) * (-monitored[39] + V_m) * monitored[40]
monitored[45] = (
(x_ks * x_ks) * (-monitored[39] + V_m) * monitored[41] * monitored[7]
)
monitored[46] = monitored[44] + monitored[45]
# Expressions for the I_to component
monitored[47] = np.where(epi == 1, 0.12 * Gto, 0.28919999999999996 * Gto)
monitored[48] = np.where(epi == 1, 0.88 * Gto, 0.010799999999999999 * Gto)
monitored[49] = 1.0 / (1 + np.exp(19 / 13 - V_m / 13))
monitored[50] = 1.0 / (1 + 49.40244910553019 * np.exp(V_m / 5))
monitored[51] = 0.5 + 9 / (1 + np.exp(1 / 5 + V_m / 15))
monitored[52] = 30 + 800 / (1 + np.exp(6 + V_m / 10))
monitored[142] = (-x_to_s + monitored[49]) / monitored[51]
monitored[143] = (-y_to_s + monitored[50]) / monitored[52]
monitored[53] = (-monitored[128] + V_m) * monitored[47] * x_to_s * y_to_s
monitored[54] = 0.5 + 8.5 * np.exp(-((9 / 10 + V_m / 50) * (9 / 10 + V_m / 50)))
monitored[55] = 7 + 85 * np.exp(-((40 + V_m) * (40 + V_m)) / 220)
monitored[144] = (-x_to_f + monitored[49]) / monitored[54]
monitored[145] = (-y_to_f + monitored[50]) / monitored[55]
monitored[56] = (-monitored[128] + V_m) * monitored[48] * x_to_f * y_to_f
monitored[57] = monitored[53] + monitored[56]
# Expressions for the I_K1 component
monitored[133] = 1.02 / (
1 + 7.35454251046446e-07 * np.exp(0.2385 * V_m - 0.2385 * monitored[128])
)
monitored[134] = (
0.7626240065063081 * np.exp(0.08032 * V_m - 0.08032 * monitored[128])
+ 1.1534056351865558e-16 * np.exp(0.06175 * V_m - 0.06175 * monitored[128])
) / (1 + 0.08677229415769332 * np.exp(0.5143 * monitored[128] - 0.5143 * V_m))
monitored[135] = monitored[133] / (monitored[133] + monitored[134])
monitored[136] = (
0.4303314829119352
* GK1
* np.sqrt(Ko)
* (-monitored[128] + V_m)
* monitored[135]
)
# Expressions for the I_ClCa component
monitored[58] = Fjunc * GClCa * (-monitored[131] + V_m) / (1 + KdClCa / Ca_j)
monitored[59] = (
GClCa * (-monitored[131] + V_m) * monitored[7] / (1 + KdClCa / Ca_sl)
)
monitored[60] = monitored[58] + monitored[59]
monitored[61] = GClB * (-monitored[131] + V_m)
# Expressions for the I_Ca component
monitored[62] = 1.0 / (1 + np.exp(35 / 9 + V_m / 9)) + 0.6 / (
1 + np.exp(5 / 2 - V_m / 20)
)
monitored[63] = 1.0 / (1 + np.exp(-5 / 6 - V_m / 6))
monitored[64] = (
(1 - np.exp(-5 / 6 - V_m / 6))
* monitored[63]
/ (0.17500000000000002 + 0.035 * V_m)
)
monitored[65] = 1.0 / (
0.02
+ 0.0197
* np.exp(
-(
(0.48865000000000003 + 0.0337 * V_m)
* (0.48865000000000003 + 0.0337 * V_m)
)
)
)
monitored[146] = (-d + monitored[63]) / monitored[64]
monitored[147] = (-f + monitored[62]) / monitored[65]
monitored[148] = -0.0119 * f_Ca_Bj + 1.7 * (1 - f_Ca_Bj) * Ca_j
monitored[149] = -0.0119 * f_Ca_Bsl + 1.7 * (1 - f_Ca_Bsl) * Ca_sl
monitored[66] = 0
monitored[67] = 0
monitored[68] = (
4
* Frdy
* GCaL
* pCa
* (-0.341 * Cao + 0.341 * Ca_j * np.exp(2 * V_m * monitored[125]))
* V_m
* monitored[125]
/ (-1 + np.exp(2 * V_m * monitored[125]))
)
monitored[69] = (
4
* Frdy
* GCaL
* pCa
* (-0.341 * Cao + 0.341 * Ca_sl * np.exp(2 * V_m * monitored[125]))
* V_m
* monitored[125]
/ (-1 + np.exp(2 * V_m * monitored[125]))
)
monitored[70] = (
Frdy
* GCaL
* pK
* (-0.75 * Ko + 0.75 * K_i * np.exp(V_m * monitored[125]))
* V_m
* monitored[125]
/ (-1 + np.exp(V_m * monitored[125]))
)
monitored[71] = (
Frdy
* GCaL
* pNa
* (-0.75 * Nao + 0.75 * Na_j * np.exp(V_m * monitored[125]))
* V_m
* monitored[125]
/ (-1 + np.exp(V_m * monitored[125]))
)
monitored[72] = (
Frdy
* GCaL
* pNa
* (-0.75 * Nao + 0.75 * Na_sl * np.exp(V_m * monitored[125]))
* V_m
* monitored[125]
/ (-1 + np.exp(V_m * monitored[125]))
)
monitored[73] = (
0.45
* Fjunc_CaL
* np.power(Q10CaL, monitored[132])
* (1 + monitored[67] - f_Ca_Bj)
* d
* f
* monitored[68]
)
monitored[74] = (
0.45
* np.power(Q10CaL, monitored[132])
* (1 + monitored[66] - f_Ca_Bsl)
* d
* f
* monitored[69]
* monitored[8]
)
monitored[75] = monitored[73] + monitored[74]
monitored[76] = (
0.45
* np.power(Q10CaL, monitored[132])
* (
Fjunc_CaL * (1 + monitored[67] - f_Ca_Bj)
+ (1 + monitored[66] - f_Ca_Bsl) * monitored[8]
)
* d
* f
* monitored[70]
)
monitored[77] = (
0.45
* Fjunc_CaL
* np.power(Q10CaL, monitored[132])
* (1 + monitored[67] - f_Ca_Bj)
* d
* f
* monitored[71]
)
monitored[78] = (
0.45
* np.power(Q10CaL, monitored[132])
* (1 + monitored[66] - f_Ca_Bsl)
* d
* f
* monitored[72]
* monitored[8]
)
monitored[79] = monitored[77] + monitored[78]
monitored[80] = monitored[75] + monitored[76] + monitored[79]
# Expressions for the I_NCX component
monitored[81] = 1.0 / (1 + (Kdact * Kdact) / (Ca_j * Ca_j))
monitored[82] = 1.0 / (1 + (Kdact * Kdact) / (Ca_sl * Ca_sl))
monitored[83] = Cao * (Na_j * Na_j * Na_j) * np.exp(nu * V_m * monitored[125])
monitored[84] = Cao * (Na_sl * Na_sl * Na_sl) * np.exp(nu * V_m * monitored[125])
monitored[85] = (Nao * Nao * Nao) * Ca_j * np.exp((-1 + nu) * V_m * monitored[125])
monitored[86] = (
Cao * (Na_j * Na_j * Na_j)
+ KmCao * (Na_j * Na_j * Na_j)
+ (Nao * Nao * Nao) * Ca_j
+ KmCai
* (Nao * Nao * Nao)
* (1 + (Na_j * Na_j * Na_j) / (KmNai * KmNai * KmNai))
+ (KmNao * KmNao * KmNao) * (1 + Ca_j / KmCai) * Ca_j
)
monitored[87] = (Nao * Nao * Nao) * Ca_sl * np.exp((-1 + nu) * V_m * monitored[125])
monitored[88] = (
Cao * (Na_sl * Na_sl * Na_sl)
+ KmCao * (Na_sl * Na_sl * Na_sl)
+ (Nao * Nao * Nao) * Ca_sl
+ KmCai
* (Nao * Nao * Nao)
* (1 + (Na_sl * Na_sl * Na_sl) / (KmNai * KmNai * KmNai))
+ (KmNao * KmNao * KmNao) * (1 + Ca_sl / KmCai) * Ca_sl
)
monitored[89] = (
Fjunc
* IbarNCX
* np.power(Q10NCX, monitored[132])
* (-monitored[85] + monitored[83])
* monitored[81]
/ ((1 + ksat * np.exp((-1 + nu) * V_m * monitored[125])) * monitored[86])
)
monitored[90] = (
IbarNCX
* np.power(Q10NCX, monitored[132])
* (-monitored[87] + monitored[84])
* monitored[7]
* monitored[82]
/ ((1 + ksat * np.exp((-1 + nu) * V_m * monitored[125])) * monitored[88])
)
monitored[91] = monitored[89] + monitored[90]
# Expressions for the I_PCa component
monitored[92] = (
Fjunc
* IbarSLCaP
* np.power(Q10SLCaP, monitored[132])
* np.power(Ca_j, 1.6)
/ (np.power(KmPCa, 1.6) + np.power(Ca_j, 1.6))
)
monitored[93] = (
IbarSLCaP
* np.power(Q10SLCaP, monitored[132])
* np.power(Ca_sl, 1.6)
* monitored[7]
/ (np.power(KmPCa, 1.6) + np.power(Ca_sl, 1.6))
)
monitored[94] = monitored[92] + monitored[93]
# Expressions for the I_CaBK component
monitored[95] = Fjunc * GCaB * (-monitored[129] + V_m)
monitored[96] = GCaB * (-monitored[130] + V_m) * monitored[7]
monitored[97] = monitored[95] + monitored[96]
# Expressions for the SR Fluxes component
monitored[98] = MaxSR - (MaxSR - MinSR) / (1 + np.power(ec50SR / Ca_sr, 2.5))
monitored[99] = koCa / monitored[98]
monitored[100] = kiCa * monitored[98]
monitored[101] = 1 - Ry_Ri - Ry_Ro - Ry_Rr
monitored[150] = (
kim * monitored[101]
+ kom * Ry_Ro
- (Ca_j * Ca_j) * Ry_Rr * monitored[99]
- Ca_j * Ry_Rr * monitored[100]
)
monitored[151] = (
kim * Ry_Ri
- kom * Ry_Ro
+ (Ca_j * Ca_j) * Ry_Rr * monitored[99]
- Ca_j * Ry_Ro * monitored[100]
)
monitored[152] = (
-kim * Ry_Ri
- kom * Ry_Ri
+ (Ca_j * Ca_j) * monitored[101] * monitored[99]
+ Ca_j * Ry_Ro * monitored[100]
)
monitored[102] = ks * (-Ca_j + Ca_sr) * Ry_Ro
monitored[103] = (
Vmax_SRCaP
* np.power(Q10SRCaP, monitored[132])
* (np.power(Ca_i / Kmf, hillSRCaP) - np.power(Ca_sr / Kmr, hillSRCaP))
/ (1 + np.power(Ca_i / Kmf, hillSRCaP) + np.power(Ca_sr / Kmr, hillSRCaP))
)
monitored[104] = 5.348e-06 * Ca_sr - 5.348e-06 * Ca_j
# Expressions for the Na Buffers component
monitored[153] = -koff_na * Na_Bj + kon_na * (Bmax_Naj - Na_Bj) * Na_j
monitored[154] = -koff_na * Na_Bsl + kon_na * (Bmax_Nasl - Na_Bsl) * Na_sl
# Expressions for the Cytosolic Ca Buffers component
monitored[155] = -koff_tncl * Tn_CL + kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i
monitored[156] = (
-koff_tnchca * Tn_CHc + kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i
)
monitored[157] = -koff_tnchmg * Tn_CHm + Mgi * kon_tnchmg * (
Bmax_TnChigh - Tn_CHc - Tn_CHm
)
monitored[158] = -koff_cam * CaM + kon_cam * (Bmax_CaM - CaM) * Ca_i
monitored[159] = (
-koff_myoca * Myo_c + kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i
)
monitored[160] = -koff_myomg * Myo_m + Mgi * kon_myomg * (
Bmax_myosin - Myo_c - Myo_m
)
monitored[161] = -koff_sr * SRB + kon_sr * (Bmax_SR - SRB) * Ca_i
monitored[105] = (
-koff_cam * CaM
- koff_myoca * Myo_c
- koff_myomg * Myo_m
- koff_sr * SRB
- koff_tnchca * Tn_CHc
- koff_tnchmg * Tn_CHm
- koff_tncl * Tn_CL
+ Mgi * kon_myomg * (Bmax_myosin - Myo_c - Myo_m)
+ Mgi * kon_tnchmg * (Bmax_TnChigh - Tn_CHc - Tn_CHm)
+ kon_cam * (Bmax_CaM - CaM) * Ca_i
+ kon_myoca * (Bmax_myosin - Myo_c - Myo_m) * Ca_i
+ kon_sr * (Bmax_SR - SRB) * Ca_i
+ kon_tnchca * (Bmax_TnChigh - Tn_CHc - Tn_CHm) * Ca_i
+ kon_tncl * (Bmax_TnClow - Tn_CL) * Ca_i
)
# Expressions for the Junctional and SL Ca Buffers component
monitored[106] = Bmax_SLlowsl0 * monitored[1] / monitored[3]
monitored[107] = Bmax_SLlowj0 * monitored[1] / monitored[4]
monitored[108] = Bmax_SLhighsl0 * monitored[1] / monitored[3]
monitored[109] = Bmax_SLhighj0 * monitored[1] / monitored[4]
monitored[162] = -koff_sll * SLL_j + kon_sll * (-SLL_j + monitored[107]) * Ca_j
monitored[163] = -koff_sll * SLL_sl + kon_sll * (-SLL_sl + monitored[106]) * Ca_sl
monitored[164] = -koff_slh * SLH_j + kon_slh * (-SLH_j + monitored[109]) * Ca_j
monitored[165] = -koff_slh * SLH_sl + kon_slh * (-SLH_sl + monitored[108]) * Ca_sl
monitored[110] = (
-koff_slh * SLH_j
- koff_sll * SLL_j
+ kon_slh * (-SLH_j + monitored[109]) * Ca_j
+ kon_sll * (-SLL_j + monitored[107]) * Ca_j
)
monitored[111] = (
-koff_slh * SLH_sl
- koff_sll * SLL_sl
+ kon_slh * (-SLH_sl + monitored[108]) * Ca_sl
+ kon_sll * (-SLL_sl + monitored[106]) * Ca_sl
)
# Expressions for the SR Ca Concentrations component
monitored[112] = Bmax_Csqn0 * monitored[1] / monitored[2]
monitored[166] = -koff_csqn * Csqn_b + kon_csqn * (-Csqn_b + monitored[112]) * Ca_sr
monitored[167] = (
-monitored[102]
+ koff_csqn * Csqn_b
- kon_csqn * (-Csqn_b + monitored[112]) * Ca_sr
- monitored[104] * monitored[1] / monitored[2]
+ monitored[103]
)
# Expressions for the Na Concentrations component
monitored[113] = (
3 * monitored[27]
+ 3 * monitored[89]
+ monitored[19]
+ monitored[22]
+ monitored[77]
)
monitored[114] = (
3 * monitored[28]
+ 3 * monitored[90]
+ monitored[20]
+ monitored[23]
+ monitored[78]
)
monitored[115] = 3 * monitored[28] + 3 * monitored[90] + monitored[78]
monitored[116] = 3 * monitored[27] + 3 * monitored[89] + monitored[77]
monitored[168] = (
-monitored[153]
+ J_na_juncsl * (-Na_j + Na_sl) / monitored[4]
- Cmem * monitored[113] / (Frdy * monitored[4])
)
monitored[169] = (
-monitored[154]
+ J_na_juncsl * (-Na_sl + Na_j) / monitored[3]
+ J_na_slmyo * (-Na_sl + Na_i) / monitored[3]
- Cmem * monitored[114] / (Frdy * monitored[3])
)
monitored[170] = J_na_slmyo * (-Na_i + Na_sl) / monitored[1]
# Expressions for the K Concentration component
monitored[117] = (
-2 * monitored[29]
+ monitored[136]
+ monitored[34]
+ monitored[38]
+ monitored[46]
+ monitored[57]
+ monitored[76]
)
monitored[171] = 0
# Expressions for the Ca Concentrations component
monitored[118] = -2 * monitored[89] + monitored[73] + monitored[92] + monitored[95]
monitored[119] = -2 * monitored[90] + monitored[74] + monitored[93] + monitored[96]
monitored[172] = (
-monitored[110]
+ J_ca_juncsl * (-Ca_j + Ca_sl) / monitored[4]
+ monitored[102] * monitored[2] / monitored[4]
+ monitored[104] * monitored[1] / monitored[4]
- Cmem * monitored[118] / (2 * Frdy * monitored[4])
)
monitored[173] = (
-monitored[111]
+ J_ca_juncsl * (-Ca_sl + Ca_j) / monitored[3]
+ J_ca_slmyo * (-Ca_sl + Ca_i) / monitored[3]
- Cmem * monitored[119] / (2 * Frdy * monitored[3])
)
monitored[174] = (
-monitored[105]
+ J_ca_slmyo * (-Ca_i + Ca_sl) / monitored[1]
- monitored[103] * monitored[2] / monitored[1]
)
# Expressions for the Membrane potential component
monitored[120] = np.where(
np.logical_and(
t - stim_period * np.floor(t / stim_period) <= stim_duration + stim_start,
t - stim_period * np.floor(t / stim_period) >= stim_start,
),
-stim_amplitude,
0,
)
monitored[121] = monitored[113] + monitored[114]
monitored[122] = monitored[60] + monitored[61]
monitored[123] = monitored[118] + monitored[119]
monitored[124] = monitored[117] + monitored[121] + monitored[122] + monitored[123]
monitored[175] = -monitored[120] - monitored[124]
# Return results
return monitored
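# Added sketch (not part of the original generated model code): the FE routine below
# advances every state variable with the explicit (forward) Euler rule
# y_{n+1} = y_n + dt * dy/dt. A generic helper illustrating the same update for an
# arbitrary right-hand side f (the name _forward_euler_step is purely illustrative):
def _forward_euler_step(y, t, dt, f):
    # y: state vector, f: callable returning dy/dt evaluated at (y, t)
    return y + dt * f(y, t)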
def FE(states, t, dt, parameters):
"""
Compute a forward step using the explicit Euler algorithm for the Grandi ODE model
"""
# Assign states
assert len(states) == 39
(
m,
h,
j,
x_kr,
x_ks,
x_to_s,
y_to_s,
x_to_f,
y_to_f,
d,
f,
f_Ca_Bj,
f_Ca_Bsl,
Ry_Rr,
Ry_Ro,
Ry_Ri,
Na_Bj,
Na_Bsl,
Tn_CL,
Tn_CHc,
Tn_CHm,
CaM,
Myo_c,
Myo_m,
SRB,
SLL_j,
SLL_sl,
SLH_j,
SLH_sl,
Csqn_b,
Ca_sr,
Na_j,
Na_sl,
Na_i,
K_i,
Ca_j,
Ca_sl,
Ca_i,
V_m,
) = states
# Assign parameters
assert len(parameters) == 112
Fjunc = parameters[0]
Fjunc_CaL = parameters[1]
cellLength = parameters[2]
cellRadius = parameters[3]
GNa = parameters[8]
GNaB = parameters[9]
IbarNaK = parameters[10]
KmKo = parameters[11]
KmNaip = parameters[12]
GKr = parameters[15]
GKp = parameters[16]
GKs = parameters[17]
pNaK = parameters[18]
GK1 = parameters[19]
Gto = parameters[20]
epi = parameters[21]
GClB = parameters[22]
GClCa = parameters[23]
KdClCa = parameters[24]
GCaL = parameters[25]
Q10CaL = parameters[26]
pCa = parameters[27]
pK = parameters[28]
pNa = parameters[29]
IbarNCX = parameters[30]
Kdact = parameters[31]
KmCai = parameters[32]
KmCao = parameters[33]
KmNai = parameters[34]
KmNao = parameters[35]
Q10NCX = parameters[36]
ksat = parameters[37]
nu = parameters[38]
IbarSLCaP = parameters[39]
KmPCa = parameters[40]
Q10SLCaP = parameters[41]
GCaB = parameters[42]
Kmf = parameters[43]
Kmr = parameters[44]
MaxSR = parameters[45]
MinSR = parameters[46]
Q10SRCaP = parameters[47]
Vmax_SRCaP = parameters[48]
ec50SR = parameters[49]
hillSRCaP = parameters[50]
kiCa = parameters[51]
kim = parameters[52]
koCa = parameters[53]
kom = parameters[54]
ks = parameters[55]
Bmax_Naj = parameters[56]
Bmax_Nasl = parameters[57]
koff_na = parameters[58]
kon_na = parameters[59]
Bmax_CaM = parameters[60]
Bmax_SR = parameters[61]
Bmax_TnChigh = parameters[62]
Bmax_TnClow = parameters[63]
Bmax_myosin = parameters[64]
koff_cam = parameters[65]
koff_myoca = parameters[66]
koff_myomg = parameters[67]
koff_sr = parameters[68]
koff_tnchca = parameters[69]
koff_tnchmg = parameters[70]
koff_tncl = parameters[71]
kon_cam = parameters[72]
kon_myoca = parameters[73]
kon_myomg = parameters[74]
kon_sr = parameters[75]
kon_tnchca = parameters[76]
kon_tnchmg = parameters[77]
kon_tncl = parameters[78]
Bmax_SLhighj0 = parameters[79]
Bmax_SLhighsl0 = parameters[80]
Bmax_SLlowj0 = parameters[81]
Bmax_SLlowsl0 = parameters[82]
koff_slh = parameters[83]
koff_sll = parameters[84]
kon_slh = parameters[85]
kon_sll = parameters[86]
Bmax_Csqn0 = parameters[87]
J_ca_juncsl = parameters[90]
J_ca_slmyo = parameters[91]
koff_csqn = parameters[92]
kon_csqn = parameters[93]
J_na_juncsl = parameters[96]
J_na_slmyo = parameters[97]
Nao = parameters[98]
Ko = parameters[99]
Cao = parameters[100]
Cli = parameters[101]
Clo = parameters[102]
Mgi = parameters[103]
Cmem = parameters[104]
Frdy = parameters[105]
R = parameters[106]
Temp = parameters[107]
stim_amplitude = parameters[108]
stim_duration = parameters[109]
stim_period = parameters[110]
stim_start = parameters[111]
# Expressions for the Geometry component
Vcell = 1e-15 * np.pi * cellLength * (cellRadius * cellRadius)
Vmyo = 0.65 * Vcell
Vsr = 0.035 * Vcell
Vsl = 0.02 * Vcell
Vjunc = 0.0005390000000000001 * Vcell
Fsl = 1 - Fjunc
Fsl_CaL = 1 - Fjunc_CaL
# Expressions for the Reversal potentials component
FoRT = Frdy / (R * Temp)
ena_junc = np.log(Nao / Na_j) / FoRT
ena_sl = np.log(Nao / Na_sl) / FoRT
ek = np.log(Ko / K_i) / FoRT
eca_junc = np.log(Cao / Ca_j) / (2 * FoRT)
eca_sl = np.log(Cao / Ca_sl) / (2 * FoRT)
ecl = np.log(Cli / Clo) / FoRT
Qpow = -31 + Temp / 10
# Expressions for the I_Na component
mss = 1.0 / (
(1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
* (1 + 0.0018422115811651339 * np.exp(-0.1107419712070875 * V_m))
)
taum = 0.1292 * np.exp(
-(
(2.9465894465894467 + 0.06435006435006435 * V_m)
* (2.9465894465894467 + 0.06435006435006435 * V_m)
)
) + 0.06487 * np.exp(
-(
(-0.09434663536776214 + 0.019561815336463225 * V_m)
* (-0.09434663536776214 + 0.019561815336463225 * V_m)
)
)
ah = np.where(
V_m >= -40, 0, 4.4312679295805147e-07 * np.exp(-0.14705882352941177 * V_m)
)
bh = np.where(
V_m >= -40,
0.77 / (0.13 + 0.049758141083938695 * np.exp(-0.0900900900900901 * V_m)),
310000.0 * np.exp(0.3485 * V_m) + 2.7 * np.exp(0.079 * V_m),
)
tauh = 1.0 / (ah + bh)
hss = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
aj = np.where(
V_m >= -40,
0,
(37.78 + V_m)
* (-25428.0 * np.exp(0.2444 * V_m) - 6.948e-06 * np.exp(-0.04391 * V_m))
/ (1 + 50262745825.95399 * np.exp(0.311 * V_m)),
)
bj = np.where(
V_m >= -40,
0.6 * np.exp(0.057 * V_m) / (1 + 0.040762203978366204 * np.exp(-0.1 * V_m)),
0.02424
* np.exp(-0.01052 * V_m)
/ (1 + 0.003960868339904256 * np.exp(-0.1378 * V_m)),
)
tauj = 1.0 / (aj + bj)
jss = 1.0 / (
(1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
* (1 + 15212.593285654404 * np.exp(0.13458950201884254 * V_m))
)
dm_dt = (-m + mss) / taum
states[0] = dt * dm_dt + m
dh_dt = (-h + hss) / tauh
states[1] = dt * dh_dt + h
dj_dt = (-j + jss) / tauj
states[2] = dt * dj_dt + j
I_Na_junc = Fjunc * GNa * (m * m * m) * (-ena_junc + V_m) * h * j
I_Na_sl = GNa * (m * m * m) * (-ena_sl + V_m) * Fsl * h * j
# Expressions for the I_NaBK component
I_nabk_junc = Fjunc * GNaB * (-ena_junc + V_m)
I_nabk_sl = GNaB * (-ena_sl + V_m) * Fsl
# Expressions for the I_NaK component
sigma = -1 / 7 + np.exp(0.01485884101040119 * Nao) / 7
fnak = 1.0 / (
1 + 0.1245 * np.exp(-0.1 * FoRT * V_m) + 0.0365 * np.exp(-FoRT * V_m) * sigma
)
I_nak_junc = (
Fjunc
* IbarNaK
* Ko
* fnak
/ ((1 + np.power(KmNaip, 4) / np.power(Na_j, 4)) * (KmKo + Ko))
)
I_nak_sl = (
IbarNaK
* Ko
* Fsl
* fnak
/ ((1 + np.power(KmNaip, 4) / np.power(Na_sl, 4)) * (KmKo + Ko))
)
I_nak = I_nak_junc + I_nak_sl
# Expressions for the I_Kr component
gkr = 0.4303314829119352 * GKr * np.sqrt(Ko)
xrss = 1.0 / (1 + np.exp(-2 - V_m / 5))
tauxr = 230 / (1 + np.exp(2 + V_m / 20)) + 3300 / (
(1 + np.exp(-22 / 9 - V_m / 9)) * (1 + np.exp(11 / 9 + V_m / 9))
)
dx_kr_dt = (-x_kr + xrss) / tauxr
states[3] = dt * dx_kr_dt + x_kr
rkr = 1.0 / (1 + np.exp(37 / 12 + V_m / 24))
I_kr = (-ek + V_m) * gkr * rkr * x_kr
# Expressions for the I_Kp component
kp_kp = 1.0 / (1 + 1786.4755653786237 * np.exp(-0.16722408026755853 * V_m))
I_kp_junc = Fjunc * GKp * (-ek + V_m) * kp_kp
I_kp_sl = GKp * (-ek + V_m) * Fsl * kp_kp
I_kp = I_kp_junc + I_kp_sl
# Expressions for the I_Ks component
eks = np.log((Ko + Nao * pNaK) / (pNaK * Na_i + K_i)) / FoRT
gks_junc = GKs
gks_sl = GKs
xsss = 1.0 / (1 + 0.7659283383646487 * np.exp(-0.07017543859649122 * V_m))
tauxs = 990.1 / (1 + 0.8415404088681017 *
np.exp(-0.0708215297450425 * V_m)
numpy.exp
# some structure based on https://github.com/wpm/tfrnnlm/blob/master/tfrnnlm/rnn.py
#https://github.com/tensorflow/tensorflow/pull/2580/files#diff-083dd112b4600ecbaf63b2070951aad8
from __future__ import print_function
import ast
import time
from datetime import timedelta
import inspect
import math
import json
import os.path
import sys
import shutil
import heapq
import pygtrie as trie
from collections import deque
from itertools import chain
from operator import itemgetter
import numpy as np
import tensorflow as tf
import reader
# BPE imports
# import codecs
# from subword_nmt.apply_bpe import BPE, read_vocabulary
flags = tf.flags
# Path options
flags.DEFINE_string("data_path", None, "Path to folder containing training/test data.")
flags.DEFINE_string("train_dir", None, "Output directory for saving the model.")
# Scenario options. Training is default so, no option for it.
flags.DEFINE_boolean("predict", False, "Set to True for computing predictability.")
flags.DEFINE_boolean("test", False, "Set to True for computing test perplexity.")
flags.DEFINE_boolean("dynamic_test", False, "Set to True for performing dynamic train-testing perplexity calculation (only one train epoch).")
flags.DEFINE_boolean("maintenance_test", False, "Set to True for performing maintenance train-testing perplexity simulation (only one train epoch).")
flags.DEFINE_boolean("completion", False, "Set to True to run code completion experiment.")
flags.DEFINE_boolean("maintenance_completion", False, "Set to True to run maintenance code completion experiment")
flags.DEFINE_boolean("dynamic", False, "Set to True to run dynamic code completion experiment.")
# Filename/path options
flags.DEFINE_string("train_filename", None, "The train file on which to train.")
flags.DEFINE_string("validation_filename", None, "The test file on which to run validation.")
flags.DEFINE_string("test_filename", None, "The test file on which to compute perplexity or predictability.")
flags.DEFINE_string("test_proj_filename", None, "The file that contains the test project name for each test instance.")
flags.DEFINE_string("identifier_map", None, "The file that contains information about which tokens are identifiers.")
flags.DEFINE_boolean("cache_ids", False, "Set to True to cache project identifiers during completion.")
# flags.DEFINE_string("BPE", None, "The file containing the BPE encoding.")
flags.DEFINE_string("subtoken_map", None, "Contains the mapping from heuristic subtokens to tokens.")
# flags.DEFINE_string("output_probs_file", "predictionProbabilities.txt", "The file to store output probabilities.")
# Network architecture/hyper-parameter options
flags.DEFINE_integer("num_layers", 1, "Number of Layers. Using a single layer is advised.")
flags.DEFINE_integer("hidden_size", 512, "Hidden size. Number of dimensions for the embeddings and RNN hidden state.")
flags.DEFINE_float("keep_prob", 0.5, "Keep probability = 1.0 - dropout probability.")
flags.DEFINE_integer("vocab_size", 25000, "Vocabulary size")
flags.DEFINE_boolean("gru", False, "Use a GRU cell. Must be set to True to use a GRU, otherwise an LSTM will be used.")
flags.DEFINE_integer("steps_per_checkpoint", 5000, "Number of steps for printing stats (validation is run) and checkpointing the model. Must be increased by 'a lot' for large training corpora.")
flags.DEFINE_integer("max_epoch", 30, "Max number training epochs to run.")
flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_integer("test_batch_size", 10, "Batch size during predictability test")
flags.DEFINE_integer("num_steps", 200, "Sequence length.")
flags.DEFINE_float("init_scale", 0.05, "Initialization scale.")
flags.DEFINE_float("learning_rate", 0.1, "Learning rate")
flags.DEFINE_float("max_grad_norm", 5.0, "Clip gradients to this norm")
flags.DEFINE_float("lr_decay", 0.5, "Learning rate decay. Default is 0.5 which halves the learning rate.")
# n-gram identifier cache options
flags.DEFINE_float("file_cache_weight", 0.2, "Weight of the file cache.")
flags.DEFINE_integer("cache_order", 6, "n-gram order for the identifier cache")
flags.DEFINE_integer("thresh", 0, "Threshold for vocabulary inclusion.")
flags.DEFINE_boolean("unk", True, "use -UNK- token to model OOV.")
flags.DEFINE_boolean("bidirectional", False, "Bidirectional model.")
flags.DEFINE_boolean("word_level_perplexity", False, "Convert to word level perplexity.")
flags.DEFINE_boolean("cross_entropy", False, "Print cross-entropy for validation/test instead of perplexity.")
flags.DEFINE_boolean("token_model", False, "Whether it is a token level model.")
flags.DEFINE_boolean("completion_unk_wrong", False, "Whether completing -UNK- should count towards MRR. Set to "
"True for the Allamanis et al. heuristic subtoken model.")
flags.DEFINE_boolean("verbose", False, "Verbose for completion.")
FLAGS = flags.FLAGS
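# Added usage sketch (hypothetical script name and paths, not part of the original file):
# a typical training run sets the data/output locations and file names, e.g.
#   python code_nlm.py --data_path ./data --train_dir ./model_out \
#       --train_filename train.txt --validation_filename valid.txt
# while boolean flags such as --test, --predict or --completion switch the script into
# the corresponding evaluation scenarios defined above.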
def data_type():
"""
Returns the TF floating point type used for operations.
:return: The data type used (tf.float32)
"""
return tf.float32
def get_gpu_config():
gconfig = tf.ConfigProto()
gconfig.gpu_options.per_process_gpu_memory_fraction = 0.975 # Don't take 100% of the memory
gconfig.allow_soft_placement = True # Fall back to another device when an op has no kernel on the requested one
gconfig.gpu_options.allow_growth = True # Take more memory when necessary
return gconfig
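# Added sketch (not in the original file): the ConfigProto returned by get_gpu_config()
# is intended to be passed when the TF session is created, e.g.
def _example_session():
    # Purely illustrative helper; assumes the graph has already been built.
    return tf.Session(config=get_gpu_config())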
class NLM(object):
def __init__(self, config):
"""
Initializes the neural language model based on the specified configuration.
:param config: The configuration to be used for initialization.
"""
self.num_layers = config.num_layers
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.hidden_size = hidden_size = config.hidden_size
self.vocab_size = vocab_size = config.vocab_size
#self.predictions_file = config.output_probs_file
self.global_step = tf.Variable(0, trainable=False)
with tf.name_scope("Parameters"):
# Sets dropout and learning rate.
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")
with tf.name_scope("Input"):
self.inputd = tf.placeholder(tf.int64, shape=(batch_size, None), name="inputd")
self.targets = tf.placeholder(tf.int64, shape=(batch_size, None), name="targets")
self.target_weights = tf.placeholder(tf.float32, shape=(batch_size, None), name="tgtweights")
with tf.device("/cpu:0"):
with tf.name_scope("Embedding"):
# Initialize embeddings on the CPU and add dropout layer after embeddings.
self.embedding = tf.Variable(tf.random_uniform((vocab_size, hidden_size), -config.init_scale, config.init_scale), dtype=data_type(), name="embedding")
self.embedded_inputds = tf.nn.embedding_lookup(self.embedding, self.inputd, name="embedded_inputds")
self.embedded_inputds = tf.nn.dropout(self.embedded_inputds, self.keep_probability)
with tf.name_scope("RNN"):
# Definitions for the different cells that can be used. Either lstm or GRU which will be wrapped with dropout.
def lstm_cell():
if 'reuse' in inspect.getargspec(tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(hidden_size, forget_bias=0.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(hidden_size, forget_bias=0.0, state_is_tuple=True)
def gru_cell():
if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
return tf.contrib.rnn.GRUCell(hidden_size, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.GRUCell(hidden_size)
def drop_cell():
if FLAGS.gru:
return tf.contrib.rnn.DropoutWrapper(gru_cell(), output_keep_prob=self.keep_probability)
else:
return tf.contrib.rnn.DropoutWrapper(lstm_cell(), output_keep_prob=self.keep_probability)
# Allows multiple layers to be used. Not advised though.
rnn_layers = tf.contrib.rnn.MultiRNNCell([drop_cell() for _ in range(self.num_layers)], state_is_tuple=True)
# Initialize the state to zero.
self.reset_state = rnn_layers.zero_state(batch_size, data_type())
self.outputs, self.next_state = tf.nn.dynamic_rnn(rnn_layers, self.embedded_inputds, time_major=False,
initial_state=self.reset_state)
with tf.name_scope("Cost"):
# Output and loss function calculation
self.output = tf.reshape(tf.concat(axis=0, values=self.outputs), [-1, hidden_size])
self.softmax_w = tf.get_variable("softmax_w", [hidden_size, vocab_size], dtype=data_type())
self.softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
self.logits = tf.matmul(self.output, self.softmax_w) + self.softmax_b
self.loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[self.logits], [tf.reshape(self.targets, [-1])], [tf.reshape(self.target_weights, [-1])])
self.cost = tf.div(tf.reduce_sum(self.loss), batch_size, name="cost")
self.final_state = self.next_state
self.norm_logits = tf.nn.softmax(self.logits)
with tf.name_scope("Train"):
self.iteration = tf.Variable(0, dtype=data_type(), name="iteration", trainable=False)
tvars = tf.trainable_variables()
self.gradients, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
config.max_grad_norm, name="clip_gradients")
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_step = optimizer.apply_gradients(zip(self.gradients, tvars), name="train_step",
global_step=self.global_step)
self.validation_perplexity = tf.Variable(dtype=data_type(), initial_value=float("inf"),
trainable=False, name="validation_perplexity")
tf.summary.scalar(self.validation_perplexity.op.name, self.validation_perplexity)
self.training_epoch_perplexity = tf.Variable(dtype=data_type(), initial_value=float("inf"),
trainable=False, name="training_epoch_perplexity")
tf.summary.scalar(self.training_epoch_perplexity.op.name, self.training_epoch_perplexity)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
self.initialize = tf.initialize_all_variables()
self.summary = tf.summary.merge_all()
def get_parameter_count(self, debug=False):
"""
Counts the number of parameters required by the model.
:param debug: Whether debugging information should be printed.
:return: Returns the number of parameters required for the model.
"""
params = tf.trainable_variables()
total_parameters = 0
for variable in params:
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
if debug:
print(variable)
print(str(shape) + "\t" + str(len(shape)) + "\t" + str(variable_parameters))
total_parameters += variable_parameters
return total_parameters
@property
def reset_state(self):
return self._reset_state
@reset_state.setter
def reset_state(self, x):
self._reset_state = x
@property
def cost(self):
return self._cost
@cost.setter
def cost(self, y):
self._cost = y
@property
def final_state(self):
return self._final_state
@final_state.setter
def final_state(self, z):
self._final_state = z
@property
def learning_rate(self):
return self._lr
@learning_rate.setter
def learning_rate(self, l):
self._lr = l
@property
def input(self):
return self.data
def train(self, session, config, train_data, exit_criteria, valid_data, summary_dir):
"""
Trains the NLM with the specified configuration, training, and validation data.
Training is terminated when the specified criteria have been satisfied.
:param session: The TF session in which operations should be run.
:param config: The configuration to be used for the model.
:param train_data: The dataset instance to use for training.
:param exit_criteria: The training termination criteria.
:param valid_data: The dataset instance to use for validation.
:param summary_dir: Directory in which summary information will be stored.
"""
summary_writer = tf.summary.FileWriter(summary_dir, session.graph)
previous_valid_log_ppx = []
nglobal_steps = 0
epoch = 1
new_learning_rate = config.learning_rate
state = session.run(self.reset_state)
try:
while True:
epoch_log_perp_unnorm = epoch_total_weights = 0.0
print("Epoch %d Learning rate %0.3f" % (epoch, new_learning_rate))
epoch_start_time = time.time()
# Runs each training step. A step is processing a minibatch of context-target pairs.
for step, (context, target, target_weights) in enumerate(
train_data.batch_producer_memory_efficient(self.batch_size, self.num_steps)):
# Every steps_per_checkpoint steps run validation and print perplexity/entropy.
if step % FLAGS.steps_per_checkpoint == 0:
print('Train steps:', step)
if step >0:
validation_perplexity = self.test(session, config, valid_data)
validation_log_perplexity = math.log(validation_perplexity)
print("global_steps %d learning_rate %.4f valid_perplexity %.2f" % (nglobal_steps, new_learning_rate, validation_perplexity))
sys.stdout.flush()
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else: # LSTM cell
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
# Run the actual training step.
_, cost, state, loss, iteration = session.run([self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
nglobal_steps += 1
# Add step loss and weights to the total.
epoch_log_perp_unnorm += np.sum(loss)
epoch_total_weights += np.sum(sum(target_weights))
# epoch_total_weights += np.sum(sum(sub_target_weights))
train_log_perplexity = epoch_log_perp_unnorm / epoch_total_weights
train_perplexity = math.exp(train_log_perplexity) if train_log_perplexity < 300 else float("inf")
validation_perplexity = self.test(session, config, valid_data)
validation_log_perplexity = math.log(validation_perplexity)
# Checkpoint and save the model.
checkpoint_path = os.path.join(FLAGS.train_dir, "lm.ckpt.epoch" + str(epoch))
self.saver.save(session, checkpoint_path, global_step=self.global_step)
train_perplexity_summary = tf.Summary()
valid_perplexity_summary = tf.Summary()
train_perplexity_summary.value.add(tag="train_log_ppx", simple_value=train_log_perplexity)
train_perplexity_summary.value.add(tag="train_ppx", simple_value=train_perplexity)
summary_writer.add_summary(train_perplexity_summary, nglobal_steps)
valid_perplexity_summary.value.add(tag="valid_log_ppx", simple_value=validation_log_perplexity)
valid_perplexity_summary.value.add(tag="valid_ppx", simple_value=validation_perplexity)
summary_writer.add_summary(valid_perplexity_summary, nglobal_steps)
# Convert epoch time in minutes and print info on screen.
epoch_time = (time.time() - epoch_start_time) * 1.0 / 60
print("END EPOCH %d global_steps %d learning_rate %.4f time(mins) %.4f train_perplexity %.2f valid_perplexity %.2f" %
(epoch, nglobal_steps, new_learning_rate, epoch_time, train_perplexity, validation_perplexity))
sys.stdout.flush()
if exit_criteria.max_epochs is not None and epoch > exit_criteria.max_epochs:
raise StopTrainingException()
# Decrease learning rate if valid ppx does not decrease
if len(previous_valid_log_ppx) > 1 and validation_log_perplexity >= previous_valid_log_ppx[-1]:
new_learning_rate = new_learning_rate * config.lr_decay
# If validation perplexity has not improved over the last 5 epochs, stop training
if new_learning_rate == 0.0 or (len(previous_valid_log_ppx) > 4 and validation_log_perplexity > max(previous_valid_log_ppx[-5:])):
raise StopTrainingException()
previous_valid_log_ppx.append(validation_log_perplexity)
epoch += 1
except (StopTrainingException, KeyboardInterrupt):
print("Finished training ........")
def test(self, session, config, test_data, ignore_padding=False):
"""
Tests the NLM with the specified configuration and test data.
:param session: The TF session in which operations should be run.
:param config: The configuration to be used for the model.
:param test_data: The dataset instance on which to compute perplexity/entropy.
:param ignore_padding: If True, losses on padded positions (target weight 0) are excluded.
:return: The perplexity on test_data (or cross-entropy if the cross_entropy flag is set).
"""
log_perp_unnorm, total_size = 0.0, 0.0
batch_number = -1
state = session.run(self.reset_state)
for step, (context, target, target_weights, sub_target_weights) in enumerate(
test_data.batch_producer(self.batch_size, self.num_steps, True)):
batch_number += 1
feed_dict = {
self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.keep_probability: 1.0 # No dropout should be used for the test!
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else:
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
# norm_logits, loss, cost, state = session.run([self.norm_logits, self.loss, self.cost, self.next_state], feed_dict)
loss, cost, state = session.run([self.loss, self.cost, self.next_state], feed_dict)
if FLAGS.token_model:
targets = [t for tar in target for t in tar]
voc_size = 10500000
loss = [-math.log(1.0/voc_size, 2) if t == self.train_vocab["-UNK-"] else l
for l,t in zip(loss, targets) ]
log_perp_unnorm += np.sum(loss)
if FLAGS.word_level_perplexity:
total_size += np.sum(sum(sub_target_weights))
else:
total_size += np.sum(sum(target_weights))
if ignore_padding:
paddings = 0
for tok_loss, weight in zip(loss, chain.from_iterable(zip(*target_weights))):
if weight == 0:
log_perp_unnorm -= tok_loss
paddings += 1
total_size += 1e-12
log_ppx = log_perp_unnorm / total_size
ppx = math.exp(float(log_ppx)) if log_ppx < 300 else float("inf")
if FLAGS.cross_entropy:
return log_ppx
return ppx
def dynamic_train_test_file(self, test_lines, train_vocab, train_vocab_rev, test_projects, config, output_path, session):
"""
Tests the NLM on the specified test dataset but also updates its parameters by training on each file
after computing its per token entropy.
The model is restored back to the global model after testing has been completed for a project.
This procedure adapts the model to each project resulting in better performance but also prevents the model from
forgetting information contained in the global model after testing on a plethora of projects.
The per token entropy/perplexity will be calculated and shown on the screen.
This mode should always be run with batch_size=1.
:param test_lines: A list containing each test instance as a separate entry.
:param train_vocab: The word to id mapping.
:param train_vocab_rev: The id to word mapping.
:param test_projects: Names of the projects for each test file instance.
:param config: The configuration to be used for the model.
:param output_path:
:param session: The TF session in which operations should be run.
:return: Average loss per file and not per token.
"""
config.batch_size = 1
ctr = 0
nglobal_steps = 0
state = None
new_learning_rate = config.learning_rate
losses = []
lengths = []
last_test_project = None
for test_line, test_project in zip(test_lines, test_projects):
ctr += 1
if test_project != last_test_project and last_test_project is not None:
# New test project so restore the model back to the global one.
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
self.saver.restore(session, ckpt.model_checkpoint_path)
last_test_project = test_project
# Get the ids for this test instance and calculate entropy/perplexity.
test_line = test_line.replace("\n", (" %s" % "-eod-"))
ids = [train_vocab[word] if word in train_vocab else train_vocab['-UNK-'] for word in test_line.split(' ')]
test_dataset = reader.dataset(ids, train_vocab, train_vocab_rev)
test_loss = self.test(session, config, test_dataset, True)
if FLAGS.cross_entropy:
print('line cross_entropy:', test_loss)
else:
print('line perplexity:', test_loss)
sys.stdout.flush()
losses.append(test_loss)
# Train.
state = session.run(self.reset_state)
try:
epoch_log_perp_unnorm = epoch_total_weights = 0.0
epoch_sub_total_weights = 0.0
# Train on each batch to adapt the model to the new information available.
for step, (context, target, target_weights, sub_target_weights) in enumerate(
test_dataset.batch_producer(self.batch_size, self.num_steps)):
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else: # LSTM
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
_, cost, state, loss, iteration = session.run([self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
nglobal_steps += 1
epoch_log_perp_unnorm += np.sum(loss)
epoch_total_weights += np.sum(sum(target_weights))
epoch_sub_total_weights += np.sum(sum(sub_target_weights))
train_log_perplexity = epoch_log_perp_unnorm / epoch_total_weights
train_perplexity = math.exp(train_log_perplexity) if train_log_perplexity < 300 else float("inf")
print(train_perplexity)
lengths.append(int(round(epoch_sub_total_weights, 0)))
except (StopTrainingException, KeyboardInterrupt):
print("Finished training ........")
total_len = float(sum(lengths))
len_weights = [length / total_len for length in lengths]
if FLAGS.cross_entropy:
print('Per token entropy:', sum([perp * weight for perp, weight in zip(losses, len_weights)]))
else:
print('Per token perplexity:', sum([perp * weight for perp, weight in zip(losses, len_weights)]))
return sum(losses)/ctr
def dynamic_train_test(self, test_lines, train_vocab, train_vocab_rev, test_projects, config, output_path, session):
"""
Tests the NLM on the specified test dataset but also updates its parameters by training on each batch
after computing its per token entropy first.
The model is restored back to the global model after testing has been completed for a project.
This procedure adapts the model to each project resulting in better performance but also prevents the model from
forgetting information contained in the global model after testing on a plethora of projects.
The per token entropy/perplexity will be calculated and shown on the screen.
This mode should always be run with batch_size=1.
:param test_lines: A list containing each test instance as a separate entry.
:param train_vocab: The word to id mapping.
:param train_vocab_rev: The id to word mapping.
:param test_projects: Names of the projects for each test file instance.
:param config: The configuration to be used for the model.
:param output_path:
:param session: The TF session in which operations should be run.
:return: Average loss per file and not per token.
"""
config.batch_size = 1
ctr = 0
nglobal_steps = 0
state = None
new_learning_rate = config.learning_rate
losses = []
lengths = []
last_test_project = None
for test_line, test_project in zip(test_lines, test_projects):
ctr += 1
if ctr % 100 == 0:
print("\t %d lines" % ctr)
total_len = float(sum(lengths))
len_weights = [length / total_len for length in lengths]
print('Current per-token average:', sum([perp * weight for perp, weight in zip(losses, len_weights)]))
print(sum(losses) / ctr)
file_log_perp_unnorm = file_total_weights = 0.0
file_sub_total_weights = 0.0
if test_project != last_test_project and last_test_project is not None:
# New test project so restore the model
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
self.saver.restore(session, ckpt.model_checkpoint_path)
last_test_project = test_project
test_line = test_line.replace("\n", (" %s" % "-eod-"))
ids = [train_vocab[word] if word in train_vocab else train_vocab['-UNK-'] for word in test_line.split(' ')]
test_dataset = reader.dataset(ids, train_vocab, train_vocab_rev)
# Test, Train
state = session.run(self.reset_state)
try:
epoch_log_perp_unnorm = epoch_total_weights = 0.0
epoch_sub_total_weights = 0.0
for step, (context, target, target_weights, sub_target_weights) in enumerate(
test_dataset.batch_producer(self.batch_size, self.num_steps)):
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.keep_probability: 1.0
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else: # LSTM
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
loss, cost, state = session.run([self.loss, self.cost, self.next_state], feed_dict)
if FLAGS.token_model:
targets = [t for tar in target for t in tar]
loss = [-math.log(1.0/len(self.train_vocab), 2) if t == self.train_vocab["-UNK-"] else l
for l,t in zip(loss, targets) ]
file_log_perp_unnorm += np.sum(loss)
file_total_weights += np.sum(sum(target_weights))
file_sub_total_weights += np.sum(sum(sub_target_weights))
if True:
for tok_loss, weight in zip(loss, chain.from_iterable(zip(*target_weights))):
if weight == 0:
file_log_perp_unnorm -= tok_loss
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else: # LSTM
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
_, cost, state, loss, iteration = session.run([self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
nglobal_steps += 1
epoch_log_perp_unnorm += np.sum(loss)
epoch_total_weights += np.sum(sum(target_weights))
epoch_sub_total_weights += np.sum(sum(sub_target_weights))
train_log_perplexity = epoch_log_perp_unnorm / epoch_total_weights
train_perplexity = math.exp(train_log_perplexity) if train_log_perplexity < 300 else float("inf")
print(train_perplexity)
lengths.append(int(round(epoch_sub_total_weights, 0)))
if FLAGS.word_level_perplexity:
test_loss = file_log_perp_unnorm / file_sub_total_weights
else:
test_loss = file_log_perp_unnorm / file_total_weights
if FLAGS.cross_entropy:
print('line cross_entropy:', test_loss)
else:
print('line perplexity:', test_loss)
sys.stdout.flush()
losses.append(test_loss)
except (StopTrainingException, KeyboardInterrupt):
print("Finished training ........")
total_len = float(sum(lengths))
len_weights = [length / total_len for length in lengths]
if FLAGS.cross_entropy:
print('Per token entropy:', sum([perp * weight for perp, weight in zip(losses, len_weights)]))
else:
print('Per token perplexity:', sum([perp * weight for perp, weight in zip(losses, len_weights)]))
return sum(losses)/ctr
def maintenance_test(self, session, config, test_lines, test_projects, train_vocab, train_vocab_rev):
"""
Simulates a code maintenance scenario.
For each file in a project the model is first adapted on the rest of the files.
The model is also adapted on encountered sequences of the test file.
The test_projects argument informs the model about which instances belong to which project.
Note: Always use batch_size=1. RNN num_steps=20 is a good value.
:param session: The TF session in which operations will be run.
:param config: The configuration to be used for the model.
:param test_lines: A list containing each test instance as a separate entry. Instances from the same project should be consecutive.
:param test_projects: To which project does each instance belong to. Should be a list of strings.
:param train_vocab: The word to id mapping.
:param train_vocab_rev: The id to word mapping.
:return:
"""
# If checkpoint does not exist throw an exception. A global model must have been pretrained.
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if not tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
raise Exception('Checkpoint does not exist!')
# Some initializations.
new_learning_rate = config.learning_rate
test_losses = []
test_losses_sum = 0.0
lengths = []
ctr = 0
# First compute which files belong in each project.
project_sizes = []
last_project_name = ''
project_file_size = 0
for test_project in test_projects:
if test_project != last_project_name:
if last_project_name != '':
project_sizes.append(project_file_size)
project_file_size = 0
else:
project_file_size += 1
last_project_name = test_project
print(project_sizes)
print(sum(project_sizes))
print()
# Now distribute test lines based on project size.
project_test_lines = []
files_distributed = 0
for project_file_size in project_sizes:
project_test_lines.append(test_lines[files_distributed : files_distributed + project_file_size])
files_distributed += project_file_size
partitions = 20 # Default number of partitions
for proj_id, test_lines in enumerate(project_test_lines):
print(len(test_lines))
large_project = len(test_lines) > 200
if len(test_lines) > 2000: # Really big projects can have speed benefits from more partitions.
partitions = 50
else:
partitions = 20
if large_project:
# The project is very large, so split it into `partitions` parts of roughly partition_size files each.
# For each of the 20/50 parts train a model on all the parts excluding itself.
partition_size = len(test_lines) / partitions
for partition in range(partitions):
partition_words = []
for i, line in enumerate(test_lines):
if i / partition_size != partition and not (i / partition_size == partitions + 1 and partition == partitions):
partition_words.extend([word for word in line.split(' ')])
# If checkpoint does not exist throw an exception. A global model should have been trained...
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if not tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
raise Exception('Checkpoint for global model does not exist!')
self.saver.restore(session, ckpt.model_checkpoint_path)
# Where to save this partition.
partition_path = os.path.join(FLAGS.train_dir, "partition%d" % partition)
if os.path.isdir(partition_path): shutil.rmtree(partition_path)
partition_ids = [train_vocab[word] if word in train_vocab else train_vocab['-UNK-'] for word in partition_words]
partition_dataset = reader.dataset(partition_ids, train_vocab, train_vocab_rev)
# Reset the LSTM state to zeros and train
state = session.run(self.reset_state)
try:
epoch_log_perp_unnorm = epoch_total_weights = 0.0
epoch_sub_total_weights = 0.0
for step, (context, target, target_weights, sub_target_weights) in enumerate(
partition_dataset.batch_producer(self.batch_size, self.num_steps)):
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else:
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
_, cost, state, loss, iteration = session.run(
[self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
epoch_log_perp_unnorm += np.sum(loss)
epoch_total_weights += np.sum(sum(target_weights))
epoch_sub_total_weights += np.sum(sum(sub_target_weights))
train_log_perplexity = epoch_log_perp_unnorm / epoch_total_weights
train_perplexity = math.exp(train_log_perplexity) if train_log_perplexity < 300 else float("inf")
print(train_perplexity)
checkpoint_path = os.path.join(partition_path, "model")
self.saver.save(session, checkpoint_path, global_step=self.global_step)
except (StopTrainingException, KeyboardInterrupt):
print("Finished training ........")
for lines_done, test_line in enumerate(test_lines):
ctr += 1
# Restore the global model
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
self.saver.restore(session, ckpt.model_checkpoint_path)
if large_project:
# Load pretrained model and only train on the rest of the files from this partition
partition = lines_done / partition_size
partition = min(partitions - 1, partition) # last partition can be bigger
partition_path = os.path.join(FLAGS.train_dir, "partition%d" % partition)
part_ckpt = tf.train.get_checkpoint_state(partition_path)
self.saver.restore(session, part_ckpt.model_checkpoint_path)
train_words = []
if partition < partitions - 1:
partition_lines = zip(range(partition * partition_size, (partition + 1) * partition_size),
test_lines[partition * partition_size : (partition + 1) * partition_size])
else:
partition_lines = zip(range(partition * partition_size, len(test_lines)),
test_lines[partition * partition_size : (partition + 1) * partition_size])
for i, line in partition_lines:
if i != (lines_done % partition_size):
train_words.extend([word for word in line.split(' ')])
else:
# Now for each file in the current test project use the rest as train data
train_words = []
for i, line in enumerate(test_lines):
if i != lines_done:
train_words.extend([word for word in line.split(' ')])
# Convert the train data words to ids
ids = [train_vocab[word] if word in train_vocab else train_vocab['-UNK-'] for word in train_words]
train_dataset = reader.dataset(ids, train_vocab, train_vocab_rev)
# Reset the LSTM state to zeros and train
state = session.run(self.reset_state)
try:
epoch_log_perp_unnorm = epoch_total_weights = 0.0
epoch_sub_total_weights = 0.0
for step, (context, target, target_weights, sub_target_weights) in enumerate(
train_dataset.batch_producer(self.batch_size, self.num_steps)):
feed_dict = {self.inputd: context,
self.targets: target,
self.target_weights: target_weights,
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else:
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
# print("state number " + str(i))
_, cost, state, loss, iteration = session.run([self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
epoch_log_perp_unnorm += np.sum(loss)
epoch_total_weights += np.sum(sum(target_weights))
epoch_sub_total_weights += np.sum(sum(sub_target_weights))
train_log_perplexity = epoch_log_perp_unnorm / epoch_total_weights
train_perplexity = math.exp(train_log_perplexity) if train_log_perplexity < 300 else float("inf")
# lengths.append(int(round(epoch_sub_total_weights, 0)))
# Training done. Now test on test file
ids = [train_vocab[word] if word in train_vocab else train_vocab['-UNK-']
for word in test_lines[lines_done].split(' ')]
# Test on each sequence of tokens and then train on it until the file is done
subtokens_done = 0
tokens_done = 0
instance_losses = []
# Reset the LSTM state to zeros and train
state = session.run(self.reset_state)
test_state = session.run(self.reset_state)
while subtokens_done + config.num_steps < len(ids):
step_end = subtokens_done + config.num_steps
unfinished_token = train_vocab_rev[ids[step_end]].endswith('@@')
while unfinished_token:
step_end += 1
unfinished_token = train_vocab_rev[ids[step_end]].endswith('@@')
try:
context = ids[subtokens_done : step_end]
target = ids[subtokens_done + 1 : step_end + 1]
target_weights = [1] * len(context)
for id in context:
if not train_vocab_rev[id].endswith('@@'):
tokens_done += 1
feed_dict = {self.inputd: np.tile(context, (self.batch_size, 1)),
self.targets: np.tile(target, (self.batch_size, 1)),
self.target_weights: np.tile(target_weights, (self.batch_size, 1)),
self.keep_probability: 1.0
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = test_state[i]
else:
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = test_state[i].c
feed_dict[h] = test_state[i].h
cost, test_state, loss = session.run([self.cost, self.next_state, self.loss], feed_dict)
if tokens_done > 0:
instance_losses.append((tokens_done, len(context), sum(loss)/tokens_done))
feed_dict = {self.inputd: np.tile(context, (self.batch_size, 1)),
self.targets: np.tile(target, (self.batch_size, 1)),
self.target_weights: np.tile(target_weights, (self.batch_size, 1)),
self.learning_rate: new_learning_rate,
self.keep_probability: config.keep_prob
}
if FLAGS.gru:
for i, h in enumerate(self.reset_state):
feed_dict[h] = state[i]
else: # LSTM
for i, (c, h) in enumerate(self.reset_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
_, cost, state, loss, iteration = session.run(
[self.train_step, self.cost, self.next_state, self.loss, self.iteration], feed_dict)
except (StopTrainingException, KeyboardInterrupt):
print("Finished training ........")
subtokens_done = step_end
# Test and train on the leftover part of the sequence, which has length < num_steps.
try:
tokens_done = 0
context = ids[subtokens_done:-1]
target = ids[subtokens_done + 1:]
target_weights = [1] * len(context)
if len(context) == 0 or len(target) == 0: # if there are no actual leftovers stop.
continue
for id in context:
if not train_vocab_rev[id].endswith('@@'):
tokens_done += 1
feed_dict = {self.inputd: np.tile(context, (self.batch_size, 1)),
self.targets:
np.tile(target, (self.batch_size, 1))
numpy.tile
'''
Description: Utilities
Author: SongJ
Date: 2020-12-28 14:10:28
LastEditTime: 2021-04-10 11:18:43
LastEditors: SongJ
'''
import time
import xarray as xr
import math
import matplotlib.pyplot as plt
import numba
import numpy as np
from numba import jit, njit
from scipy.spatial.distance import pdist, squareform
from sklearn.neighbors import BallTree, DistanceMetric, KDTree
from . import DPTree
# from DPTree import DPTree, label_these_node, split_cluster
# from DPTree_ST import (DPTree, label_these_node, label_these_node_new,
# label_these_node_new2, split_cluster_new, split_outlier)
from . import DPTree_ST
def fn_timer(*args,**kwargs):
def mid_fn(function):
def function_timer(*in_args, **kwargs):
t0 = time.time()
result = function(*in_args, **kwargs)
t1 = time.time()
print (" %s: %s seconds" %
(args[0], str(t1-t0))
)
return result
return function_timer
return mid_fn
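# Added illustration (not part of the original module): fn_timer is a decorator factory;
# its first positional argument becomes the label printed next to the elapsed time.
@fn_timer("toy computation")
def _example_timed_sum(n):
    # Calling _example_timed_sum(10**6) prints " toy computation: ... seconds".
    return sum(range(n))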
def check_netcdf(X):
if type(X)!=xr.DataArray:
raise ValueError("Only the xarray DataArray type is supported; please load netCDF data with the xarray library.")
# Convert a 2-D raster into an array of sample points; each grid cell becomes one sample point
def rasterArray_to_sampleArray(data):
rows,cols = data.shape
data_all = np.zeros((rows*cols,3))
num = 0
for i in range(rows):
for j in range(cols):
data_all[num,:] = [i,j,data[i,j]]
num+=1
pass
pass
data_all[:,[0,1]]=data_all[:,[1,0]]
not_none_pos = np.where(data_all[:,2]!=0)[0] #* indices of the non-zero cells in the flattened grid, e.g. [638, 629, 1004, ...]; values index into data_all
nan_pos = np.where(np.isnan(data_all[:,2]))[0] #* indices of the NaN-valued cells
not_none_pos = np.setdiff1d(not_none_pos,nan_pos)
data_not_none = data_all[not_none_pos]
pos_not_none = np.full((rows*cols),-1,dtype=np.int64) #* for every flattened grid cell, the corresponding row in data_not_none, or -1 for empty cells, e.g. [-1,-1,0,-1,1,-1,2,3,4,...]
pos_not_none[not_none_pos] = np.array(range(len(not_none_pos)))
return data_not_none,pos_not_none
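# Added usage sketch (illustrative toy values, not from the original module): converting a
# tiny 2x3 raster; zero and NaN cells are dropped from the returned sample table.
def _example_raster_to_samples():
    grid = np.array([[0.0, 1.2, np.nan],
                     [0.7, 0.0, 2.5]])
    data_not_none, pos_not_none = rasterArray_to_sampleArray(grid)
    # data_not_none rows are [col, row, value]; pos_not_none maps each flattened grid
    # cell to its row in data_not_none (-1 for zero/NaN cells).
    return data_not_none, pos_not_none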
# Convert a 3-D space-time cube into a sample-point matrix; each voxel becomes one sample point
def rasterCube_to_sampleArray(data):
times,rows,cols = data.shape
data_not_none = np.zeros((times*rows*cols,4))
data_all = np.zeros((times*rows*cols,4))
num = 0
for i in range(rows):
for j in range(cols):
for k in range(times):
data_all[num,:] = [i,j,k,data[k,i,j]]
num+=1
pass
pass
# data[:,[0,1]]=data[:,[1,0]]
not_none_pos = np.where(data_all[:,3]!=0)[0] #* indices of the non-zero cells in the flattened cube, e.g. [638, 629, 1004, ...]; values index into data_all
nan_pos = np.where(np.isnan(data_all[:,3]))[0] #* indices of the NaN-valued cells
not_none_pos = np.setdiff1d(not_none_pos,nan_pos)
data_not_none = data_all[not_none_pos]
pos_not_none = np.full((times*rows*cols),-1,dtype=np.int64) #* for every flattened cube cell, the corresponding row in data_not_none, or -1 for empty cells, e.g. [-1,-1,0,-1,1,-1,2,3,4,...]
pos_not_none[not_none_pos] = np.array(range(len(not_none_pos)))
return data_not_none,pos_not_none
#* Convert clustering results to a netCDF dataset
def labeled_res_to_netcdf(ori_nc,data_table,labels):
#* write the cluster labels into a DataArray
ori_ndarray = np.array(ori_nc)
dr_labels = np.full(ori_ndarray.shape,-2)
for i in range(len(data_table)):
if(ori_ndarray.ndim==2):
dr_labels[int(data_table[i][1])][int(data_table[i][0])] = labels[i]
elif(ori_ndarray.ndim==3):
dr_labels[int(data_table[i][2])][int(data_table[i][0])][int(data_table[i][1])] = labels[i]
else:
raise ValueError("Two or Three-dimensional matrix is needed")
pass
labeled_res= xr.DataArray(
dr_labels,
coords=ori_nc.coords,
dims=ori_nc.dims
)
ds = xr.Dataset(data_vars = dict(label=labeled_res,attr=ori_nc))
return ds
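# Added sketch (hypothetical toy data): writing a label vector for the sample table back
# next to the original DataArray as a small Dataset with 'label' and 'attr' variables.
def _example_label_export():
    da = xr.DataArray(np.array([[1.0, 0.0], [2.0, 3.0]]), dims=("lat", "lon"))
    table, _ = rasterArray_to_sampleArray(np.array(da))
    labels = np.zeros(len(table), dtype=int)  # pretend every sample fell into cluster 0
    return labeled_res_to_netcdf(da, table, labels)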
@fn_timer("compute distance matrix")
def calc_dist_matrix(data, metric='euclidean'):
dist_mat = squareform(pdist(data, metric=metric))
return dist_mat
def calc_attr_dist_mat(data,attrs_index,metric='euclidean'):
rows = data.shape[0]
try:
attr_dist_mat = squareform(pdist(data[:,attrs_index].reshape(rows, 1),metric=metric))
except:
attr_dist_mat = squareform(pdist(data[:,attrs_index].reshape(rows, 1),metric='euclidean'))
return attr_dist_mat
@fn_timer("compute cutoff density")
@njit
def calcu_cutoff_density(dist_mat, eps):
'''
Compute the cutoff density.
'''
local_cutoff_density = np.where(dist_mat < eps, 1, 0).sum(axis=1)
return local_cutoff_density
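# Added note: the cutoff density of point i is the number of points closer than eps,
# rho_i = |{ j : d_ij < eps }|. A minimal call sketch on synthetic data (illustrative only):
def _example_cutoff_density():
    pts = np.random.rand(20, 2)
    dist_mat = squareform(pdist(pts))
    return calcu_cutoff_density(dist_mat, 0.3)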
@fn_timer("compute spatial-attribute neighborhood")
def calc_homo_near_grid(data,s_eps,attr_eps,pos_not_none):
'''
Build the spatial-attribute neighborhood (mixin_near_matrix) of every grid point.
'''
mixin_near_matrix = {}
rows,cols = data.shape
num = 0
for i in range(rows):
for j in range(cols):
#* compute the neighborhood window of this point:
left_lon = i-s_eps if i-s_eps>=0 else 0
rigth_lon = i+s_eps if i+s_eps<rows else rows
up_lat = j-s_eps if j-s_eps>=0 else 0
down_lat = j+s_eps if j+s_eps<cols else cols
s_near = data[left_lon:rigth_lon+1,up_lat:down_lat+1]
if(data[i,j]!=0 and (not np.isnan(data[i,j]))):
pos_s_near = np.where((np.abs(s_near-data[i,j])<=attr_eps) & (s_near!=0) &(~np.isnan(s_near)))
pos_data = np.vstack(pos_s_near) + np.array([[left_lon],[up_lat]])
pos_in_matrix = cols*pos_data[0]+pos_data[1] #* global positions of the neighbors (the global index space still includes empty cells)
pos = pos_not_none[pos_in_matrix]
mixin_near_matrix[num] = pos
num+=1
pass
pass
return mixin_near_matrix
def calc_homo_near_cube(data,s_eps,t_eps,attr_eps,pos_not_none):
'''
Build the spatio-temporal attribute neighborhood (mixin_near_matrix) of every cube cell.
'''
mixin_near_matrix = {}
time_len,rows,cols = data.shape
num = 0
for i in range(rows):
for j in range(cols):
for k in range(time_len):
#* compute the neighborhood window of this point:
left_lon = i-s_eps if i-s_eps>=0 else 0
rigth_lon = i+s_eps if i+s_eps<rows else rows
up_lat = j-s_eps if j-s_eps>=0 else 0
down_lat = j+s_eps if j+s_eps<cols else cols
early_time = k-t_eps if k-t_eps>=0 else 0
lated_time = k+t_eps if k+t_eps<time_len else time_len
s_near = data[early_time:lated_time+1,left_lon:rigth_lon+1,up_lat:down_lat+1]
# s_near = s_near[np.where(~np.isnan(s_near) & (s_near!=0))]
if(data[k,i,j]!=0 and (not np.isnan(data[k,i,j]))):
pos_s_near = np.where((np.abs(s_near-data[k,i,j])<=attr_eps) & (s_near!=0) &(~np.isnan(s_near)))
pos_data = np.vstack(pos_s_near) + np.array([[early_time],[left_lon],[up_lat]])
pos_in_matrix = time_len*cols*pos_data[1]+time_len*pos_data[2]+pos_data[0] #* global positions of the neighbors (the global index space still includes empty cells)
pos = pos_not_none[pos_in_matrix]
mixin_near_matrix[num] = pos
num+=1
pass
pass
pass
return mixin_near_matrix
@fn_timer("compute Gaussian density")
def calc_gaus_density_spatial(data,s_eps,attr_eps):
'''
Here data is a 2-D spatial raster whose two axes are lon and lat.
'''
rows,cols = data.shape
zero_num = np.where(data==0,1,0).sum()
nan_num = np.where(np.isnan(data),1,0).sum()
density_list_len = rows*cols - zero_num - nan_num
density = np.zeros(density_list_len,dtype=np.float32)
num = 0
for i in range(rows):
for j in range(cols):
#* compute the neighborhood window of this point:
left_lon = i-s_eps if i-s_eps>=0 else 0
rigth_lon = i+s_eps if i+s_eps<rows else rows
up_lat = j-s_eps if j-s_eps>=0 else 0
down_lat = j+s_eps if j+s_eps<cols else cols
s_near = data[left_lon:rigth_lon+1,up_lat:down_lat+1]
s_near = s_near[np.where((~np.isnan(s_near)) & (s_near!=0))]
if(data[i,j]!=0 and (not np.isnan(data[i,j]))):
density[num] = np.exp(-1*((1-(np.abs(s_near-data[i,j])))/attr_eps)**2).sum()
num+=1
pass
pass
return density
@fn_timer("density computation")
# @njit
def calc_gaus_density_st(data,s_eps,t_eps,attr_eps):
'''
Here data is a space-time cube with three dimensions: time, lon, lat.
'''
time_len,rows,cols = data.shape
zero_num = np.where(data==0,1,0).sum()
nan_num = np.where(np.isnan(data),1,0).sum()
density_list_len = time_len*rows*cols - zero_num - nan_num
density = np.zeros(density_list_len,dtype=np.float32)
num = 0
for i in range(rows):
for j in range(cols):
for k in range(time_len):
#* compute the neighborhood window of this point:
left_lon = i-s_eps if i-s_eps>=0 else 0
right_lon = i+s_eps if i+s_eps<rows else rows
up_lat = j-s_eps if j-s_eps>=0 else 0
down_lat = j+s_eps if j+s_eps<cols else cols
early_time = k-t_eps if k-t_eps>=0 else 0
late_time = k+t_eps if k+t_eps<time_len else time_len
s_near = data[early_time:late_time+1,left_lon:right_lon+1,up_lat:down_lat+1]
s_near = s_near[np.where((~np.isnan(s_near)) & (s_near!=0))]
if(data[k,i,j]!=0 and (not np.isnan(data[k,i,j]))):
density[num] = np.exp(-1*((1-(np.abs(s_near-data[k,i,j])))/attr_eps)**2).sum()
num+=1
pass
pass
pass
return density
@fn_timer("计算空间近邻")
def calc_spatial_neighbor(X_spatial,eps,leaf_size):
'''
使用 kdtree 计算空间近邻
主要是借助kdtree解决大数据量的计算问题
'''
tree = KDTree(X_spatial, leaf_size=leaf_size)
ind = tree.query_radius(X_spatial, eps, return_distance=False, count_only=False, sort_results=False)
return ind
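# Hedged usage sketch (not from the original module): the radius query wrapped by
# calc_spatial_neighbor, on three toy points. Assumes KDTree is sklearn.neighbors.KDTree,
# which matches the query_radius / leaf_size API used above; the demo name and
# coordinates are illustrative only.
def _demo_spatial_neighbor():
    import numpy as np
    coords = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0]])
    ind = calc_spatial_neighbor(coords, eps=1.5, leaf_size=2)
    # the two nearby points see each other, the far point only sees itself
    assert set(ind[0]) == {0, 1} and set(ind[2]) == {2}
    return ind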
@fn_timer("计算时空邻居")
# @njit
def calc_st_neighbor(X_time,eps,spatial_neighbor):
'''
计算时间近邻
'''
st_neighbor = []
flattened_time = X_time.flatten()
rows = len(flattened_time)
for i in range(rows):
cur_spat_neighbor = spatial_neighbor[i]
st_neighbor.append(cur_spat_neighbor[np.where(abs(flattened_time[cur_spat_neighbor]-flattened_time[i])<=eps)[0]])
pass
return np.array(st_neighbor)
# @fn_timer("计算时空邻居")
# def calc_st_neighbor(spatial_neighbor,time_neighbor):
# st_neighbor={}
# rows = spatial_neighbor.shape[0]
# for i in range(rows):
# st_neighbor[i]=np.intersect1d(spatial_neighbor[i],time_neighbor[i])
# pass
# return st_neighbor
@fn_timer("计算混合邻居")
def calc_mixin_near_matrix(space_dist_mat,spatial_eps,attr_dist_mat,attr_eps):
rows = space_dist_mat.shape[0]
mixin_near_matrix = {}
for i in range(rows):
space_near = np.where(space_dist_mat[i,:]<=spatial_eps)[0]
attr_near = np.where(attr_dist_mat[i,:]<=attr_eps)[0]
mixin_near_matrix[i]=np.intersect1d(space_near,attr_near)
return mixin_near_matrix
# @njit
def calc_gaus_density_njit(rows,ind,dist,st_neighbors,eps):
local_gaus_density = np.zeros((rows,),dtype=np.float32)
for i in range(rows):
arg_intersect_ind = np.where(np.in1d(ind[i],st_neighbors[i]))
local_gaus_density[i] = np.exp(-1 *(dist[i][arg_intersect_ind]/eps)**2).sum()
return local_gaus_density
# note: this redefines calc_mixin_near_matrix above with a different, k-NN-based signature
def calc_mixin_near_matrix(rows,ind,st_neighbors):
mixin_near_matrix = {}
for i in range(rows):
arg_intersect_ind = np.where(np.in1d(ind[i],st_neighbors[i]))
mixin_near_matrix[i] = ind[i][arg_intersect_ind]
return mixin_near_matrix
@fn_timer("计算高斯密度")
@njit
def calcu_gaus_density_spatial(near_matrix,dist_mat, eps):
'''
计算高斯密度
'''
rows = dist_mat.shape[0]
local_gaus_density = np.zeros((rows,),dtype=np.float32)
for i in range(rows):
near_nodes = near_matrix[1][np.where(near_matrix[0]==i)]
local_gaus_density[i] = np.exp(-1*((1-dist_mat[i][near_nodes])/eps)**2).sum()
return local_gaus_density
@fn_timer("计算高斯密度")
@njit
def calcu_gaus_density(dist_mat, eps):
'''
计算高斯密度
'''
rows = dist_mat.shape[0]
local_gaus_density = np.zeros((rows,),dtype=np.float32)
for i in range(rows):
local_gaus_density[i] = np.exp(-1 *((dist_mat[i, :])/(eps))**2).sum()
pass
return local_gaus_density
def calc_density(dist_mat,eps,density_metric):
if(density_metric=='gauss'):
return calcu_gaus_density(dist_mat,eps)
else:
return calcu_cutoff_density(dist_mat,eps)
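# Hedged usage sketch (not from the original module): how calc_density switches
# between the Gaussian and cutoff kernels on a small distance matrix. Running it
# requires numba, since both kernels above are @njit-compiled; all names and values
# here are illustrative only.
def _demo_calc_density():
    import numpy as np
    toy_dist = np.array([[0.0, 1.0, 3.0],
                         [1.0, 0.0, 2.0],
                         [3.0, 2.0, 0.0]])
    gauss = calc_density(toy_dist, 1.5, 'gauss')    # soft, distance-weighted counts
    cutoff = calc_density(toy_dist, 1.5, 'cutoff')  # hard counts within eps
    return gauss, cutoff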
def calc_repulsive_force(data,density,k_num,leaf_size,dist_mat=[],fast=False):
if(fast):
denser_pos,denser_dist,density_and_k_relation = calc_repulsive_force_fast(data,k_num,density,leaf_size)
pass
else:
denser_pos,denser_dist,density_and_k_relation = calc_repulsive_force_classical(data,density,dist_mat)
return denser_pos,denser_dist,density_and_k_relation
@fn_timer("计算斥群值_快速")
def calc_repulsive_force_fast(data, k_num, density, leaf_size):
#* b. find the k nearest neighbours of each point
# tree = BallTree(data,leaf_size=2000,metric=DistanceMetric.get_metric('mahalanobis',V=np.cov(data.T)))
tree = KDTree(data, leaf_size=leaf_size)
dist, ind = tree.query(data, k=k_num)
#* record the relation between density and k:
density_and_k_relation = np.zeros((ind.shape[0],2),dtype=np.float32)
#* c. check whether a denser point can be found among the k nearest neighbours
denser_dist = np.full(ind.shape[0], -1,dtype=np.float32)
denser_pos = np.full(ind.shape[0],-1,dtype=np.int32)
for i in range(ind.shape[0]):
denser_list =
|
np.where(density[ind[i]]>density[i])
|
numpy.where
|
import matplotlib.pyplot as plt
import seaborn as sns
import random
import numpy as np
import gym
import torch
import json
import sys
import inspect
import os
import os.path as osp
from functools import reduce
def plot_belief(data, title, figsize, center=None, cmap='Blues'):  # center/cmap restored from their use below; defaults are assumptions
f, ax = plt.subplots(figsize=figsize)
ax = sns.heatmap(data, linewidth=1, center=center, cmap=cmap, xticklabels=20, yticklabels=['left', 'right'])
ax.set_title(title)
ax.set_ylim(2, 0)
ax.set_ylabel('Actions')
ax.set_xlabel('States')
# ax.set_xticks(np.arange(70, 91, 1))
ax.tick_params(labelsize=12)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=16)
# f.tight_layout()
f.subplots_adjust(top=0.8, bottom=0.3, right=0.8)
f.tight_layout()
return f
def plot_table_blackjack(data, title, center=None, figsize=(7.5, 12), cmap=None):
'''
Flatten from 4-D to 2-D and plot all heatmaps.
'''
TITLE = ['Stick, No Usable Ace', 'Stick, With Usable Ace', 'Hit, No Usable Ace', 'Hit, With Usable Ace']
# if contrast:
# cmap = sns.diverging_palette(10, 240, n=128)
# center = 0
# else:
# cmap = 'Blues'
cmap = 'Blues' if cmap is None else cmap
# f, ax = plt.subplots(figsize=figsize)
nrows = 2
ncols = 2
f, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(5*ncols, 5*nrows), constrained_layout=True)
to_plot = np.split(data, data.shape[-1], axis=-1)
to_plot = [np.squeeze(d) for d in to_plot]
# breakpoint()
to_plot = [np.split(d, d.shape[-1], axis=-1) for d in to_plot]
to_plot = [np.squeeze(t) for sub in to_plot for t in sub]
# print(to_plot[0].shape)
for idx, (ax, plot) in enumerate(zip(axes.flatten(), to_plot)):
# print(plot)
# ax = sns.heatmap(plot, center=center, linewidth=1, yticklabels=1, cmap=cmap)
sns.heatmap(plot, center=center, linewidth=1, yticklabels=1, cmap=cmap, ax=ax, cbar_kws={"fraction": 0.1, "pad": 0.1, "aspect": 40})
ax.set_title(TITLE[idx])
# States outside this range are unreachable
ax.set_ylim(22, 4)
ax.set_xlim(1, 11)
ax.set_ylabel('Sum of Player Hand')
ax.set_xlabel('Dealer Face-up Card')
ax.tick_params(labelsize=10)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
return f
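# Hedged usage sketch (not part of the original module): the 4-D table is expected
# to be indexed roughly as (player sum, dealer card, usable ace, action), matching
# the split order and subplot titles above. The random values and the demo name are
# illustrative assumptions only.
def _demo_plot_table_blackjack():
    rng = np.random.default_rng(0)
    table = rng.random((32, 11, 2, 2))
    fig = plot_table_blackjack(table, title='random demo table')
    return fig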
def plot_outcome_blackjack(data, title, figsize=(10,5.4), center=None, cmap=None):
# if contrast:
# cmap = sns.diverging_palette(10, 240, n=128)
# center = 0
# else:
# cmap = 'Blues'
cmap = 'Blues' if cmap is None else cmap
data = np.expand_dims(data[..., :-2], axis=0)
f, ax = plt.subplots(figsize=figsize)
# xticklabels = ['Stick & lose', 'Stick & win', 'Hit & lose', 'Stick & draw', 'Hit only']
xticklabels = ['Stick & lose', 'Stick & win', 'Hit & lose', 'Stick & draw']
ax = sns.heatmap(data, center=center, linewidth=3, cmap=cmap, xticklabels=xticklabels, cbar_kws={"orientation": 'horizontal', "pad": 0.35}, yticklabels=False, annot_kws={"size":16})
ax.set_title(title)
ax.set_ylabel('Belief')
ax.set_xlabel('Outcomes')
ax.tick_params(labelsize=20)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=20)
return f
def plot_table_cartpole(data, title, figsize, contrast=False):
if contrast:
cmap = sns.diverging_palette(10, 240, n=128)
center = 0
else:
cmap = 'Blues'
center = None  # otherwise 'center' is undefined below when contrast is False
f, ax = plt.subplots(figsize=figsize)
ax = sns.heatmap(data, linewidth=1, center=center, cmap=cmap, xticklabels=20, yticklabels=['left', 'right'])
ax.set_title(title)
ax.set_ylim(2, 0)
ax.set_ylabel('Actions')
ax.set_xlabel('States')
# ax.set_xticks(np.arange(70, 91, 1))
ax.tick_params(labelsize=12)
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=16)
# f.tight_layout()
f.subplots_adjust(top=0.8, bottom=0.3, right=0.8)
f.tight_layout()
return f
def plot_belief_taxi(data):
pass
def plot_contrast_taxi(belief, con_belief):
pass
def set_global_seeds(seed):
# set numpy and random seeds
np.random.seed(seed)
random.seed(seed)
# Set gym env seed
if hasattr(gym.spaces, 'prng'):
gym.spaces.prng.seed(seed)
# Set torch seed
try:
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
except ImportError:
pass
def get_device(device = None):
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device(device)
return device
def convert_to_onehot(array, size, transform):
# print(array.long().size())
out = torch.zeros(array.size()[0]).cuda()
if isinstance(array, torch.Tensor):
for idx, arr in enumerate(array):
int_ = transform([3, 3, 6, 3], arr)
# breakpoint()
if isinstance(int_, np.ndarray):
int_ = torch.as_tensor(int_)
# breakpoint()
out[idx] = int_
out = out.reshape(-1, 1)
onehot = torch.FloatTensor(array.size()[0], 162).cuda()
onehot.zero_()
onehot.scatter_(1, out.long(), 1)
return onehot
def is_arraylike(x):
if isinstance(x, (list, np.ndarray)):
return True
return False
def as_list(x):
if is_arraylike(x):
return x
else:
return
|
np.array([x])
|
numpy.array
|
from pypet import Environment, Parameter, cartesian_product, progressbar, Parameter
import numpy as np
import csv
import os
import copy
import pickle
import logging
from datetime import date
import time
def add_parameters(traj):
"""
add parameters to the trajectory with descriptions and default values
Parameters:
traj: pypet.trajectory.Trajectory
trajectory container, which manages the parameters
Returns:
None
Dependencies:
from pypet import Parameter
import numpy as np
"""
traj.par.N_pop = Parameter('N_pop', 10**5, 'population size')
traj.par.N_site = Parameter('N_site', 20, 'sequence length')
traj.par.N_state = Parameter('N_state', 2, 'number of states per site')
traj.par.mu = Parameter('mu', 10**(-4), 'mutation prob. per site per time step')
traj.par.sigma_h = Parameter('sigma_h', 1, 'host fitness coefficient')
traj.par.D0 = Parameter('D0', 5, 'cross-immunity distance')
traj.par.h_0 = Parameter('h_0', -7, 'typical single mutation fitness cost')
traj.par.J_0 = Parameter('J_0', 0, 'typical mutation coupling coefficient')
traj.par.hJ_coeffs = Parameter('hJ_coeffs', 'p24',
'fitness coefficients')
# traj.par.seed = Parameter('seed', 123456, 'RNG seed')
# randomly choose rng seed and save it as parameter
seed = np.random.randint(10**6)
traj.par.seed = Parameter('seed', seed, 'RNG seed')
traj.par.N_simu = Parameter('N_simu', 200, 'number of time steps to simulate')
def fitness_coeff_constant(N_site,N_state,h_0,J_0):
"""
creating the mutational fitness coefficients for the simulated sequences
in the case of constant fields and constant couplings
Parameters:
N_site: int
sequence length
N_state: int
number of states per site
h_0: int or float
single-mutation fitness cost
J_0: int or float
fitness coupling coefficient for double mutations
Returns:
h_list: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_list: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
Dependencies:
import numpy as np
"""
numparam_J=int(N_site*(N_site-1)/2)
J_list=np.ones((numparam_J, N_state-1, N_state-1))*J_0
h_list=np.ones((N_site, N_state-1))*h_0
return h_list, J_list
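# Hedged usage sketch (not part of the original module): the shapes returned by
# fitness_coeff_constant for a short two-state sequence. The demo name is an
# illustrative assumption only.
def _demo_fitness_coeff_constant():
    h_list, J_list = fitness_coeff_constant(N_site=4, N_state=2, h_0=-7, J_0=0)
    assert h_list.shape == (4, 1)        # one field per (site, non-wildtype state)
    assert J_list.shape == (6, 1, 1)     # 4*(4-1)/2 = 6 site pairs
    return h_list, J_list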
def fitness_coeff_p24(N_site, N_state, filepath='C:/Users/julia/Documents/Resources/InfluenzaFitnessLandscape/NewApproachFromMarch2021/'
'InfluenzaFitnessInference/code/notebooks/fitnessinference/p24-B-S0.90-Ising-reduced-out-learn.j', seed=12345, h_min=-9., h_max=-0.5, J_min=-2., J_max=3.):
"""
creating the mutational fitness coefficients for the simulated sequences in the case
of fitness coeffs sampled from coefficients inferred for the HIV protein p24
Parameters:
N_site: int
sequence length (<=105)
N_state: int
number of states per site
filepath (optional): str
filepath to a .j file that was created by the ACE inference of
p24 fitness coefficients
seed (optional): int
seed for random sampling from the given coefficients
h_min, h_max, J_min, J_max (optional): float
maximum and minimum mutational fitness coefficients
(fixed for various sequence lengths)
Returns:
h_list: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_list: numpy.ndarray
added fitness change due to couplings of
a mutation to a specific state at any site i
with a mutation to a specific state at any other site j
Dependencies:
import os
import numpy as np
import csv
"""
filepath = os.path.normpath(filepath)
if not os.path.exists(filepath):
filepath = os.path.join(os.getcwd(), 'code', 'notebooks', 'fitnessinference', 'p24-B-S0.90-Ising-reduced-out-learn.j')
# np.random.seed(seed)
# get coefficients from file
with open(filepath) as f:
reader = csv.reader(f, delimiter = '\t')
param_list = list(reader)
# calculate sequence length from the coeff data
seq_length = int((np.sqrt(1 + 8 * len(param_list)) - 1) / 2)
# separate h and J list
h_list = [[float(param_list[i][j]) for j in range(len(param_list[i]))]
for i in range(seq_length)]
J_list = [[float(param_list[i][j]) for j in range(len(param_list[i]))]
for i in range(seq_length, len(param_list))]
# calculate matrix from J_list
k=0
J_mat=[[[] for j in range(seq_length)] for i in range(seq_length)]
for i in range(seq_length):
for j in range(i):
J_mat[i][j]=J_list[k]
J_mat[j][i]=J_list[k]
k+=1
# reduce J and h lists to sequence of length N_site
J_list_red = []
for i in range(N_site):
for j in range(i):
J_list_red.append(J_mat[i][j])
h_list_red = h_list[:N_site]
# sample from h and J parameters to get coefficient lists for only N_state states at each site
J_list_final = np.array([np.random.choice(J_list_red[i], size=(N_state-1, N_state-1))
for i in range(len(J_list_red))])
h_list_final = np.array([np.random.choice(h_list_red[i], size=N_state-1)
for i in range(len(h_list_red))])
# # replace max and min of coefficients by specific value, comment out if no modification to sampled coefficients
# J_list_final = np.where(J_list_final==np.max(J_list_final), J_max, J_list_final)
# J_list_final = np.where(J_list_final==np.min(J_list_final), J_min, J_list_final)
# h_list_final = np.where(h_list_final==np.max(h_list_final), h_max, h_list_final)
# h_list_final = np.where(h_list_final==np.min(h_list_final), h_min, h_list_final)
return h_list_final, J_list_final
def mutate_seqs(seqs, N_state, mu):
"""
mutate list of sequences according to given mutation probability and number of states,
Parameters:
seqs: numpy.ndarray
list of sequences
N_state: int
number of states per site
mu: float
probability to mutate from the current state to any one of the other states <<1
Returns:
seqs_m: numpy.ndarray
list of mutated sequences
Dependencies:
import numpy as np
"""
# first choose randomly how far in the state space each site is shifted
shift_ind = np.random.choice(N_state, size=seqs.shape, replace=True, p=[1-mu*(N_state-1)]+[mu]*(N_state-1))
# from this calculate the new state index (which can be negative)
new_ind = np.array(- N_state + shift_ind + seqs, dtype=int)
# set the new state
state_list = np.arange(N_state)
seqs_m = state_list[new_ind]
return seqs_m
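# Hedged usage sketch (not part of the original module): mutating a small batch of
# all-zero binary sequences. The demo name and parameter values are illustrative
# assumptions only.
def _demo_mutate_seqs():
    np.random.seed(0)
    seqs = np.zeros((5, 10), dtype=int)
    seqs_m = mutate_seqs(seqs, N_state=2, mu=0.1)
    assert seqs_m.shape == seqs.shape
    assert set(np.unique(seqs_m)) <= {0, 1}   # states stay within {0, ..., N_state-1}
    return seqs_m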
def fitness_int(seq, N_state, h_model, J_model, statevec_list):
"""
calculate the intrinsic fitness for one sequence
Parameters:
seq: numpy.ndarray
sequence
N_state: int
number of states per site
h_model: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_model: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
statevec_list: numpy.ndarray
list of vectors that represent the states of a sequence site
Returns:
f_int: float
intrinsic fitness for the sequence
Dependencies:
import numpy as np
"""
f_int = 0
k = 0
for i in range(len(seq)): # for each state 1
# state at site i
s1 = statevec_list[seq[i]]
# fitness contribution from state at i
f_int += np.dot(s1, h_model[i])
for j in range(i): # for each state 2<state 1
# state at other site j
s2 = statevec_list[seq[j]]
# fitness contribution from coupling of state at i with state at j
f_int += np.matmul(np.matmul(s1, J_model[k]), s2.T)
k += 1
return f_int
def fitness_int_list(strain_current, N_state, h_model, J_model):
"""
calculate the intrinsic fitness for each current strain
Parameters:
strain_current: numpy.ndarray
list of current strains (=unique sequences)
N_state: int
number of states per site
h_model: numpy.ndarray
mutational fitness change for mutation to each mutated state at each site
J_model: numpy.ndarray
added fitness change due to couplings of
two specific mutations to each state at each site
Returns:
f_int_list: numpy.ndarray
intrinsic fitness for each strain
Dependencies:
import numpy as np
"""
statevec_list=np.array([[int(i==j) for j in range(1,N_state)]
for i in range(N_state)])
f_int_list = np.array([fitness_int(seq, N_state, h_model, J_model, statevec_list)
for seq in strain_current])
return f_int_list
def fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0):
"""
calculate the host population-dependent fitness contribution for one sequence
at the current time
Parameters:
seq: numpy.ndarray
sequence
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
cross-immunity distance
Returns:
f_host: float
host-dependent fitness for the sequence at the current time
Dependencies:
import numpy as np
"""
f_host_noSig = 0 # initialize host fitness without sigma_h factor
for t in range(len(st_yearly)): # iterate through all prev. time steps
strains = st_yearly[t]
# create array of same dimension as strain list at t
seq_arr = np.repeat([seq], len(strains), axis=0)
# calculate mutational distances between seq_arr and strains
mut_dist = np.sum(seq_arr!=strains, axis=1)
f_host_noSig += -np.dot(st_freq_yearly[t], np.exp(-mut_dist/D0))
f_host = sigma_h*f_host_noSig
return f_host
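# Hedged worked example (not part of the original module): with a single past season
# whose only strain equals seq (mutational distance 0, frequency 1), the sum above
# reduces to f_host = sigma_h * (-1 * exp(-0/D0)) = -sigma_h. The demo name is an
# illustrative assumption only.
def _demo_fitness_host():
    seq = np.zeros(5, dtype=int)
    st_yearly = [np.zeros((1, 5), dtype=int)]   # one past season with one strain
    st_freq_yearly = [np.array([1.0])]          # that strain had frequency 1
    f = fitness_host(seq, st_yearly, st_freq_yearly, sigma_h=1.0, D0=5.0)
    assert np.isclose(f, -1.0)
    return f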
def fitness_host_list(strain_current, st_yearly, st_freq_yearly, sigma_h, D0):
"""
calculate the host population-dependent fitness contribution for all strains
at the current time
Parameters:
strain_current: numpy.ndarray
list of current strains (=unique sequences)
st_yearly: list
list of strains for each time step up to t-1
st_freq_yearly: list
list of strain frequencies for each time step up to t-1
sigma_h: float
coefficient modulating f_host
D0: float
cross-immunity distance
Returns:
f_host_list: numpy.ndarray
host-dependent fitness for each strain at the current time
Dependencies:
import numpy as np
"""
f_host_list = np.array([fitness_host(seq, st_yearly, st_freq_yearly, sigma_h, D0)
for seq in strain_current])
return f_host_list
def flu_antigen_simulation(traj, filepath, varied_simu_params):
"""
simulate the evolution of flu-like antigenic sequences
Parameters:
traj: pypet.trajectory.Trajectory
trajectory container, which manages the parameters
filepath: str
path to folder
where results should be stored
varied_simu_params: list
list of names of parameters that are varied
in the parameter sweep
Results:
strain_yearly: list
[[list of unique sequences (strains)]
for each time step]
with strains from most to least prevalent at each time
strain_frequency_yearly: list
[[list of frequencies of strains]
for each time step]
in same order as in strain_yearly
pickled .data files with intermediate simulation results (updated at each simulated time step)
Returns:
run_name: str
name of file without path or extension
in which the results of the single run are saved
Dependencies:
other functions in this module
import numpy as np
from pypet import Environment, Parameter
import os
import pickle
import copy
"""
# initializations:
# set RNG seed:
np.random.seed(traj.seed)
# current sequences, numpy array, initialized with all zeros
seqs = np.zeros((traj.N_pop, traj.N_site))
# current strains
strain_current, strain_count_current =\
np.unique(seqs, return_counts=True, axis=0)
# strains at each time, list, initialized with initial strain
strain_yearly = [strain_current]
# strain frequencies at each time, list, initialized with 1
strain_frequency_yearly = [strain_count_current/
|
np.sum(strain_count_current)
|
numpy.sum
|
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
# toy stock-price data: 4 observations (rows) of 2 variables (columns)
APPLE = np.array(
[[1,5],[3,-2],[-1,-4],[-2,1]])
# mean of each column
print(APPLE.mean(axis=0))
cov = np.cov(APPLE,rowvar=0)
print(cov)
w, v =
|
LA.eig(cov)
|
numpy.linalg.eig
|
#!/usr/bin/env python
import builtins
import operator
import warnings
from .duckprint import (duck_str, duck_repr, duck_array2string, typelessdata,
default_duckprint_options, default_duckprint_formatters, FormatDispatcher)
from .common import (is_ndducktype, is_ndscalar, is_ndarr, is_ndtype,
new_ducktype_implementation, ducktype_link, get_duck_cls, as_duck_cls)
from .ndarray_api_mixin import NDArrayAPIMixin
import numpy as np
from numpy import newaxis
import numpy.core.umath as umath
from numpy.lib.mixins import NDArrayOperatorsMixin
from numpy.lib.function_base import _quantile_is_valid
import numpy.core.numerictypes as ntypes
from numpy.core.multiarray import (normalize_axis_index,
interp as compiled_interp, interp_complex as compiled_interp_complex)
from numpy.lib.stride_tricks import _broadcast_shape
from numpy.core.numeric import normalize_axis_tuple
class MaskedOperatorMixin(NDArrayOperatorsMixin):
# shared implementations for MaskedArray, MaskedScalar
# override the NDArrayOperatorsMixin implementations for cmp ops, as
# currently those don't work for flexible types.
def _cmp_op(self, other, op):
if other is X:
db, mb = self._data.dtype.type(0), np.bool_(True)
else:
db, mb = getdata(other), getmask(other)
cls = get_duck_cls(self, other)
data = op(self._data, db)
mask = self._mask | mb
return maskedarray_or_scalar(data, mask, cls=cls)
def __lt__(self, other):
return self._cmp_op(other, operator.lt)
def __le__(self, other):
return self._cmp_op(other, operator.le)
def __eq__(self, other):
return self._cmp_op(other, operator.eq)
def __ne__(self, other):
return self._cmp_op(other, operator.ne)
def __gt__(self, other):
return self._cmp_op(other, operator.gt)
def __ge__(self, other):
return self._cmp_op(other, operator.ge)
def __complex__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __int__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __float__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __index__(self):
raise TypeError("Use .filled() before converting to non-masked scalar")
def __array_function__(self, func, types, arg, kwarg):
impl, check_args = implements.handled_functions.get(func, (None, None))
if impl is None or not check_args(arg, kwarg, types, self.known_types):
return NotImplemented
return impl(*arg, **kwarg)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if ufunc not in _masked_ufuncs:
return NotImplemented
return getattr(_masked_ufuncs[ufunc], method)(*inputs, **kwargs)
def _get_fill_value(self, fill_value, minmax):
if minmax is not None:
if fill_value != np._NoValue:
raise Exception("Do not give fill_value if providing minmax")
if minmax == 'max':
fill_value = _maxvals[self.dtype]
elif minmax == 'maxnan':
if issubclass(self.dtype.type, np.inexact):
# some functions, eg np.sort, treat nan as largest
fill_value = np.nan
else:
fill_value = _maxvals[self.dtype]
elif minmax == 'min':
fill_value = _minvals[self.dtype]
else:
raise ValueError("minmax should be 'min' or 'max'")
if fill_value is None:
raise ValueError("minmax not supported for dtype {}".format(
self.dtype))
elif fill_value is np._NoValue:
# default is 0 for all types (*not* np.nan for inexact)
fill_value = 0
return fill_value
@property
def flat(self):
return MaskedIterator(self)
def duck_require(data, dtype=None, ndmin=0, copy=True, order='K'):
"""
Return an ndarray-like that satisfies requirements.
Returns a view if possible.
Parameters
----------
data : array-like
Must be an ndarray or ndarray ducktype.
dtype : numpy datatype
Datatype to convert to
ndmin : integer
Same as 'ndmin' argument of np.array
copy : bool
Whether to guarantee a copy is made
order : one of 'K', 'F', 'C', 'A'
Same as 'order' argument of np.array
"""
# we must use only properties that work for ndarray ducktypes.
# This rules out using np.require
newdtype = dtype if dtype is not None else data.dtype
if copy or (newdtype != data.dtype):
data = data.astype(newdtype, order=order)
if order != 'K' and order is not None:
warnings.warn('order parameter of MaskedArray is ignored')
if ndmin != 0 and data.ndim < ndmin:
nd = ndmin - data.ndim
data = data[(None,)*nd + (Ellipsis,)]
return data
def asarr(v, **kwarg):
if is_ndarr(v):
return duck_require(v, **kwarg)
else: # must be ndscalar
if is_ndducktype(v):
# convert to duck-array class using our ducktype conventions
return get_duck_cls(v)(v, **kwarg)
else: # usually, np.generic type
return np.array(v, **kwarg)
class MaskedArray(MaskedOperatorMixin, NDArrayAPIMixin):
"An ndarray ducktype allowing array elements to be masked"
def __init__(self, data, mask=None, dtype=None, copy=False,
order=None, subok=False, ndmin=0):
"""
Constructs a MaskedArray given data and optional mask.
Parameters
----------
data : array-like
Any object following the numpy ducktype api or convertible to an
ndarray, but also allowing the masked signifier `X` to mark masked
elements. See Notes below.
mask : array-like
Any object convertible to a boolean `ndarray` of the same
shape as data, where true elements are masked. If omitted, defaults
to all `False`. See Notes below.
dtype : data-type, optional
The desired data-type for the array. See `np.array` argument.
copy : bool, optional
If false (default), the MaskedArray will view the data and mask
if they are ndarrays with the right properties. Otherwise
they will be copied.
order : {'K', 'A', 'C', 'F'}, optional
Memory layout of the array. See `np.array` argument. This affects
both the data and mask.
ndmin : int, optional
Specifies the minimum number of dimensions the resulting array
should have. See `np.array` argument.
Returns
-------
out : MaskedArray
The resulting MaskedArray.
Notes
-----
This MaskedArray constructor supports a few different ways to mark
masked elements, which are sometimes exclusive.
First, `data` may be a MaskedArray, in which case `mask` should not
be supplied.
If `mask` is not supplied, then masked elements may be marked in the
`data` using the masked input element `X`. That is, `data` can be a
list-of-lists containing numerical scalars and `ndarray`s,
similar to that accepted by `np.array`, but additionally allowing
some elements to be replaced with `X`. The dtype will be inferred
based on the converted dtype of the non-masked elements. If all
elements are `X`, the `dtype` argument of `MaskedArray` must be
supplied:
>>> a = MaskedArray([[1, X, 3], np.arange(3)])
>>> b = MaskedArray([X, X, X], dtype='f8')
If `mask` is supplied, `X` should not be used in the `data`. `mask`
should be any object convertible to bool datatype and broadcastable
to the shape of the data. If `mask` is already a bool ndarray
of the same shape as `data`, it will be viewed, otherwise it will
be copied.
"""
if isinstance(data, MaskedScalar):
self.__init__(data._data, data._mask, dtype=data.dtype,
order=order, ndmin=ndmin)
return
elif isinstance(data, MaskedArray):
self._mask = duck_require(data._mask, copy=copy, order=order,
ndmin=ndmin)
if mask is not None:
self._data = duck_require(data._data, copy=True, order=order,
ndmin=ndmin)
mask = np.array(mask, dtype=bool, copy=False)
self._mask |= np.broadcast_to(mask, self._data.shape)
else:
self._data = duck_require(data._data, copy=copy, order=order,
ndmin=ndmin)
return
elif data is X and mask is None:
# 0d masked array
if dtype is None:
raise ValueError("must supply dtype if all elements are X")
self._data = np.array(dtype.type(0))
self._mask = np.array(True)
return
# Otherwise got non-masked type, we convert data/mask to MaskedArray:
if mask is None:
# if mask is None, user can put X in the data.
# Otherwise, X will cause some kind of error in np.array below
data, mask, _ = replace_X(data, dtype=dtype)
# replace_X sometimes uses broadcast_to, which returns a
# readonly array with funny strides. Make writeable if so,
# since we will end up in the is_ndducktype code-path below.
if (isinstance(mask, np.ndarray) and
mask.flags['WRITEABLE'] == False):
mask = mask.copy()
self._data = asarr(data, dtype=dtype, copy=copy,order=order,ndmin=ndmin)
if mask is None:
self._mask = np.zeros(self._data.shape, dtype='bool', order=order)
elif is_ndtype(mask):
self._mask = asarr(mask, dtype=np.bool_, copy=copy, order=order)
if self._mask.shape != self._data.shape:
self._mask = np.broadcast_to(self._mask,self._data.shape).copy()
else:
self._mask = np.empty(self._data.shape, dtype='bool')
self._mask[...] = np.broadcast_to(mask, self._data.shape)
@classmethod
def __nd_duckprint_dispatcher__(cls):
return masked_dispatcher
def __str__(self):
return duck_str(self)
def __repr__(self):
return duck_repr(self, showdtype=self._mask.all())
def __getitem__(self, ind):
if is_string_or_list_of_strings(ind):
# for viewing fields of structured arrays, return readonly view.
# (see .real/.imag discussion in user guide)
ret = self._data[ind]
ret.flags['WRITEABLE'] = False
return type(self)(ret, self._mask)
if not isinstance(ind, tuple):
ind = (ind,)
# If a boolean MaskedArray is provided as an ind, treat masked vals as
# False. Allows code like "a[a>0]", which is then the same as
# "a[np.nonzero(a>0)]"
ind = tuple(i.filled(False, view=1) if
(isinstance(i, MaskedArray) and i.dtype.type is np.bool_)
else i for i in ind)
# TODO: Possible future improvement would be to support masked
# integer arrays as indices. Then marr[boolmask] should behave
# the same as marr[where(boolmask)], i.e. masked indices are
# ignored.
data = self._data[ind]
mask = self._mask[ind]
if is_ndscalar(mask): # test mask not data, to account for obj arrays
return type(self)._scalartype(data, mask, dtype=self.dtype)
return type(self)(data, mask, dtype=self.dtype)
def __setitem__(self, ind, val):
if not self.flags.writeable:
raise ValueError("assignment destination is read-only")
if self.dtype.names and is_string_or_list_of_strings(ind):
raise ValueError("Cannot assign to fields of a Masked structured "
"array")
if not isinstance(ind, tuple):
ind = (ind,)
# If a boolean MaskedArray is provided as an ind, treat masked vals as
# False. Allows code like "a[a>0] = X"
ind = tuple(i.filled(False, view=1) if
(isinstance(i, MaskedArray) and i.dtype.type is np.bool_)
else i for i in ind)
if val is X:
self._mask[ind] = True
elif isinstance(val, (MaskedArray, MaskedScalar)):
self._data[ind] = val._data
self._mask[ind] = val._mask
else:
self._data[ind] = val
self._mask[ind] = False
def __len__(self):
return len(self._data)
@property
def shape(self):
return self._data.shape
@shape.setter
def shape(self, shp):
self._data.shape = shp
self._mask.shape = shp
@property
def dtype(self):
return self._data.dtype
@dtype.setter
def dtype(self, dt):
dt = np.dtype(dt)
if self._data.dtype.itemsize != dt.itemsize:
raise ValueError("views of MaskedArrays cannot change the "
"datatype's itemsize")
self._data.dtype = dt
@property
def flags(self):
return self._data.flags
@property
def strides(self):
return self._data.strides
@property
def mask(self):
# return a readonly view of mask
m = self._mask.view()
m.flags['WRITEABLE'] = False
return m
def view(self, dtype=None, type=None):
if type is not None:
raise ValueError("subclasses not yet supported")
if dtype is None:
dtype = self.dtype
else:
try:
dtype = np.dtype(dtype)
except ValueError:
raise ValueError("dtype must be a dtype, not subclass")
if dtype.itemsize != self.itemsize:
raise ValueError("views of MaskedArrays cannot change the "
"datatype's itemsize")
# the builtin 'type' is shadowed by the 'type' parameter above, so use builtins.type
return builtins.type(self)(self._data.view(dtype), self._mask)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
result_data = self._data.astype(dtype, order, casting, subok, copy)
# force a copy of mask if data was copied
if copy == False and result_data is not self._data:
copy = True
result_mask = self._mask.astype(bool, order, casting, subok, copy)
return type(self)(result_data, result_mask)
def tolist(self):
return [x.tolist() for x in self]
def filled(self, fill_value=np._NoValue, minmax=None, view=False):
"""
Parameters
==========
fill_value : scalar, optional
value to put in masked positions of the array. Defaults to 0
if minmax is not provided.
minmax : string 'min', 'max' or 'maxnan', optional
If 'min', fill masked elements with the minimum value for this
array's datatype. If 'max', fill with maximum value for this
datatype. If 'maxnan', fill with nan if a floating type, otherwise
same as 'max'.
view : boolean, optional
If True, then the returned array is a view of the underlying data
array rather than a copy (optimization). Be careful, as subsequent
actions on the maskedarray can put nonsense data in the view.
If the array is not writeable, this option is ignored and a copy is
always returned.
Returns
=======
data : ndarray
Returns a copy of this MaskedArray with masked elements replaced
by the fill value. (or a view of view=True).
"""
if view and self._data.flags['WRITEABLE']:
d = self._data.view()
d[self._mask] = self._get_fill_value(fill_value, minmax)
d.flags['WRITEABLE'] = False
return d
d = self._data.copy(order='K')
d[self._mask] = self._get_fill_value(fill_value, minmax)
return d
def count(self, axis=None, keepdims=False):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
The default (`axis` = `None`) is to perform the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
If this is a tuple of ints, the count is performed on multiple
axes, instead of a single axis or all the axes as before.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
result : ndarray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.X
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
return (~self._mask).sum(axis=axis, dtype=np.intp, keepdims=keepdims)
# This works inplace, unlike np.sort
def sort(self, axis=-1, kind='quicksort', order=None):
# Note: See comment in np.sort impl below for trick used here.
# This is the inplace version
self._data[self._mask] = _maxvals[self.dtype]
self._data.sort(axis, kind, order)
self._mask.sort(axis, kind)
# This works inplace, unlike np.resize, and fills with repeat instead of 0
def resize(self, new_shape, refcheck=True):
self._data.resize(new_shape, refcheck)
self._mask.resize(new_shape, refcheck)
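# Hedged usage sketch (not part of the original module): basic construction and
# filling of a MaskedArray. `X` is the masked-input marker defined further down in
# this file; the demo name is an illustrative assumption only.
def _demo_masked_array_basics():
    a = MaskedArray([1.0, X, 3.0])
    assert a.mask.tolist() == [False, True, False]
    assert a.filled(fill_value=-1).tolist() == [1.0, -1.0, 3.0]
    assert a.count() == 2
    return a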
class MaskedScalar(MaskedOperatorMixin, NDArrayAPIMixin):
"An ndarray scalar ducktype allowing the value to be masked"
def __init__(self, data, mask=None, dtype=None):
"""
Construct masked scalar given a data value and mask value.
Parameters
----------
data : numpy scalar, MaskedScalar, or X
The value of the scalar. If `X` is given, `dtype` must be supplied.
mask : bool
If true, the scalar is masked. Default is false.
dtype : numpy dtype
dtype to convert to the data to
Notes
-----
To construct a masked MaskedScalar of a certain dtype, it may be
preferable to use ``X(dtype)``.
If `data` is a MaskedScalar, do not supply a `mask`.
"""
if isinstance(data, MaskedScalar):
self._data = data._data
self._mask = data._mask
if mask is not None:
raise ValueError("don't use mask if passing a maskedscalar")
self._dtype = self._data.dtype
return
elif data is X:
if dtype is None:
raise ValueError("Must supply dtype when data is X")
if mask is not None:
raise ValueError("don't supply mask when data is X")
self._data = np.dtype(dtype).type(0)
self._mask = np.bool_(True)
self._dtype = self._data.dtype
return
# Otherwise, convert data/mask to MaskedScalar:
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is None or dtype.type is not np.object_:
if is_ndtype(data):
if dtype is not None and data.dtype != dtype:
data = data.astype(dtype, copy=False)[()]
if not is_ndscalar(data):
data = data[()]
self._data = data
else:
# next line is more complicated than desired due to struct
# types, which numpy does not have a constructor for
# convert to scalar
self._data = np.array(data, dtype=dtype)[()]
self._mask = np.bool_(mask)
self._dtype = self._data.dtype
else:
# object dtype treated specially
self._data = data
self._mask = np.bool_(mask)
self._dtype = dtype
@property
def shape(self):
return ()
@property
def dtype(self):
return self._dtype
def __getitem__(self, ind):
if (self.dtype.names and is_string_or_list_of_strings(ind) or
isinstance(ind, int)):
# like structured scalars, support string indexing and int indexing
data = self._data[ind]
mask = self._mask
return type(self)(data, mask)
if ind == ():
return self
if ind == Ellipsis or ind == (Ellipsis,):
return MaskedArray(self)
raise IndexError("invalid index to scalar variable.")
def __setitem__(self, ind, val):
# non-masked structured scalars normally allow assignment (eg, to
# individual fields), but here we disallow *all* assignment, because of
# ambiguity about what to do with mask. See discussion of .real/.imag
raise ValueError("assignment destination is read-only")
def __str__(self):
if self._mask:
return MASK_STR
return str(self._data)
def __repr__(self):
if self._mask:
return "X({})".format(str(self.dtype))
if self.dtype.type in typelessdata and self.dtype.names is None:
dtstr = ''
else:
dtstr = ', dtype={}'.format(str(self.dtype))
return "MaskedScalar({}{})".format(repr(self._data), dtstr)
def __format__(self, format_spec):
if self._mask:
return 'X'
return format(self._data, format_spec)
def __bool__(self):
if self._mask:
return False
return bool(self._data)
def __hash__(self):
if self._mask:
return 0
return hash(self._data)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
result_data = self._data.astype(dtype, order, casting, subok, copy)
return MaskedScalar(result_data, self._mask)
def tolist(self):
if self._mask:
return self
return self._data.item()
@property
def mask(self):
return self._mask
def filled(self, fill_value=np._NoValue, minmax=None, view=False):
# view is ignored
fill_value = self._get_fill_value(fill_value, minmax)
if self._mask:
if self.dtype.names:
# next line is more complicated than desired due to struct
# types, which numpy does not have a constructor for
return np.array(fill_value, dtype=self.dtype)[()]
return type(self._data)(fill_value)
return self._data
def count(self, axis=None, keepdims=False):
return 0 if self._mask else 1
# create a special dummy object which signifies "masked", which users can put
# in lists to pass to MaskedArray constructor, or can assign to elements of
# a MaskedArray, to set the mask.
class MaskedX:
def __repr__(self):
return 'masked_input_X'
def __str__(self):
return 'masked_input_X'
# as a convenience, can make this typed by calling with a dtype
def __call__(self, dtype):
return MaskedScalar(0, True, dtype=dtype)
# prevent X from being used as an element in np.array, to avoid
# confusing the user. X should only be used in MaskedArrays
def __array__(self):
# hack: the only Exception that numpy doesn't clear here is MemoryError
raise MemoryError("Masked X should only be used in "
"MaskedArray assignment or construction")
masked = X = MaskedX()
ducktype_link(MaskedArray, MaskedScalar, (MaskedX,))
def replace_X(data, dtype=None):
"""
takes array-like input, replaces masked values by 0 and returns filled data &
mask. This is more-or-less a reimplementation of PyArray_DTypeFromObject to
account for masked values
Parameters
==========
data : nested tuple/list of ndarrays/MaskedArrays/X
dtype : dtype to force for output
Returns
=======
data : ndarray (or duck)
The data array of the combined inputs
mask : ndarray (or duck)
The mask array of the combined inputs
cls : type
The most derived MaskedArray subtype seen in the inputs
"""
if isinstance(data, (list, tuple)) and len(data) == 0:
return data, [], MaskedArray
# we do two passes: First we figure out the output dtype, then we replace
# all masked values by the filler "type(0)".
def get_dtype(data, cur_dtype=X):
if isinstance(data, (list, tuple)):
dtypes = (get_dtype(d, cur_dtype) for d in data)
dtypes = [dt for dt in dtypes if dt is not X]
if not dtypes:
return cur_dtype
out_dtype = np.result_type(*dtypes)
if cur_dtype is X:
return out_dtype
else:
return np.promote_types(out_dtype, cur_dtype)
if data is X:
return X
if is_ndtype(data):
return data.dtype
# otherwise try to coerce it to an ndarray (accounts for __array__,
# __array_interface__ implementors)
return np.array(data).dtype
if dtype is None:
dtype = get_dtype(data)
if dtype is X:
raise ValueError("must supply dtype if all elements are X")
else:
dtype = np.dtype(dtype)
fill = dtype.type(0)
cls = MaskedArray
def replace(data):
nonlocal cls
if data is X:
return fill, True
if isinstance(data, (MaskedScalar, MaskedArray)):
# whenever we come across a Masked* subtype, update cls
cls = get_duck_cls(cls, data)
return data._data, data._mask
if isinstance(data, list):
return (list(x) for x in zip(*(replace(d) for d in data)))
if is_ndtype(data):
return data, np.broadcast_to(False, data.shape)
# otherwise assume it is some kind of scalar
return data, False
out_dat, out_mask = replace(data)
return out_dat, out_mask, cls
# used by marr.flat
class MaskedIterator:
def __init__(self, ma):
self.dataiter = ma._data.flat
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
data = self.dataiter.__getitem__(indx)
mask = self.maskiter.__getitem__(indx)
return maskedarray_or_scalar(data, mask, cls=type(self))
def __setitem__(self, index, value):
if value is X or (isinstance(value, MaskedScalar) and value.mask):
self.maskiter[index] = True
else:
self.dataiter[index] = getdata(value)
self.maskiter[index] = getmask(value)
def __next__(self):
return maskedarray_or_scalar(next(self.dataiter), next(self.maskiter),
cls=type(self))
next = __next__
_minvals = ntypes._minvals
_minvals.update([(k, -np.inf) for k in [np.float16, np.float32, np.float64]])
_maxvals = ntypes._maxvals
_maxvals.update([(k, +np.inf) for k in [np.float16, np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
_minvals.update([(np.float128, -np.inf)])
_maxvals.update([(np.float128, +np.inf)])
def is_string_or_list_of_strings(val):
if isinstance(val, str):
return True
if not isinstance(val, list):
return False
for v in val:
if not isinstance(v, str):
return False
return True
################################################################################
# Printing setup
################################################################################
def as_masked_fmt(formattercls):
# we subclass the original formatter class, and wrap the result of
# `get_format_func` to take care of masked values.
class MaskedFormatter(formattercls):
def get_format_func(self, elem, **options):
if not elem._mask.any():
default_fmt = super().get_format_func(elem._data, **options)
return lambda x: default_fmt(x._data)
masked_str = options['masked_str']
# only get fmt_func based on non-masked values
# (we take care of masked elements ourselves)
unmasked = elem._data[~elem._mask]
if unmasked.size == 0:
default_fmt = lambda x: ''
reslen = len(masked_str)
else:
default_fmt = super().get_format_func(unmasked, **options)
# default_fmt should always give back same str length.
# Figure out what this is with a test call.
# This is a bit complicated to account for struct types.
example_elem = elem._data.ravel()[0]
example_str = default_fmt(example_elem)
reslen = builtins.max(len(example_str), len(masked_str))
# pad the columns to align when including the masked string
if issubclass(elem.dtype.type, np.floating) and unmasked.size > 0:
# for floats, try to align with decimal point if present
frac = example_str.partition('.')
nfrac = len(frac[1]) + len(frac[2])
masked_str = (masked_str + ' '*nfrac).rjust(reslen)
# Would it be safer/better to simply center the X?
else:
masked_str = masked_str.rjust(reslen)
def fmt(x):
if x._mask:
return masked_str
return default_fmt(x._data).rjust(reslen)
return fmt
return MaskedFormatter
MASK_STR = 'X'
masked_formatters = [as_masked_fmt(f) for f in default_duckprint_formatters]
default_options = default_duckprint_options.copy()
default_options['masked_str'] = MASK_STR
masked_dispatcher = FormatDispatcher(masked_formatters, default_options)
################################################################################
# Ufunc setup
################################################################################
_masked_ufuncs = {}
class _Masked_UFunc:
def __init__(self, ufunc):
self.f = ufunc
self.__doc__ = ufunc.__doc__
self.__name__ = ufunc.__name__
def __str__(self):
return "Masked version of {}".format(self.f)
def getdata(a):
if isinstance(a, (MaskedArray, MaskedScalar)):
return a._data
return a
def getmask(a):
if isinstance(a, (MaskedArray, MaskedScalar)):
return a._mask
return False
class _Masked_UniOp(_Masked_UFunc):
"""
Masked version of unary ufunc. Assumes 1 output.
Parameters
----------
ufunc : ufunc
The ufunc for which to define a masked version.
"""
def __init__(self, ufunc):
super().__init__(ufunc)
def __call__(self, a, *args, **kwargs):
if a is X:
raise ValueError("must supply dtype if all inputs are X")
a = as_duck_cls(a, base=MaskedArray)
out = kwargs.get('out', ())
if not isinstance(out, tuple):
out = (out,)
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
d, m = a._data, a._mask
where = ~m
kwhere = kwargs.get('where', None)
if isinstance(kwhere, (MaskedArray, MaskedScalar)):
if kwhere.dtype.type != np.bool_:
raise ValueError("'where' only supports masks for boolean "
"dtype")
kwhere = kwhere.filled(False)
if kwhere is not None:
where &= kwhere
kwargs['where'] = where
result = self.f(d, *args, **kwargs)
if out != ():
out[0]._mask[...] = m
return out[0]
cls = get_duck_cls(a, base=MaskedArray)
if is_ndscalar(result):
return type(a)._scalartype(result, m)
return type(a)(result, m)
class _Masked_BinOp(_Masked_UFunc):
"""
Masked version of binary ufunc. Assumes 1 output.
Parameters
----------
ufunc : ufunc
The ufunc for which to define a masked version.
reduce_fill : function or scalar, optional
Determines what fill_value is used during reductions. If a function is
supplied, it should accept a dtype as argument and return a fill value
with that dtype. A scalar value may also be supplied, which is used
for all dtypes of the ufunc.
"""
def __init__(self, ufunc, reduce_fill=None):
super().__init__(ufunc)
if reduce_fill is None:
reduce_fill = ufunc.identity
if (reduce_fill is not None and
(is_ndscalar(reduce_fill) or not callable(reduce_fill))):
self.reduce_fill = lambda dtype: reduce_fill
else:
self.reduce_fill = reduce_fill
def __call__(self, a, b, **kwargs):
# treat X as a masked value of the other array's dtype
if a is X:
a = X(b.dtype)
if b is X:
b = X(a.dtype)
a, b = as_duck_cls(a, b, base=MaskedArray)
da, db = a._data, b._data
ma, mb = a._mask, b._mask
mkwargs = {}
for k in ['where', 'order']:
if k in kwargs:
mkwargs[k] = kwargs[k]
out = kwargs.get('out', ())
if not isinstance(out, tuple):
out = (out,)
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
m = np.logical_or(ma, mb, **mkwargs)
where = ~m
kwhere = kwargs.get('where', None)
if isinstance(kwhere, (MaskedArray, MaskedScalar)):
if kwhere.dtype.type != np.bool_:
raise ValueError("'where' only supports masks for boolean "
"dtype")
kwhere = kwhere.filled(False)
if kwhere is not None:
where &= kwhere
kwargs['where'] = where
result = self.f(da, db, **kwargs)
if out:
return out[0]
if is_ndscalar(result):
return type(a)._scalartype(result, m)
return type(a)(result, m)
def reduce(self, a, **kwargs):
if self.reduce_fill is None:
raise TypeError("reduce not supported for masked {}".format(self.f))
da, ma = getdata(a), getmask(a)
mkwargs = kwargs.copy()
for k in ['initial', 'dtype']:
if k in mkwargs:
del mkwargs[k]
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
initial = kwargs.get('initial', None)
if isinstance(initial, (MaskedScalar, MaskedX)):
raise ValueError("initial should not be masked")
if 0: # two different implementations, investigate performance
wheremask = ~ma
if 'where' in kwargs:
wheremask &= kwargs['where']
kwargs['where'] = wheremask
if 'initial' not in kwargs:
kwargs['initial'] = self.reduce_fill(da.dtype)
result = self.f.reduce(da, **kwargs)
m = np.logical_and.reduce(ma, **mkwargs)
else:
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
# if da is a scalar, we get correct result no matter fill
result = self.f.reduce(da, **kwargs)
m = np.logical_and.reduce(ma, **mkwargs)
if out:
return out[0]
cls = get_duck_cls(a, base=MaskedArray)
if is_ndscalar(result):
return cls._scalartype(result, m)
return cls(result, m)
def accumulate(self, a, axis=0, dtype=None, out=None):
if self.reduce_fill is None:
raise TypeError("accumulate not supported for masked {}".format(
self.f))
da, ma = getdata(a), getmask(a)
dataout, maskout = None, None
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
dataout = out[0]._data
maskout = out[0]._mask
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
result = self.f.accumulate(da, axis, dtype, dataout)
m = np.logical_and.accumulate(ma, axis, out=maskout)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def outer(self, a, b, **kwargs):
if self.reduce_fill is None:
raise TypeError("outer not supported for masked {}".format(self.f))
da, db = getdata(a), getdata(b)
ma, mb = getmask(a), getmask(b)
# treat X as a masked value of the other array's dtype
if da is X:
da, ma = db.dtype.type(0), np.bool_(True)
if db is X:
db, mb = da.dtype.type(0), np.bool_(True)
mkwargs = kwargs.copy()
if 'dtype' in mkwargs:
del mkwargs['dtype']
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
if not is_ndscalar(db):
db[mb] = self.reduce_fill(db.dtype)
result = self.f.outer(da, db, **kwargs)
m = np.logical_or.outer(ma, mb, **mkwargs)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def reduceat(self, a, indices, **kwargs):
if self.reduce_fill is None:
raise TypeError("reduce not supported for masked {}".format(self.f))
da, ma = getdata(a), getmask(a)
mkwargs = kwargs.copy()
for k in ['initial', 'dtype']:
if k in mkwargs:
del mkwargs[k]
out = kwargs.get('out', ())
if out:
if not isinstance(out[0], MaskedArray):
raise ValueError("out must be a MaskedArray")
kwargs['out'] = (out[0]._data,)
mkwargs['out'] = (out[0]._mask,)
initial = kwargs.get('initial', None)
if isinstance(initial, (MaskedScalar, MaskedX)):
raise ValueError("initial should not be masked")
if not is_ndscalar(da):
da[ma] = self.reduce_fill(da.dtype)
# if da is a scalar, we get correct result no matter fill
result = self.f.reduceat(da, indices, **kwargs)
m = np.logical_and.reduceat(ma, indices, **mkwargs)
if out:
return out[0]
if is_ndscalar(result):
return MaskedScalar(result, m)
return type(a)(result, m)
def at(self, a, indices, b=None):
if isinstance(indices, (MaskedArray, MaskedScalar)):
raise ValueError("indices should not be masked. "
"Use .filled() first")
da, ma = getdata(a), getmask(a)
db, mb = None, None
if b is not None:
db, mb = getdata(b), getmask(b)
self.f.at(da, indices, db)
np.logical_or.at(ma, indices, mb)
def _add_ufunc(ufunc, uni=False, glob=globals(), **kwargs):
if uni:
impl = _Masked_UniOp(ufunc, **kwargs)
else:
impl = _Masked_BinOp(ufunc, **kwargs)
_masked_ufuncs[ufunc] = impl
glob[ufunc.__name__] = impl
# unary funcs
for ufunc in [umath.exp, umath.conjugate, umath.sin, umath.cos, umath.tan,
umath.arctan, umath.arcsinh, umath.sinh, umath.cosh,
umath.tanh, umath.absolute, umath.fabs, umath.negative,
umath.floor, umath.ceil, umath.logical_not, umath.isfinite,
umath.isinf, umath.isnan, umath.invert, umath.sqrt, umath.log,
umath.log2, umath.log10, umath.arcsin,
umath.arccos, umath.arccosh, umath.arctanh]:
_add_ufunc(ufunc, uni=True)
# binary ufuncs
for ufunc in [umath.add, umath.subtract, umath.multiply,
umath.arctan2, umath.hypot, umath.equal, umath.not_equal,
umath.less_equal, umath.greater_equal, umath.less,
umath.greater, umath.logical_and, umath.logical_or,
umath.logical_xor, umath.bitwise_and, umath.bitwise_or,
umath.bitwise_xor, umath.true_divide, umath.floor_divide,
umath.remainder, umath.fmod, umath.mod, umath.power]:
_add_ufunc(ufunc)
# fill value depends on dtype
_add_ufunc(umath.maximum, reduce_fill=lambda dt: _minvals[dt])
_add_ufunc(umath.minimum, reduce_fill=lambda dt: _maxvals[dt])
################################################################################
# __array_function__ setup
################################################################################
implements = new_ducktype_implementation()
def get_maskedout(out):
if out is not None:
if isinstance(out, MaskedArray):
return out._data, out._mask
raise Exception("out must be a masked array")
return None, None
def maskedarray_or_scalar(data, mask, out=None, cls=MaskedArray):
if out is not None:
return out
if is_ndscalar(data):
return cls._scalartype(data, mask)
return cls(data, mask)
def _copy_mask(mask, outmask=None):
if outmask is not None:
result_mask = outmask
result_mask[...] = mask
else:
result_mask = mask.copy()
return result_mask
def _inplace_not(v):
if isinstance(v, np.ndarray):
return np.logical_not(v, out=v)
return np.logical_not(v)
################################################################################
# npy-api implementations
################################################################################
@implements(np.all)
def all(a, axis=None, out=None, keepdims=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
# out can be maskedarray or ndarray since we never return masked elements
# (or.. should we only allow ndarray out?)
if isinstance(out, MaskedArray):
np.all(a.filled(True, view=1), axis, out._data, keepdims)
out._mask[...] = False
return out
return np.all(a.filled(True, view=1), axis, out, keepdims)
# Note: returns boolean, not MaskedArray. In case of a fully masked input,
# return True, like np.all([]).
@implements(np.any)
def any(a, axis=None, out=None, keepdims=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
if isinstance(out, MaskedArray):
np.any(a.filled(False, view=1), axis, out._data, keepdims)
out._mask[...] = False
return out
return np.any(a.filled(False, view=1), axis, out, keepdims)
# Note: returns boolean, not MaskedArray. In case of a fully masked input,
# return False, like np.any([])
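# Hedged sketch (not part of the original module): as the notes above say, masked
# elements behave as the identity of the reduction, i.e. np.all treats them as True
# and np.any treats them as False. The demo name is an illustrative assumption only.
def _demo_all_any():
    assert np.all(MaskedArray([True, X, True], dtype=np.bool_))
    assert not np.any(MaskedArray([False, X], dtype=np.bool_))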
@implements(np.amax)
@implements(np.max)
def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=True):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
kwarg = {}
if keepdims is not np._NoValue:
kwarg['keepdims'] = keepdims
if where is not np._NoValue:
kwarg['where'] = where
initial_m = initial_d = np._NoValue
if initial is not np._NoValue:
ismasked = isinstance(initial, MaskedScalar)
if initial is X or ismasked and initial._mask:
raise ValueError("initial cannot be masked")
initial_m = False
initial_d = initial._data if ismasked else initial
filled = a.filled(minmax='min', view=1)
result_data = np.max(filled, axis, outdata, initial=initial_d, **kwarg)
result_mask = np.logical_and.reduce(a._mask, axis, out=outmask,
initial=initial_m, **kwarg)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.argmax)
def argmax(a, axis=None, out=None):
if isinstance(out, MaskedArray):
raise TypeError("out argument of argmax should be an ndarray")
a = as_duck_cls(a, base=MaskedArray)
# most of the time this is enough
filled = a.filled(minmax='min', view=1)
result_data = np.argmax(filled, axis, out)
# except if the only unmasked elem is minval. Have to check and do carefully
data_min = filled == _minvals[a.dtype]
is_min = data_min & ~a._mask
has_min = np.any(is_min, axis=axis)
if np.any(has_min):
has_no_other_data = np.all(data_min, axis=axis)
has_lonely_min = has_min & has_no_other_data
if np.any(has_lonely_min):
min_ind = np.argmax(is_min, axis=axis)
if is_ndscalar(result_data):
return min_ind
result_data[has_lonely_min] = min_ind[has_lonely_min]
# one day, might speed up with numba/extension. Or with np.take?
return result_data
@implements(np.amin)
@implements(np.min)
def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
kwarg = {}
if keepdims is not np._NoValue:
kwarg['keepdims'] = keepdims
if where is not np._NoValue:
kwarg['where'] = where
initial_m = initial_d = np._NoValue
if initial is not np._NoValue:
ismasked = isinstance(initial, MaskedScalar)
if initial is X or ismasked and initial._mask:
raise ValueError("initial cannot be masked")
initial_m = False
initial_d = initial._data if ismasked else initial
filled = a.filled(minmax='max', view=1)
result_data = np.min(filled, axis, outdata, initial=initial_d, **kwarg)
result_mask = np.logical_and.reduce(a._mask, axis, out=outmask,
initial=initial_m, **kwarg)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.argmin)
def argmin(a, axis=None, out=None):
if isinstance(out, MaskedArray):
raise TypeError("out argument of argmax should be an ndarray")
a = as_duck_cls(a, base=MaskedArray)
# most of the time this is enough
filled = a.filled(minmax='max', view=1)
result_data = np.argmin(filled, axis, out)
# except if the only unmasked elem is maxval. Have to check and do carefully
data_max = filled == _maxvals[a.dtype]
is_max = data_max & ~a._mask
has_max = np.any(is_max, axis=axis)
if np.any(has_max):
has_no_other_data = np.all(data_max, axis=axis)
has_lonely_max = has_max & has_no_other_data
if np.any(has_lonely_max):
max_ind = np.argmax(is_max, axis=axis)
if is_ndscalar(result_data):
return max_ind
result_data[has_lonely_max] = max_ind[has_lonely_max]
return result_data
@implements(np.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
a = as_duck_cls(a, base=MaskedArray)
# Note: This is trickier than it looks. The first line sorts the mask
# together with any min_vals which may be present, so there appears to
# be a problem ordering mask vs min_val elements.
# But, since we know all the masked elements have to end up at the end
# of the axis, we can sort the mask too and everything works out. The
# mask-sort only swaps the mask between min_val and masked positions
# which have the same underlying data.
# np.nan should sort higher than all others, so use it as fill if floating
result_data = np.sort(a.filled(minmax='maxnan', view=1), axis, kind, order)
result_mask = np.sort(a._mask, axis, kind) #or partition for speed?
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
# Note: lexsort may be faster, but doesn't provide kind or order kwd
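# Illustrative usage of the fill trick above (a sketch, not library-verified
# output; constructor/repr details may differ):
#   >>> a = MaskedArray([3., 1., 2.], [False, True, False])
#   >>> np.sort(a)     # masked entries always end up at the end of the axis
#   MaskedArray([2., 3., X])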
@implements(np.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
a = as_duck_cls(a, base=MaskedArray)
# Similar to mask-sort trick in sort above, here after sorting data we
# re-sort based on mask. Use the property that if you argsort the index
# array produced by argsort you get the element rank, which can be
# argsorted again to get back the sort indices. However, here we
# modify the rank based on the mask before inverting back to indices.
# Uses two argsorts plus a temp array.
inds = np.argsort(a.filled(minmax='maxnan', view=1), axis, kind, order)
# next two lines "reverse" the argsort (same as double-argsort)
ranks = np.empty(inds.shape, dtype=inds.dtype)
np.put_along_axis(ranks, inds, np.arange(a.shape[axis]), axis)
# prepare to resort but make masked elem highest rank
ranks[a._mask] = _maxvals[ranks.dtype]
return np.argsort(ranks, axis, kind)
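# Worked sketch of the rank trick above (hypothetical values):
#   data: index 1 masked, index 2 a genuine nan -> filled = [30, nan, nan, 20]
#   inds  = argsort(filled)              = [3, 0, 1, 2]  (one possible tie order)
#   ranks = inverse permutation of inds  = [1, 2, 3, 0]
#   bump the masked position to max rank = [1, MAX, 3, 0]
#   argsort(ranks)                       = [3, 0, 2, 1]
# i.e. the masked element is pinned after the genuine nan, whatever tie order
# the unstable sort happened to pick for the two equal nan keys.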
@implements(np.partition)
def partition(a, kth, axis=-1, kind='introselect', order=None):
a = as_duck_cls(a, base=MaskedArray)
inds = np.argpartition(a, kth, axis, kind, order)
return np.take_along_axis(a, inds, axis=axis)
@implements(np.argpartition)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
# see argsort for explanation
a = as_duck_cls(a, base=MaskedArray)
filled = a.filled(minmax='maxnan', view=1)
inds = np.argpartition(filled, kth, axis, kind, order)
ranks = np.empty(inds.shape, dtype=inds.dtype)
np.put_along_axis(ranks, inds, np.arange(a.shape[axis]), axis)
ranks[a._mask] = _maxvals[ranks.dtype]
return np.argpartition(ranks, kth, axis, kind)
@implements(np.searchsorted, checked_args=('v',))
def searchsorted(a, v, side='left', sorter=None):
a = as_duck_cls(a, base=MaskedArray)
maskleft = len(a) - np.sum(a._mask)
aval = a.filled(minmax='maxnan', view=1)
inds = np.searchsorted(aval, v.filled(minmax='maxnan', view=1),
side, sorter)
# Line above treats mask and maxval as the same, we need to fix it up
if side == 'left':
# masked vals in v need to be moved right to the left end of the
# masked vals in a (which have to be to the right end of a).
inds[v._mask] = maskleft
else:
        # maxvals in v need to be moved left to the left end of the
        # masked vals in a.
if issubclass(v.dtype.type, np.inexact):
maxinds = np.isnan(v._data)
else:
maxinds = v._data == _maxvals[v.dtype]
inds[maxinds & ~v._mask] = maskleft
return inds
@implements(np.digitize)
def digitize(x, bins, right=False):
x = as_duck_cls(x, base=MaskedArray)
# Original comment:
# here for compatibility, searchsorted below is happy to take this
if np.issubdtype(x.dtype, np.complexfloating):
raise TypeError("x may not be complex")
if isinstance(bins, (MaskedArray, MaskedScalar)):
raise ValueError("bins should not be masked. "
"Use .filled() first")
mono = np.lib.function_base._monotonicity(bins)
if mono == 0:
raise ValueError("bins must be monotonically "
"increasing or decreasing")
# this is backwards because the arguments below are swapped
side = 'left' if right else 'right'
if mono == -1:
# reverse the bins, and invert the results
return len(bins) - np.searchsorted(bins[::-1], x, side=side)
else:
return np.searchsorted(bins, x, side=side)
@implements(np.lexsort)
def lexsort(keys, axis=-1):
if not isinstance(keys, tuple):
keys = tuple(keys)
keys = as_duck_cls(*keys, base=MaskedArray, single=False)
# strategy: for each key, split into a mask and data key.
# So, we end up sorting twice as many keys. Mask is primary key (last).
keys = tuple(x for k in keys for x in (k._data, k._mask))
return np.lexsort(keys, axis)
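# Concretely (illustrative): for keys (k1, k2) the call above is equivalent to
# np.lexsort((k1._data, k1._mask, k2._data, k2._mask)), so k2's mask is the
# most significant key and rows where k2 is masked sort to the end.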
@implements(np.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, base=MaskedArray)
if type(a) is not cls:
a = cls(a)
# code partly copied from _mean in numpy/core/_methods.py
is_float16_result = False
rcount = a.count(axis=axis, **kwargs)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None:
if issubclass(a.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
elif issubclass(a.dtype.type, np.float16):
dtype = np.dtype('f4')
is_float16_result = True
ret = np.sum(a.filled(0, view=1), axis=axis, out=outdata, dtype=dtype,
**kwargs)
retmask = np.all(a._mask, axis=axis, out=outmask, **kwargs)
with np.errstate(divide='ignore', invalid='ignore'):
if is_ndarr(ret):
ret = np.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
                ret = a.dtype.type(ret)
elif hasattr(ret, 'dtype'):
if is_float16_result:
                ret = a.dtype.type(ret / rcount)
else:
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return maskedarray_or_scalar(ret, retmask, out, type(a))
@implements(np.var)
def var(a, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
# code largely copied from _methods.var
rcount = a.count(axis=axis, **kwargs)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(a.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype('f8')
# Compute the mean, keeping same dims. Note that if dtype is not of
# inexact type then arraymean will not be either.
rcount = a.count(axis=axis, keepdims=True)
arrmean = a.filled(0).sum(axis=axis, dtype=dtype, keepdims=True)
with np.errstate(divide='ignore', invalid='ignore'):
if not is_ndscalar(arrmean):
arrmean = np.true_divide(arrmean, rcount, out=arrmean,
casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
x = type(a)(a - arrmean)
if issubclass(a.dtype.type, np.complexfloating):
x = np.multiply(x, np.conjugate(x), out=x).real
else:
x = np.multiply(x, x, out=x)
ret = x.filled(0, view=1).sum(axis, dtype, out=outdata, **kwargs)
# Compute degrees of freedom and make sure it is not negative.
rcount = a.count(axis=axis, **kwargs)
rcount = np.maximum(rcount - ddof, 0)
# divide by degrees of freedom
with np.errstate(divide='ignore', invalid='ignore'):
if is_ndarr(ret):
ret = np.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
if out is not None:
out[rcount == 0] = X
return out
return maskedarray_or_scalar(ret, rcount == 0, cls=type(a))
@implements(np.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
ret = var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, MaskedArray):
ret = np.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = np.sqrt(ret).astype(ret.dtype)
else:
ret = np.sqrt(ret)
return ret
@implements(np.average, checked_args=('a',))
def average(a, axis=None, weights=None, returned=False):
a = as_duck_cls(a, base=MaskedArray)
if weights is None:
avg = a.mean(axis)
if returned:
return avg, avg.dtype.type(a.count(axis))
return avg
wgt = weights if is_ndtype(weights) else np.array(weights)
if isinstance(wgt, MaskedArray):
raise TypeError("weight must not be a MaskedArray")
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Note: No float16 special case, since ndarray.average skips it
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if wgt.shape != a.shape:
wgt = np.broadcast_to(wgt, a.shape)
wgt = type(a)(wgt, a._mask)
scl = wgt.sum(axis=axis, dtype=result_dtype)
if np.any(scl == 0.0):
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
return avg, scl
return avg
def _move_reduction_axis_last(a, axis=None):
"""
Modified from numpy.lib.function_base._ureduce.
Reshape/transpose array so desired axes are grouped at the end.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or iterable of ints
axes or axis to reduce
Returns
-------
arr : ndarray
Input ndarray with iteration axis/axes moved to be a single axis
at the end.
keepdims : tuple
a.shape with axis dims set to 1 which can be used to reshape the
result of a reduction to the same shape a ufunc with keepdims=True
would produce.
"""
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
axis = normalize_axis_tuple(axis, nd)
for ax in axis:
keepdim[ax] = 1
if len(axis) == 1:
# arr, with the iteration axis at the end
ax = axis[0]
dims = list(range(a.ndim))
a = np.transpose(a, dims[:ax] + dims[ax+1:] + [ax])
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
keepdim = tuple(keepdim)
else:
keepdim = (1,) * a.ndim
a = a.ravel()
return a, keepdim
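# Example of the reshaping above (illustrative): for a with shape (2, 3, 4)
# and axis=(0, 2), the kept axis 1 is swapped to the front and the two reduced
# axes are merged, so the returned arr has shape (3, 8) and keepdims is
# (1, 3, 1).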
@implements(np.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
return np.quantile(a, 0.5, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation='midpoint', keepdims=keepdims)
@implements(np.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
q = np.true_divide(q, 100)
q = np.asanyarray(q) # undo any decay the ufunc performed (gh-13105)
if not _quantile_is_valid(q):
raise ValueError("Percentiles must be in the range [0, 100]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
@implements(np.quantile)
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
q = np.asanyarray(q)
if not _quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
a = as_duck_cls(a, base=MaskedArray)
a, kdim = _move_reduction_axis_last(a, axis)
if len(q.shape) > 1:
raise ValueError("q must be a scalar or 1d array")
out_shape = (q.size,) + a.shape[:-1]
if out is None:
dt = np.promote_types(a.dtype, np.float64)
outarr = get_duck_cls(a)(np.empty(out_shape, dtype=dt))
else:
if out.shape == out_shape:
outarr = out
elif q.size == 1 and (1,)+out.shape == out_shape:
outarr = out[None,...]
else:
raise ValueError('out has wrong shape')
inds = np.ndindex(a.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
for ind in inds:
ai = a[ind]
dat = ai._data[~ai.mask]
oind = (slice(None),) + ind
if dat.size == 0:
outarr[oind] = X
else:
outarr[oind] = np.quantile(dat, q, interpolation=interpolation)
if out is not None:
return out
# return a scalar in simple case
if q.shape == () and axis is None:
return outarr[0]
out_dim = kdim if keepdims else a.shape[:-1]
return outarr.reshape(q.shape + out_dim)
@implements(np.cov, checked_args=('m', 'y'))
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
cls = get_duck_cls(m, base=MaskedArray)
if type(m) is not cls:
m = cls(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
if not is_ndtype(y):
y = cls(y)
else:
cls = get_duck_cls(m, y, base=MaskedArray)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = cls(m, ndmin=2, dtype=dtype)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return cls([]).reshape(0, 0)
if y is not None:
y = cls(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if np.any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if np.any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg = np.average(X, axis=1, weights=w)
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = np.dot(X, X_T.conj())
# Determine the normalization
nomask = ~X.mask
wnm = nomask.astype(dtype) if w is None else w*nomask
w_sum = np.dot(wnm, nomask.T)
if ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
a_sum = np.dot(w*aweights*nomask, nomask.T)
fact = w_sum - ddof*a_sum/w_sum
nonpos_fact = fact <= 0
if np.any(nonpos_fact):
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=3)
fact[nonpos_fact] = X
c *= np.true_divide(1, fact)
return c.squeeze()
@implements(np.corrcoef, checked_args=('x', 'y'))
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning, stacklevel=3)
c = np.cov(x, y, rowvar)
try:
d = np.diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = np.sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
cd = c._data
with np.errstate(invalid='ignore'):
np.clip(cd.real, -1, 1, out=cd.real)
if np.iscomplexobj(cd):
np.clip(cd.imag, -1, 1, out=cd.imag)
return c
@implements(np.clip)
def clip(a, a_min, a_max, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.clip(a._data, a_min, a_max, outdata)
result_mask = _copy_mask(a._mask, outmask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.compress)
def compress(condition, a, axis=None, out=None):
# Note: masked values in condition treated as False
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(condition, a, base=MaskedArray)
cond = cls(condition).filled(False, view=1)
a = cls(a)
result_data = np.compress(cond, a._data, axis, outdata)
result_mask = np.compress(cond, a._mask, axis, outmask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.copy)
def copy(a, order='K'):
a = as_duck_cls(a, base=MaskedArray)
result_data = np.copy(a._data, order=order)
result_mask = np.copy(a._mask, order=order)
return maskedarray_or_scalar(result_data, result_mask, cls=type(a))
@implements(np.product)
@implements(np.prod)
def prod(a, axis=None, dtype=None, out=None, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.prod(a.filled(1, view=1), axis=axis, dtype=dtype,
out=outdata, keepdims=keepdims)
result_mask = np.all(a._mask, axis=axis, out=outmask, keepdims=keepdims)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.cumproduct)
@implements(np.cumprod)
def cumprod(a, axis=None, dtype=None, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.cumprod(a.filled(1, view=1), axis, dtype=dtype,
out=outdata)
result_mask = np.logical_or.accumulate(~a._mask, axis, out=outmask)
    result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.sum)
def sum(a, axis=None, dtype=None, out=None, keepdims=False):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.sum(a.filled(0, view=1), axis, dtype=dtype,
out=outdata, keepdims=keepdims)
result_mask = np.all(a._mask, axis, out=outmask, keepdims=keepdims)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.cumsum)
def cumsum(a, axis=None, dtype=None, out=None):
a = as_duck_cls(a, base=MaskedArray)
outdata, outmask = get_maskedout(out)
result_data = np.cumsum(a.filled(0, view=1), axis, dtype=dtype,
out=outdata)
result_mask = np.logical_or.accumulate(~a._mask, axis, out=outmask)
    result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
a = as_duck_cls(a, base=MaskedArray)
result = np.diagonal(a._data, offset=offset, axis1=axis1, axis2=axis2)
rmask = np.diagonal(a._mask, offset=offset, axis1=axis1, axis2=axis2)
return maskedarray_or_scalar(result, rmask, cls=type(a))
@implements(np.diag)
def diag(v, k=0):
v = as_duck_cls(v, base=MaskedArray)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = type(v)(np.zeros((n, n), v.dtype))
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return np.diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@implements(np.diagflat)
def diagflat(v, k=0):
v = as_duck_cls(v, base=MaskedArray)
return np.diag(v.ravel(), k)
@implements(np.tril)
def tril(m, k=0):
m = as_duck_cls(m, base=MaskedArray)
mask = np.tri(*m.shape[-2:], k=k, dtype=bool)
return np.where(mask, m, np.zeros(1, m.dtype))
@implements(np.triu)
def triu(m, k=0):
m = as_duck_cls(m, base=MaskedArray)
mask = np.tri(*m.shape[-2:], k=k-1, dtype=bool)
return np.where(mask, np.zeros(1, m.dtype), m)
@implements(np.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
outdata, outmask = get_maskedout(out)
a = as_duck_cls(a, base=MaskedArray)
result_data = np.trace(a.filled(0, view=1), offset=offset, axis1=axis1,
axis2=axis2, dtype=dtype, out=outdata)
result_mask = np.trace(~a._mask, offset=offset, axis1=axis1, axis2=axis2,
dtype=bool, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, type(a))
@implements(np.dot)
def dot(a, b, out=None):
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.dot(a.filled(0, view=1), b.filled(0, view=1),
out=outdata)
result_mask = np.dot(~a._mask, ~b._mask, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.vdot)
def vdot(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.vdot(a.filled(0, view=1), b.filled(0, view=1))
result_mask = np.vdot(~a._mask, ~b._mask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
# because of mask calculation, we don't support vectors of length 2.
# convert them if present. First have to do axis manip as in np.cross
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
axis = None
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
if a.shape[-1] == 2:
a = np.append(a, np.broadcast_to(0, a.shape[:-1] + (1,)), axis=-1)
if b.shape[-1] == 2:
b = np.append(b, np.broadcast_to(0, b.shape[:-1] + (1,)), axis=-1)
result_data = np.cross(a.filled(0, view=1), b.filled(0, view=1), axisa,
axisb, axisc, axis)
# trick: use nan behavior to compute mask
ma = np.where(a._mask, np.nan, 0)
mb = np.where(b._mask, np.nan, 0)
mab = np.cross(ma, mb, axisa, axisb, axisc, axis)
result_mask = np.isnan(mab)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
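# Sketch of the nan trick above with hypothetical 3-vectors:
#   a = [1, X, 0], b = [0, 1, 0]  ->  ma = [0, nan, 0], mb = [0, 0, 0]
#   np.cross(ma, mb) = [nan*0 - 0*0, 0*0 - 0*0, 0*0 - nan*0] = [nan, 0, nan]
# so exactly the output components that depend on the masked a[1] come out
# masked, while the middle component stays unmasked.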
@implements(np.inner)
def inner(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.inner(a.filled(0, view=1), b.filled(0, view=1))
result_mask = np.inner(~a._mask, ~b._mask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, cls=cls)
@implements(np.outer)
def outer(a, b, out=None):
outdata, outmask = get_maskedout(out)
cls = get_duck_cls(a, b, base=MaskedArray)
a, b = cls(a), cls(b)
result_data = np.outer(a.filled(0, view=1), b.filled(0, view=1),
out=outdata)
result_mask = np.outer(~a._mask, ~b._mask, out=outmask)
result_mask = _inplace_not(result_mask)
return maskedarray_or_scalar(result_data, result_mask, out, cls)
@implements(np.kron)
def kron(a, b):
cls = get_duck_cls(a, b, base=MaskedArray)
a = cls(a, copy=False, subok=True, ndmin=b.ndim)
nda, ndb = a.ndim, b.ndim
if (nda == 0 or ndb == 0):
return np.multiply(a, b)
a_shape = a.shape
b_shape = b.shape
nd = ndb
if ndb > nda:
a_shape = (1,)*(ndb-nda) + a_shape
elif b.ndim < a.ndim:
b_shape = (1,)*(nda-ndb) + b_shape
nd = nda
    result = np.outer(a, b)
import os
import cv2
import collections
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision.transforms as transforms
from pathlib import Path
from PIL import Image
from psenet_text_detector.models import resnet50
from psenet_text_detector.pypse import pse as pypse
from psenet_text_detector.utils import download, scale_image
def copy_state_dict(state_dict):
new_state_dict = collections.OrderedDict()
for key, value in state_dict.items():
tmp = key[7:]
new_state_dict[tmp] = value
return new_state_dict
def load_psenet_model(cuda: bool = False,
scale: int = 1):
# get psenet net path
home_path = str(Path.home())
weight_path = os.path.join(home_path,
".psenet_text_detector",
"weights",
"psenet_best.pth")
# load base resnet model
model = resnet50(pretrained=False, num_classes=7, scale=scale) # initialize
# check if weights are already downloaded, if not download
url = "https://drive.google.com/uc?id=1w2IgRkQXX49AbOARitO5xCr8-N93JHDd"
    if not os.path.isfile(weight_path):
print("PSENet text detector weight will be downloaded to {}"
.format(weight_path))
download(url=url, save_path=weight_path)
# arange device
if cuda:
checkpoint = torch.load(weight_path, map_location='cuda')
model.load_state_dict(copy_state_dict(checkpoint['state_dict']))
model = model.cuda()
cudnn.benchmark = False
else:
checkpoint = torch.load(weight_path, map_location='cpu')
model.load_state_dict(copy_state_dict(checkpoint['state_dict']))
model.eval()
return model
def get_prediction(image,
                   model=None,
                   binary_th=1.0,
                   kernel_num=3,
                   upsample_scale=1,
                   long_size=1280,
                   min_kernel_area=10.0,
                   min_area=300.0,
                   min_score=0.93,
                   cuda=False):
    # load the model lazily when none is supplied, so that weights are not
    # downloaded at import time and a caller-provided model is actually used
    if model is None:
        model = load_psenet_model(cuda=cuda,
                                  scale=upsample_scale)
scaled_img = scale_image(image, long_size)
#scaled_img = np.expand_dims(scaled_img,axis=0)
scaled_img = transforms.ToTensor()(scaled_img)
scaled_img = transforms.Normalize(mean=[0.0618, 0.1206, 0.2677], std=[1.0214, 1.0212, 1.0242])(scaled_img)
scaled_img = torch.unsqueeze(scaled_img, 0)
#img = scaleimg(org_img)
#img = img[:,:,[2,1,0]]
#img = np.expand_dims(img,axis=0)
#img = Image.fromarray(img)
#img = img.convert('RGB')
#img = torch.Tensor(img)
#img = img.permute(0,3,1,2)
outputs = model(scaled_img)
score = torch.sigmoid(outputs[:, 0, :, :])
outputs = (torch.sign(outputs - binary_th) + 1) / 2
text = outputs[:, 0, :, :]
kernels = outputs[:, 0:kernel_num, :, :] * text
score = score.data.cpu().numpy()[0].astype(np.float32)
text = text.data.cpu().numpy()[0].astype(np.uint8)
kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)
pred = pypse(kernels, min_kernel_area / (upsample_scale * upsample_scale))
scale = (image.shape[1] * 1.0 / pred.shape[1], image.shape[0] * 1.0 / pred.shape[0])
label = pred
label_num = np.max(label) + 1
boxes = []
for i in range(1, label_num):
points = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]
if points.shape[0] < min_area / (upsample_scale * upsample_scale):
continue
        score_i = np.mean(score[label == i])
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 13:30:37 2017
@author: <NAME> (<EMAIL>)
Description: Implements Bayesian Linear Autoregression with the NIG model
(i.e., spatial locations have iid errors with common variance)
"""
import numpy as np
from scipy import special
from scipy import linalg
from scipy import stats
import scipy
from probability_model import ProbabilityModel
from nearestPD import NPD
class BVARNIG(ProbabilityModel):
"""The Bayesian Vector Autoregression model using past observations as
regressors in a specified neighbourhood. E.g., if the 4-neighbourhood is
selected with lag length 1, then the mean of y_{t,i} is modelled as linear
combination of observations y_{t-1, j} \in nb(i). Around the boundary,
the neighbourhoods are 0-padded.
###****************************************************************###
### MODEL PRIORS ###
###****************************************************************###
Inputs always needed. They correspond to priors in the following model:
Y ~ N(X*beta, sigma2 * I),
beta ~ N(beta_0, sigma2 * V_0),
sigma2 ~ IG(a,b)
prior_a: float >0:
a parameter of the Inverse Gamma, a>0
prior_b: float >0:
b parameter of the Inverse Gamma, b>0
prior_mean_beta: 1D-numpy array of size k, k = num regressors:
corresponds to beta_0, i.e. the mean prior of coefficients.
Takes precedence over prior_mean_scale if both specified.
prior_var_beta: 2D-numpy array of size kxk, k = num regressors:
corresponds to V_0, i.e. the covariance prior of coefs
Takes precedence over prior_var_scale if both specified.
prior_mean_scale: float:
If prior_mean_beta is None, prior_mean_scale supplied, the number
of regressors k is calculated automatically and
beta_0 = prior_mean_scale * np.ones(k)
prior_var_scale: float >0:
If prior_var_beta is None, prior_var_scale supplied, the number
of regressors k is calculated automatically and
            V_0 = prior_var_scale * np.identity(k)
###****************************************************************###
### REGULAR GRID + STRONG PARAM BINDING ###
###****************************************************************###
Inputs needed when assuming regular grid with strong parameter binding:
nbh_sequence, restriction_sequence, padding
nbh_sequence: array with integer entries, only needed if data on
regular grid, with strong coupling between effects:
        0, 4, 8 -> specify the sequence of VAR-nbhs.
corresponds to strong parameter coupling on regular
grid with no neighbourhood (0), 4-neighbourhood (4),
and 8-neighbourhood (8). I.e. all locations are on
a grid defining their nbhs, and share params.
(See restriction_sequence for param sharing)
restriction_sequence: array with integer entries, only needed if data
on regular grid, with strong coupling between effects:
0, 4, 8 -> specify the restriction of nbh_sequence on regular
spatial grid with parameter coupling.
Regardless of 0,4,8, we always couple across all
LOCATIONS! I.e., params the same across the grid.
However, we can vary how much we couple params within
each location's nbh: Not at all, i.e. one parameter
for each nbh location relative to each location (0),
the 4 inner and the 4 outer (4), and in the case of
a 8-nbh, all 8 together (8). See Fig. 2 in the paper
for illustration of 4-nbh (red), 8 nbh (red + blue),
0 nbh (orange).
NOTE: The parameter bindings for the intercepts are
again specified via intercept_grouping (see below).
They are NOT strongly coupled unless the argument
is not specified or is supplied as None.
padding: string:
ONLY needed if we specify nbh_sequence and restriction_sequence,
implying that we are on a regular grid. Then, we need to pad the
outside of the grid using one of the below options:
'overall_mean' -> compute mean across space and fill in
'row_col_mean' -> compute row and col means and fill in
'zero' -> insert zeros (bias estimation towards 0)
'leave-out' -> don't pad at all, and estimate only using
locations with full neighbourhood
###****************************************************************###
### GENERAL NBHS + ANY PARAM BINDING ###
###****************************************************************###
Inputs needed when assuming general nbh structures with arbitrary
parameter bindings:
intercept_grouping, general_nbh_sequence,
general_nbh_restriction_sequence , general_nbh_coupling
intercept_grouping: GxS1xS2 numpy array of ones or zeros grouping the
locations into G groups so that each group shares the intercept.
Notice that summing over the G-dimension, we would get an S1xS2
array of only ones. I.e., each location has to be in one of the G
groups. Extreme cases: G=1 with a single slice of ones => all
locations have one shared intercept. G=S1*S2 with each of the G
        slices containing exactly a single 1-entry and only zeros otherwise
=> each location has individual intercept.
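        Illustrative example (not from the paper): for a 2x2 grid split into
        a left- and a right-column group, G=2 and one could pass
        intercept_grouping = np.array([[[1,0],[1,0]], [[0,1],[0,1]]]);
        its sum over the G-dimension is the required S1xS2 array of ones.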
general_nbh_sequence: list of list of lists:
Gives an nparray of nparrays of nparrays of
coordinates/identifiers, i.e. an object like
[[[2,3,4],[5,6],[7]], [[5,6],[8],[9,10]], ...].
Here, [2,3,4] would be the 'closest' nbh to the
point with spatial coordinate 0, [5,6] the second-
closest, [7] the third-closest. how far away from
the closest nbh you consider the data is implied
by the general_nbh_restriction_sequence that
will give you the indices of the nbhs to be
considered for each lag length.
In the notation of the PAPER, this gives you the nbh. system as
        [[N_1(1), N_2(1), N_3(1)], [N_1(2), N_2(2), N_3(2)], ...], i.e.
list entry s belongs to location with index s and contains n neigh-
bourhoods N_1(s), ... N_n(s) s.t. the indices describe spatial
closeness, with smaller indices indicating that we are closer to s.
Note that we assume n to be the same for all locations. If there
is a case where you assume that some locations s have less nbhs
than others, simply add some empty nbhs, i.e. N_i(s) = [].
general_nbh_restriction_sequence: list of list:
Gives you a list of lists of indices, i.e.
[[0,1,2,3], [0,1,2], [0],[]], where it must hold that
later entries are strict subsets of previous ones
s.t. the largest value at position l is at most as
big as the largest value at position l-1. Also, if
k is the largest value at position l, it must hold
that all k' s.t. 0<=k'<=k must be in that list entry
NOTE: If you want to have only auto-regressive
terms at some nbh (I.e., only its own past influen-
ces the present), then simply add an empty list [].
In the notation of the PAPER, this is the function p(.) assigning
temporal meaning to the neighbourhood structure. p is given in
list form so that the l-th entry of p gives all indices that
are going to be used at lag length l. I.e., assuming p(l) = 3
(using p's definition from the paper), it will hold that the
respective entry in general_nbh_restriction_sequence is going to
be [0,1,2]. More generally, for p(l) = k, [0,1,...,k-1].
general_nbh_coupling: string:
["no coupling", "weak coupling",
"strong coupling"], tells you how neighbourhoods
are tied together. "no coupling" means that each
linear effect of s' \in N_i(s) is modelled sepa-
rately. "weak coupling" means that the linear
effect for all s' \in N_i(s) are modelled together,
and "strong coupling" means that the linear effects
are also modelled together across space, i.e.
s' \in N_i(s) and g \in N_i(k) have the same effect
(but s' in N_j(s) and g in N_i(k) do not)
NOTE: no coupling is not implemented, because you
can obtain the same effect by weak coupling and
treating each station as its own nbh.
In the PAPER, notes on this are right after SSBVAR definition.
"weak coupling" is the standard modelling framework that assumes
that for all locations in a given nbh, we have a single linear
effect. "strong coupling" means that in addition, we have the same
linear neighbourhood effect for each location.
###****************************************************************###
### HYPERPARAMETER LEARNING ###
###****************************************************************###
Inputs needed when doing hyperparameter learning:
hyperparameter_optimization [don't use auto_prior_update!]
hyperparameter_optimization (ProbabilityModel level): string or None:
-> [True, False, None, "caron", "online", "turner"]
by default, this is True, which amounts to updating
the gradients but not performing on-line/caron's
hyperpar. learning. If False or None, the gradients
are not updated. "caron" and "online" both employ
the on-line hyperparameter learning proposed by
<NAME> (2012). If you don't want
this, but only want to do Turner's routine, you
have to do so via an enclosing HyperparameterOptimization
object. For this, put hyperparameter_optimization
to True (default) or "turner".
I.e., "turner", "True" mean that gradients are updated recursively,
but not used (unless an enclosing HyperparameterOptimization
object uses them), "caron" and "online" mean that we perform
gradient descent updates as in the PAPER. "False" and None mean
that we don't update the gradients. (barely any computational
benefit in so doing)
auto_prior_update: boolean.
Basically, DON'T set to True. It updates the priors by setting them
to the posterior expectations at time t. For instance, the beta_0
prior at time t will be set to
sum_r{ beta_rt[r,:] * P(r|y_1:t) }.
###****************************************************************###
### EXOGENEOUS/ADDITIONAL PREDICTORS ###
###****************************************************************###
NOT IMPLEMENTED!!!!
Inputs needed when using additional variables:
exo_selection, nbh_sequence_exo
        NOTE: Intercepts, EXO, and ENDO vars can always be grouped by the
following simple procedure: Suppose you have two groups G1, G2.
Let's assume you assume the same model in G1, G2 but with diff-
erent parameterizations. Lets say the params you want are
a(G1), a(G2), b(G1), b(G2). Then you can just estimate all four
coefs jointly by having G1 have a column of zeros for the var
corresponding to a(G2), b(G2) and vice versa.
NOTE: At some point, it may be good to replace the strings indicating
our neighbourhood structures using integers instead, since
string-computations are more expensive than integer-computations
exo_selection:
0,1,2,.. -> gives you a selection vector of length
num_exo_vars allowing you to select which exos
you want to regress on Y. The integers are
the row index in vector [exo1, exo2, ...] of
regressors available at each location.
nbh_sequence_exo: #not in the input
0,4,8 -> gives you the nbh of the lagged exos that are
regressors for your problem. Starts at time t
(rather than t-1, as for endo sequence)
###****************************************************************###
### OTHER INPUTS ###
###****************************************************************###
None of these inputs are needed, they provide additional functionality
non_spd_alerts: boolean:
Gives an alert whenever the covariance matrix was not semi-positive
definite and needed to be converted into an spd-matrix by forcing
it via 'nearestPD' or adding a disturbance.
NOTE: If you experience this a lot, try to rescale your data, i.e.
normalize it on-line or do something along the same lines.
"""
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" OBJECT INITIALIZATION FUNCTIONS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def __init__(self,
prior_a,
prior_b,
S1,
S2,
prior_mean_beta=None,
prior_var_beta=None,
prior_mean_scale=0,
prior_var_scale=100,
nbh_sequence=None,
restriction_sequence = None,
intercept_grouping = None,
general_nbh_sequence = None,
general_nbh_restriction_sequence = None,
exo_selection = None,
padding = 'overall_mean',
#deprecated argument, should go
auto_prior_update=False,
hyperparameter_optimization = "online",
general_nbh_coupling = "strong coupling",
non_spd_alerts =False
):
"""STEP 1: Store priors"""
self.a, self.b = prior_a, prior_b
"""if beta_0 or beta's covariance matrix are specified, that takes
precedence over a supplied scaling of a vector/matrix of ones"""
if not prior_mean_beta is None:
self.prior_mean_beta = prior_mean_beta.flatten()
else:
self.prior_mean_beta= prior_mean_beta
self.prior_var_beta= prior_var_beta
"""STEP 2: Store execution parameters"""
self.auto_prior_update = auto_prior_update #Don't use
if (hyperparameter_optimization is not None or
hyperparameter_optimization is not False):
self.a_old = prior_a + 0.0000001 #Used for gradient computation
self.b_old = prior_b+ 0.0000001 #Used for gradient computation
self.gradient_old = 0.0 #Used for gradient computation
self.a_list, self.b_list = [],[]
self.hyperparameter_optimization = hyperparameter_optimization
self.non_spd_alerts = non_spd_alerts #if cov mat not spd and forced
#to be, this alerts you.
"""STEP 3: Get informations about the model we set up"""
self.has_lags = True #needed inside detector
self.generalized_bayes_rld = "kullback_leibler" #changed from inside detector init
self.alpha_rld_learning = False
self.alpha_rld = None #changed from inside detector init
self.S1, self.S2 = S1, S2
"""STEP 3.1: If we are on a regular grid with strong param binding"""
self.restriction_sequence = restriction_sequence
self.nbh_sequence = nbh_sequence
self.padding = padding
"""STEP 3.2: If we are on general neighbourhood structures"""
self.general_nbh_sequence = general_nbh_sequence
self.general_nbh_restriction_sequence = general_nbh_restriction_sequence
self.general_nbh_coupling = general_nbh_coupling
self.intercept_grouping = intercept_grouping
"""STEP 3.3: Check if we use regular grid + strong param binding or
the more general framework"""
if ((not self.restriction_sequence is None) and
(not self.nbh_sequence is None) and
(not self.padding is None)):
self.regular_grid = True
elif ((not self.general_nbh_sequence is None) and
(not self.general_nbh_restriction_sequence is None) and
(not self.general_nbh_coupling is None)):
self.regular_grid = False
elif (( self.restriction_sequence is None) and
( self.nbh_sequence is None) and
( self.general_nbh_sequence is None) and
( self.general_nbh_restriction_sequence is None)):
#In this case, we have only constant terms
self.regular_grid = False
self.has_lags = False
self.lag_length = 0 #unclear if it is arrived at automatically
self.general_nbh_coupling = None
else:
"""Neither specification is complete, so end the execution here"""
raise SystemExit("Your neighbourhood specifications " +
"are incomplete: At least one of " +
"restriction_sequence, nbh_sequence, padding is None; " +
"or at least one of " +
"general_nbh_sequence, general_nbh_restriction_sequence ,"+
" general_nbh_coupling is None")
"""STEP 3.4: If we have any exogeneous/additional variables"""
if exo_selection is None or exo_selection == []:
self.exo_bool = False
exo_selection = []
self.exo_selection = []
else:
self.exo_bool = True
self.exo_selection = exo_selection
"""STEP 4: Convert the neighbourhood into a sequence of strings
for the endogeneous variables"""
"""STEP 4.1: Get the codes for the intercept design"""
self.get_intercept_codes()
"""STEP 4.2: Get endogeneous regressor codes (self.endo_vars), lag
length (self.lag_length), and information about empty nbhs
(self.empty_nbhs, self.sum_empty_nbhs_per_lag)"""
#DEBUG: Not needed under constant fct. Simply set self.endo_var=[].
# do this inside fct.
self.get_endo_vars()
"""STEP 4.3: Get exogeneous regressor codes (self.exo_vars)"""
self.exo_vars = [self.intercept_codes + exo_selection]
"""STEP 4.4: Get all regressor codes"""
self.all_vars = list(self.exo_vars) + list(self.endo_vars)
self.all_vars = sum(self.all_vars, [])
"""STEP 5: Define quantities relating to the regressors:
the sequences of variables, the counts of variables,
the lag-structure, extraction list for updating"""
"""STEP 5.1: Get the number of each type of variable"""
self.num_exo_regressors = len(sum(self.exo_vars, []))
self.num_endo_regressors = len(sum(self.endo_vars, []))
self.num_regressors = (self.num_endo_regressors +
self.num_exo_regressors)
"""STEP 5.2: Get the lag structure such that lag_counts stores the
#exo_vars at position 0,and stores at position l the count
{#exo_vars + sum(#endo_vars: lag <= l) inside
self.lag_counts"""
#DEBUG: For constant function, this should be only the first line of
# the function
self.get_lag_counts()
"""STEP 6: Get the extraction vector and the insertion position. Note
that the result will be a list of form [1,1,1,0,0,1,1,1,1,0,0,0], which
means that the first 3 endogeneous variables will be kept, the next
two will be discarded, the next 4 will be kept, and the next 3 disc."""
"""STEP 6.1: Store in each entry l the number of endogeneous regressors
for lag l"""
#For constant fct, this should just return an empty list (if se set lag_length = 0)
endo_regressors_per_lag = self.get_endo_regressors_per_lag()
"""STEP 6.2: You can now get a list that tells you for given X_t-1
which columns need copying to X_t. You never copy exogeneous variables.
Also, the first lag for X_t will be new, so one can copy at most
lag_length -1 neighbourhoods from X_t-1 to X_t. Store this list as
self.extraction_list, and the position where you start extracting
as self.insertion_position with the function below"""
#DEBUG: This should still work and return an empty extraction list as
# well as an insertion position = p
self.get_extraction_list(endo_regressors_per_lag)
"""STEP 7: create the objects we need to trace through time"""
self.XX, self.YX, self.model_log_evidence = None, None, -np.inf
"""NOTE: The quantities below will be re-initialized in the
initialization function, but have to be instantated here due to how
the enclosing Detector object calls model_and_run_length_distr"""
self.retained_run_lengths = np.array([0,0])
self.joint_log_probabilities = 1
#DEBUG: Should not really be here (but insted in initialization)
self.log_alpha_derivatives_joint_probabilities = None #np.ones(3)
self.log_alpha_derivatives_joint_probabilities_sign = None #np.ones(3)
"""STEP 8: Rectify prior_beta_mean and prior_beta_var if needed.
Give a warning about this, too!"""
"""STEP 8.1: prior mean beta is not supplied or does not correspond
to the right dimensions: Check if a scale is
supplied. If not, automatically set the scale to 0.0, ensuring
that beta_0 = 0."""
if (self.prior_mean_beta is None or
self.num_regressors != np.size(self.prior_mean_beta)):
if prior_mean_scale is None:
prior_mean_scale = 0.0
self.prior_mean_beta = (prior_mean_scale*
np.ones(self.num_regressors))
"""STEP 8.2: prior var beta is not supplied or does not correspond
to the right dimensions: Check if a scale is
supplied. If not, automatically set the scale to 100.0, ensuring
that V_0 = 100*I."""
if (self.prior_var_beta is None or
self.num_regressors != prior_var_beta.shape[0] or
self.num_regressors != prior_var_beta.shape[1]):
if prior_var_scale is None:
prior_var_scale = 100.0
self.prior_var_beta = (prior_var_scale*
np.identity(self.num_regressors))
def get_intercept_codes(self):
"""Only called in __init__: Gets the intercept regressor codes"""
if (self.intercept_grouping is None or
self.intercept_grouping == np.array([])):
self.intercept_codes = ["intercept"]
else:
self.num_intercept_groups = self.intercept_grouping.shape[0]
self.intercept_codes = []
for g in range(0, self.num_intercept_groups):
self.intercept_codes.append(("intercept_group_" + str(g)))
def get_endo_vars(self):
"""Only called in __init__: Gets self.endo_vars, self.lag_length,
self.empty_nbhs, self.sum_empty_nbhs_per_lag in different ways,
depending on how your nbh structure is set up."""
endo_vars = []
""""STEP A: If you are on regular grid with strong parameter binding"""
if self.regular_grid:
self.lag_length = np.size(self.nbh_sequence)
for lag in range(0,int(self.lag_length)):
restriction = self.restriction_sequence[lag]
nbh = self.nbh_sequence[lag]
if restriction == 0:
if nbh == 0:
endo_vars.append(["center"])
elif nbh == 4:
endo_vars.append([ "center","top", "left", "right",
"bottom"])
elif nbh == 8:
endo_vars.append(["center",
"top", "left", "right", "bottom",
"topleft", "topright","bottomleft", "bottomright"])
elif restriction == 4:
if nbh == 0:
endo_vars.append(["center"])
print("Warning: Restriction sequence")
print("contained 4, nbh sequence a 1-nbh")
print("at the same position.\n")
elif nbh == 4:
endo_vars.append(["center", "4_inner_nbh_res"])
elif nbh == 8:
endo_vars.append(["center", "4_outer_nbh_res",
"4_inner_nbh_res"])
elif restriction == 8:
if nbh == 0:
endo_vars.append(["center"])
print("Warning: Restriction sequence")
print("contained 8, nbh sequence a 1-nbh")
print("at the same position.\n")
elif nbh == 4:
endo_vars.append(["center", "4_inner_nbh_res"])
print("Warning: Restriction sequence")
print("contained 8, nbh sequence a 4-nbh")
print("at the same position.\n")
elif nbh == 8:
endo_vars.append(["center", "8_nbh_res"])
print("Warning: Restriction = 8, which is not fully implemented")
elif self.general_nbh_coupling == "weak coupling":
"""STEP B: If we use the general nbh sequence formulation with
weak coupling (i.e. nbh-specific, but not across space).
Recall that the structure is as follows:
general_nbh_sequence = [[[4,5,6],[7,8],[9]], [[2,3,4],[5],[7]],...]
general_nbh_restriction_sequence = [[0,1,2],[0,1,2],[0,1],[2]].
Here, lag_length = 4, general_nbh_restriction_sequence[lag] = g(l),
where g(l) gives you the index of the nbh generating the regressors
at lag length l for s, i.e. N_p(l)(s)
We want to get strings of form
general_nbh_<lag>_<nbh_index>_<loc>,
where <lag> gives you the index in general_nbh_restriction_seq that
you need, say <lag> = 0, i.e. we care about [0,1,2]. Given this
index list, <nbh_index> then tells us which of the indices (and
thus neighbourhoods) we care about, i.e. nbh_index = 0 would mean
we care about [0,1,2][0] = [0]. Lastly, the <loc> tells us which
index on the lattice we care about, allowing us to retrieve
general_nbh_sequence[<loc>][general_nbh_restriction_seq[<lag>][<nbh_index>]]
as the indices of the nbh with <nbh_index> corresponding to
<loc>'s neighbourhood at lag <lag>+1
"""
self.lag_length = int(len(self.general_nbh_restriction_sequence))
self.empty_nbhs = [] #helps us to sort out the extraction list later
self.sum_empty_nbhs_per_lag = np.zeros(self.lag_length)
"""loop I: Go over all lag lengths, since the nbhs and their
restrictions will differ between lag lengths"""
for lag in range(0, int(self.lag_length)):
new_endo_vars_entry = []
"""Loop II: over all locations to fill self.endo_vars with the
correct endogeneous variables for each location and lag"""
for location in range(0, self.S1*self.S2):
#DEBUG: This marks the center for each location separately
# make sure that this does not cause problems for how
# we find the lag (e.g., by counting # of "center"s)
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + "center" + "_" +
str(location))
self.empty_nbhs.append(False)
relevant_nbh_indices = self.general_nbh_restriction_sequence[lag]
"""Loop III: Over all relevant nbh indices for this
location at the current lag. This makes sure that our
endo var codes are specific to lag, location, and the
neighbour whose values are used."""
for nbh_index in relevant_nbh_indices:
"""Only add the nbh if it is non-empty. If it is
empty, nbh_index will have boolean value False."""
if nbh_index:
"""Again, we only want to create the regressor code
if the list is non-empty. If it is empty, we
instead note so inside self.empty_nbhs and
self.sum_empty_nbhs_per_lag in the 'else' cond."""
if self.general_nbh_sequence[location][nbh_index]:
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + str(nbh_index) + "_" +
str(location))
self.empty_nbhs.append(False)
else:
"""Mark which neighbourhoods were left out because
they were empty. Needed for extraction_list and
lag_counts"""
self.empty_nbhs.append(True)
self.sum_empty_nbhs_per_lag[lag] += 1
"""Inside Loop II: For this location and lag, add the
required endogeneous variables into the collection of all
of them"""
endo_vars.append(new_endo_vars_entry)
new_endo_vars_entry = []
elif self.general_nbh_coupling == "strong coupling":
"""STEP C: In this case, we have the same input as for weak
coupling, but a different interpretation. In particular, we couple
the effects over different spatial locations. Accordingly, we save
general_nbh_<lag>_<nbh_index> only.
Then, in the extractors, we loop over <loc> to retrieve the
regressors in a single column as
regressor(<lag>, <nbh_index>)[<loc>] = sum over all measurements
at time t - <lag> for nbh given by
gen_nbh_seq[<loc>][gen_nbh_res_seq[<lag>][<nbh]].
"""
self.lag_length = int(len(self.general_nbh_restriction_sequence))
"""Loop I: Over the lags"""
for lag in range(0, int(self.lag_length)):
new_endo_vars_entry = ["general_nbh_" + str(lag) + "_center"]
relevant_nbh_indices = self.general_nbh_restriction_sequence[lag]
"""Loop II: Over the relevant nbhs. Notice that unlike for the
weak coupling, we only have 2 (rather than 3) loops, as the
locations do not require a separate loop for strong coupling"""
for nbh_index in relevant_nbh_indices:
new_endo_vars_entry.append("general_nbh_" +
str(lag) + "_" + str(nbh_index))
endo_vars.append(new_endo_vars_entry)
elif (self.general_nbh_coupling is None) and (not self.regular_grid):
"""In this case, we only fit constants!|"""
endo_vars = []
self.lag_length = 0
"""Last step: Declare endo_vars as the new attribute of the object"""
self.endo_vars = endo_vars
def get_lag_counts(self):
"""Only called in __init__: Gets self.lag_counts"""
self.lag_counts = [self.num_exo_regressors]
last_count = self.num_exo_regressors
if self.regular_grid:
"""STEP 1.A: If 0/4/8 nbhs used: Can be done via endo vars"""
for entry in self.endo_vars:
self.lag_counts.append(last_count + len(entry) + 1)
last_count = last_count + len(entry) + 1 #update
elif self.general_nbh_coupling == "strong coupling":
"""STEP 1.B: Similar to weak coupling, except you don't need to
multiply by the numbers of locations"""
for lag in range(0, self.lag_length):
self.lag_counts.append(last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1)))
last_count = last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1))
elif self.general_nbh_coupling == "weak coupling":
"""STEP 1.C: If general nbhs, we need more care"""
"""each gen_res corresponds to a lag and gives you a set at
position l, e.g. [0,1,2] at position 0, telling you that at the
first lag, the neighbourhoods used are 0,1,2. Thus, at the first
lag, each location has 3 regressors corresponding to the first
three nbhs for that location in general_nbh_sequence PLUS the
autoregressive term, which is always incorporated but not repre-
sented in any regressor code.
I.e., we need [len([0,1,2]) + 1]*S1*S2 to get the #endogeneous
variables at lag 1. Generally, we thus need
[len(gen_nbh_res_seq[l]) + 1]*S1*S2"""
for lag in range(0, self.lag_length):
self.lag_counts.append(last_count + (
(len(self.general_nbh_restriction_sequence[lag]) + 1)
*self.S1*self.S2) - self.sum_empty_nbhs_per_lag[lag])
last_count = last_count + ( - self.sum_empty_nbhs_per_lag[lag] +
(len(self.general_nbh_restriction_sequence[lag]) + 1)
*self.S1*self.S2)
elif (not self.regular_grid) and self.general_nbh_coupling is None:
"""STEP 1.D: We only fit a constant, so self.lag_counts remains
unchanged. self.lag_counts will be None"""
def get_endo_regressors_per_lag(self):
"""Returns as output the endogeneous regressors per lag"""
if self.regular_grid:
"""STEP 1A: If we have the 4-nbh structure"""
endo_regressors_per_lag = []
for l in range(0, self.lag_length):
res = self.restriction_sequence[l]
nbh = self.nbh_sequence[l]
if res == 0:
endo_regressors_per_lag.append(int(nbh) + 1)
elif res == 4:
endo_regressors_per_lag.append(int(nbh*0.25) + 1)
elif self.general_nbh_coupling is not None:
"""STEP 1B: If we have a general nbh structure, we get
endo_regressors_per_lag differently. In particular, just look at
the self.endo_vars object."""
endo_regressors_per_lag = []
for l in range(0, self.lag_length):
endo_regressors_per_lag.append(int(len(self.endo_vars[l])))
else:
"""STEP 1C: If we only fit a constant"""
endo_regressors_per_lag = []
"""STEP 2: Return the result"""
return endo_regressors_per_lag
def get_extraction_list(self, endo_regressors_per_lag):
"""Gets self.extraction_list and self.insertion position"""
""""STEP 1: Make sure we don't want to copy exogeneous regressors"""
self.extraction_list = [False]*(self.num_exo_regressors)
if self.regular_grid:
"""STEP 1A: IF we have 0/4/8 nbhs """
for i in range(0,self.lag_length-1):
self.extraction_list = (self.extraction_list
+ [True]*endo_regressors_per_lag[i+1]
+ [False]*int(endo_regressors_per_lag[i] -
endo_regressors_per_lag[i+1]))
"""STEP 2A: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
self.extraction_list += ([False]*
endo_regressors_per_lag[self.lag_length-1])
elif self.general_nbh_coupling == "weak coupling":
"""STEP 1B: IF we have general nbhs"""
per_location = []
for lag in range(0, self.lag_length-1):
num_retained = (1 + len(np.intersect1d(
self.general_nbh_restriction_sequence[lag],
self.general_nbh_restriction_sequence[lag+1])))
num_discarded = ( -num_retained + 1 +
len(self.general_nbh_restriction_sequence[lag]))
per_location += ([True]* num_retained +
[False] * num_discarded)
"""STEP 2B: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
total_num_last_lag = 1+ len(
self.general_nbh_restriction_sequence[self.lag_length-1])
per_location += ([False]* total_num_last_lag)
"""STEP 3B: Use that we have the same structure all across the
lattice, and simply multiply each entry of 'per_location' by the
number of lattice elements"""
self.extraction_list += sum(
[self.S1*self.S2*[e] for e in per_location],[])
self.extraction_list[self.num_exo_regressors:] = np.array(
self.extraction_list)[np.where(np.array(
self.empty_nbhs) == False)].tolist()
elif self.general_nbh_coupling == "strong coupling":
"""STEP 1C: IF we have general nbhs"""
per_location = []
for lag in range(0, self.lag_length-1):
num_retained = (1 + len(np.intersect1d(
self.general_nbh_restriction_sequence[lag],
self.general_nbh_restriction_sequence[lag+1])))
num_discarded = ( -num_retained + 1 +
len(self.general_nbh_restriction_sequence[lag]))
per_location += ([True]* num_retained +
[False] * num_discarded)
"""STEP 2C: The last lag of X_t-1 will 'slide out' of sight, so it
definitely is not needed for X_t anymore."""
total_num_last_lag = 1+ len(
self.general_nbh_restriction_sequence[self.lag_length-1])
per_location += ([False]* total_num_last_lag)
"""STEP 3C: Use that we have the same structure all across the
lattice, and simply multiply each entry of 'per_location' by the
number of lattice elements"""
self.extraction_list += per_location
elif self.general_nbh_coupling is None and not self.regular_grid:
"""We have constant function and don't need to change anything"""
"""STEP 4: In order to copy entries of X_t-1 to X_t, you need to know
the position of X_t at which you should insert. (This does
only affect the endogeneous part of the regressors)"""
self.insertion_position = - sum(self.extraction_list)
def reinstantiate(self, a = None, b = None):
"""Return a new BVARNIG-model that contains all the same attributes as
this BVARNIG model. In some sense, it is an 'emptied' version of the
same model. Used inside HyperparameterOptimization, if BVARNIGs
Detector is run for hyperparameter optimization"""
"""STEP 1: Get all the characteristics of this model"""
prior_a, prior_b, S1, S2 = self.a, self.b, self.S1, self.S2
prior_mean_beta,prior_var_beta=self.prior_mean_beta,self.prior_var_beta
nbh_sequence = self.nbh_sequence
restriction_sequence = self.restriction_sequence
intercept_grouping = self.intercept_grouping
general_nbh_sequence = self.general_nbh_sequence
general_nbh_restriction_sequence = self.general_nbh_restriction_sequence
nbh_sequence_exo = self.nbh_sequence_exo
exo_selection = self.exo_selection
padding = self.padding
auto_prior_update = self.auto_prior_update
hyperparameter_optimization = self.hyperparameter_optimization
general_nbh_coupling = self.general_nbh_coupling
non_spd_alerts = self.non_spd_alerts
"""STEP 2: Check whether you have a new prior already"""
if a is None:
a = prior_a
if b is None:
b = prior_b
"""STEP 2: Use the characteristics to clone the model"""
clone_model = BVARNIG(prior_a = a, prior_b = b, S1=S1, S2=S2,
prior_mean_beta=prior_mean_beta,
prior_var_beta =prior_var_beta,
prior_mean_scale=None, prior_var_scale=None,
nbh_sequence=nbh_sequence,
restriction_sequence=restriction_sequence,
intercept_grouping=intercept_grouping,
general_nbh_sequence=general_nbh_sequence,
general_nbh_restriction_sequence=general_nbh_restriction_sequence,
nbh_sequence_exo=nbh_sequence_exo, exo_selection=exo_selection,
padding=padding, auto_prior_update=auto_prior_update,
hyperparameter_optimization=hyperparameter_optimization,
general_nbh_coupling=general_nbh_coupling,
non_spd_alerts=non_spd_alerts)
"""STEP 3: Return the cloned model"""
return clone_model
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" FIRST OBSERVATION INITIALIZATION FUNCTIONS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
#NOTE: We need to pass X_endo with one more entry into this function,
# namely for y_2!
def initialization(self, X_endo, X_exo, Y_2, X_exo_2, cp_model, model_prior,
padding_columns_computeXX = None, padding_column_get_x_new = None):
"""Initialize the model (i.e. t=1) with some inputs from the
containing Detector object. The padding_column arguments are only
needed for the demo Csurf object. This is different from object
instantiation/creation, as it processes the very first (collection of)
observation(s), thus creating the objects and quantities we will trace
through time.
NOTE I: The exo_selection list is applied inside detector, so X_exo
will already contain everything relevant
NOTE II: The tag #QR ADAPTION means that the lines following the tag
could/can be adapted to QR-updating (rather than Woodbury)
X_endo: S1xS2x(L+1) numpy array, float:
is the S1xS2x(L+1) array of the last L observations before t
as well as the observation at t at position L.
Y_2: S1xS2 np array, float:
will be endogeneous regressors at time t+1, which means Y_t.
X_exo: S1xS2xnum_exo np array, float:
will contain exogeneous variables at time t (NOT IMPLEMENTED)
X_exo_2: S1xS2xnum_exo np array, float:
will contain exogeneous variables at time t+1 (NOT IMPLEMENTED)
cp_model: CpModel object:
gives the hazard function inside an object
model_prior: float:
Passes the prior of the Detector object into the model object
padding_columns_computeXX, padding_column_get_x_new:
deprecated, leave None.
"""
print("Initializing BVAR object")
"""STEP 1: Take the data-stream that was partitioned appropriately
inside the Detector object and reshape/rename it for further processing
Y1 = Y_t, Y2 = Y_{t+1}, X1_endo = Y_1:t-1, with t = L-1."""
Y1 = X_endo[-1,:].flatten()
Y2 = Y_2.flatten()
if self.has_lags:
X1_endo = X_endo[:self.lag_length,:].reshape(self.lag_length,
self.S1, self.S2)
else:
X1_endo = None
"""In case there are no exogeneous variables in this model, take
the relevant precautions."""
if self.exo_bool:
#RESHAPE will not corr. to real dims of exo vars
X1_exo = (X_exo[-1,:,:].reshape(
self.num_exo_regressors, self.S1, self.S2))
else:
X1_exo = None
"""STEP 2: Format the quantities we wish to trace through time (i.e.
typically sufficient statistics), and correctly compute them using
neighbourhood structure"""
"""STEP 2.1: Quantities for time point t, i.e. dimension does not
depend on how many run-lengths we retain.
Quantities will hold:
XX
Y_1:t-1'Y_1:t-1, i.e. the cross-product of regressors at time t.
XY
Y_1:t-1'Y_t, i.e. the cross-product of regressors and obs. at t
X_t
Y_1:t-1, i.e. regressors at time t
X_tp1
Y_2:t, i.e. regressors at time t+1 ('t plus (p) 1')
YY
Y_t'Y_t, i.e. observation cross-product
"""
self.XX = np.zeros(shape=(self.num_regressors,self.num_regressors))
self.XY = np.zeros(self.num_regressors)
self.X_t = np.zeros(shape=(self.S1*self.S2, self.num_regressors))
self.X_tp1 = np.zeros(shape=(self.S1*self.S2, self.num_regressors))
self.YY = np.inner(Y1, Y1)
"""STEP 2.2: Cross-product quantities for time point t and run-length
r, i.e. dimension does depend on how many run-lengths we retain. Unlike
quantities only stored for the current time, the quantities below
incorporate the prior beliefs.
Quantities will hold;
XX_rt
At time t, r-th entry holds the cross-product of all regressors
corresponding to run-length r_t, i.e. you sum over the last r_t
cross-products XX. Additionally, XX_rt also holds the prior
belief inside, so
XX_rt[r,:,:] = prior_var_beta^-1 + sum_{i = t-r}^t XX(i)
XY_rt
At time t, r-th entry holds the cross-product of all regressors
            and observations corresponding to run-length r_t, i.e. you sum
over the last r_t cross-products XY. Additionally, XY_rt also holds
the prior belief inside, so
XY_rt[r,:] = prior_var_beta^-1 * prior_beta + sum_{i = t-r}^t XY(i)
YY_rt
As the other two, but with YY, and no prior belief occurs, so
YY_rt[r] = sum_{i = t-r}^t YY(i)
Q_rt, R_rt
            Unusable in the current version; would hold the QR-decomposition of
inverse of XX_rt
"""
self.XX_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors)) #2 for r=-1 and r=0
self.XY_rt = np.zeros(shape=(2,self.num_regressors)) #2 for r=-1 and r=0
self.YY_rt = np.zeros(2)
#QR ADAPTION
self.Q_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors))
self.R_rt = np.zeros(shape=(2,self.num_regressors, self.num_regressors))
"""STEP 2.3: Inverse-related quantities for time point t and run-length
r, i.e. dimension again depends on how many run-lengths on retains.
These are direct functionals of the cross-produts stored above, but
computed/updated in an efficient rather than brute-force way
Quantities will hold:
M_inv_1_rt
Inverse of XX_rt, updated via Woodbury formula at each time point,
but at a later time point than M_inv_2_rt. This means within a
certain time window inside an iteration, we have access to both,
XX_rt^-1 at t and XX_rt^-1 at time t-1, which is needed for
efficient updates.
M_inv_2_rt
Inverse or XX_rt, updated via Woodbury formula at each time point.
See above for the relative timing.
log_det_1_rt
log determinants of all entries in M_inv_1_rt, computed efficiently
log_det_2_rt
log dets of all entries in M_inv_2_rt, computed efficiently
"""
self.M_inv_1_rt = np.zeros(shape=(2,self.num_regressors,
self.num_regressors))
self.M_inv_2_rt = np.zeros(shape=(2,self.num_regressors,
self.num_regressors))
self.log_det_1_rt = np.zeros(2)
self.log_det_2_rt = np.zeros(2)
"""STEP 2.4: beta-coef related quantities for time point t and run-
length r, i.e. dimension depends on how many run-lengths one retains
Quantities will hold:
beta_rt
beta_rt[r,:] stores the coefficients beta corresponding to the
MAP-estimate at time t if one assumes run-length r
beta_XX_beta_rt
                what it says: beta_rt[r,:] * XX_rt[r,:,:] * beta_rt[r,:] at position r
                for each time point t.
"""
self.beta_XX_beta_rt = np.zeros(2)
self.beta_rt = np.zeros(shape=(2,self.num_regressors))
"""STEP 2.5: Retained run lengths, storing which run-lengths you retain
at time t. Careful with this, as retained_run_lengths[i] = j means that
the i-th longest run-length you retain is j"""
self.retained_run_lengths = np.array([0,0])
"""STEP 3: Compute prior- and data-dependent quantities:
Computation of X_t, X_tp1, X'X, X'Y, and Y'Y from scratch."""
"""STEP 3.1: Gives X_t, X'X, X'Y, Y'Y"""
#DEBUG: Unclear if this does what I want for constant case!
self.compute_X_XX_XY_YY( Y1, X1_endo, X1_exo,
padding_columns_computeXX,
compute_XY = True)
"""STEP 3.2: Gives X_{t+1}"""
#DEBUG: Unclear if this does what I want for constant case!
self.X_tp1 = self.get_x_new(Y2, X_exo_2 ,1,padding_column_get_x_new)
"""STEP 4: Using the results of STEP 3, compute some computationally
burdensome results, like XX_rt's inverses and prior inv + det"""
"""STEP 4.1: Computation of the prior inverse, which will be needed
        at each iteration to inform the changepoint probabilities"""
self.D_inv = np.linalg.inv(self.prior_var_beta) #not efficient if D diagonal
_, self.D_inv_log_det = np.linalg.slogdet(self.D_inv)
#QR ADAPTION
self.D_inv_Q, self.D_inv_R = np.linalg.qr(self.D_inv)
self.D_inv_log_det = np.sum(np.log(np.abs(np.diagonal(self.D_inv_R))))
"""STEP 4.2: Use the prior inverse from STEP 4.1 to get the first
inverse computation of XX_rt underway"""
M_inv_1 = np.linalg.inv(self.D_inv + self.XX)
self.M_inv_1_rt[0,:,:] = self.M_inv_1_rt[1,:,:] = M_inv_1
#QR ADAPTION
Q0, R0 = self.QR_loop(self.D_inv_Q, self.D_inv_R, self.X_t)
self.Q_rt[0,:,:] = self.Q_rt[1,:,:] = Q0
self.R_rt[0,:,:] = self.R_rt[1,:,:] = R0
"""STEP 5: Compute the prior contributions/quantities and use them to
get XX_rt, YY_rt, XY_rt with prior influences for r_t = 0"""
"""STEP 5.1: Get D^-1*beta_prior and beta_prior * D^-1 * beta_prior
which are needed later in the estimation as the prior contributions"""
self.D_inv_b0 = np.matmul(self.D_inv, self.prior_mean_beta)
self.b0_D_inv_b0 = np.inner(self.prior_mean_beta, self.D_inv_b0)
"""STEP 5.2: Get the first two values of X'X_rt and X'Y_rt using
        the result of STEP 5.1.
NOTE: Since we will only need X'Y for computing beta(r,t),
we need to work with (D^-1 * beta_0 + X'Y), which is why
we add D^-1 * beta_0 to X'Y whenever we are at r=0."""
self.XX_rt[0,:,:] = self.XX_rt[1,:,:] = self.XX + self.D_inv
self.XY_rt[0,:] = self.XY_rt[1,:] = (self.XY + self.D_inv_b0)
self.YY_rt[0] = self.YY_rt[1] = self.YY
"""STEP 6: Get the log-determinants by brute force or QR
NOTE: If using QR, use trace for determinants of Q(r,t)R(r,t)
for all run-lengths. These are needed in posterior of Y
They can be obtained as trace of R[r,:,:] because Q is an
orthogonal matrix, so det(Q) = 1 and as
det(QR) = det(Q)det(R), it follows det(QR) = det(R)"""
sign, value = np.linalg.slogdet(self.M_inv_1_rt[0,:,:])
self.log_det_1_rt[0] = self.log_det_1_rt[1] = (value) #s.p.d. matrices have pos dets
#QR ADAPTION
#diag = np.abs(np.diagonal(self.R_rt, axis1=1, axis2=2))
#self.log_det_1_rt = np.sum(np.log(diag), axis=1)
"""STEP 7: Compute the MAP of beta = MX'Y from scratch, using triangular
solvers for speedy computation! Also compute beta^T X'X(r,t) beta.
If QR is used, you also calculate the inverses here."""
beta = np.matmul(self.M_inv_1_rt[0,:,:],self.XY_rt[0,:])
self.beta_rt[0,:] = self.beta_rt[1,:] = beta
#QR ADAPTION
#beta = linalg.solve_triangular(a = self.R_rt[0,:,:],
# b = np.matmul(np.transpose(self.Q_rt[0,:,:]),self.XY_rt[0,:]),
# check_finite=False)
#self.M_inv_1_rt[0,:,:] = self.M_inv_1_rt[1,:,:] = (
# linalg.solve_triangular(a=R0, b = np.transpose(Q0),
# check_finite=False))
self.beta_XX_beta_rt[0] = self.beta_XX_beta_rt[1] = (np.inner(np.matmul(
self.beta_rt[0,:], self.XX_rt[0,:]), self.beta_rt[0,:]))
"""STEP 8: Lastly, update the inverses for one-step-ahead of time, i.e.
get M_inv_2_rt as well as its log determinant."""
"""STEP 8.1: If we do Woodbury, this is a brute force step involving
inversion of the small matrix that re-appears later on
inside 'mvt_log_density' as C_t_inv.
If we do QR-updates, perform QR update w.r.t. X_tp1 and
get M_inv + log_det_2. Do NOT update X'X, X'Y, X_t, X_tp1, Y'Y since
they will be already updated"""
small_matrix_inv = (
np.linalg.inv(
np.identity(self.S1*self.S2) +
np.matmul((self.X_tp1), np.matmul(
self.M_inv_1_rt[0,:,:], np.transpose(self.X_tp1)))) )
"""Brute force determinant calc for small matrix + recursive update for
determinant of M(r,t). We take -value because log(det(M^-1)) =
-log(det(M))"""
sign2, value2 = np.linalg.slogdet(small_matrix_inv)
self.log_det_2_rt[0] = self.log_det_2_rt[1] = (
value2 + self.log_det_1_rt[0])
"""Woodbury Update-Inversion formula for M_inv_2, see handwritten notes
for derivation"""
M_inv_1_x_X_tp1 = np.matmul(self.M_inv_1_rt[0,:,:],
np.transpose(self.X_tp1))
self.M_inv_2_rt[0,:,:] = self.M_inv_2_rt[1,:,:] = (
self.M_inv_1_rt[0,:,:] - np.matmul((M_inv_1_x_X_tp1),
np.matmul( small_matrix_inv,
np.transpose(M_inv_1_x_X_tp1))))
#QR ADAPTION
#Q1, R1 = self.QR_loop(self.Q_rt[0,:,:], self.R_rt[0,:,:], self.X_tp1)
#self.Q_rt[0,:,:] = self.Q_rt[1,:,:] = Q1
#self.R_rt[0,:,:] = self.R_rt[1,:,:] = R1
#self.M_inv_2_rt[0,:,:] = self.M_inv_2_rt[1,:,:] = linalg.solve_triangular(
# a=R1, b = np.transpose(Q1), check_finite=False)
#diag = np.abs(np.diagonal(self.R_rt, axis1=1, axis2=2))
#self.log_det_2_rt = np.sum(np.log(diag), axis=1)
"""STEP 9: Compute the joint log probabilities under your prior by
computing the predictive and multiplying it with the model prior as
well as the probability that we have a CP at time 1 vs at a time
before the first observation was made. Also compute their gradients
for efficient updating."""
"""STEP 9.1: Get the posterior parameter estimates from your model,
use them to get the value of your predictive distribution."""
a_ = self.a + 0.5
b_ = self.b + 0.5*(self.b0_D_inv_b0 + self.YY - self.beta_XX_beta_rt[0])
C_0_inv = (a_/b_)*(np.identity(self.S1*self.S2) -
np.matmul(self.X_t, np.matmul(self.M_inv_1_rt[0,:,:],
np.transpose(self.X_t))))
if b_<0:
log_det = np.nan
else:
log_det = ((self.S1*self.S2) * (np.log(b_) - np.log(a_)) +
self.D_inv_log_det - self.log_det_1_rt[0])
"""This step ensures that we center the MVT at zero, which makes
the computations inside mvt_log_density easier"""
resid = Y1 - np.matmul(self.X_t, self.beta_rt[0,:])
"""For the first observation, the predictive probability and the
model evidence are equivalent, as the model evidence is computed under
prior beliefs (captured by a_, b_, C_0_inv) only."""
self.model_log_evidence = ( np.log(model_prior) +
BVARNIG.mvt_log_density(resid, C_0_inv, log_det, 2*a_,
self.non_spd_alerts))
"""STEP 9.2: Multiply the model evidence by the hazard rate/cp prior
as well as the model prior to get the joint log probs for run-length
        equalling 0 or being >0 (i.e., first CP occurred before first obs)"""
"""Numerical stability: Ensure that we do not get np.log(0)=np.inf
by perturbation"""
if cp_model.pmf_0(1) == 0:
epsilon = 0.000000000001
else:
epsilon = 0
"""get log-probs for r_1=0 or r_1>0. Typically, we assume that the
first observation corresponds to a CP (i.e. P(r_1 = 0) = 1),
but this need not be the case in general."""
r_equal_0 = (self.model_log_evidence +
np.log(cp_model.pmf_0(0) + epsilon))
r_larger_0 = (self.model_log_evidence +
np.log(cp_model.pmf_0(1)+ epsilon))
self.joint_log_probabilities = np.array([r_equal_0, r_larger_0])
"""STEP 8.3: Get the derivative of the log probs, too, just
initialize to 1 (since log(1) = 0), initialize with 2 columns
(for 2 hyperparams: a,b). We may wish to extend this to more params"""
self.model_specific_joint_log_probabilities_derivative = np.ones((2,2))
self.model_specific_joint_log_probabilities_derivative_sign = np.ones(
(2,2))
"""STEP 8.4: Similar to 8.3, but for alpha-optimization. Hence we only
do this if we have to"""
if self.alpha_rld_learning:
self.log_alpha_derivatives_joint_probabilities = None #np.ones(3)
self.log_alpha_derivatives_joint_probabilities_sign = None #np.ones(3)
def compute_X_XX_XY_YY(self, Y0, X0_endo, X0_exo, padding_columns = None,
compute_XY = True):
"""Compute X'X, X'Y, Y'Y, X_t from scratch. Called at initialization.
Uses the nbh-strings to concatenate the raw data of X0_endo, Y0 (and
potentially at a later stage X0_exo) into the regressors that we want
for our model.
NOTE: compute_XY = False only for BVARNIGDPD models, where there is
no need to know XY
"""
"""Computation: Loop over both exogeneous and endogeneous variables,
        retrieve their cross-products element-wise. If a cross-product has
        already been computed before, just copy the relevant entry in X'X and paste it."""
#DEBUG: Reshape X0_endo into (lag_length,S1, S2)
if self.has_lags:
X0_endo = X0_endo.reshape(self.lag_length, self.S1, self.S2)
else:
X0_endo = None
lag_count1, lag_count2 = 0,0
"""OUTER LOOP: Over all regressors"""
for i in range(0, self.num_regressors):
"""Since exo vars are stored first in all_vars, this condition
allows us to see if we need to access exo or endo vars"""
if (i <= (self.num_exo_regressors - 1)):
"""EXOGENEOUS"""
#DEBUG: Do I get the intercept from here? I should, since
# self.all_vars will still be containing the intercept_codes
data_vector1 = self.get_exo_regressors(self.all_vars[i], i,
X0_exo)
elif self.has_lags:
"""If we need endo vars, make sure that we advance the lag
length appropriately afterwards"""
if (i >= self.lag_counts[lag_count1]):
lag_count1 = lag_count1 + 1
"""ENDOGENEOUS"""
"""I.e., if we do not pass padding columns, we cannot access
the None-type object and thus skip the argument"""
if padding_columns is None:
data_vector1 = self.get_endo_regressors(self.all_vars[i],
lag_count1, X0_endo)
else:
data_vector1 = self.get_endo_regressors(self.all_vars[i],
lag_count1, X0_endo, padding_columns[i,:])
lag_count2 = 0 #reset lag count
"""INNER LOOP: Over all regressors"""
for j in range(0, self.num_regressors):
"""This condition ensures that we do not re-compute cross-
products after having done so before"""
if (i <= j):
if (j <= (self.num_exo_regressors - 1)):
"""EXOGENEOUS"""
data_vector2 = self.get_exo_regressors(
self.all_vars[j], j, X0_exo)
elif self.has_lags:
"""If we need endo vars, make sure that we advance the lag
length appropriately afterwards"""
if (j >= self.lag_counts[lag_count2]):
lag_count2 = lag_count2 + 1
"""ENDOGENEOUS"""
if padding_columns is None:
data_vector2 = self.get_endo_regressors(
self.all_vars[j], lag_count2, X0_endo)
else:
data_vector2 = self.get_endo_regressors(
self.all_vars[j], lag_count2, X0_endo,
padding_columns[i,:])
"""if i == 0, we loop over all j. Use this to compute X'Y
as well as X"""
if(i == 0):
self.X_t[:,j] = data_vector2
if compute_XY:
self.XY[j] = np.inner(data_vector2, Y0)
"""Computation: Fill in X'X with dot products!"""
prod = np.inner(data_vector1, data_vector2)
self.XX[i,j] = prod
self.XX[j,i] = prod
"""Lastly, compute Y'Y"""
self.YY = np.inner(Y0, Y0)
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
""" EVALUATE PROBABILITIES/BELIEFS """
"""~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
def evaluate_predictive_log_distribution(self, y, t):
"""Returns the log densities of *y* using the predictive posteriors
for all possible run-lengths r=0,1,...,t-1,>t-1 as currently stored by
virtue of the sufficient statistics.
The corresponding density is computed for all run-lengths and
returned in a np array
Note: This is called BEFORE update_log_distr, so at time t, the
quantities one tracks through time (like Q_rt, R_rt, ...) will
still only hold Q(r,t), R(r, t), ... and so on (rather than
Q(r+1,t+1), R(r+1,t+1) ... ). Similarly, the regressors
X_t will actually correspond to time point t-1, so we instead
use the regressors stored inside X_tp1 = X_t+1 for evaluating
the pred. density of y.
"""
"""STEP 1: Preliminaries.
- Get y into vector format,
- get log_densities as container of log predictive densities
- get C_t_inv[r,:,:] as the posterior precision for run-length r+1
"""
y = y.flatten()
run_length_num = self.retained_run_lengths.shape[0]
log_densities = -np.inf * np.ones(shape=run_length_num)
"""Note that we store the r0-C_t_inv too, so this quantity is one
entry longer than all other quantities"""
self.C_t_inv = np.zeros((run_length_num+1, self.S1*self.S2,
self.S1*self.S2))
self.predictive_variance_log_det = np.zeros(run_length_num+1)
self.C_t_inv[0,:,:] = self.C_t_inv_r0
self.predictive_variance_log_det[0] = (
self.predictive_variance_r0_log_det)
"""STEP 2: Loop over all retained run-lengths and fill log_densities[r]
with the predictive log density for run-length r.
NOTE: We cannot use retained_run_lengths to loop directly, since
r=t-1 and r>t-1 both have a 0 in there."""
for r in range(0,run_length_num):
"""STEP 2.1: Get inverse of posterior variance ( = posterior
precision) using stored quantities & woodbury (see notes)"""
a_ = self.a + (self.retained_run_lengths[r]+1.0)*0.5
b_ = (self.b + 0.5*(self.b0_D_inv_b0 + self.YY_rt[r] -
self.beta_XX_beta_rt[r]))
self.C_t_inv[r+1,:,:] = (np.identity(self.S1*self.S2) -
np.matmul(self.X_tp1, np.matmul(self.M_inv_2_rt[r,:,:],
np.transpose(self.X_tp1))))
"""STEP 2.2: Get the log determinant using the Woodbury Formula and
applying the determinant lemma afterwards (see notes)
NOTE: We take the minus in front because we compute the
log det of the INVERSE matrix C(r,t)^-1 here, but
need that of C(r,t) for call of 'mvt_log_density'"""
if b_ < 0:
log_det = np.nan
else:
log_det = ((self.S1 * self.S2)*(
                        np.log(b_)
# api: numpy.log
'''
# Spam SMS Detection
# <NAME>
'''
import os
import sys
import numpy as np
def create_vocabulary(tokenized_sms):
vocabulary = np.array([])
# read file and add every sms to a numpy array
with open(tokenized_sms, 'r') as file:
for line in file:
cur_line = np.array(line.replace('\n','').split(','))
vocabulary = np.append(vocabulary, cur_line)
# obtain all unique words in dataset (vocabulary)
vocabulary, index = np.unique(vocabulary, return_index=True)
vocabulary = vocabulary[index.argsort()]
return vocabulary
"""Feature Matrix"""
def create_feature_matrix(tokenized_sms, vocabulary):
feature_matrix = []
# read file and create a feature matrix
with open(tokenized_sms, 'r') as file:
for line in file:
fill_vocab = np.array([0]*len(vocabulary))
cur_line = np.array(line.replace('\n','').split(','))
cur_vocab, cur_frequency = np.unique(cur_line, return_counts=True)
cur_dict = dict(zip(cur_vocab, cur_frequency))
for key in cur_dict.keys():
fill_vocab[np.where(vocabulary == key)] += cur_dict[key]
feature_matrix.append(fill_vocab)
feature_matrix = np.asarray(feature_matrix)
return feature_matrix
def create_sets(feature_matrix, labels_file):
labels = np.array([])
with open(labels_file, 'r') as file:
for line in file:
labels = np.append(labels, int(line[0]))
# split feature matrix into training and testing sets
train_set = feature_matrix[:4460]
    test_set = feature_matrix[4460:]
train_labels = labels[:4460]
    test_labels = labels[4460:]
return (train_set, test_set, train_labels, test_labels)
"""Multinomial Naive Bayes Model"""
# calculate the MLE estimators
def mle_estimate(train_set, train_labels):
y_spam = []
y_ham = []
T_spam = np.array([])
T_ham = np.array([])
theta_spam = np.array([])
theta_ham = np.array([])
# get the spam and ham messages into their own array from train set
y_ham = [train_set[i,:] for i in np.where(train_labels == 0)][0]
y_spam = [train_set[i,:] for i in np.where(train_labels == 1)][0]
y_ham = np.asarray(y_ham)
y_spam = np.asarray(y_spam)
# number of occurrences of the word j in spam SMSs in the training set
T_spam = np.sum(y_spam, axis=0)
T_ham = np.sum(y_ham, axis=0)
# estimate the probability that a particular word in a spam and ham SMS will be the j-th word of the vocabulary
theta_spam = np.divide(T_spam, np.sum(T_spam))
theta_ham = np.divide(T_ham, np.sum(T_ham))
# estimates the probability that any particular SMS will be spam
spam_prob = len(y_spam) / len(train_set)
return (theta_spam, theta_ham, spam_prob)
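# Illustrative note on the MLE above: if a vocabulary word never occurs in any
# spam training SMS, its entry in T_spam is 0 and the corresponding theta_spam
# is exactly 0, so np.log(theta_spam) contains -inf entries (and 0 * -inf
# products become nan downstream). The MAP estimate with a Dirichlet prior
# further below avoids this by adding alpha to every count.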
# multinomial naive bayes
def naive_bayes(spam_prob, theta_spam, theta_ham, test_set, test_labels):
actual = test_labels
predictions = []
# naive bayes model
# probability that ith SMS is spam
y_pspam = np.log(spam_prob) + np.sum((test_set * np.log(theta_spam)),axis=1)
y_pham = np.log(1-spam_prob) + np.sum((test_set * np.log(theta_ham)),axis=1)
# add prediction
predictions = [1 if y_pspam[i] >= y_pham[i] else 0 for i in range(len(test_set))]
# accuracy metric calculation
TN = 0
FN = 0
TP = 0
FP = 0
for a_y, p_y in zip(actual, predictions):
if a_y == 0 and p_y == 0:
TN += 1
elif a_y == 0 and p_y == 1:
FP += 1
elif a_y == 1 and p_y == 0:
FN += 1
elif a_y == 1 and p_y == 1:
TP += 1
# print("TN:", str(TN))
# print("FP:", str(FP))
# print("FN:", str(FN))
# print("TP:", str(TP))
return np.around(((TN+TP)/len(actual))*100, decimals=2)
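# Illustrative toy example (hypothetical helper, not used elsewhere): the
# log-space decision rule applied in naive_bayes above, on made-up numbers.
# Only numpy (already imported) is assumed.
def _toy_naive_bayes_demo():
    # two-word vocabulary, one test SMS containing word 0 three times
    theta_spam_toy = np.array([0.9, 0.1])
    theta_ham_toy = np.array([0.2, 0.8])
    test_sms = np.array([[3, 0]])
    spam_prior = 0.15
    # log P(class | sms) is proportional to log prior + sum_j count_j * log theta_class_j
    log_p_spam = np.log(spam_prior) + np.sum(test_sms * np.log(theta_spam_toy), axis=1)
    log_p_ham = np.log(1 - spam_prior) + np.sum(test_sms * np.log(theta_ham_toy), axis=1)
    return 1 if log_p_spam[0] >= log_p_ham[0] else 0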
# MAP estimate of theta using a Dirichlet prior
def map_estimate(train_set, train_labels, alpha=1):
alpha = alpha
y_spam = []
y_ham = []
T_spam = np.array([])
T_ham = np.array([])
theta_spam = np.array([])
theta_ham = np.array([])
# get the spam and ham messages into their own array from train set
y_ham = [train_set[i,:] for i in np.where(train_labels == 0)][0]
y_spam = [train_set[i,:] for i in np.where(train_labels == 1)][0]
y_ham = np.asarray(y_ham)
y_spam = np.asarray(y_spam)
# number of occurrences of the word j in spam/ham SMSs in the training set
# add alpha value to each element in T_spam/T_ham array
T_spam = np.sum(y_spam, axis=0)
T_spam = T_spam + alpha
T_ham = np.sum(y_ham, axis=0)
T_ham = T_ham + alpha
# estimate the probability that a particular word in a spam and ham SMS will be the j-th word of the vocabulary
# add alpha * V to the denominator in theta_spam/theta_ham
theta_spam = np.divide(T_spam, (np.sum(T_spam) + alpha * len(T_spam)))
theta_ham = np.divide(T_ham, (np.sum(T_ham) + alpha * len(T_ham)))
# estimates the probability that any particular SMS will be spam
spam_prob = len(y_spam) / len(train_set)
ham_prob = 1 - spam_prob
return (theta_spam, theta_ham, spam_prob)
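# Illustrative toy example (hypothetical helper, not used elsewhere): effect
# of the Dirichlet-prior smoothing in map_estimate above, mirroring its exact
# formula. A word with zero spam count still gets a non-zero probability, so
# np.log(theta) stays finite. Counts below are made up.
def _dirichlet_smoothing_demo(alpha=1):
    T_spam_toy = np.array([5, 0, 3])                       # word 1 never seen in spam
    mle_theta = T_spam_toy / np.sum(T_spam_toy)            # [5/8, 0, 3/8]
    map_theta = (T_spam_toy + alpha) / (np.sum(T_spam_toy + alpha)
                                        + alpha * len(T_spam_toy))
    return mle_theta, map_theta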
"""Feature Selection"""
# forward selection
def forward_selection(feature_matrix_r, train_labels, test_labels):
selected_features_indices = []
prev_acc = -1
curr_acc = 0
G = []
scores = []
while curr_acc - prev_acc > 0.01:
selected_features_indices = G[scores.index(max(scores))] if len(scores) > 0 else selected_features_indices
prev_acc = curr_acc
curr_selected = []
G = []
scores = []
for i in range(len(feature_matrix_r[0])):
if i not in selected_features_indices:
G.append(selected_features_indices + [i])
# get the modified set based on the indices of G
modified_set = feature_matrix_r[:, G[-1]]
# redefine train and test sets
                train_set = modified_set[:4460]
                test_set = modified_set[4460:]
# redefine parameters with dirichlet prior and test
(re_theta_spam, re_theta_ham, spam_prob) = map_estimate(train_set, train_labels, alpha=1)
scores.append(naive_bayes(spam_prob, re_theta_spam, re_theta_ham, test_set, test_labels))
curr_acc = max(scores) if len(scores) > 0 else curr_acc
return selected_features_indices
# feature selection using the frequency of words
def frequency_selection(feature_matrix_r, train_labels, test_labels):
feature_matrix_r_collapse = np.sum(feature_matrix_r, axis = 0)
desc_feature_tuples = {i:feature_matrix_r_collapse[i] for i in range(len(feature_matrix_r_collapse))}
desc_feature_tuples = sorted(desc_feature_tuples.items(), key=lambda x: x[1], reverse=True)
frequency_accuracy = []
for i in range(1, len(desc_feature_tuples)):
score = 0
G = [desc_feature_tuples[:i][j][0] for j in range(i)]
# get the modified set based on the indices of G
modified_set = feature_matrix_r[:, G]
# redefine train and test sets
train_set = modified_set[:4460]
        test_set = modified_set[4460:]
# redefine parameters with dirichlet prior and test
(re_theta_spam, re_theta_ham, spam_prob) = map_estimate(train_set, train_labels, alpha=1)
score = naive_bayes(spam_prob, re_theta_spam, re_theta_ham, test_set, test_labels)
frequency_accuracy.append((i, score))
return frequency_accuracy
def main():
# get the tokenized_corpus file
data_root = './'
tokenized_sms = os.path.join(data_root, 'tokenized_corpus.csv')
vocabulary = create_vocabulary(tokenized_sms)
feature_matrix = create_feature_matrix(tokenized_sms, vocabulary)
# save feature matrix as csv
np.savetxt(fname="feature_set.csv",X = feature_matrix, fmt='%.2f', delimiter=',')
# get the labels
labels_file = os.path.join(data_root, 'labels.csv')
(train_set, test_set, train_labels, test_labels) = create_sets(feature_matrix, labels_file)
# get accuracy with mle estimate
(theta_spam, theta_ham, spam_prob) = mle_estimate(train_set, train_labels)
accuracy = naive_bayes(spam_prob, theta_spam, theta_ham, test_set, test_labels)
print("The accuracy obtained is " + str(accuracy) + "%")
# save accuracy to file
np.savetxt(fname="test_accuracy.csv", X = [accuracy], fmt='%.2f')
# get accuracy with map estimate
(map_spam, map_ham, map_spam_prob) = map_estimate(train_set, train_labels, alpha=1)
accuracy = naive_bayes(map_spam_prob, map_spam, map_ham, test_set, test_labels)
print("The accuracy obtained is " + str(accuracy) + "%")
# save accuracy to file
np.savetxt(fname="test_accuracy_laplace.csv", X = [accuracy], fmt='%.2f')
    # create new reduced feature set of words that occur at least 10 times
feature_matrix_collapse = np.sum(feature_matrix, axis = 0)
feature_matrix_r = np.array([])
feature_matrix_r = [feature_matrix[:, i] for i in np.where(feature_matrix_collapse >= 10)][0]
selected_features_indices = forward_selection(feature_matrix_r, train_labels, test_labels)
print(len(selected_features_indices),"features selected using forward selection")
# save feature indices to file
    np.savetxt(fname="forward_selection.csv", X = selected_features_indices, fmt='%.2f', delimiter="\n")
# api: numpy.savetxt
import numpy as np
import scipy.linalg as linalg
from sklearn.cluster import KMeans
import time
# from utils import stft, istft, write_wav, read_wav
from utils_reverb import load_files, do_reverb, do_stft
import matplotlib.pyplot as plt
import mir_eval
def signum(x):
return x/np.abs(x)
class SpatCov_NTF_H1(object):
def __init__(self, X, mic_locs, V, K_partition=[3,3], J=2):
self.X = X
self.F, self.T, self.M, self.M = self.X.shape
self.K = np.sum(K_partition)
self.J = J
self.MJ = self.M * self.J
mix_psd = 0.5 * (np.mean(np.power(np.abs(self.X[:,:,0,0]),2) + np.power(np.abs(self.X[:,:,1,1]),2),axis=1))
mix_psd = mix_psd.reshape((-1, 1))
self.A = 0.5 * np.multiply(1.9 * np.abs(np.random.randn(self.M,self.MJ,self.F)) + 0.1 * np.ones((self.M,self.MJ,self.F)),signum(np.random.randn(self.M,self.MJ,self.F) + 1j *np.random.randn(self.M,self.MJ,self.F)))
self.W = 0.5 * np.multiply(np.abs(np.random.randn(self.F,self.K)) + np.ones((self.F,self.K)), np.matmul(mix_psd, np.ones((1,self.K))))
self.H = 0.5 * np.abs(np.random.randn(self.K,self.T)) + np.ones((self.K,self.T))
self.Q = 0.5 * np.abs(np.random.randn(self.J,self.K)) + np.ones((self.J,self.K))
self.sigma_b = np.matmul(mix_psd / 100, np.ones((1,self.T))).astype('complex')
self.I = np.diag(np.ones((self.MJ)))
self.O = np.ones((1,self.T))
self.source_ind = []
for j in range(self.J):
self.source_ind.append(np.arange(0,int(self.K/self.J))+int(j*(self.K/self.J)))
self.V = V
def E_step(self):
sigma_s = self.calcSigmas()
sigma_x = self.calcSigmaX(sigma_s)
omega_s = self.calcOmegas(sigma_s, sigma_x)
sigma_hat_xs = self.calcSigmahatxs(omega_s)
sigma_hat_s = self.calcSigmahats(omega_s,sigma_s)
self.calculateSigmab(sigma_x, sigma_hat_xs, sigma_hat_s)
return sigma_hat_xs, sigma_hat_s
def calculateSigmab(self, sigma_x, sigma_hat_xs, sigma_hat_s):
for f in range(self.F):
for t in range(self.T):
Axs = np.matmul(self.A[:,:,f],
np.conj(np.transpose(sigma_hat_xs[:,:,f,t])))
xsA = np.matmul(sigma_hat_xs[:,:,f,t],
np.conj(np.transpose(self.A[:,:,f])))
AsA = np.matmul(np.matmul(self.A[:,:,f],
sigma_hat_s[:,:,f,t]),
np.conj(np.transpose(self.A[:,:,f])))
self.sigma_b[f,t] = 0.5 * np.trace(sigma_x[:,:,f,t] - \
Axs - xsA + AsA)
def calcSigmahats(self, omega_s, sigma_s):
sigma_hat_s = np.zeros((self.MJ, self.MJ, self.F, self.T),'complex')
for f in range(self.F):
for t in range(self.T):
sigma_hat_s[:,:,f,t] = np.matmul(np.matmul(omega_s[:,:,f,t],
self.X[f,t,:,:]),
np.conj(omega_s[:,:,f,t].T)) + \
np.matmul(self.I - np.matmul(omega_s[:,:,f,t],
self.A[:,:,f]),
sigma_s[:,:,f,t])
return sigma_hat_s
def calcSigmahatxs(self, omega_s):
sigma_hat_xs = np.zeros((self.M, self.MJ,self.F, self.T),'complex')
for f in range(self.F):
for t in range(self.T):
sigma_hat_xs[:,:, f,t] = np.matmul(self.X[f,t,:,:],
np.conj(omega_s[:,:,f,t].T))
return sigma_hat_xs
def calcOmegas(self, sigma_s, sigma_x):
Omega_s = np.zeros((self.MJ, self.M, self.F, self.T), 'complex')
for f in range(self.F):
for t in range(self.T):
Omega_s[:,:,f,t] = np.matmul(np.matmul(sigma_s[:,:,f,t],
np.conj(self.A[:,:,f].T)),
np.linalg.inv(sigma_x[:,:,f,t]))
return Omega_s
def calcSigmaX(self, sigma_s):
sigma_x = np.zeros((self.M, self.M, self.F, self.T), 'complex')
for f in range(self.F):
for t in range(self.T):
sigma_x[:,:,f,t] = np.matmul(np.matmul(self.A[:,:,f],
sigma_s[:,:,f,t]),
np.conj(self.A[:,:,f].T)) + \
self.sigma_b[f,t]
return sigma_x
def calcSigmas(self):
sigma_s = np.zeros((self.M,self.J,self.F,self.T), 'complex')
for i in range(self.M):
sigma_s[i,:,:,:] = self.V[:,:,:]
temp_sigma_s = sigma_s.reshape((self.MJ, self.F, self.T))
sigma_s = np.zeros((self.MJ,self.MJ,self.F,self.T), 'complex')
for i in range(self.MJ):
sigma_s[i,:,:,:] = temp_sigma_s[:,:,:]
return sigma_s
def M_step(self, sigma_hat_xs, sigma_hat_s):
self.calculateA(sigma_hat_xs, sigma_hat_s)
self.normaliseA()
def calculateA(self, sigma_hat_xs, sigma_hat_s):
self.A = np.zeros((self.M, self.MJ,self.F), 'complex')
for f in range(self.F):
for t in range(self.T):
inv = np.linalg.pinv(sigma_hat_s[:,:,f,t])
self.A[:,:,f] += np.matmul(sigma_hat_xs[:,:,f,t],
inv)
def normaliseA(self):
for j in range(self.J):
nonzero_f_ind = np.nonzero(self.A[:, j, :])
self.A[:, j, nonzero_f_ind] = np.divide(self.A[:, j, nonzero_f_ind], signum(self.A[:,j,nonzero_f_ind]))
# self._A[0, j, nonzero_f_ind] = np.divide(self.A[0, j, nonzero_f_ind], signum(self.A[0,j,nonzero_f_ind]))
A_scale = np.sum(np.power(np.abs(self.A[:,j,:]),2), axis=0)
self.A[:, j,:] = np.divide(self.A[:, j,:], np.tile(np.sqrt(A_scale).reshape(1,-1),(self.M,1)))
def run(self, epochs=100):
print("Running")
self.E_step()
def reconstruct(self, Xb):
self.calculateV()
Y = np.zeros((self.M, self.J, self.F, self.T), 'complex')
for f in range(self.F):
for t in range(self.T):
RV = np.zeros((self.M, self.M))
for j in range(self.J):
start_index = (j*self.M)
end_index = (j+1) * self.M
R_i = np.matmul(self.A[:,start_index:end_index,f],
np.conj(self.A[:,start_index:end_index,f].T))
RV += np.multiply(R_i, self.V[j,f,t])
RV = np.linalg.pinv(RV)
for j in range(self.J):
start_index = (j*self.M)
end_index = (j+1) * self.M
R_i = np.matmul(self.A[:,start_index:end_index,f],
np.conj(self.A[:,start_index:end_index,f].T))
Y[:,j,f,t] = np.matmul(np.matmul(np.multiply(R_i,
self.V[j,f,t]),
RV),
Xb[:,f,t])
return Y
class SpatCov_NTF_Hs(SpatCov_NTF_H1):
def __init__(self, X, mic_locs, V, K_partition=[3,3], J=2):
super(SpatCov_NTF_Hs, self).__init__(X,mic_locs,V, K_partition, J)
def calculateA(self, bar_Rxx, bar_Rxs, bar_Rsx, bar_Rss, s_value=0.9999):
G_xyxy = np.array([[bar_Rxx, bar_Rxs],[bar_Rsx, bar_Rss]]).reshape(2*self.F, 2*self.T)
U, s, V = linalg.svd(G_xyxy)
total_s = np.sum(s)
summed_s = []
inter_total_s = 0
for val_s in s:
inter_total_s += val_s
summed_s.append(inter_total_s/total_s)
# print(summed_s[0])
indices = np.array(np.where(np.array(summed_s) < s_value))
p_value = indices[0,-1]
U_p = U[:self.F,:p_value]
V_p = V[:p_value,self.T:]
return np.matmul(U_p, linalg.pinv(V_p).T)
def E_step(self):
print("correct E")
sigma_s = self.calcSigmas()
sigma_x = self.calcSigmaX(sigma_s)
omega_s = self.calcOmegas(sigma_s, sigma_x)
sigma_hat_xs = self.calcSigmahatxs(omega_s)
sigma_hat_sx = self.calcSigmahatsx(omega_s)
sigma_hat_s = self.calcSigmahats(omega_s,sigma_s)
self.calculateSigmab(sigma_x, sigma_hat_xs, sigma_hat_s)
        self.A = np.zeros((self.M, self.MJ, self.F), 'complex')
# api: numpy.zeros
"""
.. module:: data_structures
:synopsis: Data structure for Isotope and Ratio data.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
from pyevtk.hl import gridToVTK
except ImportError:
print("Unable to load pvevtk, output to VTK will be disabled")
class IsotopeData(object):
""" Create an IsotopeData file for an isotope with given name, and data.
:param isotope_label: Name of the isotope.
:type isotope_label: string
:param isotope_data: The data for the given isotope.
:type isotope_data: 3D `numpy` array
"""
def __init__(self, isotope_label, isotope_data):
self._label = isotope_label
self._data = np.array(isotope_data, dtype=float)
self._is_deadtime_corrected = False
def get_label(self):
return self._label
    def __le__(self, value):
return self._data <= value
def __lt__(self, value):
return self._data < value
def __gt__(self, value):
return self._data > value
def get_data(self, mask=None):
""" Return the isotope's data. Optionally include a mask to return a \
numpy masked array.
:param mask: mask to apply to the data.
:type mask: numpy bool array.
"""
if type(mask) is np.ndarray:
return ma.array(self._data, mask = mask)
else:
return self._data
def get_mask(self, lower=0, upper=np.Inf):
"""Return a mask that will mask all data outside of the bounds given. \
note that the numpy mask sets values that *will* be masked to True.
:param lower: lower bound for mask (default 0), values less than or equal to this \
will be masked.
:type type: float
:param upper: upper bound for mask (default infinity) , values more \
than this will be masked.
:type upper: float
"""
return np.logical_or(self._data <= lower, self._data > upper)
def n_cycles(self):
return np.shape(self._data)[0]
def n_pixels(self, mask = None):
"""Returns the total number of pixels in the dataset. Optionally masked \
to return the number of *non-masked* entries.
:param mask: mask to apply to the data.
:type mask: numpy bool array
"""
if type(mask) == np.ndarray:
return ma.array(self._data, mask = mask).count()
else:
return self._data.size
def perform_deadtime_correction(self, dwell_time, dead_time):
r""" Perform deadtime correction on the count data:\
.. math:: n=\frac{n_0}{1-n_0\tau}
using dead time, :math:`\tau`, and :math:`n_0` is in counts per second:\
.. math:: n_0=\frac{n}{T}
where :math:`n` is the counts, and :math:`T` is the dwell time.
.. important:: Dwell time and dead time may be in any unit of time as \
long as they are in the *same* units.
:param dwell_time: Dwell time.
:type dwell_time: float
:param dead_time: Dead time.
:type dead_time: float
"""
# Prevent applying deadtime correction twice
if self._is_deadtime_corrected:
raise RuntimeError("Error: Isotope " + self._label +
" is already deadtime corrected")
print("Deadtime correction: isotope: " + self._label + ";\tdwell_time: " +
str(dwell_time) + ";\tdead_time: " + str(dead_time))
self._dwell_time = dwell_time
self._dead_time = dead_time
# Perform deadtime correction
count_rate = np.divide(self._data, dwell_time)
self._data = np.divide(count_rate,
1 - np.multiply(count_rate, dead_time))
self._data *= dwell_time
self._is_deadtime_corrected = True
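    # Illustrative worked example (comment only, numbers made up): with a
    # dwell time of 1e-3 s and a dead time of 50e-9 s, a pixel with 10000 raw
    # counts has a count rate n0 = 10000 / 1e-3 = 1e7 counts/s, so
    #     n = n0 / (1 - n0 * tau) = 1e7 / (1 - 1e7 * 50e-9) = 2e7 counts/s,
    # i.e. about 20000 corrected counts once multiplied by the dwell time
    # again, exactly as perform_deadtime_correction does above.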
def plot(self, mask=None):
""" Plot the isotope, with desired mask.
:param mask: Mask to be used.
:type mask: numpy bool array
"""
# Get plot data
if type(mask) is np.ndarray:
plot_data = ma.masked_array(self._data, mask = mask)
            vmin = ma.min(plot_data)
# api: numpy.ma.min
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tools for building optimal circuits out of XX interactions.
Inputs:
+ A set of native XX operations, described as strengths.
+ A right-angled path, computed using the methods in `paths.py`.
Output:
+ A circuit which implements the target operation (expressed exactly as the exponential of
`a XX + b YY + c ZZ`) using the native operations and local gates.
"""
from functools import reduce
import math
from operator import itemgetter
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.circuit.library.standard_gates import RXXGate, RYYGate, RZGate
from qiskit.exceptions import QiskitError
from .paths import decomposition_hop
from .utilities import EPSILON, safe_arccos
from .weyl import (
apply_reflection,
apply_shift,
canonical_rotation_circuit,
reflection_options,
shift_options,
)
# pylint:disable=invalid-name
def decompose_xxyy_into_xxyy_xx(a_target, b_target, a_source, b_source, interaction):
"""
Consumes a target canonical interaction CAN(a_target, b_target) and source interactions
    CAN(a_source, b_source), CAN(interaction), then manufactures a circuit identity of the form
CAN(a_target, b_target) = (Zr, Zs) CAN(a_source, b_source) (Zu, Zv) CAN(interaction) (Zx, Zy).
Returns the 6-tuple (r, s, u, v, x, y).
"""
cplus, cminus = np.cos(a_source + b_source), np.cos(a_source - b_source)
splus, sminus = np.sin(a_source + b_source), np.sin(a_source - b_source)
ca, sa = np.cos(interaction), np.sin(interaction)
uplusv = (
1
/ 2
* safe_arccos(
cminus ** 2 * ca ** 2 + sminus ** 2 * sa ** 2 - np.cos(a_target - b_target) ** 2,
2 * cminus * ca * sminus * sa,
)
)
uminusv = (
1
/ 2
* safe_arccos(
cplus ** 2 * ca ** 2 + splus ** 2 * sa ** 2 - np.cos(a_target + b_target) ** 2,
2 * cplus * ca * splus * sa,
)
)
u, v = (uplusv + uminusv) / 2, (uplusv - uminusv) / 2
# NOTE: the target matrix is phase-free
middle_matrix = reduce(
np.dot,
[
RXXGate(2 * a_source).to_matrix() @ RYYGate(2 * b_source).to_matrix(),
np.kron(RZGate(2 * u).to_matrix(), RZGate(2 * v).to_matrix()),
RXXGate(2 * interaction).to_matrix(),
],
)
phase_solver = np.array(
[
[
1 / 4,
1 / 4,
1 / 4,
1 / 4,
],
[
1 / 4,
-1 / 4,
-1 / 4,
1 / 4,
],
[
1 / 4,
1 / 4,
-1 / 4,
-1 / 4,
],
[
1 / 4,
-1 / 4,
1 / 4,
-1 / 4,
],
]
)
inner_phases = [
np.angle(middle_matrix[0, 0]),
np.angle(middle_matrix[1, 1]),
np.angle(middle_matrix[1, 2]) + np.pi / 2,
np.angle(middle_matrix[0, 3]) + np.pi / 2,
]
r, s, x, y = np.dot(phase_solver, inner_phases)
# If there's a phase discrepancy, need to conjugate by an extra Z/2 (x) Z/2.
generated_matrix = reduce(
np.dot,
[
np.kron(RZGate(2 * r).to_matrix(), RZGate(2 * s).to_matrix()),
middle_matrix,
np.kron(RZGate(2 * x).to_matrix(), RZGate(2 * y).to_matrix()),
],
)
if (abs(np.angle(generated_matrix[3, 0]) - np.pi / 2) < 0.01 and a_target > b_target) or (
abs(np.angle(generated_matrix[3, 0]) + np.pi / 2) < 0.01 and a_target < b_target
):
x += np.pi / 4
y += np.pi / 4
r -= np.pi / 4
s -= np.pi / 4
return r, s, u, v, x, y
def xx_circuit_step(source, strength, target, embodiment):
"""
Builds a single step in an XX-based circuit.
`source` and `target` are positive canonical coordinates; `strength` is the interaction strength
at this step in the circuit as a canonical coordinate (so that CX = RZX(pi/2) corresponds to
pi/4); and `embodiment` is a Qiskit circuit which enacts the canonical gate of the prescribed
interaction `strength`.
"""
permute_source_for_overlap, permute_target_for_overlap = None, None
# apply all possible reflections, shifts to the source
for source_reflection_name in reflection_options:
reflected_source_coord, source_reflection, reflection_phase_shift = apply_reflection(
source_reflection_name, source
)
for source_shift_name in shift_options:
shifted_source_coord, source_shift, shift_phase_shift = apply_shift(
source_shift_name, reflected_source_coord
)
# check for overlap, back out permutation
source_shared, target_shared = None, None
for i, j in [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]:
if (
abs(np.mod(abs(shifted_source_coord[i] - target[j]), np.pi)) < EPSILON
or abs(np.mod(abs(shifted_source_coord[i] - target[j]), np.pi) - np.pi)
< EPSILON
):
source_shared, target_shared = i, j
break
if source_shared is None:
continue
# pick out the other coordinates
source_first, source_second = [x for x in [0, 1, 2] if x != source_shared]
target_first, target_second = [x for x in [0, 1, 2] if x != target_shared]
# check for arccos validity
r, s, u, v, x, y = decompose_xxyy_into_xxyy_xx(
float(target[target_first]),
float(target[target_second]),
float(shifted_source_coord[source_first]),
float(shifted_source_coord[source_second]),
float(strength),
)
if any(math.isnan(val) for val in (r, s, u, v, x, y)):
continue
# OK: this combination of things works.
# save the permutation which rotates the shared coordinate into ZZ.
permute_source_for_overlap = canonical_rotation_circuit(source_first, source_second)
permute_target_for_overlap = canonical_rotation_circuit(target_first, target_second)
break
if permute_source_for_overlap is not None:
break
if permute_source_for_overlap is None:
raise QiskitError(
f"Error during RZX decomposition: Could not find a suitable Weyl "
f"reflection to match {source} to {target} along {strength}."
)
prefix_circuit, affix_circuit = QuantumCircuit(2), QuantumCircuit(2)
# the basic formula we're trying to work with is:
# target^p_t_f_o =
# rs * (source^s_reflection * s_shift)^p_s_f_o * uv * operation * xy
# but we're rearranging it into the form
# target = affix source prefix
# and computing just the prefix / affix circuits.
# the outermost prefix layer comes from the (inverse) target permutation.
prefix_circuit.compose(permute_target_for_overlap.inverse(), inplace=True)
# the middle prefix layer comes from the local Z rolls.
prefix_circuit.rz(2 * x, [0])
prefix_circuit.rz(2 * y, [1])
prefix_circuit.compose(embodiment, inplace=True)
prefix_circuit.rz(2 * u, [0])
prefix_circuit.rz(2 * v, [1])
# the innermost prefix layer is source_reflection, shifted by source_shift,
# finally conjugated by p_s_f_o.
prefix_circuit.compose(permute_source_for_overlap, inplace=True)
prefix_circuit.compose(source_reflection, inplace=True)
prefix_circuit.global_phase += -np.log(reflection_phase_shift).imag
prefix_circuit.global_phase += -np.log(shift_phase_shift).imag
# the affix circuit is constructed in reverse.
# first (i.e., innermost), we install the other half of the source transformations and p_s_f_o.
affix_circuit.compose(source_reflection.inverse(), inplace=True)
affix_circuit.compose(source_shift, inplace=True)
affix_circuit.compose(permute_source_for_overlap.inverse(), inplace=True)
# then, the other local rolls in the middle.
affix_circuit.rz(2 * r, [0])
affix_circuit.rz(2 * s, [1])
# finally, the other half of the p_t_f_o conjugation.
affix_circuit.compose(permute_target_for_overlap, inplace=True)
return {"prefix_circuit": prefix_circuit, "affix_circuit": affix_circuit}
def canonical_xx_circuit(target, strength_sequence, basis_embodiments):
"""
Assembles a Qiskit circuit from a specified `strength_sequence` of XX-type interactions which
emulates the canonical gate at canonical coordinate `target`. The circuits supplied by
`basis_embodiments` are used to instantiate the individual XX actions.
NOTE: The elements of `strength_sequence` are expected to be normalized so that np.pi/2
corresponds to RZX(np.pi/2) = CX; `target` is taken to be a positive canonical coordinate;
and `basis_embodiments` maps `strength_sequence` elements to circuits which instantiate
these gates.
"""
# empty decompositions are easy!
if len(strength_sequence) == 0:
return QuantumCircuit(2)
# assemble the prefix / affix circuits
prefix_circuit, affix_circuit = QuantumCircuit(2), QuantumCircuit(2)
while len(strength_sequence) > 1:
source = decomposition_hop(target, strength_sequence)
strength = strength_sequence[-1]
preceding_prefix_circuit, preceding_affix_circuit = itemgetter(
"prefix_circuit", "affix_circuit"
)(xx_circuit_step(source, strength / 2, target, basis_embodiments[strength]))
prefix_circuit.compose(preceding_prefix_circuit, inplace=True)
affix_circuit.compose(preceding_affix_circuit, inplace=True, front=True)
target, strength_sequence = source, strength_sequence[:-1]
circuit = prefix_circuit
# lastly, deal with the "leading" gate.
if target[0] <= np.pi / 4:
circuit.compose(basis_embodiments[strength_sequence[0]], inplace=True)
else:
_, source_reflection, reflection_phase_shift = apply_reflection("reflect XX, YY", [0, 0, 0])
_, source_shift, shift_phase_shift = apply_shift("X shift", [0, 0, 0])
circuit.compose(source_reflection, inplace=True)
circuit.compose(basis_embodiments[strength_sequence[0]], inplace=True)
circuit.compose(source_reflection.inverse(), inplace=True)
circuit.compose(source_shift, inplace=True)
        circuit.global_phase += -np.log(shift_phase_shift)
# api: numpy.log
from typing import Any, Dict
from pathlib import PurePosixPath
from kedro.io import AbstractVersionedDataSet, Version
from kedro.io.core import get_filepath_str, get_protocol_and_path
import fsspec
import numpy as np
import netCDF4
from netCDF4 import Dataset
class NetCDFDataSet(AbstractVersionedDataSet):
def __init__(self, filepath: str, attr_name: str, version: Version = None):
"""Creates a new instance of NetCDFDataSet to load / save image data for given filepath.
Args:
            filepath: The location of the netCDF file to load / save data.
version: The version of the dataset being saved and loaded.
"""
protocol, path = get_protocol_and_path(filepath)
self._protocol = protocol
self._fs = fsspec.filesystem(self._protocol)
self._format = "NETCDF4"
self._attr_name = attr_name
super().__init__(
filepath=PurePosixPath(path),
version=version,
exists_function=self._fs.exists,
glob_function=self._fs.glob,
)
def _load(self) -> np.ndarray:
"""Loads data from the netCDF file.
Returns:
Data from the netCDF file as numpy array
"""
load_path = self._get_load_path()
data = Dataset(load_path, "r", format=self._format)
data = data.variables[self._attr_name][:].data
return np.asarray(data)
def _save(self, data: np.ndarray) -> None:
"""Saves NetCDF data to the specified filepath.
"""
save_path = self._get_save_path()
out_data: netCDF4._netCDF4.Dataset = Dataset(
save_path, "w", format=self._format
)
# Create dimensions
lat_length: int
lon_length: int
_, lat_length, lon_length = data.shape
lat_dim = out_data.createDimension("lat", lat_length)
lon_dim = out_data.createDimension("lon", lon_length)
time_dim = out_data.createDimension("time", None)
# Attributes
out_data.title = "NetCDF data"
# Variables
lat = out_data.createVariable("lat", np.float32, ("lat",))
lon = out_data.createVariable("lon", np.float32, ("lon",))
time = out_data.createVariable("time", np.float32, ("time",))
unit = out_data.createVariable(
"unit", np.float32, ("time", "lat", "lon")
)
# Writing data
n_lats, n_lons, n_times = len(lat_dim), len(lon_dim), 3
lat[:] = -90.0 + (180 / n_lats) * np.arange(n_lats)
        lon[:] = (180 / n_lats) * np.arange(n_lons)
# api: numpy.arange
import numpy as np
from .regularizers import Regularizer
def gradient_descent(w0, optimizer, regularizer=None, opts=dict()):
"""
Mini-Batch Stochastic Gradient descent algorithm.
w0: is the initial guess
    optimizer: the objective being optimized. It should provide gradient and loss methods.
opts: a dictionary with the algorithm parameters
"""
w = w0
dim = w0.size
if regularizer is None:
regularizer = Regularizer()
eta = opts.get('eta0', 0.01)
n_iter = opts.get('n_iter', 10)
batch_size = opts.get('batch_size', 1)
algorithm = opts.get('algorithm', 'GD')
n_samples = opts.get('n_samples', optimizer.number_samples)
    indexes = np.arange(0, n_samples, 1)
# api: numpy.arange
import numpy as np
import matplotlib as mpl
import mpl_toolkits.axes_grid1 as mplax
import matplotlib.colors as mplc
import matplotlib.cm as mplcm
import numba
import warnings
import scipy.misc as scm
import scipy.optimize as spo
import scipy.ndimage as scnd
import scipy.signal as scsig
import skimage.color as skc
import stemtool as st
def move_by_phase(image_to_move, x_pixels, y_pixels):
"""
Move Images with sub-pixel precision
Parameters
----------
image_to_move: ndarray
Original Image to be moved
x_pixels: float
Pixels to shift in X direction
y_pixels: float
Pixels to Shift in Y direction
Returns
-------
moved_image: ndarray
Moved Image
Notes
-----
The underlying idea is that a shift in the real space
is phase shift in Fourier space. So we take the original
image, and take it's Fourier transform. Also, we calculate
how much the image shifts result in the phase change, by
calculating the Fourier pixel dimensions. We then multiply
the FFT of the image with the phase shift value and then
take the inverse FFT.
:Authors:
<NAME> <<EMAIL>>
"""
image_size = (np.asarray(image_to_move.shape)).astype(int)
fourier_cal_y = np.linspace(
(-image_size[0] / 2), ((image_size[0] / 2) - 1), image_size[0]
)
fourier_cal_y = fourier_cal_y / (image_size[0]).astype(np.float64)
fourier_cal_x = np.linspace(
(-image_size[1] / 2), ((image_size[1] / 2) - 1), image_size[1]
)
fourier_cal_x = fourier_cal_x / (image_size[1]).astype(np.float64)
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_matrix = np.multiply(fourier_mesh_x, x_pixels) + np.multiply(
fourier_mesh_y, y_pixels
)
move_phase = np.exp((-2) * np.pi * 1j * move_matrix)
original_image_fft = np.fft.fftshift(np.fft.fft2(image_to_move))
moved_in_fourier = np.multiply(move_phase, original_image_fft)
moved_image = np.fft.ifft2(moved_in_fourier)
return moved_image
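# Illustrative usage sketch (hypothetical helper, not used elsewhere): shift a
# synthetic image by half a pixel in each direction with move_by_phase. The
# image and shift values below are made up; only numpy is assumed.
def _move_by_phase_demo():
    demo_image = np.zeros((64, 64))
    demo_image[32, 32] = 1.0          # single bright pixel
    shifted = move_by_phase(demo_image, 0.5, 0.5)
    # the result is complex, so take the real part (or magnitude) before use
    return np.real(shifted)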
def image_normalizer(image_orig):
"""
Normalizing Image
Parameters
----------
image_orig: ndarray
'image_orig' is the original input image to be normalized
Returns
-------
image_norm: ndarray
Normalized Image
Notes
-----
We normalize a real valued image here
so that its values lie between 0 and 1.
This is done by first subtracting the
minimum value of the image from the
image itself, and then subsequently
dividing the image by the maximum value
of the subtracted image.
:Authors:
<NAME> <<EMAIL>>
"""
image_norm = np.zeros_like(image_orig, dtype=np.float64)
image_norm = (image_orig - np.amin(image_orig)) / (
np.amax(image_orig) - np.amin(image_orig)
)
return image_norm
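# Hedged illustration (added; not from the original source): after normalization
# the minimum maps to 0 and the maximum to 1, e.g. [2., 4., 6.] -> [0., 0.5, 1.].
def _example_normalize_range():
    demo = np.array([2.0, 4.0, 6.0])  # made-up values
    norm = image_normalizer(demo)
    return norm.min(), norm.max()  # expected: (0.0, 1.0)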
def image_logarizer(image_orig, bit_depth=64):
"""
Normalized log of image
Parameters
----------
image_orig: ndarray
Numpy array of real valued image
bit_depth: int
Bit depth of output image
Default is 64
Returns
-------
image_log: ndarray
Normalized log
Notes
-----
Normalize the image, and scale it from 2^0 to 2^bit_depth.
Take log2 of the scaled image.
:Authors:
<NAME> <<EMAIL>>
"""
bit_max = 2 ** bit_depth
image_norm = image_normalizer(image_orig)
image_scale = np.zeros_like(image_norm, dtype=np.float64)
image_log = np.zeros_like(image_norm, dtype=np.float64)
image_scale = 1 + ((bit_max - 1) * image_norm)
image_log = np.log2(image_scale)
return image_log
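# Hedged illustration (added; not from the original source): with the default
# bit_depth of 64 the normalized image is scaled to [1, 2**64], so the log2
# values returned by image_logarizer span roughly 0 to 64.
def _example_log_range():
    demo = np.array([[1.0, 10.0], [100.0, 1000.0]])  # made-up intensities
    log_im = image_logarizer(demo)
    return log_im.min(), log_im.max()  # approximately (0.0, 64.0)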
def remove_dead_pixels(image_orig, iter_count=1, level=10000):
"""
Remove dead pixels
Parameters
----------
image_orig: ndarray
Numpy array of real valued image
iter_count: int
Number of iterations to run
the process. Default is 1
level: int,float
Ratio of minima pixels to total
pixels. Default is 10,000
Returns
-------
image_orig: ndarray
Image with dead pixels converted
Notes
-----
Subtract the minimum from the image; if the number
of pixels at the minimum value is less than 1/level
of the total number of pixels, those pixels are
treated as dead pixels. Iterate if necessary.
:Authors:
<NAME> <<EMAIL>>
"""
pp, qq = np.mgrid[0 : image_orig.shape[0], 0 : image_orig.shape[1]]
no_points = np.size(pp)
for ii in range(iter_count):
original_min = np.amin(image_orig)
image_pos = image_orig - original_min
no_minima = np.size(pp[image_pos == 0])
if no_minima < (no_points / level):
new_minimum = np.amin(image_pos[image_pos > 0])
image_pos = image_pos - new_minimum
image_pos[image_pos < 0] = 0
image_orig = image_pos + new_minimum + original_min
return image_orig
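# Hedged sketch (added; not part of the original module): plant one artificially
# dark pixel in an otherwise uniform image and let remove_dead_pixels pull it
# back towards the surrounding intensity level. The values are illustrative.
def _example_dead_pixel():
    demo = 1000.0 * np.ones((128, 128))
    demo[5, 7] = 0.0  # a single dead pixel out of 128 * 128 points
    return remove_dead_pixels(demo, iter_count=1, level=10000)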
def hanned_image(image):
"""
2D hanning filter for images
Parameters
----------
image: ndarray
Original Image on which the Hanning filter
is to be applied
Returns
-------
hanned_image: ndarray
Image with the hanning filter applied
Notes
-----
:Authors:
<NAME> <<EMAIL>>
"""
size_image = np.asarray(np.shape(image), dtype=int)
row_hann = np.zeros((size_image[0], 1))
row_hann[:, 0] = np.hanning(size_image[0])
col_hann = np.zeros((1, size_image[1]))
col_hann[0, :] = np.hanning(size_image[1])
hann_window = np.multiply(row_hann, col_hann)
hanned_image = np.multiply(image, hann_window)
return hanned_image
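# Hedged usage sketch (added; not from the original source): apply the 2D
# Hanning window before an FFT to suppress edge discontinuities.
def _example_hanned_fft():
    demo = np.random.rand(64, 64)  # illustrative random image
    windowed = hanned_image(demo)
    return np.fft.fftshift(np.fft.fft2(windowed))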
def sane_colorbar(mappable):
ax = mappable.axes
fig = ax.figure
divider = mplax.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return fig.colorbar(mappable, cax=cax)
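# Hedged usage sketch (added; not part of the original module): attach a
# colorbar whose height matches the image axes. The local pyplot import and the
# random image are assumptions made only for this example.
def _example_sane_colorbar():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.random.rand(32, 32))
    return sane_colorbar(im)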
def phase_color(phase_image):
size_im = np.asarray(np.shape(phase_image), dtype=int)
hsv_im = np.ones((size_im[0], size_im[1], 3))
hsv_im[:, :, 0] = (phase_image + (2 * np.pi)) / (2 * np.pi)
hsv_im[:, :, 0] = hsv_im[:, :, 0] -
|
np.floor(hsv_im[:, :, 0])
|
numpy.floor
|
"""
Test functions in the space of solutions of the
Euler Lagrange equations of
\int_{-1}^{1} (2/tau) \alpha dq/ds + (2/tau)^5 (1-\alpha) d^3 q / ds^3 ds
"""
import unittest
from scipy.sparse import csc_matrix
import numpy as np
from gsplines.interpolator.gspline import cSplineCalc
from gsplines.basis.basis1010 import cBasis1010
from gsplines.basis.basis0010 import cBasis0010
class cMyTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(cMyTest, self).__init__(*args, **kwargs)
import sys
np.set_printoptions(
linewidth=5000000,
formatter={'float': '{:+10.3e}'.format},
threshold=sys.maxsize)
pass
def testInversion(self):
import time
print('Testing inversion of matrix')
dim = 6 # np.random.randint(2, 6)
N = 50 # np.random.randint(3, 120)
a = np.random.rand()
splcalc = cSplineCalc(dim, N, cBasis1010(a))
for i in range(50):
tauv = np.random.rand(N)
A1 = splcalc.eval_A(tauv)
# A0 = self.eval_A(tauv, dim, N, cBasis1010(a))
# e = np.max(np.abs(A1 - A0.todense()))
# # print(A0)
# # print('----------------------------------------')
# # print(A1)
# # print(dim, N)
# assert e < 1.0e-8
splcalc.printPerformace()
pass
def testcontinuity(self):
print('Test continuity constraints with plot')
for i in range(3):
dim = np.random.randint(2, 3)
N = np.random.randint(3, 10)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
tauv = 0.5 + np.random.rand(N) * 3.0
tis = [np.sum(tauv[0:i]) for i in range(0, N + 1)]
T = np.sum(tauv)
splcalc = cSplineCalc(dim, N, cBasis1010(a))
spln = splcalc.getSpline(tauv, wp)
from matplotlib import pyplot as plt
t = np.arange(0, T, 0.005)
q_list = [spln.deriv(i)(t) for i in range(0, 6)]
fig, axs = plt.subplots(6, dim)
for i in range(0, 6):
for j in range(0, dim):
axs[i, j].plot(t, q_list[i][:, j])
axs[i, j].grid()
for ti in tis:
axs[i, j].axvline(x=ti, color='b', linestyle='--')
plt.show()
def test_eval_b(self):
import time
print('Test evaluation of b vector')
for i in range(20):
dim = np.random.randint(1, 8)
N = np.random.randint(3, 200)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
dwp0 = np.zeros((dim, ))
ddwp0 = np.zeros((dim, ))
dwpT = np.zeros((dim, ))
ddwpT = np.zeros((dim, ))
splcalc = cSplineCalc(dim, N, cBasis1010(a))
b1 = splcalc.eval_b(wp)
b2 = self.eval_b(wp, N, dim, dwp0, ddwp0, dwpT, ddwpT)
e = np.max(np.abs(b1 - b2))
assert e < 1.0e-8
def non_zero_diagonals_A(self):
for i in range(0, 1):
dim = np.random.randint(1, 8)
N = np.random.randint(3, 200)
a = np.random.rand()
splcalc = cSplineCalc(dim, N, cBasis1010, a)
tauv = 10.0 * np.random.rand(N) + 0.5
splcalc.eval_A(tauv)
A = splcalc.Aeval
upper_diags = 0
flag = 0
for i in range(A.shape[0]):
if np.max(np.abs(np.diagonal(A, i))) > 1.0e-10:
assert flag != 1, 'Matrix is Not Banded!!!'
upper_diags += 1
else:
flag = 1
lower_diags = 0
flag = 0
for i in range(A.shape[0]):
if np.max(np.abs(np.diagonal(A, -i))) > 1.0e-10:
assert flag != 1, 'Matrix is Not Banded!!!'
lower_diags += 1
else:
flag = 1
# print('upper diagonas = {:d} lower diagonals = {:d}'.format(
# upper_diags, lower_diags))
assert 4 * dim + 4 == max(upper_diags, lower_diags)
# wp = (np.random.rand(N + 1, dim) - 0.5) * 2.0 * np.pi
# b = splcalc.eval_b(wp)
#
# wp = np.random.rand(N + 1, dim)
#
# spline = splcalc.solve(wp, tauv)
#
# tis = [np.sum(tauv[0:i]) for i in range(0, N + 1)]
#
# t = np.arange(0, tis[-1], 0.1)
#
# plt.plot(t, spline(t)[:, 0])
#
# plt.show()
def test_derivative_b(self):
'''
Here we test the correctness of the numerical output of the basis
class by comparing it with its analytical form obtained using sympy
'''
print('Test derivative of b w.r.t. waypoint components')
np.random.seed()
dim = np.random.randint(1, 8)
N = np.random.randint(2, 60)
a =
|
np.random.rand()
|
numpy.random.rand
|
import numpy
import pandas
from palm.base.model import Model
from palm.base.model_factory import ModelFactory
from palm.base.parameter_set import ParameterSet
from palm.base.target_data import TargetData
from palm.aggregated_kinetic_model import AggregatedKineticModel
from palm.discrete_state_trajectory import DiscreteStateTrajectory,\
DiscreteDwellSegment
from palm.state_collection import StateCollectionFactory
from palm.route_collection import RouteCollectionFactory
from palm.probability_vector import make_prob_vec_from_state_ids
class State(object):
def __init__(self, id_str, observation_class):
self.id = id_str
self.observation_class = observation_class
self.initial_state_flag = False
def __str__(self):
return "%s %s" % (self.id, self.observation_class)
def as_array(self):
return None
def get_id(self):
return self.id
def get_class(self):
return self.observation_class
def is_initial_state(self):
return self.initial_state_flag
def set_initial_state_flag(self):
self.initial_state_flag = True
def as_dict(self):
return {'observation_class':self.get_class()}
class Route(object):
'''
A generic route class for aggregated kinetic models.
'''
def __init__(self, id_str, start_state_id, end_state_id, rate_id,
multiplicity):
self.id = id_str
self.start_state_id = start_state_id
self.end_state_id = end_state_id
self.rate_id = rate_id
self.multiplicity = multiplicity
def __str__(self):
my_str = "%s %s %s %s %d" % (
self.id, self.start_state_id, self.end_state_id,
self.rate_id, self.multiplicity)
return my_str
def get_id(self):
return self.id
def get_start_state(self):
return self.start_state_id
def get_end_state(self):
return self.end_state_id
def get_multiplicity(self):
return self.multiplicity
def as_dict(self):
return {'start_state':self.start_state_id,
'end_state':self.end_state_id,
'rate_id':self.rate_id,
'multiplicity':self.multiplicity}
class SimpleParameterSet(ParameterSet):
"""
Parameters for a simple two-state model.
"""
def __init__(self):
super(SimpleParameterSet, self).__init__()
self.parameter_dict = {'log_k1':-1.0, 'log_k2':-1.0}
self.bounds_dict = {'log_k1':(None, None),
'log_k2':(None, None)}
def __str__(self):
my_array = self.as_array()
return "%s" % (my_array)
def __iter__(self):
for param_name, param_value in self.parameter_dict.items():
yield param_name, param_value
def set_parameter(self, parameter_name, parameter_value):
self.parameter_dict[parameter_name] = parameter_value
def get_parameter(self, parameter_name):
return self.parameter_dict[parameter_name]
def as_array(self):
log_k1 = self.get_parameter('log_k1')
log_k2 = self.get_parameter('log_k2')
return
|
numpy.array([log_k1, log_k2])
|
numpy.array
|
from PyQt4 import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import sys
## Always start by initializing Qt (only once per application)
app = QtGui.QApplication([])
## Define a top-level widget to hold everything
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Polarization Visualization')
## Create widgets to be placed inside
heading_text = QtGui.QLabel('Polarization Angles ' + u"\u03C8" + ' and ' + u"\u03B4")
# Box with sliders
sliderbox = QtGui.QGroupBox()
hBoxLayout = QtGui.QHBoxLayout()
psi_slider_layout = QtGui.QVBoxLayout()
delta_slider_layout = QtGui.QVBoxLayout()
# psi slider
psi_label = QtGui.QLabel(u"\u03C8")
psi_slider = QtGui.QSlider()
psi_slider.setOrientation(QtCore.Qt.Vertical)
psi_slider.setMinimum(0)
psi_slider.setMaximum(90)
psi_slider.setValue(0)
psi_value = QtGui.QLabel(str(psi_slider.value()) + u"\u00b0")
psi_slider_layout.addWidget(psi_label)
psi_slider_layout.addWidget(psi_slider)
psi_slider_layout.addWidget(psi_value)
def set_psi_value(value):
psi_value.setText(str(value) + u"\u00b0")
global psi_deg
psi_deg = value
psi_slider.valueChanged.connect(set_psi_value)
# delta slider
delta_label = QtGui.QLabel(u"\u03B4")
delta_slider = QtGui.QSlider()
delta_slider.setOrientation(QtCore.Qt.Vertical)
delta_slider.setMinimum(-180)
delta_slider.setMaximum(180)
delta_slider.setValue(0)
delta_value = QtGui.QLabel(str(delta_slider.value()) + u"\u00b0")
delta_slider_layout.addWidget(delta_label)
delta_slider_layout.addWidget(delta_slider)
delta_slider_layout.addWidget(delta_value)
def set_delta_value(value):
delta_value.setText(str(value) + u"\u00b0")
global delta_deg
delta_deg = value
delta_slider.valueChanged.connect(set_delta_value)
# Set layout of box containing sliders
hBoxLayout.addItem(psi_slider_layout)
hBoxLayout.addItem(delta_slider_layout)
sliderbox.setLayout(hBoxLayout)
# Create openGL view widget & add a grid
wGL = gl.GLViewWidget()
wGL.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
wGL.opts['distance'] = 5
g = gl.GLGridItem()
wGL.addItem(g)
## Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.setColumnStretch (1, 2)
## Add widgets to the layout in their proper positions
layout.addWidget(heading_text, 0, 0) # heading text goes in upper-left
layout.addWidget(sliderbox, 1, 0) # slider box goes underneath heading text
layout.addWidget(wGL, 0, 1, 3, 1) # wGL goes on right side, spanning 3 rows
## Display the widget as a new window
w.show()
##------------ Set up polarization animation ------------##
degtorad = np.pi/180.0
# Build a new array from the input points, interleaving an on-axis point with
# each curve point, so that lines can be drawn perpendicular from the z-axis to
# the curve defined by the input array.
def preptomakelines(pts):
pts2 = np.zeros(shape=(2*pts.shape[0], pts.shape[1]))
for i in range(pts.shape[0]):
pts2[2*i,2] = pts[i,2]
pts2[2*i + 1,:] = pts[i,:]
return pts2
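# Hedged illustration (added; not part of the original script): for two sample
# points the helper interleaves an on-axis point (x = y = 0, same z) before
# each curve point, so consecutive row pairs define the perpendicular segments.
def _example_preptomakelines():
    pts = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # made-up sample points
    # expected result:
    # [[0., 0., 3.], [1., 2., 3.], [0., 0., 6.], [4., 5., 6.]]
    return preptomakelines(pts)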
psi_deg = float(psi_slider.value())
delta_deg = float(delta_slider.value())
# Calculate sinusoidal electric field for arbitrary polarization
def efield_arbpol(t,z,amplitude,psi_rad,delta_rad):
x = amplitude * np.cos(psi_rad) * np.cos(2*np.pi*(t-z))
y = amplitude * np.sin(psi_rad) *
|
np.cos(2*np.pi*(t-z) + delta_rad)
|
numpy.cos
|
from . import TrainingAlgorithm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
def create_q_model(n_inputs, n_outputs):
# Network defined by the Deepmind paper
inputs = layers.Input(n_inputs)
# Convolutions on the frames on the screen
layer1 = layers.Dense(32, activation="relu")(inputs)
layer2 = layers.Dense(64, activation="relu")(layer1)
layer3 = layers.Dense(64, activation="relu")(layer2)
layer4 = layers.Dense(64, activation="relu")(layer3)
layer5 = layers.Dense(512, activation="relu")(layer4)
action = layers.Dense(n_outputs, activation="linear")(layer5)
return keras.Model(inputs=inputs, outputs=action)
class DQN(TrainingAlgorithm):
def __init__(self, env, num_agents, gamma, epsilon, epsilon_min,
epsilon_max, batch_size, n_inputs_n, n_outputs):
super().__init__()
self.env = env
# Configuration parameters for the whole setup
self.gamma = gamma # Discount factor for past rewards
self.num_agents = num_agents
self.epsilon_min = epsilon_min # Minimum epsilon greedy parameter
self.epsilon_max = epsilon_max # Maximum epsilon greedy parameter
self.epsilon_interval = (
self.epsilon_max - self.epsilon_min
) # Rate at which to reduce chance of random action being taken
self.batch_size = batch_size # Size of batch taken from replay buffer
self.n_inputs_n = n_inputs_n # observation space
self.n_outputs = n_outputs # action space
# prepare_model
self.model_n = None
self.model_target_n = None
self.epsilon = epsilon # Epsilon greedy parameter
# In the Deepmind paper they use RMSProp; however, the Adam optimizer
# improves training time
self.optimizer_n = None
# Experience replay buffers
self.action_history_n = None
self.rewards_history_n = None
self.state_history_n = None
self.done_history_n = None
self.episode_reward_history_n = None
# Number of frames to take random action and observe output
self.epsilon_random_frames = None
# Number of frames for exploration
self.epsilon_greedy_frames = None
# Maximum replay length
# Note: The Deepmind paper suggests 1000000 however this causes memory issues
self.max_memory_length_n = None
# Train the model after 4 actions
self.update_after_actions_n = None
# How often to update the target network
self.update_target_network_n = None
# Using huber loss for stability
self.loss_function_n = None
# before episode
self.episode_reward_n = None
self.state_next_history_n = None
def prepare_model(self, *args, **kwargs):
# The first model makes the predictions for Q-values which are used to
# make a action.
self.model_n = [create_q_model(self.n_inputs_n[j], self.n_outputs) for j in range(self.num_agents)]
# Build a target model for the prediction of future rewards.
# The weights of a target model get updated every 10000 steps thus when the
# loss between the Q-values is calculated the target Q-value is stable.
self.model_target_n = [create_q_model(self.n_inputs_n[j], self.n_outputs) for j in range(self.num_agents)]
# Number of frames to take random action and observe output
self.epsilon_random_frames = round(self.env.cfg['duration'] * 0.05)
# Number of frames for exploration
self.epsilon_greedy_frames = round(self.env.cfg['duration'] * 0.2)
# Maximum replay length
# Note: The Deepmind paper suggests 1000000 however this causes memory issues
self.max_memory_length_n = [100000 for _ in range(self.num_agents)]
# Train the model after 4 actions
self.update_after_actions_n = [100 for _ in range(self.num_agents)]
# How often to update the target network
self.update_target_network_n = [1000 for _ in range(self.num_agents)]
# Using huber loss for stability
self.loss_function_n = \
[keras.losses.Huber() for _ in range(self.num_agents)]
# ---
self.state_history_n = [[] for _ in range(self.num_agents)]
self.state_next_history_n = [[] for _ in range(self.num_agents)]
self.optimizer_n = [keras.optimizers.Adam(learning_rate=0.00025, clipnorm=1.0) for _ in range(self.num_agents)]
self.action_history_n = [[] for _ in range(self.num_agents)]
self.rewards_history_n = [[] for _ in range(self.num_agents)]
self.done_history_n = [[] for _ in range(self.num_agents)]
self.episode_reward_history_n = [[] for _ in range(self.num_agents)]
def before_episode(self, *args, **kwargs):
self.episode_reward_n = [0 for _ in range(self.num_agents)]
def before_action(self, *args, **kwargs):
pass
def take_action(self, *args, **kwargs):
# Use epsilon-greedy for exploration
if self.env.cfg['duration'] - self.env.duration < self.epsilon_random_frames \
or self.epsilon > np.random.rand(1)[0]:
# Take random action
action_n = [np.random.choice(self.n_outputs) for _ in range(self.num_agents)]
_ = [self.action_history_n[j].append(action_n[j]) for j in range(self.num_agents)]
return action_n
else:
# Predict action Q-values
# From environment state
action_n = []
for j in range(self.num_agents):
state_tensor = tf.convert_to_tensor(kwargs['obs_n'][j])
state_tensor = tf.expand_dims(state_tensor, 0)
action_probs = self.model_n[j](state_tensor, training=False)
# Take best action
action_n.append(tf.argmax(action_probs[0]).numpy())
self.action_history_n[j].append(action_n[j])
return action_n
def before_step(self, *args, **kwargs):
# Decay probability of taking random action
self.epsilon -= self.epsilon_interval / self.epsilon_greedy_frames
self.epsilon = max(self.epsilon, self.epsilon_min)
def after_step(self, *args, **kwargs):
for j in range(self.num_agents):
self.episode_reward_n[j] = kwargs['reward_n'][j]
# =====================================================================
# Save actions and states in replay buffer
self.rewards_history_n[j].append(kwargs['reward_n'][j])
self.state_history_n[j].append(kwargs['obs_old_n'][j])
self.state_next_history_n[j].append(kwargs['obs_n'][j])
self.done_history_n[j].append(kwargs['done'][0])
# Update every fourth frame and once batch size is over 32
if (self.env.cfg['duration'] - self.env.duration) % self.update_after_actions_n[j] == 0 \
and len(self.done_history_n[j]) > self.batch_size:
# Get indices of samples for replay buffers
indices = np.random.choice(range(len(self.done_history_n[j])), size=self.batch_size)
# Using list comprehension to sample from replay buffer
state_sample =
|
np.array([self.state_history_n[j][i] for i in indices])
|
numpy.array
|
# ------------------------------- Information ------------------------------- #
# Author: <NAME> <<EMAIL>> #
# Created: Nov. 15th, 2016 #
# Description: We analyze the output of the WavMixer. #
# We compute the number of photons generated in different #
# experimental configurations: #
# - On-axis parabolic mirrors (HNA) #
# - On-axis parabolic mirrors with hole (HNA-h) #
# - Off-axis parabolic mirrors (OFF) #
# - Transmission parabolic mirrors (TRA) #
# - Transmission parabolic mirrors with hole (TRA-h) #
# Dependencies: - NumPy #
# - SciPy #
# - H5Py #
# - matplotlib #
# --------------------------------------------------------------------------- #
# --------------------------- Modules Importation --------------------------- #
import numpy as np
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
from matplotlib import ticker
import scipy.signal as signal
import scipy.integrate as integration
import scipy.interpolate as interp
import argparse
import h5py
import time
import math
import configparser
from mpl_toolkits.axes_grid1 import make_axes_locatable
import vphys
# ------------------------------ Configuration ------------------------------ #
pgf_with_pdflatex = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": [
r"\usepackage{amsmath}",
r"\usepackage{siunitx}",
#r"\usepackage{mathspec}",
r"\usepackage[charter]{mathdesign}",
r"\usepackage{fontspec}",
#r"\setmathfont{Fira Sans}",
r"\setmainfont{Oswald}",
]
}
matplotlib.rcParams.update(pgf_with_pdflatex)
# -- Fonts
matplotlib.rcParams['font.size'] = 8
matplotlib.rcParams['font.family'] = 'serif'
# -- Plots
#matplotlib.rcParams['axes.labelsize'] = 'large'
#matplotlib.rcParams['xtick.labelsize'] = 'large'
#matplotlib.rcParams['ytick.labelsize'] = 'large'
#matplotlib.rcParams['legend.numpoints'] = 5
#matplotlib.rcParams['figure.figsize'] = '4,2'
matplotlib.rcParams['axes.grid'] = True
# -------------------------------- Functions ------------------------------- #
def _infunc(x,func,gfun,hfun,more_args):
a = gfun(x)
b = hfun(x)
myargs = (x,) + more_args
return integration.quad(func,a,b,args=myargs)[0]
def custom_dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8, maxp1=100, limit=100):
return integration.quad(_infunc, a, b, (func, gfun, hfun, args),epsabs=epsabs, epsrel=epsrel, maxp1=maxp1, limit=limit)
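# Hedged usage sketch (added; not part of the original analysis script):
# custom_dblquad integrates func(y, x) with y running from gfun(x) to hfun(x),
# mirroring scipy.integrate.dblquad while exposing the accuracy/limit options
# used below. Integrating 1 over the unit square should give roughly 1.0.
def _example_unit_square_area():
    value, _ = custom_dblquad(lambda y, x: 1.0, 0.0, 1.0,
                              lambda x: 0.0, lambda x: 1.0)
    return value  # expected to be close to 1.0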
def fmt(x, pos):
a, b = '{:1.0e}'.format(x).split('e')
b = int(b)
return r'${} \times\, 10^{{{}}}$'.format(a, b)
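# Hedged illustration (added; not from the original script): fmt appears meant
# for matplotlib's ticker.FuncFormatter and renders a tick value in scientific
# notation, e.g. fmt(3.0e5, None) evaluates to '$3 \times\, 10^{5}$'.
def _example_fmt():
    return fmt(3.0e5, None)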
# -------------------------------- Constants -------------------------------- #
UNIT_MASS = 9.109382914e-31
UNIT_LENGTH = 3.86159e-13
UNIT_TIME = 1.2880885e-21
SPEED_OF_LIGHT = 299792458
EPSILON_0 = 8.85418782e-12
MU_0 = 4*np.pi*1.0e-7
ALPHA = 1.0/137.035999074
UNIT_E_FIELD = 1.3e18*np.sqrt(4*np.pi*ALPHA)
UNIT_B_FIELD = UNIT_E_FIELD/SPEED_OF_LIGHT
# -------------------- Analysis of the Number of Photons -------------------- #
# -- We analyze the number of photons generated in a given geometry. -- #
# --------------------------------------------------------------------------- #
# -- We parse the arguments.
parser = argparse.ArgumentParser()
parser.add_argument("min", type=int, help="Minimum index of simulation to analyze.")
parser.add_argument("max", type=int, help="Maximum index of simulation to analyze.")
parser.add_argument("dim", type=int, help='Dimension of the focal region.')
parser.add_argument("--geometry", dest='geom', help="Geometry under consideration.")
parser.add_argument("--prefix", dest='prefix', help="Folder prefix.")
parser.add_argument("--config", dest='configFile', help="INI file containing the parameters of the simualtion.")
args = parser.parse_args()
# We analyze the simulation in between min and max.
simu_dir = args.prefix+"_{0:05}.BQ".format(1)+"/../"
# -- Global analysis.
n_photons_file = open(simu_dir+args.geom+"_data.txt", 'w')
max_angle_file = open(simu_dir+args.geom+"_max_angle.txt", 'w')
# We determine if we analyze the shadow.
analyze_shadow_bool = (args.geom=="hna-h-artifical" or args.geom=="tra-h" or args.geom=="hna-h" or args.geom == "off-axis-hole")
if (analyze_shadow_bool):
n_photons_shadow_file = open(simu_dir+args.geom+"_shadow_data.txt", 'w+')
for i in range(args.min,args.max+1):
# -- We open the files.
simu_prefix = args.prefix+"_{0:05d}.BQ/{0:05d}.BQ/".format(i)
try:
n_photons_first_file = h5py.File(simu_prefix+"number_of_photons_first_harmonic.hdf5", 'r')
spatial_dist_first_file = h5py.File(simu_prefix+"spatial_dist_first_harmonic.hdf5", 'r')
n_photons_third_file = h5py.File(simu_prefix+"number_of_photons_third_harmonic.hdf5", 'r')
spatial_dist_third_file = h5py.File(simu_prefix+"spatial_dist_third_harmonic.hdf5", 'r')
config = configparser.ConfigParser(inline_comment_prefixes=";")
config.read(simu_prefix+"/"+args.configFile)
except:
continue
focal_length = float(config['Parabola']['focal_length'])
rmax = float(config['Parabola']['r_max'])
# -- We plot the total spectrum of photons for both harmonics.
n_photons_first = n_photons_first_file['/spectrum/Number of photons'][:]
wavelengths_first = n_photons_first_file['/spectrum/wavelength (m)'][:]
freqs_first = n_photons_first_file['/spectrum/frequency (Hz)'][:]
n_photons_third = n_photons_third_file['/spectrum/Number of photons'][:]
wavelengths_third = n_photons_third_file['/spectrum/wavelength (m)'][:]
freqs_third = n_photons_third_file['/spectrum/frequency (Hz)'][:]
phi_first = spatial_dist_first_file['/coordinates/phi'][:]
phi_first_deg = np.degrees(phi_first)
if args.dim == 3:
theta_first = spatial_dist_first_file['/coordinates/theta'][:]
theta_first_deg = np.degrees(theta_first)
# -- Support older versions of the WaveMixer.
try:
n_density_first = spatial_dist_first_file['/field/Component0'][:]
except:
n_density_first = spatial_dist_first_file['/field/ScalarField'][:]
phi_third = spatial_dist_third_file['/coordinates/phi'][:]
phi_third_deg = np.degrees(phi_third)
if args.dim == 3:
theta_third = spatial_dist_third_file['/coordinates/theta'][:]
theta_third_deg = np.degrees(theta_third)
# -- Support older versions of the WaveMixer.
try:
n_density_third = spatial_dist_third_file['/field/Component0'][:]
except:
n_density_third = spatial_dist_third_file['/field/ScalarField'][:]
# -- Determine the phi at which the emission is maximum.
max_idx_f = np.argmax(n_density_first)
if args.dim == 3:
max_phi = phi_first_deg[np.unravel_index(max_idx_f, n_density_first.shape)[0]]
else:
max_phi = phi_first_deg[max_idx_f]
# Create the figures.
plot_options = {"rasterized": True, "shading": "interp", "cmap":"magma"}
n_photons_fig = plt.figure(figsize=(6,4))
n_photons_f_spec_ax = n_photons_fig.add_subplot(221)
plt.plot(wavelengths_first/1.0e-9,n_photons_first)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Number of photons")
plt.ticklabel_format(style='sci',scilimits=(0,0),axis='y')
n_photons_t_spec_ax = n_photons_fig.add_subplot(222)
plt.plot(wavelengths_third/1.0e-9,n_photons_third)
plt.xlabel("Wavelength (nm)")
plt.ticklabel_format(style='sci',scilimits=(0,0),axis='y')
photon_density_f_ax = n_photons_fig.add_subplot(223)
if args.dim == 2:
plt.plot(phi_first_deg,n_density_first)
plt.xlabel('$\\phi$ (degrees)')
plt.ylabel('Photon density')
if args.dim == 3:
im=photon_density_f_ax.pcolormesh(theta_first_deg,phi_first_deg,n_density_first,**plot_options)
photon_density_f_ax.axis([0.0,360.0,0.0,180.0])
photon_density_f_ax.set_aspect('equal')
photon_density_f_ax.set_xlabel('$\\theta$ (degrees)')
photon_density_f_ax.set_ylabel('$\\phi$ (degrees)')
photon_density_f_ax.set_xticks(np.arange(0,365,45))
photon_density_f_ax.set_yticks(np.arange(0,185,30))
cbar=plt.colorbar(im,shrink=0.62,ax=photon_density_f_ax)
cbar.formatter.set_powerlimits((0,0))
cbar.update_ticks()
photon_density_t_ax = n_photons_fig.add_subplot(224)
if args.dim == 2:
plt.plot(phi_third_deg,n_density_third)
plt.xlabel('$\\phi$ (degrees)')
plt.ylabel('Photon density')
if args.dim ==3:
plt.pcolormesh(theta_third_deg,phi_third_deg,n_density_third,**plot_options)
plt.axis([0.0,360.0,0.0,180.0])
plt.gca().set_aspect('equal')
plt.xlabel('$\\theta$ (degrees)')
plt.xticks(np.arange(0,365,45))
plt.yticks(np.arange(0,185,30))
cbar=plt.colorbar(shrink=0.62)
cbar.formatter.set_powerlimits((0,0))
cbar.update_ticks()
#plt.ylabel('$\\phi$ (degrees)')
plt.tight_layout()
plt.savefig(simu_prefix+"n_photons.pdf", dpi=500)
plt.close()
# -- THESIS READY PLOTS
if args.dim == 3:
figDensityFirst = plt.figure(num="figFirst",figsize=(4,3))
ax = figDensityFirst.add_subplot(111)
im=ax.pcolormesh(theta_first_deg,phi_first_deg,n_density_first,**plot_options)
ax.axis([0.0,360.0,0.0,180.0])
ax.set_aspect('equal')
ax.set_xlabel('$\\theta$ (degrees)')
ax.set_ylabel('$\\phi$ (degrees)', rotation='horizontal', ha='left')
ax.yaxis.set_label_coords(-0.1, 1.1, transform=ax.transAxes)
ax.set_xticks(np.arange(0,365,45))
ax.set_yticks(np.arange(0,185,30))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.formatter.set_powerlimits((0,0))
cbar.update_ticks()
plt.savefig(simu_prefix+"photon_density_f.pdf", bbox_inches='tight', dpi=500)
figDensityThird = plt.figure(num="figThird",figsize=(4,3))
ax = figDensityThird.add_subplot(111)
im=ax.pcolormesh(theta_third_deg,phi_third_deg,n_density_third,**plot_options)
ax.axis([0.0,360.0,0.0,180.0])
ax.set_aspect('equal')
ax.set_xlabel('$\\theta$ (degrees)')
ax.set_ylabel('$\\phi$ (degrees)', rotation='horizontal', ha='left')
ax.yaxis.set_label_coords(-0.1, 1.1, transform=ax.transAxes)
ax.set_xticks(np.arange(0,365,45))
ax.set_yticks(np.arange(0,185,30))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.formatter.set_powerlimits((0,0))
cbar.update_ticks()
plt.savefig(simu_prefix+"photon_density_t.pdf", bbox_inches='tight', dpi=500)
# -- Contours of density plots.
contour_plot_options = {"linestyles": '--', "colors": 'k', 'linewidths': 0.5}
figDensityFirst = plt.figure(num="figFirst")
ax = figDensityFirst.get_axes()[0]
ax.contour(theta_first_deg,phi_first_deg,n_density_first,**contour_plot_options)
plt.savefig(simu_prefix+"photon_density_f_mod.pdf", bbox_inches='tight', dpi=500)
figDensityThird = plt.figure(num="figThird")
ax = figDensityThird.get_axes()[0]
ax.contour(theta_third_deg,phi_third_deg,n_density_third,**contour_plot_options)
plt.savefig(simu_prefix+"photon_density_t_mod.pdf", bbox_inches='tight', dpi=500)
print("------------- i = {} -----------------------".format(i))
# -- Write the number of photons.
print("The total number of photons is \n(1st harmonic): {} (3rd harmonic): {}".format(sum(n_photons_first),sum(n_photons_third)))
n_photons_file.write("{}\t{}\t{}".format(2*focal_length/rmax,sum(n_photons_first),sum(n_photons_third)))
n_photons_file.write("\n")
# -- Write the angles.
max_angle_file.write("{}\t{}".format(2*focal_length/rmax,max_phi))
max_angle_file.write("\n")
# -- We now plot the detectable number of photons (in the shadow).
if (analyze_shadow_bool):
# For the HNA parabola, we compute the number of photons that
# are emitted in the shadow of a hole bored in the deep region
# of the parabola. Contrary to the transmission parabola, this results
# in a forward facing shadow.
# Real hole.
# Actual hole in the parabola when computing the number of photons.
if args.geom == "hna-h":
r_hole = float(config['Parabola']['r_min'])
z_hole = r_hole**2/(4.0*focal_length)-focal_length
th_shadow = np.arctan2(r_hole,focal_length)
# -- Arrays for manual integration.
n_density_first_integrand = np.zeros_like(n_density_first)
n_density_third_integrand = np.zeros_like(n_density_third)
if args.dim == 3:
# -- We prepare the integrands of the photon densities.
for idx_i in range(len(theta_first)):
for idx_j in range(len(phi_first)):
n_density_first_integrand[idx_j][idx_i] = n_density_first[idx_j][idx_i]*np.sin(phi_first[idx_j])
for idx_i in range(len(theta_third)):
for idx_j in range(len(phi_third)):
n_density_third_integrand[idx_j][idx_i] = n_density_third[idx_j][idx_i]*np.sin(phi_third[idx_j])
# -- We interpolate the integrands.
n_density_first_interp = interp.interp2d(theta_first,phi_first,n_density_first_integrand, kind='cubic')
n_density_third_interp = interp.interp2d(theta_third,phi_third,n_density_third_integrand, kind='cubic')
# -- We integrate in the shadow.
n_photon_first_total = custom_dblquad(n_density_first_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_first_shadow = custom_dblquad(n_density_first_interp,0.0, th_shadow, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_total = custom_dblquad(n_density_third_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_shadow = custom_dblquad(n_density_third_interp,0.0, th_shadow, lambda x: 0.0, lambda x: 2.0*np.pi)
# - We store the values in the file.
n_photons_shadow_file.write("{}\t{}\t{}\t{}".format(2*focal_length/rmax,r_hole,n_photon_first_shadow[0],n_photon_third_shadow[0]))
n_photons_shadow_file.write("\n")
# Artifical hole.
# No hole in the simulation, so no loss of energy. We can get an approximate
# number of photons by scaling by the approximate energy loss a posteriori.
if args.geom =="hna-h-artifical":
r_hole = np.linspace(5.0e-3,15.0e-3,10)
# -- Arrays for manual integration.
n_density_first_integrand = np.zeros_like(n_density_first)
n_density_third_integrand = np.zeros_like(n_density_third)
if args.dim == 3:
# -- We prepare the integrands of the photon densities.
for idx_i in range(len(theta_first)):
for idx_j in range(len(phi_first)):
n_density_first_integrand[idx_j][idx_i] = n_density_first[idx_j][idx_i]*np.sin(phi_first[idx_j])
for idx_i in range(len(theta_third)):
for idx_j in range(len(phi_third)):
n_density_third_integrand[idx_j][idx_i] = n_density_third[idx_j][idx_i]*np.sin(phi_third[idx_j])
# -- We interpolate the integrands.
n_density_first_interp = interp.interp2d(theta_first,phi_first,n_density_first_integrand, kind='cubic')
n_density_third_interp = interp.interp2d(theta_third,phi_third,n_density_third_integrand, kind='cubic')
# -- We open a file for the current value of the focal length.
n_photons_hna_shadow_file = open(simu_prefix+"/"+args.geom+"_shadow.txt", 'w')
for j in range(r_hole.size):
z_hole = r_hole[j]**2/(4.0*focal_length)-focal_length
th_shadow = np.arctan2(r_hole[j],focal_length)
# -- We integrate in the shadow.
n_photon_first_total = custom_dblquad(n_density_first_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_first_shadow = custom_dblquad(n_density_first_interp,0.0, th_shadow, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_total = custom_dblquad(n_density_third_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_shadow = custom_dblquad(n_density_third_interp,0.0, th_shadow, lambda x: 0.0, lambda x: 2.0*np.pi)
# - We store the values in the file.
n_photons_hna_shadow_file.write("{}\t{}\t{}\t{}".format(2*focal_length/rmax,r_hole[j],n_photon_first_shadow[0],n_photon_third_shadow[0]))
n_photons_hna_shadow_file.write("\n")
# -- We print the values.
#print("---------- r_hole = {} -----------".format(r_hole[j]))
#print("Number of photons (total) :\n {} and {}".format(n_photon_first_total[0],n_photon_third_total[0]))
#print("Number of photons (shadow):\n {} and {}".format(n_photon_first_shadow[0],n_photon_third_shadow[0]))
n_photons_hna_shadow_file.close()
# For the transmission parabola, we compute the number of photons
# that are emitted in the shadow of the incident beam, plus an engineering
# factor of 2 degrees.
if args.geom=="tra-h":
z_rmax = np.abs(0.25*rmax**2/focal_length - focal_length)
angle_shadow = np.pi-np.arctan2(rmax,z_rmax) + np.radians(2.0)
angle_hole_deg = 180-np.degrees(angle_shadow)
# -- Arrays for manual integration.
n_density_first_integrand = np.zeros_like(n_density_first)
n_density_third_integrand = np.zeros_like(n_density_third)
if args.dim == 2:
# -- We prepare the integrands of the photon densities.
for idx_i in range(len(phi_first)):
n_density_first_integrand[idx_i] = n_density_first[idx_i]*np.sin(phi_first[idx_i])
for idx_i in range(len(phi_third)):
n_density_third_integrand[idx_i] = n_density_third[idx_i]*np.sin(phi_third[idx_i])
# -- We interpolate the integrands.
n_density_first_interp = interp.interp1d(phi_first,n_density_first_integrand, kind='cubic')
n_density_third_interp = interp.interp1d(phi_third,n_density_third_integrand, kind='cubic')
# -- We integrate in the shadow.
n_photon_first_total = integration.quad(n_density_first_interp,0.0, np.pi)
n_photon_first_shadow = integration.quad(n_density_first_interp,angle_shadow,np.pi)
n_photon_third_total = integration.quad(n_density_third_interp,0.0, np.pi)
n_photon_third_shadow = integration.quad(n_density_third_interp,angle_shadow,np.pi)
if args.dim == 3:
# -- We prepare the integrands of the photon densities.
for idx_i in range(len(theta_first)):
for idx_j in range(len(phi_first)):
n_density_first_integrand[idx_j][idx_i] = n_density_first[idx_j][idx_i]*np.sin(phi_first[idx_j])
for idx_i in range(len(theta_third)):
for idx_j in range(len(phi_third)):
n_density_third_integrand[idx_j][idx_i] = n_density_third[idx_j][idx_i]*np.sin(phi_third[idx_j])
# -- We interpolate the integrands.
n_density_first_interp = interp.interp2d(theta_first,phi_first,n_density_first_integrand, kind='cubic')
n_density_third_interp = interp.interp2d(theta_third,phi_third,n_density_third_integrand, kind='cubic')
# -- We integrate in the shadow.
n_photon_first_total = custom_dblquad(n_density_first_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_first_shadow = custom_dblquad(n_density_first_interp,angle_shadow, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_total = custom_dblquad(n_density_third_interp,0.0, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
n_photon_third_shadow = custom_dblquad(n_density_third_interp,angle_shadow, np.pi, lambda x: 0.0, lambda x: 2.0*np.pi)
# -- We print and save the values.
print("Number of photons (total) :\n {} and {}".format(n_photon_first_total[0],n_photon_third_total[0]))
print("Number of photons (shadow):\n {} and {}".format(n_photon_first_shadow[0],n_photon_third_shadow[0]))
n_photons_shadow_file.write("{}\t{}\t{}".format(2*focal_length/rmax,n_photon_first_shadow[0],n_photon_third_shadow[0]))
n_photons_shadow_file.write("\n")
# For an off-axis hole, we compute the position of the hole in cylindrical
# coordinates, then compute the number of photons over that region.
# NOTE: this is not correct yet -- it integrates over the hole itself, not over the shadow of the hole.
if args.geom == "off-axis-hole":
# -- Arrays for manual integration.
n_density_first_integrand = np.zeros_like(n_density_first)
n_density_third_integrand = np.zeros_like(n_density_third)
if args.dim == 3:
# -- We compute the position of the hole.
mask_x_pos = float(config['Model']['mask_x_pos'])
mask_y_pos = float(config['Model']['mask_y_pos'])
mask_radius = float(config['Model']['mask_radius'])
mask_r_pos = np.sqrt(mask_x_pos**2+mask_y_pos**2);
mask_t_pos = np.arctan2(mask_x_pos,mask_y_pos)
def z_abs(r):
z_abs = np.abs(r**2/(4*focal_length)-focal_length)
return z_abs
mask_r_min = mask_r_pos-mask_radius
mask_r_max = mask_r_pos+mask_radius
phi_min = np.arctan((mask_r_min)/z_abs(mask_r_min))
phi_max = np.arctan((mask_r_max)/z_abs(mask_r_max))
# -- Fix this.
if (2*focal_length < rmax):
phi_min = np.pi - phi_min
phi_max = np.pi - phi_max
def theta_min(phi):
r = np.abs(((1.0+np.sqrt(1.0+np.tan(phi)**2))/np.tan(phi)))*(2.0*focal_length)
#print(r, mask_r_pos-mask_radius, mask_r_pos+mask_radius)
sq_root = np.sqrt((mask_radius**2-(r-mask_r_pos)**2)/mask_r_pos**2)
theta_min = mask_t_pos - np.arcsin(sq_root)
return theta_min
return mask_t_pos - np.arctan(mask_radius/mask_r_pos) + np.pi
def theta_max(phi):
r = np.abs(((1.0+np.sqrt(1.0+np.tan(phi)**2))/np.tan(phi)))*(2.0*focal_length)
sq_root = np.sqrt((mask_radius**2-(r-mask_r_pos)**2)/mask_r_pos**2)
return mask_t_pos + np.arcsin(sq_root)
return mask_t_pos + np.arctan(mask_radius/mask_r_pos) + np.pi
# -- We prepare the integrands of the photon densities.
for idx_i in range(len(theta_first)):
for idx_j in range(len(phi_first)):
n_density_first_integrand[idx_j][idx_i] = n_density_first[idx_j][idx_i]*np.sin(phi_first[idx_j])
for idx_i in range(len(theta_third)):
for idx_j in range(len(phi_third)):
n_density_third_integrand[idx_j][idx_i] = n_density_third[idx_j][idx_i]*
|
np.sin(phi_third[idx_j])
|
numpy.sin
|