| prompt (string, length 15–655k) | completion (string, length 3–32.4k) | api (string, length 8–52) |
|---|---|---|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
from collections import deque
import cv2
import numpy as np
from PIL import Image, ImageSequence
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore.dataset.vision.utils import Inter
from mindspore.communication.management import get_rank, get_group_size
def _load_multipage_tiff(path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def _get_val_train_indices(length, fold, ratio=0.8):
assert 0 < ratio <= 1, "Train/total data ratio must be in range (0.0, 1.0]"
np.random.seed(0)
indices = np.arange(0, length, 1, dtype=np.int64)
np.random.shuffle(indices)
if fold is not None:
indices = deque(indices)
indices.rotate(fold * round((1.0 - ratio) * length))
indices = np.array(indices)
train_indices = indices[:round(ratio * len(indices))]
val_indices = indices[round(ratio * len(indices)):]
else:
train_indices = indices
val_indices = []
return train_indices, val_indices
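# Added illustration (hypothetical helper, not part of the original pipeline): how the deque
# rotation above yields different cross-validation folds.
def _fold_split_example():
    # With length=10 and ratio=0.8, fold k rotates the shuffled indices by k*round(0.2*10)
    # positions, so each fold holds out a different 20% of the data for validation.
    train_idx, val_idx = _get_val_train_indices(10, fold=1)
    return len(train_idx), len(val_idx)  # (8, 2)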
def data_post_process(img, mask):
img = np.expand_dims(img, axis=0)
mask = (mask > 0.5).astype(int)
mask = (np.arange(mask.max() + 1) == mask[..., None]).astype(int)
mask = mask.transpose(2, 0, 1).astype(np.float32)
return img, mask
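# Worked example (hypothetical helper, added for clarity): the thresholded mask is expanded
# into a one-hot volume with one channel per class.
def _post_process_example():
    img = np.zeros((1, 2), dtype=np.float32)
    mask = np.array([[0.2, 0.9]])
    # Returns img of shape (1, 1, 2) and a one-hot mask of shape (2, 1, 2):
    # channel 0 marks background, channel 1 marks foreground.
    return data_post_process(img, mask)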
def train_data_augmentation(img, mask):
h_flip = np.random.random()
if h_flip > 0.5:
img = np.flipud(img)
mask = np.flipud(mask)
v_flip = np.random.random()
if v_flip > 0.5:
img = np.fliplr(img)
mask = np.fliplr(mask)
left = int(
|
np.random.uniform()
|
numpy.random.uniform
|
import os
import sys
from datetime import datetime,timedelta
import logging
import pathlib
import tempfile
import subprocess
import shutil
from typing import Union
from time import time
import numpy as np
import scipy as sp
import scipy.interpolate  # make sp.interpolate available for the interpolators used below
from numba import jit, prange
import netCDF4 as nc
from netCDF4 import Dataset
from matplotlib.transforms import Bbox
import seawater as sw
import xarray as xr
from pyschism.mesh.base import Nodes, Elements
from pyschism.mesh.vgrid import Vgrid
logger = logging.getLogger(__name__)
def get_database(date, Bbox=None):
if date >= datetime(2018, 12, 4):
database = f'GLBy0.08/expt_93.0'
elif date >= datetime(2018, 1, 1) and date < datetime(2018, 12, 4):
database = f'GLBv0.08/expt_93.0'
elif date >= datetime(2017, 10, 1) and date < datetime(2018, 1, 1):
database = f'GLBv0.08/expt_92.9'
elif date >= datetime(2017, 6, 1) and date < datetime(2017, 10, 1):
database = f'GLBv0.08/expt_57.7'
elif date >= datetime(2017, 2, 1) and date < datetime(2017, 6, 1):
database = f'GLBv0.08/expt_92.8'
elif date >= datetime(2016, 5, 1) and date < datetime(2017, 2, 1):
database = f'GLBv0.08/expt_57.2'
elif date >= datetime(2016, 1, 1) and date < datetime(2016, 5, 1):
database = f'GLBv0.08/expt_56.3'
elif date >= datetime(1994, 1, 1) and date < datetime(2016, 1, 1):
database = f'GLBv0.08/expt_53.X/data/{date.year}'
else:
logger.info(f'No data for {date}')
return database
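# Added usage example (hypothetical helper, not in the original module): dates before 2016 fall
# into the multi-year GLBv0.08/expt_53.X archive.
def _get_database_example():
    return get_database(datetime(2015, 6, 1))  # -> 'GLBv0.08/expt_53.X/data/2015'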
def get_idxs(date, database, bbox):
if date.strftime("%Y-%m-%d") >= datetime.now().strftime("%Y-%m-%d"):
date2 = datetime.now() - timedelta(days=1)
baseurl = f'https://tds.hycom.org/thredds/dodsC/{database}/FMRC/runs/GLBy0.08_930_FMRC_RUN_{date2.strftime("%Y-%m-%dT12:00:00Z")}?depth[0:1:-1],lat[0:1:-1],lon[0:1:-1],time[0:1:-1]'
else:
baseurl=f'https://tds.hycom.org/thredds/dodsC/{database}?lat[0:1:-1],lon[0:1:-1],time[0:1:-1],depth[0:1:-1]'
ds=Dataset(baseurl)
time1=ds['time']
times=nc.num2date(time1,units=time1.units,only_use_cftime_datetimes=False)
lon=ds['lon'][:]
lat=ds['lat'][:]
dep=ds['depth'][:]
lat_idxs=np.where((lat>=bbox.ymin-2.0)&(lat<=bbox.ymax+2.0))[0]
lon_idxs=np.where((lon>=bbox.xmin-2.0) & (lon<=bbox.xmax+2.0))[0]
lon=lon[lon_idxs]
lat=lat[lat_idxs]
#logger.info(lon_idxs)
#logger.info(lat_idxs)
lon_idx1=lon_idxs[0].item()
lon_idx2=lon_idxs[-1].item()
#logger.info(f'lon_idx1 is {lon_idx1}, lon_idx2 is {lon_idx2}')
lat_idx1=lat_idxs[0].item()
lat_idx2=lat_idxs[-1].item()
#logger.info(f'lat_idx1 is {lat_idx1}, lat_idx2 is {lat_idx2}')
for ilon in np.arange(len(lon)):
if lon[ilon] > 180:
lon[ilon] = lon[ilon]-360.
#lonc=(np.max(lon)+np.min(lon))/2.0
#logger.info(f'lonc is {lonc}')
#latc=(np.max(lat)+np.min(lat))/2.0
#logger.info(f'latc is {latc}')
x2, y2=transform_ll_to_cpp(lon, lat)
idxs=np.where( date == times)[0]
#check if time_idx is empty
if len(idxs) == 0:
#If data for this date is missing, fall back to one of the following days (search up to 3 days ahead); otherwise stop.
for i in np.arange(0,3):
date_after = (date + timedelta(days=int(i)+1))
logger.info(f'Trying to fill the missing data with data from {date_after}')
idxs=np.where(date_after == times)[0]
if len(idxs) == 0:
continue
else:
break
if len(idxs) ==0:
logger.info(f'No data for date {date}')
sys.exit()
time_idx=idxs.item()
ds.close()
return time_idx, lon_idx1, lon_idx2, lat_idx1, lat_idx2, x2, y2
def transform_ll_to_cpp(lon, lat, lonc=-77.07, latc=24.0):
#lonc=(np.max(lon)+np.min(lon))/2.0
#logger.info(f'lonc is {lonc}')
#latc=(np.max(lat)+np.min(lat))/2.0
#logger.info(f'latc is {latc}')
longitude=lon/180*np.pi
latitude=lat/180*np.pi
radius=6378206.4
loncc=lonc/180*np.pi
latcc=latc/180*np.pi
lon_new=[radius*(longitude[i]-loncc)*np.cos(latcc) for i in np.arange(len(longitude))]
lat_new=[radius*latitude[i] for i in np.arange(len(latitude))]
return np.array(lon_new), np.array(lat_new)
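# Added check (hypothetical helper): in the CPP projection above, one degree of longitude east
# of lonc at the reference latitude latc=24N maps to roughly 1.017e5 metres.
def _cpp_projection_example():
    return transform_ll_to_cpp(np.array([-76.07]), np.array([24.0]))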
def interp_to_points_3d(dep, y2, x2, bxyz, val):
idxs = np.where(abs(val) > 10000)
val[idxs] = float('nan')
val_fd = sp.interpolate.RegularGridInterpolator((dep,y2,x2),np.squeeze(val),'linear', bounds_error=False, fill_value = float('nan'))
val_int = val_fd(bxyz)
idxs = np.isnan(val_int)
if np.sum(idxs) != 0:
val_int[idxs] = sp.interpolate.griddata(bxyz[~idxs,:], val_int[~idxs], bxyz[idxs,:],'nearest')
idxs = np.isnan(val_int)
if np.sum(idxs) != 0:
logger.info(f'There is still missing value for {val}')
sys.exit()
return val_int
def interp_to_points_2d(y2, x2, bxy, val):
idxs = np.where(abs(val) > 10000)
val[idxs] = float('nan')
val_fd = sp.interpolate.RegularGridInterpolator((y2,x2),np.squeeze(val),'linear', bounds_error=False, fill_value = float('nan'))
val_int = val_fd(bxy)
idxs = np.isnan(val_int)
if np.sum(idxs) != 0:
val_int[idxs] = sp.interpolate.griddata(bxy[~idxs,:], val_int[~idxs], bxy[idxs,:],'nearest')
idxs = np.isnan(val_int)
if np.sum(idxs) != 0:
logger.info(f'There is still missing value for {val}')
sys.exit()
return val_int
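# Added sketch (hypothetical helper): a query point outside the grid gets NaN from the linear
# interpolator and is then back-filled by nearest-neighbour griddata, mirroring the fallback above.
def _interp_2d_example():
    y = np.array([0.0, 1.0])
    x = np.array([0.0, 1.0])
    val = np.array([[0.0, 1.0], [2.0, 3.0]])
    pts = np.array([[0.5, 0.5], [2.0, 2.0]])  # second point lies outside the grid
    return interp_to_points_2d(y, x, pts, val)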
def ConvertTemp(salt, temp, dep):
nz = temp.shape[0]
ny = temp.shape[1]
nx = temp.shape[2]
pr = np.ones(temp.shape)
pre = pr*dep[:,None, None]
Pr = np.zeros(temp.shape)
ptemp = sw.ptmp(salt, temp, pre, Pr)*1.00024
return ptemp
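# Added sketch (hypothetical helper; relies on the seawater package imported above): a two-level
# column of 10 degC, 35 PSU water. The potential temperature at 100 dbar comes out slightly
# below the in-situ value, as expected.
def _convert_temp_example():
    salt = np.full((2, 1, 1), 35.0)
    temp = np.full((2, 1, 1), 10.0)
    dep = np.array([0.0, 100.0])
    return ConvertTemp(salt, temp, dep)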
class OpenBoundaryInventory:
def __init__(self, hgrid, vgrid=None):
self.hgrid = hgrid
self.vgrid = Vgrid.default() if vgrid is None else vgrid
def fetch_data(self, outdir: Union[str, os.PathLike], start_date, rnday, elev2D=True, TS=True, UV=True, adjust2D=False, lats=None, msl_shifts=None):
outdir = pathlib.Path(outdir)
self.start_date = start_date
self.rnday=rnday
self.timevector=np.arange(
self.start_date,
self.start_date + timedelta(days=self.rnday+1),
timedelta(days=1)).astype(datetime)
#Get open boundary
gdf=self.hgrid.boundaries.open.copy()
opbd=[]
for boundary in gdf.itertuples():
opbd.extend(list(boundary.indexes))
blon = self.hgrid.coords[opbd,0]
blat = self.hgrid.coords[opbd,1]
#logger.info(f'blon min {np.min(blon)}, max {np.max(blon)}')
NOP = len(blon)
#calculate zcor for 3D
if TS or UV:
vd=Vgrid.open(self.vgrid)
sigma=vd.sigma
#get bathymetry
depth = self.hgrid.values
#compute zcor
zcor = depth[:,None]*sigma
nvrt=zcor.shape[1]
#zcor2=zcor[opbd,:]
#idxs=np.where(zcor2 > 5000)
#zcor2[idxs]=5000.0-1.0e-6
#construct schism grid
#x2i=np.tile(xi,[nvrt,1]).T
#y2i=np.tile(yi,[nvrt,1]).T
#bxyz=np.c_[zcor2.reshape(np.size(zcor2)),y2i.reshape(np.size(y2i)),x2i.reshape(np.size(x2i))]
#logger.info('Computing SCHISM zcor is done!')
#create netcdf
ntimes=self.rnday+1
nComp1=1
nComp2=2
one=1
#ndt=np.zeros([ntimes])
if elev2D:
#timeseries_el=np.zeros([ntimes,NOP,nComp1])
#create netcdf
dst_elev = Dataset(outdir / 'elev2D.th.nc', 'w', format='NETCDF4')
#dimensions
dst_elev.createDimension('nOpenBndNodes', NOP)
dst_elev.createDimension('one', one)
dst_elev.createDimension('time', None)
dst_elev.createDimension('nLevels', one)
dst_elev.createDimension('nComponents', nComp1)
#variables
dst_elev.createVariable('time_step', 'f', ('one',))
dst_elev['time_step'][:] = 86400
dst_elev.createVariable('time', 'f', ('time',))
#dst_elev['time'][:] = ndt
dst_elev.createVariable('time_series', 'f', ('time', 'nOpenBndNodes', 'nLevels', 'nComponents'))
#dst_elev['time_series'][:,:,:,:] = timeseries_el
if TS:
#timeseries_s=np.zeros([ntimes,NOP,nvrt,nComp1])
dst_salt = Dataset(outdir / 'SAL_3D.th.nc', 'w', format='NETCDF4')
#dimensions
dst_salt.createDimension('nOpenBndNodes', NOP)
dst_salt.createDimension('one', one)
dst_salt.createDimension('time', None)
dst_salt.createDimension('nLevels', nvrt)
dst_salt.createDimension('nComponents', nComp1)
#variables
dst_salt.createVariable('time_step', 'f', ('one',))
dst_salt['time_step'][:] = 86400
dst_salt.createVariable('time', 'f', ('time',))
#dst_salt['time'][:] = ndt
dst_salt.createVariable('time_series', 'f', ('time', 'nOpenBndNodes', 'nLevels', 'nComponents'))
#temp
#timeseries_t=np.zeros([ntimes,NOP,nvrt,nComp1])
dst_temp = Dataset(outdir / 'TEM_3D.th.nc', 'w', format='NETCDF4')
#dimensions
dst_temp.createDimension('nOpenBndNodes', NOP)
dst_temp.createDimension('one', one)
dst_temp.createDimension('time', None)
dst_temp.createDimension('nLevels', nvrt)
dst_temp.createDimension('nComponents', nComp1)
#variables
dst_temp.createVariable('time_step', 'f', ('one',))
dst_temp['time_step'][:] = 86400
dst_temp.createVariable('time', 'f', ('time',))
#dst_temp['time'][:] = ndt
dst_temp.createVariable('time_series', 'f', ('time', 'nOpenBndNodes', 'nLevels', 'nComponents'))
#dst_temp['time_series'][:,:,:,:] = timeseries_t
if UV:
#timeseries_uv=np.zeros([ntimes,NOP,nvrt,nComp2])
dst_uv = Dataset(outdir / 'uv3D.th.nc', 'w', format='NETCDF4')
#dimensions
dst_uv.createDimension('nOpenBndNodes', NOP)
dst_uv.createDimension('one', one)
dst_uv.createDimension('time', None)
dst_uv.createDimension('nLevels', nvrt)
dst_uv.createDimension('nComponents', nComp2)
#variables
dst_uv.createVariable('time_step', 'f', ('one',))
dst_uv['time_step'][:] = 86400
dst_uv.createVariable('time', 'f', ('time',))
#dst_uv['time'][:] = ndt
dst_uv.createVariable('time_series', 'f', ('time', 'nOpenBndNodes', 'nLevels', 'nComponents'))
#dst_uv['time_series'][:,:,:,:] = timeseries_uv
logger.info('**** Accessing GOFS data*****')
t0=time()
for it, date in enumerate(self.timevector):
database=get_database(date)
logger.info(f'Fetching data for {date} from database {database}')
#loop over each open boundary
ind1 = 0
ind2 = 0
for boundary in gdf.itertuples():
opbd = list(boundary.indexes)
ind1 = ind2
ind2 = ind1 + len(opbd)
#logger.info(f'ind1 = {ind1}, ind2 = {ind2}')
blon = self.hgrid.coords[opbd,0]
blat = self.hgrid.coords[opbd,1]
xi,yi = transform_ll_to_cpp(blon, blat)
bxy = np.c_[yi, xi]
if TS or UV:
zcor2=zcor[opbd,:]
idxs=np.where(zcor2 > 5000)
zcor2[idxs]=5000.0-1.0e-6
#construct schism grid
x2i=np.tile(xi,[nvrt,1]).T
y2i=np.tile(yi,[nvrt,1]).T
bxyz=np.c_[zcor2.reshape(np.size(zcor2)),y2i.reshape(np.size(y2i)),x2i.reshape(np.size(x2i))]
xmin, xmax = np.min(blon), np.max(blon)
ymin, ymax = np.min(blat), np.max(blat)
if date.strftime("%Y-%m-%d") >= datetime(2017, 2, 1).strftime("%Y-%m-%d") and \
date.strftime("%Y-%m-%d") < datetime(2017, 6, 1).strftime("%Y-%m-%d") or \
date.strftime("%Y-%m-%d") >= datetime(2017, 10, 1).strftime("%Y-%m-%d"):
xmin = xmin + 360. if xmin < 0 else xmin
xmax = xmax + 360. if xmax < 0 else xmax
bbox = Bbox.from_extents(xmin, ymin, xmax, ymax)
else:
bbox = Bbox.from_extents(xmin, ymin, xmax, ymax)
#logger.info(f'xmin is {xmin}, xmax is {xmax}')
time_idx, lon_idx1, lon_idx2, lat_idx1, lat_idx2, x2, y2 = get_idxs(date, database, bbox)
if date.strftime("%Y-%m-%d") >= datetime.now().strftime("%Y-%m-%d"):
date2 = datetime.now() - timedelta(days=1)
url = f'https://tds.hycom.org/thredds/dodsC/{database}/FMRC/runs/GLBy0.08_930_FMRC_RUN_' + \
f'{date2.strftime("%Y-%m-%dT12:00:00Z")}?depth[0:1:-1],lat[{lat_idx1}:1:{lat_idx2}],' + \
f'lon[{lon_idx1}:1:{lon_idx2}],time[{time_idx}],' + \
f'surf_el[{time_idx}][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_temp[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'salinity[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_u[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_v[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}]'
else:
url=f'https://tds.hycom.org/thredds/dodsC/{database}?lat[{lat_idx1}:1:{lat_idx2}],' + \
f'lon[{lon_idx1}:1:{lon_idx2}],depth[0:1:-1],time[{time_idx}],' + \
f'surf_el[{time_idx}][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_temp[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'salinity[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_u[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}],' + \
f'water_v[{time_idx}][0:1:39][{lat_idx1}:1:{lat_idx2}][{lon_idx1}:1:{lon_idx2}]'
#logger.info(url)
ds=Dataset(url)
dep=ds['depth'][:]
logger.info('****Interpolation starts****')
#ndt[it]=it*24*3600.
if elev2D:
#ssh
ssh=np.squeeze(ds['surf_el'][:,:])
ssh_int = interp_to_points_2d(y2, x2, bxy, ssh)
dst_elev['time'][it] = it*24*3600.
if adjust2D:
elev_adjust = np.interp(blat, lats, msl_shifts)
dst_elev['time_series'][it,ind1:ind2,0,0] = ssh_int + elev_adjust
else:
dst_elev['time_series'][it,ind1:ind2,0,0] = ssh_int
if TS:
#salt
salt = np.squeeze(ds['salinity'][:,:,:])
salt_int = interp_to_points_3d(dep, y2, x2, bxyz, salt)
salt_int = salt_int.reshape(zcor2.shape)
#timeseries_s[it,:,:,0]=salt_int
dst_salt['time'][it] = it*24*3600.
dst_salt['time_series'][it,ind1:ind2,:,0] = salt_int
#temp
temp = np.squeeze(ds['water_temp'][:,:,:])
#Convert temp to potential temp
ptemp = ConvertTemp(salt, temp, dep)
temp_int = interp_to_points_3d(dep, y2, x2, bxyz, ptemp)
temp_int = temp_int.reshape(zcor2.shape)
#timeseries_t[it,:,:,0]=temp_int
dst_temp['time'][it] = it*24*3600.
dst_temp['time_series'][it,ind1:ind2,:,0] = temp_int
if UV:
uvel=np.squeeze(ds['water_u'][:,:,:])
vvel=np.squeeze(ds['water_v'][:,:,:])
dst_uv['time'][it] = it*24*3600.
#uvel
uvel_int = interp_to_points_3d(dep, y2, x2, bxyz, uvel)
uvel_int = uvel_int.reshape(zcor2.shape)
dst_uv['time_series'][it,ind1:ind2,:,0] = uvel_int
#vvel
vvel_int = interp_to_points_3d(dep, y2, x2, bxyz, vvel)
vvel_int = vvel_int.reshape(zcor2.shape)
dst_uv['time_series'][it,ind1:ind2,:,1] = vvel_int
#timeseries_uv[it,:,:,1]=vvel_int
logger.info(f'Writing *th.nc takes {time()-t0} seconds')
class Nudge:
def __init__(self):
self.include = None
def gen_nudge(self, outdir: Union[str, os.PathLike], hgrid, rlmax = 1.5, rnu_day=0.25):
@jit(nopython=True, parallel=True)
def compute_nudge(lon, lat, nnode, opbd, out):
rnu_max = 1.0 / rnu_day / 86400.0
rnu = 0
for idn in prange(nnode):
if idn in opbd:
rnu = rnu_max
distmin = 0.
else:
distmin = np.finfo(np.float64).max
for j in opbd:
tmp = np.square(lon[idn]-lon[j]) + np.square(lat[idn]-lat[j])
rl2 = np.sqrt(tmp)
if rl2 < distmin:
distmin=rl2
rnu = 0.
if distmin <= rlmax:
rnu = (1-distmin/rlmax)*rnu_max
#idxs_nudge[idn]=1 #idn
out[idn] = rnu
outdir = pathlib.Path(outdir)
#get nudge zone
lon=hgrid.coords[:,0]
lat=hgrid.coords[:,1]
#Get open boundary
gdf=hgrid.boundaries.open.copy()
opbd=[]
for boundary in gdf.itertuples():
opbd.extend(list(boundary.indexes))
opbd = np.array(opbd)
elnode=hgrid.elements.array
NE, NP = len(elnode), len(lon)
out = np.zeros([NP])
idxs_nudge=np.zeros(NP, dtype=int)
t0 = time()
#compute_nudge(lon, lat, NP, opbd2, idxs_nudge, out)
compute_nudge(lon, lat, NP, opbd, out)
idxs=np.where(out > 0)[0]
idxs_nudge[idxs]=1
#expand nudging marker to neighbor nodes
idxs=np.where(np.max(out[elnode], axis=1) > 0)[0]
fp=elnode[idxs,-1] < 0
idxs_nudge[elnode[idxs[fp],:3]]=1
idxs_nudge[elnode[idxs[~fp],:]]=1
#idxs_nudge=np.delete(idxs_nudge, np.where(idxs_nudge == -99))
idxs=np.where(idxs_nudge == 1)[0]
self.include=idxs
#logger.info(f'len of nudge idxs is {len(idxs)}')
logger.info(f'It took {time() -t0} seconds to calculate the nudge coefficient')
nudge = [f"{rlmax}, {rnu_day}"]
nudge.extend("\n")
nudge.append(f"{NE} {NP}")
nudge.extend("\n")
hgrid = hgrid.to_dict()
nodes = hgrid['nodes']
elements = hgrid['elements']
for idn, (coords, values) in nodes.items():
line = [f"{idn}"]
line.extend([f"{x:<.7e}" for x in coords])
line.extend([f"{out[int(idn)-1]:<.7e}"])
line.extend("\n")
nudge.append(" ".join(line))
for id, element in elements.items():
line = [f"{id}"]
line.append(f"{len(element)}")
line.extend([f"{e}" for e in element])
line.extend("\n")
nudge.append(" ".join(line))
with open(outdir / 'TEM_nudge.gr3','w+') as fid:
fid.writelines(nudge)
shutil.copy2(outdir / 'TEM_nudge.gr3', outdir / 'SAL_nudge.gr3')
return self.include
def fetch_data(self, outdir: Union[str, os.PathLike], hgrid, vgrid, start_date, rnday):
outdir = pathlib.Path(outdir)
self.start_date = start_date
self.rnday=rnday
self.timevector=np.arange(
self.start_date,
self.start_date + timedelta(days=self.rnday+1),
timedelta(days=1)).astype(datetime)
vd=Vgrid.open(vgrid)
sigma=vd.sigma
#Get the index for nudge
include = self.gen_nudge(outdir,hgrid)
#get coords of SCHISM
loni=hgrid.nodes.coords[:,0]
lati=hgrid.nodes.coords[:,1]
#get bathymetry
depth = hgrid.values
#compute zcor
zcor = depth[:,None]*sigma
nvrt=zcor.shape[1]
#logger.info(f'zcor at node 1098677 is {zcor[1098676,:]}')
#Get open nudge array
nlon = hgrid.coords[include, 0]
nlat = hgrid.coords[include, 1]
xi,yi = transform_ll_to_cpp(nlon, nlat)
bxy = np.c_[yi, xi]
zcor2=zcor[include,:]
idxs=np.where(zcor2 > 5000)
#logger.info(idxs)
zcor2[idxs]=5000.0-1.0e-6
#logger.info(f'zcor2 at node 200 is {zcor2[199,:]}')
#construct schism grid
x2i=
|
np.tile(xi,[nvrt,1])
|
numpy.tile
|
from lifelines import KaplanMeierFitter
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
import pickle
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC2':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]
import sys
sys.path.append(str(cwd))
D, samples = pickle.load(open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data' / 'survival' / 'experiment_1' / 'sim_data.pkl', 'rb'))
# instance_sum_evaluations, instance_sum_histories, weights = pickle.load(open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data' / 'survival' / 'experiment_1' / 'instance_model_sum.pkl', 'rb'))
sample_sum_evaluations, sample_sum_histories, weights = pickle.load(open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data' / 'survival' / 'experiment_1' / 'sample_model_attention_dynamic.pkl', 'rb'))
import tensorflow as tf
# from model.Instance_MIL import InstanceModels, RaggedModels
from model.Sample_MIL import InstanceModels, RaggedModels
from model import DatasetsUtils
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
##perform embeddings with a zero vector for index 0
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand']]
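# Added check (illustrative, not in the original script): index 0 maps to a zero vector while
# indices 1 and 2 map to the two one-hot strand encodings.
assert np.array_equal(strand_emb_mat, np.array([[0., 0.], [1., 0.], [0., 1.]]))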
indexes = [np.where(D['sample_idx'] == idx) for idx in range(len(samples['classes']))]
five_p = np.array([D['seq_5p'][i] for i in indexes], dtype='object')
three_p = np.array([D['seq_3p'][i] for i in indexes], dtype='object')
ref = np.array([D['seq_ref'][i] for i in indexes], dtype='object')
alt = np.array([D['seq_alt'][i] for i in indexes], dtype='object')
strand = np.array([D['strand_emb'][i] for i in indexes], dtype='object')
five_p_loader = DatasetsUtils.Map.FromNumpy(five_p, tf.int32)
three_p_loader = DatasetsUtils.Map.FromNumpy(three_p, tf.int32)
ref_loader = DatasetsUtils.Map.FromNumpy(ref, tf.int32)
alt_loader = DatasetsUtils.Map.FromNumpy(alt, tf.int32)
strand_loader = DatasetsUtils.Map.FromNumpy(strand, tf.float32)
cancer_strat =
|
np.zeros_like(samples['classes'])
|
numpy.zeros_like
|
#-*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os, sys
import numpy as np
import time
sys.path.append(os.path.dirname(sys.path[0])) # allow package imports from the parent directory
from layers import conv2d, depthwise_conv2d, relu, pooling
import input_data_zynq as input_data
import models_zynq as models
# from gen_bin import save_bin
# os.chdir('../')
def data_stats(train_data, val_data, test_data):
"""mean and std_dev
Args:
train_data: (36923, 490)
val_data: (4445, 490)
test_data: (4890, 490)
Return: (mean, std_dev)
Result:
mean: -3.975149608704592, 220.81257374779565
std_dev: 0.8934739293234528
"""
print(train_data.shape, val_data.shape, test_data.shape)
all_data = np.concatenate((train_data, val_data, test_data), axis=0)
std_dev = 255. / (all_data.max() - all_data.min())
# mean_ = all_data.mean()
mean_ = 255. * all_data.min() / (all_data.min() - all_data.max())
return (mean_, std_dev)
def fp32_to_uint8(r):
# method 1
# s = (r.max() - r.min()) / 255.
# z = 255. - r.max() / s
# q = r / s + z
# tf_mfcc
# std_dev = 0.8934739293234528
# mean_ = 220.81257374779565
# q = r / std_dev + mean_
# q = q.astype(np.uint8)
# new_mfcc
std_dev = 0.9671023485944863
mean_ = 220.46072856666711
q = r / std_dev + mean_
q = q.astype(np.uint8)
return q
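# Added example (hypothetical helper): the affine map q = r/std_dev + mean_ sends r=0 to the
# zero-point (~220) and r of about 33.4 to ~255, consistent with the statistics data_stats derives.
def _quantization_example():
    return fp32_to_uint8(np.array([0.0, 33.4], dtype=np.float32))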
def simulate_net(input_data):
# tf mfcc parameters
# bias_scale = np.array([0.0008852639002725482, 0.0035931775346398354, 0.00785899069160223, 0.0014689048985019326, 0.0015524440677836537, 0.0028435662388801575, 0.001141879241913557, 0.0007087105768732727, 0.009289528243243694, 0.0015117411967366934, 0.004092711955308914])
# result_sacale = np.array([0.20100615918636322, 0.42823609709739685, 0.23841151595115662, 0.1732778549194336, 0.21222199499607086, 0.15781369805335999, 0.12740808725357056, 0.1111915186047554, 0.11338130384683609, 0.19232141971588135, 0.17540767788887024])
# add_scale = np.array([0.1732778549194336, 0.20100615918636322, 0.26455792784690857, 0.19232141971588135, 0.12740808725357056, 0.20970593392848969])
# new mfcc parameters V100
# bias_scale = np.array([0.0005171183147467673, 0.0021205246448516846, 0.004102946724742651, 0.0007573990151286125, 0.0009573157876729965, 0.0045410459861159325, 0.0007452332065440714, 0.0003749248862732202, 0.0028607698623090982, 0.0014322539791464806, 0.0036672416608780622])
# result_sacale = np.array([0.12663139402866364, 0.20024137198925018, 0.13141511380672455, 0.11106141656637192, 0.1328522115945816, 0.08316611498594284, 0.08792730420827866, 0.08202825486660004, 0.1061563566327095, 0.17049182951450348, 0.18540261685848236])
# add_scale = np.array([0.11106141656637192, 0.12663139402866364, 0.13807182013988495, 0.17049182951450348, 0.08792730420827866, 0.20207594335079193])
# new mfcc parameters ACT
bias_scale = np.array([0.0006772454944439232, 0.0019126507686451077, 0.004039060324430466, 0.0009780717082321644, 0.0011637755669653416, 0.002527922624722123, 0.000784197065513581, 0.00036984056350775063, 0.0027576638385653496, 0.0018317087087780237, 0.003179859137162566])
result_sacale = np.array([0.15135173499584198, 0.20287899672985077, 0.1442921757698059, 0.11213209480047226, 0.1550600677728653, 0.0902664065361023, 0.07894150912761688, 0.0978255569934845, 0.08960756659507751, 0.1850544661283493, 0.19603444635868073])
add_scale = np.array([0.11213209480047226, 0.15135173499584198, 0.16829396784305573, 0.1850544661283493, 0.07894150912761688, 0.1915309578180313])
scale = bias_scale / result_sacale
# scale = (np.round(scale * 2**10) / 2**10).astype(np.float32)
# add_scale = (np.round(add_scale * 2**10) / 2**10).astype(np.float32)
scale = np.round(scale * 2**10).astype(np.int32)
add_scale = np.round(add_scale * 2**10).astype(np.int32)
# change division to multiplication
add_scale[2] = np.floor(1 / add_scale[2] * 2**15).astype(np.int32)
add_scale[5] = np.floor(1 / add_scale[5] * 2**15).astype(np.int32)
s_iwr = {
'stem_conv': scale[0],
'inverted_residual_1_expansion': scale[1], 'inverted_residual_1_depthwise': scale[2], 'inverted_residual_1_projection': scale[3],
'inverted_residual_2_expansion': scale[4], 'inverted_residual_2_depthwise': scale[5], 'inverted_residual_2_projection': scale[6],
'inverted_residual_3_expansion': scale[7], 'inverted_residual_3_depthwise': scale[8], 'inverted_residual_3_projection': scale[9],
'Conv2D': scale[10]
}
s_add = {
'inverted_residual_1_add': add_scale[:3],
'inverted_residual_3_add': add_scale[3:],
}
# model_dir = 'test_log/mobilenetv3_quant_gen'
model_dir = 'test_log/mobilenetv3_quant_mfcc_gen'
if args.save_layers_output == True:
# define output directory
layers_output_dir = os.path.join(model_dir, 'layers_output')
if os.path.exists(layers_output_dir) == False:
os.mkdir(layers_output_dir)
# save input
np.save(os.path.join(layers_output_dir, 'input_data.npy'), input_data)
################## stem conv ##################
# print('stem conv')
new_data = input_data.astype(np.float32)
new_data = new_data - 221.
# s_iwr = tf.constant(0.0008852639002725482 / 0.20100615918636322, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_stem_conv_conv_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_stem_conv_conv_Conv2D_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,0,3)
bias = bias.astype(np.float32)
# print(weight)
# print(bias)
output = depthwise_conv2d(new_data, weight, stride=(2,2), pad="SAME")
output += bias
output = output.astype(np.int32) * s_iwr['stem_conv']
output = output / 2**10
output = relu(output)
output += 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
add_2 = output_uint8.copy() # keep a copy for the residual addition later
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'stem_conv.npy'), output_uint8)
################## inverted residual 1 expansion ##################
# print('inverted residual 1 expansion')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.0035931775346398354 / 0.42823609709739685, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_expansion_conv_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_expansion_conv_Conv2D_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_1_expansion']
output = output / 2**10
output = relu(output)
output += 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_1_expansion.npy'), output_uint8)
################## inverted residual 1 depthwise ##################
# print('inverted residual 1 depthwise')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.00785899069160223 / 0.23841151595115662, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_depthwise_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_depthwise_depthwise_conv_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = depthwise_conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_1_depthwise']
output = output / 2**10
output = relu(output)
output += 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_1_depthwise.npy'), output_uint8)
################## inverted residual 1 projection ##################
# print('inverted residual 1 projection')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.0014689048985019326 / 0.1732778549194336, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_projection_conv_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_1_projection_conv_Conv2D_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_1_projection']
output = output / 2**10 + 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
add_1 = output_uint8.copy()
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_1_projection.npy'), output_uint8)
################## inverted residual 1 add ##################
add_1 = add_1.astype(np.int32)
add_2 = add_2.astype(np.int32)
# add_1 = tf.constant(0.1732778549194336, tf.float32) * (add_1 - 128)
# add_2 = tf.constant(0.20100615918636322, tf.float32) * (add_2 - 128)
add_1 = s_add['inverted_residual_1_add'][0] * (add_1 - 128)
add_2 = s_add['inverted_residual_1_add'][1] * (add_2 - 128)
output_result = add_1 + add_2
# output = output_result / tf.constant(0.26455792784690857, tf.float32) + 128
output = output_result * s_add['inverted_residual_1_add'][2]
output = output / 2**15 + 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_1_add.npy'), output_uint8)
################## inverted residual 2 expansion ##################
# print('inverted residual 2 expansion')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.0015524440677836537 / 0.21222199499607086, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_expansion_conv_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_expansion_conv_Conv2D_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_2_expansion']
output = output / 2**10
output = relu(output)
output += 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_2_expansion.npy'), output_uint8)
################## inverted residual 2 depthwise ##################
# print('inverted residual 2 depthwise')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.0028435662388801575 / 0.15781369805335999, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_depthwise_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_depthwise_depthwise_conv_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = depthwise_conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_2_depthwise']
output = output / 2**10
output = relu(output)
output += 128
output_uint8 = output.round()
output_uint8 = np.clip(output_uint8, 0, 255).astype(np.uint8)
# print()
# save output
if args.save_layers_output == True:
np.save(os.path.join(layers_output_dir, 'inverted_residual_2_depthwise.npy'), output_uint8)
################## inverted residual 2 projection ##################
# print('inverted residual 2 projection')
new_data = output_uint8.astype(np.float32)
new_data -= 128
# s_iwr = tf.constant(0.001141879241913557 / 0.12740808725357056, tf.float32)
# s_iwr = tf.cast(s_iwr, tf.float32)
weight = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_projection_conv_weights_quant_FakeQuantWithMinMaxVars.npy'))
bias = np.load(os.path.join(model_dir, 'weight/MBNetV3-CNN_inverted_residual_2_projection_conv_Conv2D_Fold_bias.npy'))
# print(weight.dtype, weight.shape)
# print(bias.dtype, bias.shape)
weight = weight.astype(np.float32)
weight = weight - 128.
weight = weight.transpose(1,2,3,0)
# print(weight)
bias = bias.astype(np.float32)
# print(bias)
output = conv2d(new_data, weight, stride=(1,1), pad="SAME")
output = output + bias
output = output.astype(np.int32) * s_iwr['inverted_residual_2_projection']
output = output / 2**10 + 128
output_uint8 = output.round()
output_uint8 =
|
np.clip(output_uint8, 0, 255)
|
numpy.clip
|
#Precision Recall Curve
import sys
import pylab as pl
import numpy
import matplotlib.pyplot as plt
import getopt
WIDTH = 4.5
HEIGHT = 3.5
DPI = 300
def PR_stat(state, scores, thresh):
"""
@abstract Non-standard Precision-Recall Curve
@param state If prediction is correct [vector <int>]
@param scores Scores [vector <float>]
@param thresh Marker threshold (single value), or None if not used [float]
@return Stat results, a tuple of <precision> <recall> <threshold> <auc> if success,
None otherwise.
"""
n = state.shape[0]
if n <= 0 or scores.shape[0] != n:
return None
score_gap = numpy.array([thresh])
if thresh is None:
score_gap = numpy.unique(scores)
if score_gap.shape[0] > 2000:
idx = numpy.random.permutation(score_gap.shape[0])
score_gap = score_gap[idx[:2000]]
thresholds = numpy.sort(score_gap)
precision = numpy.zeros(thresholds.shape[0], dtype = "float")
recall = numpy.zeros(thresholds.shape[0], dtype = "float")
for i in range(thresholds.shape[0]):
idx_r = numpy.where(scores >= thresholds[i])[0]
nr = idx_r.shape[0]
recall[i] = 1.0 * nr / n
n_correct = sum(state[idx_r])
precision[i] = 1.0 * n_correct / nr
auc = None
if thresh is None:
_recall = numpy.append(1.0, recall)
_recall =
|
numpy.append(_recall, 0.0)
|
numpy.append
|
'''
description:
co-optimization for finger reach task
'''
import os
import sys
example_base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
sys.path.append(example_base_dir)
from parameterization_torch import Design as Design
from parameterization import Design as Design_np
from renderer import SimRenderer
import numpy as np
import scipy.optimize
import redmax_py as redmax
import os
import argparse
import time
from common import *
import torch
import matplotlib.pyplot as plt
torch.set_default_dtype(torch.double)
if __name__ == '__main__':
parser = argparse.ArgumentParser('')
parser.add_argument("--model", type = str, default = 'rss_finger_flip')
parser.add_argument('--record', action = 'store_true')
parser.add_argument('--record-file-name', type = str, default = 'rss_finger_flip')
parser.add_argument('--seed', type=int, default = 0)
parser.add_argument('--save-dir', type=str, default = './results/tmp/')
parser.add_argument('--no-design-optim', action='store_true', help = 'whether control-only')
parser.add_argument('--visualize', type=str, default='True', help = 'whether visualize the simulation')
parser.add_argument('--load-dir', type = str, default = None, help = 'load optimized parameters')
parser.add_argument('--verbose', default = False, action = 'store_true', help = 'verbose output')
parser.add_argument('--test-derivatives', default = False, action = 'store_true')
asset_folder = os.path.abspath(os.path.join(example_base_dir, '..', 'assets'))
args = parser.parse_args()
if args.model[-4:] == '.xml':
model_path = os.path.join(asset_folder, args.model)
else:
model_path = os.path.join(asset_folder, args.model + '.xml')
optimize_design_flag = not args.no_design_optim
os.makedirs(args.save_dir, exist_ok = True)
visualize = (args.visualize == 'True')
play_mode = (args.load_dir is not None)
'''init sim and task'''
sim = redmax.Simulation(model_path, args.verbose)
if args.verbose:
sim.print_ctrl_info()
sim.print_design_params_info()
num_steps = 150
ndof_u = sim.ndof_u
ndof_r = sim.ndof_r
ndof_var = sim.ndof_var
ndof_p = sim.ndof_p
# set up camera
sim.viewer_options.camera_pos = np.array([2.5, -4, 1.8])
# init design params
design = Design()
design_np = Design_np()
cage_params = np.ones(9)
ndof_cage = len(cage_params)
design_params, meshes = design_np.parameterize(cage_params, True)
sim.set_design_params(design_params)
Vs = []
for i in range(len(meshes)):
Vs.append(meshes[i].V)
sim.set_rendering_mesh_vertices(Vs)
# init control sequence
sub_steps = 5
assert (num_steps % sub_steps) == 0
num_ctrl_steps = num_steps // sub_steps
if args.seed == 0:
action = np.zeros(ndof_u * num_ctrl_steps)
else:
np.random.seed(args.seed)
action = np.random.uniform(-0.5, 0.5, ndof_u * num_ctrl_steps)
if visualize:
print('ndof_p = ', ndof_p)
print('ndof_u = ', len(action))
print('ndof_cage = ', ndof_cage)
if not optimize_design_flag:
params = action
else:
params = np.zeros(ndof_u * num_ctrl_steps + ndof_cage)
params[0:ndof_u * num_ctrl_steps] = action
params[-ndof_cage:] = cage_params
# init optimization history
f_log = []
global num_sim
num_sim = 0
'''compute the objectives by forward pass'''
def forward(params, backward_flag = False):
global num_sim
num_sim += 1
action = params[:ndof_u * num_ctrl_steps]
u = np.tanh(action)
if optimize_design_flag:
cage_params = params[-ndof_cage:]
design_params = design_np.parameterize(cage_params)
sim.set_design_params(design_params)
sim.reset(backward_flag = backward_flag, backward_design_params_flag = optimize_design_flag)
# objectives coefficients
coef_u = 5.
coef_touch = 1.
coef_flip = 50.
f_u = 0.
f_touch = 0.
f_flip = 0.
f = 0.
if backward_flag:
df_dq = np.zeros(ndof_r * num_steps)
df_du = np.zeros(ndof_u * num_steps)
df_dvar = np.zeros(ndof_var * num_steps)
if optimize_design_flag:
df_dp = np.zeros(ndof_p)
for i in range(num_ctrl_steps):
sim.set_u(u[i * ndof_u:(i + 1) * ndof_u])
sim.forward(sub_steps, verbose = args.verbose)
variables = sim.get_variables()
q = sim.get_q()
# compute objective f
f_u_i = np.sum(u[i * ndof_u:(i + 1) * ndof_u] ** 2)
f_touch_i = 0.
if i < num_ctrl_steps // 2:
f_touch_i += np.sum((variables[0:3] - variables[3:6]) ** 2) # MSE
f_flip_i = 0.
f_flip_i += (q[-1] - np.pi / 2.) ** 2
f_u += f_u_i
f_touch += f_touch_i
f_flip += f_flip_i
f += coef_u * f_u_i + coef_touch * f_touch_i + coef_flip * f_flip_i
# backward info
if backward_flag:
df_du[i * sub_steps * ndof_u:(i * sub_steps + 1) * ndof_u] += \
coef_u * 2. * u[i * ndof_u:(i + 1) * ndof_u]
if i < num_ctrl_steps // 2:
df_dvar[((i + 1) * sub_steps - 1) * ndof_var:((i + 1) * sub_steps - 1) * ndof_var + 3] += \
coef_touch * 2. * (variables[0:3] - variables[3:6])
df_dvar[((i + 1) * sub_steps - 1) * ndof_var + 3:((i + 1) * sub_steps) * ndof_var] += \
-coef_touch * 2. * (variables[0:3] - variables[3:6]) # MSE
df_dq[((i + 1) * sub_steps) * ndof_r - 1] += coef_flip * 2. * (q[-1] - np.pi / 2.)
if backward_flag:
sim.backward_info.set_flags(False, False, optimize_design_flag, True)
sim.backward_info.df_du = df_du
sim.backward_info.df_dq = df_dq
sim.backward_info.df_dvar = df_dvar
if optimize_design_flag:
sim.backward_info.df_dp = df_dp
return f, {'f_u': f_u, 'f_touch': f_touch, 'f_flip': f_flip}
'''compute loss and gradient'''
def loss_and_grad(params):
with torch.no_grad():
f, _ = forward(params, backward_flag = True)
sim.backward()
grad = np.zeros(len(params))
# gradient for control params
action = params[:ndof_u * num_ctrl_steps]
df_du_full = np.copy(sim.backward_results.df_du)
grad[:num_ctrl_steps * ndof_u] = np.sum(df_du_full.reshape(num_ctrl_steps, sub_steps, ndof_u), axis = 1).reshape(-1)
grad[:num_ctrl_steps * ndof_u] = grad[:num_ctrl_steps * ndof_u] * (1. - np.tanh(action) ** 2)
# gradient for design params
if optimize_design_flag:
df_dp = torch.tensor(np.copy(sim.backward_results.df_dp))
cage_params = torch.tensor(params[-ndof_cage:], dtype = torch.double, requires_grad = True)
design_params = design.parameterize(cage_params)
design_params.backward(df_dp)
df_dcage = cage_params.grad.numpy()
grad[-ndof_cage:] = df_dcage
return f, grad
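# Added sanity check (hypothetical helper): u = np.tanh(action), so by the chain rule
# df/daction = df/du * (1 - np.tanh(action)**2), the elementwise factor applied above.
def _tanh_grad_check(a=0.3, eps=1e-6):
    finite_diff = (np.tanh(a + eps) - np.tanh(a)) / eps
    analytic = 1. - np.tanh(a) ** 2
    return finite_diff, analytic  # the two values agree to ~1e-6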
'''call back function'''
def callback_func(params, render = False, record = False, record_path = None, log = True):
f, info = forward(params, backward_flag = False)
global f_log, num_sim
num_sim -= 1
print_info('iteration ', len(f_log), ', num_sim = ', num_sim, ', Objective = ', f, info)
if log:
f_log.append(np.array([num_sim, f]))
if render:
if optimize_design_flag:
cage_params = params[-ndof_cage:]
_, meshes = design_np.parameterize(cage_params, True)
Vs = []
for i in range(len(meshes)):
Vs.append(meshes[i].V)
sim.set_rendering_mesh_vertices(Vs)
sim.viewer_options.speed = 0.2
SimRenderer.replay(sim, record = record, record_path = record_path)
if not play_mode:
''' checking initial guess '''
callback_func(params, render = False, log = True)
if visualize:
print_info('Press [Esc] to continue')
callback_func(params, render = True, log = False, record = args.record, record_path = args.record_file_name + "_init.gif")
t0 = time.time()
''' set bounds for optimization variables '''
bounds = []
for i in range(num_ctrl_steps * ndof_u):
bounds.append((-1., 1.))
if optimize_design_flag:
for i in range(ndof_cage):
bounds.append((0.5, 3.))
''' optimization by L-BFGS-B '''
res = scipy.optimize.minimize(loss_and_grad, np.copy(params), method = "L-BFGS-B", jac = True, callback = callback_func, bounds = bounds, options={'maxiter': 100})
t1 = time.time()
print('time = ', t1 - t0)
params = np.copy(res.x)
''' save results '''
with open(os.path.join(args.save_dir, 'params.npy'), 'wb') as fp:
np.save(fp, params)
f_log = np.array(f_log)
with open(os.path.join(args.save_dir, 'logs.npy'), 'wb') as fp:
np.save(fp, f_log)
else:
with open(os.path.join(args.load_dir, 'params.npy'), 'rb') as fp:
params = np.load(fp)
with open(os.path.join(args.load_dir, 'logs.npy'), 'rb') as fp:
f_log =
|
np.load(fp)
|
numpy.load
|
import numpy as np
from layers import *
class RNN(object):
def __init__(self, vocab_dim, idx_to_char, input_dim=30, hidden_dim=25, cell_type='lstm'):
"""Takes as arguments
vocab_dim: The number of unique characters/words in the dataset
idx_to_char: A dictionary converting integer representations of vocabulary to string form.
Mainly used in the sample function in order to neatly output results
input_dim: Reduces one-hot encoding dimension of character from size vocab_dim to a vector of size input_dim
hidden_dim: Size of hidden dimension
cell_type: must choose one of 'lstm' or 'vanilla'
Automatically initializes all weights"""
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.vocab_dim = vocab_dim
self.cell_type = cell_type
self.idx_to_char = idx_to_char
if cell_type != 'lstm' and cell_type != 'vanilla':
raise ValueError('Invalid cell type. Please choose lstm or vanilla')
self.dim_mul = (1 if cell_type == 'vanilla' else 4)
# self.idx_to_char = idx_to_char
self._initialize_params()
def _initialize_params(self):
"""Initialize all weights. We use He normalization for weights and
zeros for biases. We also initialize the zeroth hidden state h0"""
D, H, V = self.input_dim, self.hidden_dim, self.vocab_dim
self.params = {}
self.params['b'] = np.zeros(self.dim_mul*H)
self.params['Wx'] = 2*np.random.randn(D,self.dim_mul*H)/np.sqrt(D)
self.params['Wh'] = 2*np.random.randn(H,self.dim_mul*H)/np.sqrt(H)
self.params['b_out'] = np.zeros(V)
self.params['W_out'] = 2*np.random.randn(H,V)/np.sqrt(H)
self.params['W_embed'] = 2*np.random.randn(V,D)/np.sqrt(V)
self.h0 = np.random.randn(1,H)
def loss(self, inputs, targets):
"""inputs: an array of size (N,T,D), for N the minibatch size, T the sequence length, and D the input_dim
targets: an array of size (N,T) consisting of integers in [0,vocab_dim). Each value is the target characters
given the (N,T)^th input.
Outputs:
Loss -> the loss function taken over all N and T
grads -> a dictionary containing the gradients of all parameters in self.parameters
"""
loss, grads = 0, {}
# VERY IMPORTANT. We must name the items in grads identical to their names in self.params!
# Unpack params
b = self.params['b']
b_out = self.params['b_out']
Wx = self.params['Wx']
Wh = self.params['Wh']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# x is a sequence of integers of length T, with integers in [0,V).
# we can always change this input later if we choose
# We use an embedding matrix W_embed: (N,T) -> (N,T,D) that generalizes the one-hot-encoding
# i.e. one-hot would be directly x: (N,T) -> (N,T,V) for V size of vocabulary
# Forward pass
inputs = (np.expand_dims(inputs, axis=0) if len(inputs.shape)==1 else inputs)
x, cache_embed = embed_forward(inputs, W_embed)
h_prev = np.broadcast_to(self.h0,(len(inputs),self.h0.shape[1]))
h, cache_h = (lstm_all_forward(x, Wx, b, h_prev, Wh) if self.cell_type=='lstm' else vanilla_all_forward(x, Wx, b, h_prev, Wh))
probs, cache_probs = affine_all_forward(h, W_out, b_out)
loss, dprobs = softmax_loss_all(probs, targets)
# Backward pass
dh, grads['W_out'], grads['b_out'] = affine_all_backward(dprobs, cache_probs)
dx, grads['Wx'], grads['b'], grads['Wh'] = (lstm_all_backward(dh, cache_h) if self.cell_type=='lstm' else vanilla_all_backward(dh, cache_h))
grads['W_embed'] = embed_backward(dx, cache_embed)
# reset memory layer to last in batch, last in sequence
self.h0 = h[-1,-1,:].reshape(1,-1)
# return loss and gradient
return loss, grads
def sample(self, seed_idx=None, T=200, h0=None, p_power=1):
"""Inputs: seed_idx=None -> the starting character index for the generated sequences
T=200 -> the default length of sequence to output
h0=self.h0 -> the current memory, i.e. initial hidden state. Defaults to last computed h0
p_power=1 -> raises probability distribution of next character by power p_power.
higher p_power produces more deterministic, higher prob words.
Will result in short repeating sequences, but with well-defined words"""
if h0 is None:
h0 = self.h0
if seed_idx is None:
seed_idx = np.random.choice(self.vocab_dim)
#initialize word
idxs = [seed_idx]
# unpack weights
b = self.params['b']
b_out = self.params['b_out']
Wx = self.params['Wx']
Wh = self.params['Wh']
W_out = self.params['W_out']
W_embed = self.params['W_embed']
# Forward pass only
x, _ = embed_forward(seed_idx, W_embed)
x = np.expand_dims(x, axis=0)
c = np.zeros_like(h0)
for t in range(T):
if self.cell_type == 'lstm':
c, h0, _ = lstm_forward(x, Wx, b, h0, Wh, c)
else:
h0, _ = vanilla_forward(x, Wx, b, h0, Wh)
probs, _ = affine_forward(h0, W_out, b_out)
probs = np.squeeze(probs)
# predict next entry
probs = np.exp(probs-np.max(probs))
probs = probs**p_power
probs /= np.sum(probs)
idx = np.random.choice(np.arange(len(probs)),p=probs.ravel())
idxs.append(idx)
x, _ = embed_forward(idx, W_embed)
x = np.expand_dims(x, axis=0)
# return index list
return ''.join([self.idx_to_char[i] for i in idxs])
#########################################
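# Hedged usage sketch (added example; assumes the *_forward/*_backward helpers imported from
# layers behave as described in the docstrings above — names such as _rnn_usage_example are
# hypothetical and not part of the original module):
def _rnn_usage_example():
    idx_to_char = {i: c for i, c in enumerate('abcd')}
    model = RNN(vocab_dim=4, idx_to_char=idx_to_char, input_dim=3, hidden_dim=5, cell_type='lstm')
    inputs = np.random.randint(0, 4, size=(2, 6))   # N=2 sequences of length T=6
    targets = np.random.randint(0, 4, size=(2, 6))
    loss, grads = model.loss(inputs, targets)       # grads keys mirror model.params keys
    text = model.sample(T=20)                       # stochastic sample of 21 characters
    return loss, text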
import numpy as np
from layers import *
class TwoHiddenLayerRNN(object):
def __init__(self, vocab_dim, idx_to_char, input_dim=30, hidden_dim=25, H2=25, cell_type='lstm'):
"""Takes as arguments
vocab_dim: The number of unique characters/words in the dataset
idx_to_char: A dictionary converting integer representations of vocabulary to string form.
Mainly used in the sample function in order to neatly output results
input_dim: Reduces one-hot encoding dimension of character from size vocab_dim to a vector of size input_dim
hidden_dim: Size of hidden dimension
cell_type: must choose one of 'lstm' or 'vanilla'
Automatically initializes all weights"""
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.H2 = H2
self.vocab_dim = vocab_dim
self.cell_type = cell_type
self.idx_to_char = idx_to_char
if cell_type != 'lstm' and cell_type != 'vanilla':
raise ValueError('Invalid cell type. Please choose lstm or vanilla')
self.dim_mul = (1 if cell_type == 'vanilla' else 4)
# self.idx_to_char = idx_to_char
self._initialize_params()
def _initialize_params(self):
"""Initialize all weights. We use He normalization for weights and
zeros for biases. We also initialize the zeroth hidden state h0"""
D, H, V, H2 = self.input_dim, self.hidden_dim, self.vocab_dim, self.H2
self.params = {}
self.params['b'] = np.zeros(self.dim_mul*H)
self.params['Wx'] = 2*np.random.randn(D,self.dim_mul*H)/np.sqrt(D)
self.params['Wh'] = 2*np.random.randn(H,self.dim_mul*H)/np.sqrt(H)
self.params['Wh2'] = 2*np.random.randn(H2,self.dim_mul*H2)/np.sqrt(H2)
self.params['b2'] = np.zeros(self.dim_mul*H2)
self.params['W2'] = 2*np.random.randn(H,self.dim_mul*H2)/np.sqrt(H)
self.params['b_out'] = np.zeros(V)
self.params['W_out'] = 2*np.random.randn(H2,V)/np.sqrt(H2)
self.params['W_embed'] = 2*
|
np.random.randn(V,D)
|
numpy.random.randn
|
# ------------------------------------------------------------------
# Tensorflow implementation of
# "Visual Tracking via Dynamic Memory Networks", TPAMI, 2019
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------
import glob
import os
import time
import numpy as np
import tensorflow as tf
import config
DEBUG = False
def generate_input_fn(is_train, tfrecords_path, batch_size, time_step):
"Return _input_fn for use with Experiment."
def _input_fn():
with tf.device('/cpu:0'):
query_patch, search_patch, bbox, label = _batch_input(is_train, tfrecords_path, batch_size, time_step)
patches = {
'query': query_patch,
'search': search_patch,
}
labels = {
'bbox': bbox,
'label': label
}
return patches, labels
return _input_fn
def _batch_input(is_train, tfrecords_path, batch_size, time_step):
if is_train:
tf_files = glob.glob(os.path.join(tfrecords_path, 'train-*.tfrecords'))
filename_queue = tf.train.string_input_producer(tf_files, shuffle=True, capacity=16)
min_queue_examples = config.min_queue_examples
examples_queue = tf.RandomShuffleQueue(
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string])
enqueue_ops = []
for _ in range(config.num_readers):
_, value = tf.TFRecordReader().read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.add_queue_runner(
tf.train.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
else:
tf_files = sorted(glob.glob(os.path.join(tfrecords_path, 'val-*.tfrecords')))
filename_queue = tf.train.string_input_producer(tf_files, shuffle=False, capacity=8)
_, example_serialized = tf.TFRecordReader().read(filename_queue)
# example_serialized = next(tf.python_io.tf_record_iterator(self._tf_files[0]))
images_and_labels = []
for thread_id in range(config.num_preprocess_threads):
sequence, context = _parse_example_proto(example_serialized)
image_buffers = sequence['images']
bboxes = sequence['bboxes']
seq_len = tf.cast(context['seq_len'][0], tf.int32)
label = context['label'][0] - 1
z_exemplars, x_crops, y_crops = _process_images(image_buffers, bboxes, seq_len, thread_id, time_step, is_train)
images_and_labels.append([z_exemplars, x_crops, y_crops, label])
batch_z, batch_x, batch_y, batch_cls = tf.train.batch_join(images_and_labels,
batch_size=batch_size,
capacity=2 * config.num_preprocess_threads * batch_size)
if is_train:
tf.summary.image('exemplars', batch_z[0], 5)
tf.summary.image('crops', batch_x[0], 5)
return batch_z, batch_x, batch_y, batch_cls
def _process_images(image_buffers, bboxes, seq_len, thread_id, time_step, is_train):
if config.is_limit_search:
search_range = tf.minimum(config.max_search_range, seq_len - 1)
else:
search_range = seq_len-1
rand_start_idx = tf.random_uniform([], 0, seq_len-search_range, dtype=tf.int32)
selected_len = time_step + 1
if is_train:
frame_idxes = tf.range(rand_start_idx, rand_start_idx+search_range)
shuffle_idxes = tf.random_shuffle(frame_idxes)
selected_idxes = shuffle_idxes[0:selected_len]
selected_idxes, _ = tf.nn.top_k(selected_idxes, selected_len)
selected_idxes = selected_idxes[::-1]
else:
selected_idxes = tf.to_int32(tf.linspace(0.0, tf.to_float(seq_len - 1), selected_len))
# self.seq_len = seq_len
# self.search_range = search_range
# self.selected_idxes = selected_idxes
z_exemplars, y_exemplars, x_crops, y_crops = [], [], [], []
shift = int((config.patch_size - config.z_exemplar_size) / 2)
for i in range(selected_len):
idx = selected_idxes[i]
image_buffer = tf.gather(image_buffers, idx)
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image.set_shape([config.patch_size, config.patch_size, 3])
# # Randomly distort the colors.
# if is_train:
# image = _distort_color(image, thread_id)
if i < time_step:
# if self._is_train:
exemplar = tf.image.crop_to_bounding_box(image, shift, shift, config.z_exemplar_size,
config.z_exemplar_size)
if config.is_augment and i > 0:
exemplar = _translate_and_strech(image,
[config.z_exemplar_size, config.z_exemplar_size],
config.max_strech_z, config.max_translate_z)
z_exemplars.append(exemplar)
if i > 0:
bbox = tf.gather(bboxes, idx)
if config.is_augment:
image, bbox = _translate_and_strech(image, [config.x_instance_size, config.x_instance_size],
config.max_strech_x, config.max_translate_x, bbox)
x_crops.append(image)
y_crops.append(bbox)
x_crops = tf.stack(x_crops, 0)
y_crops = tf.stack(y_crops, 0)
z_exemplars = tf.stack(z_exemplars, 0)
return z_exemplars, x_crops, y_crops
def _translate_and_strech(image, m_sz, max_strech, max_translate=None, bbox=None, rgb_variance=None):
m_sz_f = tf.convert_to_tensor(m_sz, dtype=tf.float32)
img_sz = tf.convert_to_tensor(image.get_shape().as_list()[0:2],dtype=tf.float32)
scale = 1+max_strech*tf.random_uniform([2], -1, 1, dtype=tf.float32)
scale_sz = tf.round(tf.minimum(scale*m_sz_f, img_sz))
if max_translate is None:
shift_range = (img_sz - scale_sz) / 2
else:
shift_range = tf.minimum(float(max_translate), (img_sz-scale_sz)/2)
start = (img_sz - scale_sz)/2
shift_row = start[0] + tf.random_uniform([1], -shift_range[0], shift_range[0], dtype=tf.float32)
shift_col = start[1] + tf.random_uniform([1], -shift_range[1], shift_range[1], dtype=tf.float32)
x1 = shift_col/(img_sz[1]-1)
y1 = shift_row/(img_sz[0]-1)
x2 = (shift_col + scale_sz[1]-1)/(img_sz[1]-1)
y2 = (shift_row + scale_sz[0]-1)/(img_sz[0]-1)
crop_img = tf.image.crop_and_resize(tf.expand_dims(image,0),
tf.expand_dims(tf.concat(axis=0, values=[y1, x1, y2, x2]), 0),
[0], m_sz)
crop_img = tf.squeeze(crop_img)
if rgb_variance is not None:
crop_img = crop_img + rgb_variance*tf.random_normal([1,1,3])
if bbox is not None:
new_bbox = bbox - tf.concat(axis=0, values=[shift_col, shift_row, shift_col, shift_row])
scale_ratio = m_sz_f/tf.reverse(scale_sz, [0])
new_bbox = new_bbox*tf.tile(scale_ratio,[2])
return crop_img, new_bbox
else:
return crop_img
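# (added note, not part of the original logic) The crop box above is expressed in the
# normalized coordinates expected by tf.image.crop_and_resize, i.e. (y1, x1, y2, x2)
# as fractions of (height-1, width-1). With illustrative numbers only (assuming a
# 255x255 patch cropped to a 127x127 exemplar with no stretch or shift):
# start = (255-127)/2 = 64, so y1 = x1 = 64/254 ~ 0.252 and
# y2 = x2 = (64+127-1)/254 ~ 0.748, which recovers the centered crop used for the
# non-augmented exemplar.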
def _distort_color(image, thread_id=0):
"""Distort the color of the image.
"""
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def _parse_example_proto(example_serialized):
context_features = {
'seq_name': tf.FixedLenFeature([], dtype=tf.string),
'seq_len': tf.FixedLenFeature(1, dtype=tf.int64),
'trackid': tf.FixedLenFeature(1, dtype=tf.int64),
'label': tf.FixedLenFeature(1, dtype=tf.int64)
}
sequence_features = {
'images': tf.FixedLenSequenceFeature([],dtype=tf.string),
'bboxes': tf.FixedLenSequenceFeature([4],dtype=tf.float32)
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(example_serialized, context_features, sequence_features)
return sequence_parsed, context_parsed
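# A minimal usage sketch of the parser above (illustrative only; assumes TFRecords of
# tf.train.SequenceExample protos written with matching 'images'/'bboxes' feature lists,
# and the TF1-style queue-based input pipeline used elsewhere in this file):
#
# filename_queue = tf.train.string_input_producer(tfrecord_files)
# _, serialized = tf.TFRecordReader().read(filename_queue)
# sequence, context = _parse_example_proto(serialized)
# seq_len = tf.cast(context['seq_len'][0], tf.int32)
# image_buffers, bboxes = sequence['images'], sequence['bboxes']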
def generate_labels_dist(batch_size, feat_size):
dist = lambda i,j,origin: np.linalg.norm(np.array([i,j])-origin)
labels = -
|
np.ones(feat_size, dtype=np.int32)
|
numpy.ones
|
# --------------------------------------------------------------------------------------------
# MoorPy
#
# A mooring system visualizer and quasi-static modeler in Python.
# <NAME> and <NAME>
#
# --------------------------------------------------------------------------------------------
# 2018-08-14: playing around with making a QS shared-mooring simulation tool, to replace what's in Patrick's work
# 2020-06-17: Trying to create a new quasi-static mooring system solver based on my Catenary function adapted from FAST v7, and using MoorDyn architecture
import numpy as np
import moorpy.MoorSolve as msolve
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import scipy.optimize
# reload the libraries each time in case we make any changes
import importlib
msolve = importlib.reload(msolve)
# base class for MoorPy exceptions
class Error(Exception):
''' Base class for MoorPy exceptions'''
pass
# Catenary error class
class CatenaryError(Error):
'''Derived error class for Catenary function errors. Contains an error message.'''
def __init__(self, message):
self.message = message
# Line Object error class
class LineError(Error):
'''Derived error class for Line object errors. Contains an error message and the line number with the error.'''
def __init__(self, num, message):
self.line_num = num
self.message = message
# Solve error class for any solver process
class SolveError(Error):
'''Derived error class for various solver errors. Contains an error message'''
def __init__(self, message):
self.message = message
def Catenary(XF, ZF, L, EA, W, CB=0, HF0=0, VF0=0, Tol=0.000001, nNodes=20, MaxIter=50, plots=0):
'''
The quasi-static mooring line solver. Adapted from Catenary subroutine in FAST v7 by <NAME>.
Note: this version is updated Oct 7 2020 to use the dsolve solver.
Parameters
----------
XF : float
Horizontal distance from end 1 to end 2 [m]
ZF : float
Vertical distance from end 1 to end 2 [m] (positive up)
L : float
Unstretched length of line [m]
EA : float
Extensional stiffness of line [N]
W : float
Weight of line in fluid per unit length [N/m]
CB : float, optional
If positive, coefficient of seabed static friction drag. If negative, no seabed contact and the value is the distance down from end A to the seabed in m.
NOTE: for lines between floating bodies, there must be no seabed contact (set CB < 0)
HF0 : float, optional
Horizontal fairlead tension. If zero or not provided, a guess will be calculated.
VF0 : float, optional
Vertical fairlead tension. If zero or not provided, a guess will be calculated.
Tol : float, optional
Convergence tolerance within the Newton-Raphson iteration, specified as a fraction of tension
nNodes : int, optional
Number of nodes to describe the line
MaxIter: int, optional
Maximum number of iterations to try before resetting to default ICs and then trying again
plots : int, optional
0: no plots, 1: compute node position/tension outputs, 2: also display a plot of the line profile
Returns
-------
: tuple
(end 1 horizontal tension, end 1 vertical tension, end 2 horizontal tension, end 2 vertical tension, info dictionary) [N] (positive up)
'''
# make info dict to contain any additional outputs
info = dict(error=False)
# flip line in the solver if end A is above end B
if ZF < 0:
ZF = -ZF
reverseFlag=1
else:
reverseFlag=0
# ensure the input variables are realistic
if XF <= 0.0:
raise CatenaryError("XF is zero or negative!")
if L <= 0.0:
raise CatenaryError("L is zero or negative!")
if EA <= 0.0:
raise CatenaryError("EA is zero or negative!")
# Solve for the horizontal and vertical forces at the fairlead (HF, VF) and at the anchor (HA, VA)
# There are many "ProfileTypes" of a mooring line and each must be analyzed separately
# ProfileType=1: No portion of the line rests on the seabed
# ProfileType=2: A portion of the line rests on the seabed and the anchor tension is nonzero
# ProfileType=3: A portion of the line must rest on the seabed and the anchor tension is zero
# ProfileType=4: Entire line is on seabed
# ProfileType=0: The line is negatively buoyant, seabed interaction is enabled, and the line
# is longer than a full L between end points (including stretching) i.e. it is horizontal
# along the seabed from the anchor, then vertical to the fairlead. Computes the maximum
# stretched length of the line with seabed interaction beyond which the line would have to
# double-back on itself; the line forms an "L" between the anchor and fairlead. Then it
# models it as bunched up on the seabed (instead of throwing an error)
EA_W = EA/W
# ProfileType 4 case - entirely along seabed
if ZF==0.0 and CB >= 0.0 and W > 0:
ProfileType = 4
# this is a special case that requires no iteration
HF = np.max([0, (XF/L - 1.0)*EA]) # calculate fairlead tension based purely on elasticity
VF = 0.0
HA = np.max([0.0, HF - CB*W*L]) # calculate anchor tension by subtracting any seabed friction
VA = 0.0
dZFdVF = np.sqrt(2.0*ZF*EA_W + EA_W*EA_W)/EA_W # inverse of vertical stiffness
info["HF"] = HF # solution to be used to start next call (these are the solved variables, may be for anchor if line is reversed)
info["VF"] = 0.0
info["jacobian"] = np.array([[0.0, 0.0], [0.0, dZFdVF]])
info["LBot"] = L
# ProfileType 0 case - slack
elif (W > 0.0) and (CB >= 0.0) and (L >= XF - EA_W + np.sqrt(2.0*ZF*EA_W + EA_W*EA_W)):
ProfileType = 0
# this is a special case that requires no iteration
LHanging = np.sqrt(2.0*ZF*EA_W + EA_W*EA_W) - EA_W # unstretched length of line hanging vertically to seabed
HF = 0.0
VF = W*LHanging
HA = 0.0
VA = 0.0
dZFdVF = np.sqrt(2.0*ZF*EA_W + EA_W*EA_W)/EA_W # inverse of vertical stiffness
info["HF"] = HF # solution to be used to start next call (these are the solved variables, may be for anchor if line is reversed)
info["VF"] = VF
info["jacobian"] = np.array([[0.0, 0.0], [0.0, dZFdVF]])
info["LBot"] = L - LHanging
# Use an iterable solver function to solve for the forces on the line
else:
# Initialize some commonly used terms that don't depend on the iteration:
WL = W *L
WEA = W *EA
L_EA = L /EA
CB_EA = CB/EA
#MaxIter = 50 #int(1.0/Tol) # Smaller tolerances may take more iterations, so choose a maximum inversely proportional to the tolerance
# more initialization
I = 1 # Initialize iteration counter
FirstIter = 1 # 1 means first attempt (can be retried), 0 means it's already been retried, -1 triggers a retry
# make HF and VF initial guesses if either was provided as zero <<<<<<<<<<<< why does it matter if VF0 is zero??
if HF0 <= 0 or VF0 <= 0:
XF2 = XF*XF;
ZF2 = ZF*ZF;
if ( L <= np.sqrt( XF2 + ZF2 ) ): # if the current mooring line is taut
Lamda0 = 0.2
else: # The current mooring line must be slack and not vertical
Lamda0 = np.sqrt( 3.0*( ( L*L - ZF2 )/XF2 - 1.0 ) )
HF = np.max([ abs( 0.5*W* XF/ Lamda0 ), Tol ]); # ! As above, set the lower limit of the guess value of HF to the tolerance
VF = 0.5*W*( ZF/np.tanh(Lamda0) + L )
else:
HF = 1.0*HF0
VF = 1.0*VF0
# make sure required values are non-zero
HF = np.max([ HF, Tol ])
XF = np.max([ XF, Tol ])
ZF = np.max([ ZF, Tol ])
# some initial values just for printing before they're filled in
EXF=0
EZF=0
# Solve the analytical, static equilibrium equations for a catenary (or taut) mooring line with seabed interaction:
X0 = [HF, VF]
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.15,1.0,1.5])
# call the master solver function
X, Y, info2 = msolve.dsolve(msolve.eval_func_cat, X0, Ytarget=Ytarget, step_func=msolve.step_func_cat, args=args, tol=Tol, maxIter=MaxIter, a_max=1.2)
# retry if it failed
if info2['iter'] >= MaxIter-1 or info2['oths']['error']==True:
# ! Perhaps we failed to converge because our initial guess was too far off.
# (This could happen, for example, while linearizing a model via large
# perturbations in the DOFs.) Instead, use starting values documented in:
# Peyrot, <NAME>. and <NAME>., "Analysis Of Cable Structures,"
# Computers & Structures, Vol. 10, 1979, pp. 805-813:
# NOTE: We don't need to check if the current mooring line is exactly
# vertical (i.e., we don't need to check if XF == 0.0), because XF is
# limited by the tolerance above. */
XF2 = XF*XF;
ZF2 = ZF*ZF;
if ( L <= np.sqrt( XF2 + ZF2 ) ): # if the current mooring line is taut
Lamda0 = 0.2
else: # The current mooring line must be slack and not vertical
Lamda0 = np.sqrt( 3.0*( ( L*L - ZF2 )/XF2 - 1.0 ) )
HF = np.max([ abs( 0.5*W* XF/ Lamda0 ), Tol ]) # As above, set the lower limit of the guess value of HF to the tolerance
VF = 0.5*W*( ZF/np.tanh(Lamda0) + L )
X0 = [HF, VF]
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.1,0.8,1.5]) # step: alpha_min, alpha0, alphaR
# call the master solver function
X, Y, info3 = msolve.dsolve(msolve.eval_func_cat, X0, Ytarget=Ytarget, step_func=msolve.step_func_cat, args=args, tol=Tol, maxIter=MaxIter, a_max=1.1) #, dX_last=info2['dX'])
# retry if it failed
if info3['iter'] >= MaxIter-1 or info3['oths']['error']==True:
X0 = X
Ytarget = [0,0]
args = dict(cat=[XF, ZF, L, EA, W, CB, WL, WEA, L_EA, CB_EA], step=[0.1,1.0,2.0])
# call the master solver function
X, Y, info4 = msolve.dsolve(msolve.eval_func_cat, X0, Ytarget=Ytarget, step_func=msolve.step_func_cat, args=args, tol=Tol, maxIter=10*MaxIter, a_max=1.15) #, dX_last=info3['dX'])
# check if it failed
if info4['iter'] >= 10*MaxIter-1 or info4['oths']['error']==True:
print("Catenary solve failed on all 3 attempts.")
print(f"Catenary({XF}, {ZF}, {L}, {EA}, {W}, CB={CB}, HF0={HF0}, VF0={VF0}, Tol={Tol}, MaxIter={MaxIter}, plots=1)")
print("First attempt's iterations are as follows:")
for i in range(info2['iter']+1):
print(f" Iteration {i}: HF={info2['Xs'][i,0]: 8.4e}, VF={info2['Xs'][i,1]: 8.4e}, EX={info2['Es'][i,0]: 6.2e}, EZ={info2['Es'][i,1]: 6.2e}")
print("Second attempt's iterations are as follows:")
for i in range(info3['iter']+1):
print(f" Iteration {i}: HF={info3['Xs'][i,0]: 8.4e}, VF={info3['Xs'][i,1]: 8.4e}, EX={info3['Es'][i,0]: 6.2e}, EZ={info3['Es'][i,1]: 6.2e}")
print("Last attempt's iterations are as follows:")
for i in range(info4['iter']+1):
print(f" Iteration {i}: HF={info4['Xs'][i,0]: 8.4e}, VF={info4['Xs'][i,1]: 8.4e}, EX={info4['Es'][i,0]: 6.2e}, EZ={info4['Es'][i,1]: 6.2e}")
# plot solve performance
fig, ax = plt.subplots(4,1, sharex=True)
ax[0].plot(np.hstack([info2['Xs'][:,0], info3['Xs'][:,0], info4['Xs'][:,0]]))
ax[1].plot(np.hstack([info2['Xs'][:,1], info3['Xs'][:,1], info4['Xs'][:,1]]))
ax[2].plot(np.hstack([info2['Es'][:,0], info3['Es'][:,0], info4['Es'][:,0]]))
ax[3].plot(np.hstack([info2['Es'][:,1], info3['Es'][:,1], info4['Es'][:,1]]))
ax[0].set_ylabel("HF")
ax[1].set_ylabel("VF")
ax[2].set_ylabel("X err")
ax[3].set_ylabel("Z err")
# plot solve path
plt.figure()
#c = np.hypot(info2['Es'][:,0], info2['Es'][:,1])
c = np.arange(info2['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info2['iter']):
plt.plot(info2['Xs'][i:i+2,0], info2['Xs'][i:i+2,1],":", c=c[i])
plt.plot(info2['Xs'][0,0], info2['Xs'][0,1],"o")
c = np.arange(info3['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info3['iter']):
plt.plot(info3['Xs'][i:i+2,0], info3['Xs'][i:i+2,1], c=c[i])
plt.plot(info3['Xs'][0,0], info3['Xs'][0,1],"*")
c = np.arange(info4['iter']+1)
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c)))
for i in np.arange(info4['iter']):
plt.plot(info4['Xs'][i:i+2,0], info4['Xs'][i:i+2,1], c=c[i])
plt.plot(info4['Xs'][0,0], info4['Xs'][0,1],"*")
plt.title("Catenary solve path for troubleshooting")
plt.show()
#breakpoint()
raise CatenaryError("Catenary solver failed.")
else: # if the solve was successful,
info.update(info4['oths']) # copy info from last solve into existing info dictionary
else: # if the solve was successful,
info.update(info3['oths']) # copy info from last solve into existing info dictionary
else: # if the solve was successful,
info.update(info2['oths']) # copy info from last solve into existing info dictionary
# check for errors ( WOULD SOME NOT ALREADY HAVE BEEN CAUGHT AND RAISED ALREADY?)
if info['error']==True:
breakpoint()
# >>>> what about errors for which we can first plot the line profile?? <<<<
raise CatenaryError("Error in Catenary computations: "+info['message'])
if info['Zextreme'] < CB:
info["warning"] = "Line is suspended from both ends but hits the seabed (this isn't allowed in MoorPy)"
ProfileType = info['ProfileType']
HF = X[0]
VF = X[1]
HA = info['HA']
VA = info['VA']
# do plotting-related calculations if needed (plots=1: compute node position/tension data; plots=2: also show the profile plot)
if plots > 0 or info['error']==True:
# some arrays only used for plotting each node
s = np.linspace(0,L,nNodes) # Unstretched arc distance along line from anchor to each node where the line position and tension can be output (meters)
X = np.zeros(nNodes) # Horizontal locations of each line node relative to the anchor (meters)
Z = np.zeros(nNodes) # Vertical locations of each line node relative to the anchor (meters)
Te= np.zeros(nNodes) # Effective line tensions at each node (N)
# ------------------------ compute line position and tension at each node -----------------------------
for I in range(nNodes):
# check s values?
if( ( s[I] < 0.0 ) or ( s[I] > L ) ):
raise CatenaryError("Warning from Catenary:: All line nodes must be located between the anchor and fairlead (inclusive) in routine Catenary()")
#cout << " s[I] = " << s[I] << " and L = " << L << endl;
#return -1;
# fully along seabed
if ProfileType==4:
if (L-s[I])*CB*W > HF: # if this node is in the zero tension range
X [I] = s[I];
Z [I] = 0.0;
Te[I] = 0.0;
else: # this node rests on the seabed and the tension is nonzero
if L*CB*W > HF: # zero anchor tension case
X [I] = s[I] - 1.0/EA*( HF*(s[I]-L) - CB*W*( L*s[I] - 0.5*s[I]*s[I] - 0.5*L*L ) + 0.5*HF*HF/(CB*W) )
else:
X [I] = s[I] + s[I]/EA*( HF - CB*W*(L-0.5*s[I]))
Z [I] = 0.0;
Te[I] = HF - CB*W*(L-s[I])
# Freely hanging line with no horizontal tension
elif ProfileType==0:
if s[I] > L-LHanging: # this node is on the suspended/hanging portion of the line
X [I] = XF
Z [I] = ZF - ( L-s[I] + 0.5*W/EA*(L-s[I])**2 )
Te[I] = W*(L-s[I])
else: # this node is on the seabed
X [I] = np.min([s[I], XF])
Z [I] = 0.0
Te[I] = 0.0
# the other profile types are more involved
else:
# calculate some commonly used terms that depend on HF and VF: AGAIN
VFMinWL = VF - WL;
LBot = L - VF/W; # unstretched length of line resting on seabed (Jonkman's PhD eqn 2-38), LMinVFOVrW
HF_W = HF/W;
HF_WEA = HF/WEA
VF_WEA = VF/WEA
VF_HF = VF/HF
VFMinWL_HF = VFMinWL/HF
VF_HF2 = VF_HF *VF_HF
VFMinWL_HF2 = VFMinWL_HF*VFMinWL_HF
SQRT1VF_HF2 = np.sqrt( 1.0 + VF_HF2 )
SQRT1VFMinWL_HF2 = np.sqrt( 1.0 + VFMinWL_HF2 )
# calculate some values for the current node
Ws = W *s[I]
VFMinWLs = VFMinWL + Ws # = VF - W*(L-s[I])
VFMinWLs_HF = VFMinWLs/HF
s_EA = s[I] /EA
SQRT1VFMinWLs_HF2 = np.sqrt( 1.0 + VFMinWLs_HF*VFMinWLs_HF )
# No portion of the line rests on the seabed
if ProfileType==1:
X [I] = ( np.log( VFMinWLs_HF + SQRT1VFMinWLs_HF2 ) - np.log( VFMinWL_HF + SQRT1VFMinWL_HF2 ) )*HF_W + s_EA* HF;
Z [I] = ( SQRT1VFMinWLs_HF2 - SQRT1VFMinWL_HF2 )*HF_W + s_EA*( VFMinWL + 0.5*Ws );
Te[I] = np.sqrt( HF*HF + VFMinWLs*VFMinWLs );
# A portion of the line rests on the seabed and the anchor tension is nonzero
elif ProfileType==2:
if( s[I] <= LBot ): # // .TRUE. if this node rests on the seabed and the tension is nonzero
X [I] = s[I] + s_EA*( HF + CB*VFMinWL + 0.5*Ws*CB );
Z [I] = 0.0;
Te[I] = HF + CB*VFMinWLs;
else: #// LBot < s <= L: ! This node must be above the seabed
X [I] = np.log( VFMinWLs_HF + SQRT1VFMinWLs_HF2 ) *HF_W + s_EA* HF + LBot - 0.5*CB*VFMinWL*VFMinWL/WEA;
Z [I] = ( - 1.0 + SQRT1VFMinWLs_HF2 )*HF_W + s_EA*( VFMinWL + 0.5*Ws ) + 0.5* VFMinWL*VFMinWL/WEA;
Te[I] = np.sqrt( HF*HF + VFMinWLs*VFMinWLs );
# A portion of the line must rest on the seabed and the anchor tension is zero
elif ProfileType==3:
if s[I] <= LBot - HF_W/CB: # (aka Lbot - s > HF/(CB*W) ) if this node rests on the seabed and the tension is zero
X [I] = s[I];
Z [I] = 0.0;
Te[I] = 0.0;
elif( s[I] <= LBot ): # // .TRUE. if this node rests on the seabed and the tension is nonzero
X [I] = s[I] - ( LBot - 0.5*HF_W/CB )*HF/EA + s_EA*( HF + CB*VFMinWL + 0.5*Ws*CB ) + 0.5*CB*VFMinWL*VFMinWL/WEA;
Z [I] = 0.0;
Te[I] = HF + CB*VFMinWLs;
else: # // LBot < s <= L ! This node must be above the seabed
X [I] = np.log( VFMinWLs_HF + SQRT1VFMinWLs_HF2 ) *HF_W + s_EA* HF + LBot - ( LBot - 0.5*HF_W/CB )*HF/EA;
Z [I] = ( -1.0 + SQRT1VFMinWLs_HF2)*HF_W + s_EA*(VFMinWL + 0.5*Ws ) + 0.5* VFMinWL*VFMinWL/WEA;
Te[I] = np.sqrt( HF*HF + VFMinWLs*VFMinWLs );
# re-reverse line distributed data back to normal if applicable
if reverseFlag == 1:
s = L - s [::-1]
X = XF - X[::-1]
Z = Z[::-1] - ZF # remember ZF still has a flipped sign right now
Te= Te[::-1]
# print("End 1 Fx "+str(HA))
# print("End 1 Fy "+str(VA))
# print("End 2 Fx "+str(-HF))
# print("End 2 Fy "+str(-VF))
# print("Scope is "+str(XF-LBot))
if plots==2 or info['error']==True: # also show the profile plot
plt.figure()
plt.plot(X,Z)
# save data to info dict
info["X" ] = X
info["Z" ] = Z
info["s" ] = s
info["Te"] = Te
# un-swap line ends if they've been previously swapped, and apply global sign convention
# (vertical force positive-up, horizontal force positive from A to B)
if reverseFlag == 1:
ZF = -ZF # put height rise from end A to B back to negative
FxA = HF
FzA = -VF # VF is positive-down convention so flip sign
FxB = -HA
FzB = VA
else:
FxA = HA
FzA = VA
FxB = -HF
FzB = -VF
# return horizontal and vertical (positive-up) tension components at each end, and length along seabed
return (FxA, FzA, FxB, FzB, info)
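# A minimal usage sketch of the Catenary solver above (the numbers are illustrative
# placeholders, not values from the source):
#
# fxA, fzA, fxB, fzB, info = Catenary(XF=800.0, ZF=100.0, L=850.0, EA=1e9,
# W=1000.0, CB=0.0, plots=1)
# # with plots=1, info["X"], info["Z"] and info["s"] hold node coordinates/arc lengths
# # relative to end A, and info["Te"] the effective tension at each node.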
def printMat(mat):
'''Print a matrix'''
for i in range(mat.shape[0]):
print( "\t".join(["{:+8.3e}"]*mat.shape[1]).format( *mat[i,:] ))
def printVec(vec):
'''Print a vector'''
print( "\t".join(["{:+8.3e}"]*len(vec)).format( *vec ))
def RotationMatrix(x3,x2,x1): # order-z,y,x intrinsic (Tait-Bryan) angles, i.e. the rotations are applied in that order about the rotated axes
'''Calculates a rotation matrix based on order-z,y,x intrinsic angles that are taken about the rotated axes
Parameters
----------
x3, x2, x1: floats
The angles that the rotated axes are from the nonrotated axes [rad]
Returns
-------
R : matrix
The rotation matrix
'''
s1 = np.sin(x1)
c1 = np.cos(x1)
s2 = np.sin(x2)
c2 = np.cos(x2)
s3 = np.sin(x3)
c3 = np.cos(x3)
R = np.array([[ c1*c2, c1*s2*s3-c3*s1, s1*s3+c1*c3*s2],
[ c2*s1, c1*c3+s1*s2*s3, c3*s1*s2-c1*s3],
[ -s2, c2*s3, c2*c3]])
return R
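# (added note) Quick check of the matrix above: setting x2 = x3 = 0 collapses R to
# [[cos(x1), -sin(x1), 0], [sin(x1), cos(x1), 0], [0, 0, 1]], a pure rotation about the
# z-axis by x1, consistent with the composite rotation R = Rz(x1) @ Ry(x2) @ Rx(x3).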
def rotatePosition(rRelPoint, rot3):
'''Calculates the new position of a point by applying a rotation (rotates a vector by three angles)
Parameters
----------
rRelPoint : array
x,y,z coordinates of a point relative to a local frame [m]
rot3 : array
Three angles that describe the difference between the local frame and the global frame [rad]
Returns
-------
rRel : array
The relative rotated position of the point about the local frame [m]
'''
# get rotation matrix from three provided angles
RotMat = RotationMatrix(rot3[0], rot3[1], rot3[2])
# find location of point in unrotated reference frame about reference point
rRel = np.matmul(RotMat,rRelPoint)
return rRel
def transformPosition(rRelPoint, r6):
'''Calculates the position of a point based on its position relative to translated and rotated 6DOF body
Parameters
----------
rRelPoint : array
x,y,z coordinates of a point relative to a local frame [m]
r6 : array
6DOF position vector of the origin of the local frame, in the global frame coordinates [m]
Returns
-------
rAbs : array
The absolute position of the point about the global frame [m]
'''
# note: r6 should be in global orientation frame
# absolute location = rotation of relative position + absolute position of reference point
rAbs = rotatePosition(rRelPoint, r6[3:]) + r6[:3]
return rAbs
def translateForce3to6DOF(r, Fin):
'''Takes in a position vector and a force vector (applied at the position), and calculates
the resulting 6-DOF force and moment vector.
Parameters
----------
r : array
x,y,z coordinates at which force is acting [m]
Fin : array
x,y,z components of force [N]
Returns
-------
Fout : array
The resulting force and moment vector [N, Nm]
'''
Fout = np.zeros(6, dtype=Fin.dtype) # initialize output vector as same dtype as input vector (to support both real and complex inputs)
Fout[:3] = Fin
Fout[3:] = np.cross(r, Fin)
return Fout
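# (added note) A small worked example of the moment transfer above: a vertical force
# Fin = [0, 0, -100] N applied at r = [10, 0, 0] m gives
# Fout[3:] = cross([10, 0, 0], [0, 0, -100]) = [0, 1000, 0] N*m,
# i.e. a moment about the y-axis, as expected for a downward force applied offset in x.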
def set_axes_equal(ax):
'''Sets 3D plot axes to equal scale'''
rangex = np.diff(ax.get_xlim3d())[0]
rangey = np.diff(ax.get_ylim3d())[0]
rangez = np.diff(ax.get_zlim3d())[0]
ax.set_box_aspect([rangex, rangey, rangez]) # note: this may require a matplotlib update
#ax.set_xlim3d([x - radius, x + radius])
#ax.set_ylim3d([y - radius, y + radius])
#ax.set_zlim3d([z - radius*0.5, z + radius*0.5])
'''
ax.set_box_aspect([1,1,0.5]) # note: this may require a matplotlib update
limits = np.array([ax.get_xlim3d(),ax.get_ylim3d(),ax.get_zlim3d()])
x, y, z = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
ax.set_xlim3d([x - radius, x + radius])
ax.set_ylim3d([y - radius, y + radius])
ax.set_zlim3d([z - radius*0.5, z + radius*0.5])
'''
# <<<< should make separate class for Rods
# self.RodType = RodType # 0: free to move; 1: pinned; 2: attached rigidly (positive if to something, negative if coupled)
class Line():
'''A class for any mooring line that consists of a single material'''
def __init__(self, mooringSys, num, L, LineType, nSegs=20, cb=0, isRod=0, attachments = [0,0]):
'''Initialize Line attributes
Parameters
----------
mooringSys : system object
The system object that contains the point object
num : int
identifier number
L : float
line unstretched length [m]
LineType : LineType object
LineType object that holds all the line properties
nSegs : int, optional
Number of segments to split the line into. The default is 20.
cb : float, optional
line seabed friction coefficient (will be set negative if line is fully suspended). The default is 0.
isRod : boolean, optional
determines whether the line is a rod or not. The default is 0.
attachments : TYPE, optional
ID numbers of any Points attached to the Line. The default is [0,0].
Returns
-------
None.
'''
# TODO: replace LineType input with just the name
self.sys = mooringSys # store a reference to the overall mooring system (instance of System class)
self.number = num
self.isRod = isRod
self.L = L # line unstretched length
self.type = LineType.name # string that should match a LineTypes dict entry
self.nNodes = int(nSegs) + 1
self.cb = float(cb) # friction coefficient (will automatically be set negative if line is fully suspended)
self.rA = np.zeros(3) # end coordinates
self.rB = np.zeros(3)
self.fA = np.zeros(3) # end forces
self.fB = np.zeros(3)
# Perhaps this could be made less intrusive by defining it through a Line.addPoint() method instead, similar to Point.addLine().
self.attached = attachments # ID numbers of the Points at the Line ends [a,b] >>> NOTE: not fully supported <<<<
self.th = 0 # heading of line from end A to B
self.HF = 0 # fairlead horizontal force saved for next solve
self.VF = 0 # fairlead vertical force saved for next solve
self.jacobian = [] # to be filled with the 2x2 Jacobian from Catenary
self.info = {} # to hold all info provided by Catenary
self.qs = 1 # flag indicating quasi-static analysis (1). Set to 0 for time series data
#print("Created Line "+str(self.number))
# Load line-specific time series data from a MoorDyn output file
def loadData(self, dirname):
'''Loads line-specific time series data from a MoorDyn output file'''
self.qs = 0 # signals time series data
# load time series data
if self.isRod > 0:
data, ch, channels, units = read_mooring_file(dirname, "Rod"+str(self.number)+".out") # remember number starts on 1 rather than 0
else:
data, ch, channels, units = read_mooring_file(dirname, "Line"+str(self.number)+".out") # remember number starts on 1 rather than 0
# get time info
if ("Time" in ch):
self.Tdata = data[:,ch["Time"]]
self.dt = self.Tdata[1]-self.Tdata[0]
else:
raise LineError("loadData: could not find Time channel for mooring line "+str(self.number))
nT = len(self.Tdata) # number of time steps
# check for position data <<<<<<
self.xp = np.zeros([nT,self.nNodes])
self.yp = np.zeros([nT,self.nNodes])
self.zp = np.zeros([nT,self.nNodes])
for i in range(self.nNodes):
self.xp[:,i] = data[:, ch['Node'+str(i)+'px']]
self.yp[:,i] = data[:, ch['Node'+str(i)+'py']]
self.zp[:,i] = data[:, ch['Node'+str(i)+'pz']]
if self.isRod==0:
self.Te = np.zeros([nT,self.nNodes-1]) # read in tension data if available
if "Seg1Te" in ch:
for i in range(self.nNodes-1):
self.Te[:,i] = data[:, ch['Seg'+str(i+1)+'Te']]
self.Ku = np.zeros([nT,self.nNodes]) # read in curvature data if available
if "Node0Ku" in ch:
for i in range(self.nNodes):
self.Ku[:,i] = data[:, ch['Node'+str(i)+'Ku']]
self.Ux = np.zeros([nT,self.nNodes]) # read in fluid velocity data if available
self.Uy = np.zeros([nT,self.nNodes])
self.Uz = np.zeros([nT,self.nNodes])
if "Node0Ux" in ch:
for i in range(self.nNodes):
self.Ux[:,i] = data[:, ch['Node'+str(i)+'Ux']]
self.Uy[:,i] = data[:, ch['Node'+str(i)+'Uy']]
self.Uz[:,i] = data[:, ch['Node'+str(i)+'Uz']]
self.xpi= self.xp[0,:]
self.ypi= self.yp[0,:]
self.zpi= self.zp[0,:]
# get length (constant)
self.L = np.sqrt( (self.xpi[-1]-self.xpi[0])**2 + (self.ypi[-1]-self.ypi[0])**2 + (self.zpi[-1]-self.zpi[0])**2 )
# check for tension data <<<<<<<
# figure out what time step to use for showing time series data
def GetTimestep(self, Time):
'''Get the time step to use for showing time series data'''
if Time < 0:
ts = int(-Time) # negative value indicates passing a time step index
else: # otherwise it's a time in s, so find closest time step
for index, item in enumerate(self.Tdata):
#print "index is "+str(index)+" and item is "+str(item)
ts = -1
if item > Time:
ts = index
break
if ts==-1:
raise LineError("GetTimestep: requested time likely out of range")
return ts
# updates line coordinates for drawing
def GetLineCoords(self, Time): # formerly UpdateLine
'''Updates the line coordinates for drawing'''
# if a quasi-static analysis, just call the Catenary code
if self.qs==1:
depth = self.sys.depth
dr = self.rB - self.rA
LH = np.hypot(dr[0], dr[1]) # horizontal spacing of line ends
LV = dr[2] # vertical offset from end A to end B
if np.min([self.rA[2],self.rB[2]]) > -depth:
self.cb = -depth - np.min([self.rA[2],self.rB[2]]) # if this line's lower end is off the seabed, set cb negative and to the distance off the seabed
elif self.cb < 0: # if a line end is at the seabed, but the cb is still set negative to indicate off the seabed
self.cb = 0.0 # set to zero so that the line includes seabed interaction.
try:
(fAH, fAV, fBH, fBV, info) = Catenary(LH, LV, self.L, self.sys.LineTypes[self.type].EA,
self.sys.LineTypes[self.type].w, self.cb, HF0=self.HF, VF0=self.VF, nNodes=self.nNodes, plots=1)
except CatenaryError as error:
raise LineError(self.number, error.message)
Xs = self.rA[0] + info["X"]*dr[0]/LH
Ys = self.rA[1] + info["X"]*dr[1]/LH
Zs = self.rA[2] + info["Z"]
return Xs, Ys, Zs
# otherwise, count on read-in time-series data
else:
# figure out what time step to use
ts = self.GetTimestep(Time)
# drawing rods
if self.isRod > 0:
k1 = np.array([ self.xp[ts,-1]-self.xp[ts,0], self.yp[ts,-1]-self.yp[ts,0], self.zp[ts,-1]-self.zp[ts,0] ]) / self.length # unit vector
k = np.array(k1) # make copy
Rmat = np.array(RotationMatrix(0, np.arctan2(np.hypot(k[0],k[1]), k[2]), np.arctan2(k[1],k[0]))) # <<< should fix this up at some point, MattLib func may be wrong
# make points for appropriately sized cylinder
d = self.sys.LineTypes[self.type].d
Xs, Ys, Zs = makeTower(self.length, np.array([d, d]))
# translate and rotate into proper position for Rod
coords = np.vstack([Xs, Ys, Zs])
newcoords = np.matmul(Rmat,coords)
Xs = newcoords[0,:] + self.xp[ts,0]
Ys = newcoords[1,:] + self.yp[ts,0]
Zs = newcoords[2,:] + self.zp[ts,0]
return Xs, Ys, Zs
# drawing lines
else:
return self.xp[ts,:], self.yp[ts,:], self.zp[ts,:]
def DrawLine2d(self, Time, ax, color="k", Xuvec=[1,0,0], Yuvec=[0,0,1]):
'''Draw the line in 2D
Parameters
----------
Time : float
time value at which to draw the line
ax : axis
the axis on which the line is to be drawn
color : string, optional
color identifier in one letter (k=black, b=blue,...). The default is "k".
Xuvec : list, optional
unit vector (in global coordinates) defining which direction maps to the plot x-axis. The default is [1,0,0].
Yuvec : list, optional
unit vector (in global coordinates) defining which direction maps to the plot y-axis. The default is [0,0,1].
Returns
-------
linebit : list
list of axes and points on which the line can be plotted
'''
# draw line on a 2d plot (ax must be 2d)
linebit = [] # make empty list to hold plotted lines, however many there are
if self.isRod > 0:
Xs, Ys, Zs = self.GetLineCoords(Time)
# apply any 3D to 2D transformation here to provide desired viewing angle
Xs2d = Xs*Xuvec[0] + Ys*Xuvec[1] + Zs*Xuvec[2]
Ys2d = Xs*Yuvec[0] + Ys*Yuvec[1] + Zs*Yuvec[2]
for i in range(int(len(Xs)/2-1)):
linebit.append(ax.plot(Xs2d[2*i:2*i+2] ,Ys2d[2*i:2*i+2] , lw=0.5, color=color)) # side edges
linebit.append(ax.plot(Xs2d[[2*i,2*i+2]] ,Ys2d[[2*i,2*i+2]] , lw=0.5, color=color)) # end A edges
linebit.append(ax.plot(Xs2d[[2*i+1,2*i+3]],Ys2d[[2*i+1,2*i+3]], lw=0.5, color=color)) # end B edges
# drawing lines...
else:
Xs, Ys, Zs = self.GetLineCoords(Time)
# apply any 3D to 2D transformation here to provide desired viewing angle
Xs2d = Xs*Xuvec[0] + Ys*Xuvec[1] + Zs*Xuvec[2]
Ys2d = Xs*Yuvec[0] + Ys*Yuvec[1] + Zs*Yuvec[2]
linebit.append(ax.plot(Xs2d, Ys2d, lw=1, color=color))
self.linebit = linebit # can we store this internally?
return linebit
def DrawLine(self, Time, ax, color="k"):
'''Draw the line
Parameters
----------
Time : float
time value at which to draw the line
ax : axis
the axis on which the line is to be drawn
color : string, optional
color identifier in one letter (k=black, b=blue,...). The default is "k".
Returns
-------
linebit : list
list of axes and points on which the line can be plotted
'''
# draw line in 3d for the first time (ax must be a 3D axis)
linebit = [] # make empty list to hold plotted lines, however many there are
if self.isRod > 0:
Xs, Ys, Zs = self.GetLineCoords(Time)
for i in range(int(len(Xs)/2-1)):
linebit.append(ax.plot(Xs[2*i:2*i+2],Ys[2*i:2*i+2],Zs[2*i:2*i+2] , color=color)) # side edges
linebit.append(ax.plot(Xs[[2*i,2*i+2]],Ys[[2*i,2*i+2]],Zs[[2*i,2*i+2]] , color=color)) # end A edges
linebit.append(ax.plot(Xs[[2*i+1,2*i+3]],Ys[[2*i+1,2*i+3]],Zs[[2*i+1,2*i+3]], color=color)) # end B edges
# drawing lines...
else:
Xs, Ys, Zs = self.GetLineCoords(Time)
linebit.append(ax.plot(Xs, Ys, Zs, color=color))
# drawing water velocity vectors (not for Rods for now) <<< should handle this better (like in GetLineCoords) <<<
if self.qs == 0:
ts = self.GetTimestep(Time)
Ux = self.Ux[ts,:]
Uy = self.Uy[ts,:]
Uz = self.Uz[ts,:]
self.Ubits = ax.quiver(Xs, Ys, Zs, Ux, Uy, Uz) # make quiver plot and save handle to line object
self.linebit = linebit # can we store this internally?
self.X = np.array([Xs, Ys, Zs])
return linebit
def RedrawLine(self, Time): #, linebit):
'''Update 3D line drawing based on instantaneous position'''
linebit = self.linebit
if self.isRod > 0:
Xs, Ys, Zs = self.GetLineCoords(Time)
for i in range(int(len(Xs)/2-1)):
linebit[3*i ][0].set_data(Xs[2*i:2*i+2],Ys[2*i:2*i+2]) # side edges (x and y coordinates)
linebit[3*i ][0].set_3d_properties(Zs[2*i:2*i+2]) # (z coordinates)
linebit[3*i+1][0].set_data(Xs[[2*i,2*i+2]],Ys[[2*i,2*i+2]]) # end A edges
linebit[3*i+1][0].set_3d_properties(Zs[[2*i,2*i+2]])
linebit[3*i+2][0].set_data(Xs[[2*i+1,2*i+3]],Ys[[2*i+1,2*i+3]]) # end B edges
linebit[3*i+2][0].set_3d_properties(Zs[[2*i+1,2*i+3]])
# drawing lines...
else:
Xs, Ys, Zs = self.GetLineCoords(Time)
linebit[0][0].set_data(Xs,Ys) # (x and y coordinates)
linebit[0][0].set_3d_properties(Zs) # (z coordinates)
# drawing water velocity vectors (not for Rods for now)
if self.qs == 0:
ts = self.GetTimestep(Time)
Ux = self.Ux[ts,:]
Uy = self.Uy[ts,:]
Uz = self.Uz[ts,:]
segments = quiver_data_to_segments(Xs, Ys, Zs, Ux, Uy, Uz, scale=2)
self.Ubits.set_segments(segments)
return linebit
def setEndPosition(self, r, endB):
'''Sets the end position of the line based on the input endB value.
Parameters
----------
r : array
x,y,z coordinate position vector of the line end [m].
endB : boolean
Indicates whether r is the position of end B (1) or end A (0)
Raises
------
LineError
If the given endB value is not a 1 or 0
Returns
-------
None.
'''
if endB == 1:
self.rB = np.array(r, dtype=float)
elif endB == 0:
self.rA = np.array(r, dtype=float)
else:
raise LineError("setEndPosition: endB value has to be either 1 or 0")
def staticSolve(self, reset=False):
'''Solves static equilibrium of line. Sets the end forces of the line based on the end points' positions.
Parameters
----------
reset : boolean, optional
Determines if the previous fairlead force values will be used for the Catenary iteration. The default is False.
Raises
------
LineError
If the horizontal force at the fairlead (HF) is less than 0
Returns
-------
None.
'''
depth = self.sys.depth
dr = self.rB - self.rA
LH = np.hypot(dr[0], dr[1]) # horizontal spacing of line ends
LV = dr[2] # vertical offset from end A to end B
if self.rA[2] < -depth:
raise LineError("Line {} end A is lower than the seabed.".format(self.number))
elif self.rB[2] < -depth:
raise LineError("Line {} end B is lower than the seabed.".format(self.number))
elif np.min([self.rA[2],self.rB[2]]) > -depth:
self.cb = -depth -
|
np.min([self.rA[2],self.rB[2]])
|
numpy.min
|
import os, sys
import numpy as np
import keras.backend as K
import cv2
from PIL import Image
from PIL.ImageFilter import GaussianBlur
from model_utils import ApplicationModel
import image_utils
sys.path.insert(0, os.path.abspath('./libs/IntegratedGradients'))
import IntegratedGradients
class GuidedBackpropVisualizer():
def __init__(self, model):
assert isinstance(model, ApplicationModel)
self.model = model
def gradient_function(self, preds, output_neuron):
# For now, assert multi-class
assert preds.shape[0] > 1, 'Provide multi-class output for now.'
one_hots = np.zeros((1, preds.shape[0]))
one_hots[:, output_neuron] = 1.
loss_out = one_hots * self.model.guided_model.output
input_grads = K.gradients(loss_out, self.model.guided_model.input)
outputs = [self.model.guided_model.output]
if type(input_grads) in {list, tuple}:
outputs += input_grads
else:
outputs.append(input_grads)
f_outputs = K.function([self.model.guided_model.input], outputs)
return f_outputs
def calculate(self, x, preds, output_neuron, cmap=None, percentile=99):
f = self.gradient_function(preds, output_neuron)
function_preds, guided_gradients = f([self.model.preprocessing_function(x)])
return self.postprocess(guided_gradients[0], cmap, percentile), function_preds
def postprocess(self, x, cmap, percentile):
return image_utils.VisualizeImageGrayscale(x, percentile=percentile, cmap=cmap)
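# A minimal usage sketch (hypothetical variable names; assumes `app_model` is an
# ApplicationModel wrapping a classifier and `img` is a batch of one image):
#
# viz = GuidedBackpropVisualizer(app_model)
# preds = app_model.model.predict(app_model.preprocessing_function(img))[0]
# heatmap, _ = viz.calculate(img, preds, output_neuron=int(np.argmax(preds)))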
class IntegratedGradientsVisualizer():
def __init__(self, model):
assert isinstance(model, ApplicationModel)
self.model = model
self.ig = IntegratedGradients.integrated_gradients(model.model)
def calculate(self, x, preds, output_neuron, reference_image=None,
cmap=None, percentile=99):
assert len(x.shape) == 4
if reference_image is not None:
assert len(reference_image.shape) == 4
if reference_image is None:
reference_image = self.model.preprocessing_function(np.zeros(x.shape))
explanation = self.ig.explain(
self.model.preprocessing_function(x)[0],
reference=reference_image[0],
outc=output_neuron
)
return self.postprocess(explanation, cmap, percentile)
def postprocess(self, x, cmap, percentile):
preprocessed_image = image_utils.VisualizeImageGrayscale(x, percentile=percentile, cmap=cmap)
mask = preprocessed_image[..., -1] > 0
preprocessed_image = np.expand_dims(mask, axis=-1) * preprocessed_image
return preprocessed_image
class GradCAMVisualizer():
def __init__(self, model, filter_radius=7):
assert isinstance(model, ApplicationModel)
self.model = model
self.im_filter = GaussianBlur(radius=filter_radius)
def gradient_function(self, preds, output_neuron):
# For now, assert multi-class
assert preds.shape[0] > 1, 'Provide multi-class output for now.'
one_hots = np.zeros((1, preds.shape[0]))
one_hots[:, output_neuron] = 1.
loss_out = one_hots * self.model.model_mod.output
cam_grads = K.gradients(
# Gradient of output layer
loss_out,
# wrt output of final conv layer
self.model.model_mod.get_layer(self.model.last_conv_layer).output)
alpha_tensor = K.mean(cam_grads[0], axis=(0, 1, 2))
cam = self.model.model_mod.get_layer(self.model.last_conv_layer).output
scaled_map = cam[0] * alpha_tensor
grad_cam = K.relu(K.sum(scaled_map, axis=-1))
outputs = [grad_cam]
cam_func = K.function([self.model.model_mod.input], outputs)
return cam_func
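# (added note) The function built above follows the standard Grad-CAM recipe: the
# channel weights alpha_k are the spatial mean of d(score)/d(A_k) over the last conv
# layer's feature maps A_k, and the class activation map is ReLU(sum_k alpha_k * A_k),
# which calculate() then resizes and smooths to the input resolution in post_process().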
def calculate(self, x, preds, output_neuron):
cam_f = self.gradient_function(preds, output_neuron)
grad_cam = cam_f([self.model.preprocessing_function(x)])
# Width by Height
target_size = (x.shape[2], x.shape[1])
processed_grad_cam = self.post_process(grad_cam[0], target_size)
return processed_grad_cam
def post_process(self, activation_map, target_size):
# Normalize 0 - 1
vmax = np.max(activation_map)
vmin = np.min(activation_map)
scaled_map = np.clip((activation_map - vmin) / (vmax - vmin), 0, 1)
# Normalize 0 - 255, Resize, Smoothen
scaled_map = scaled_map * 255.
scaled_map = scaled_map.astype(np.uint8)
act_image = Image.fromarray(scaled_map)
act_image = act_image.resize(target_size)
act_image = act_image.filter(self.im_filter)
# Renormalize
act_image = np.array(act_image)
vmax = np.max(act_image)
vmin =
|
np.min(act_image)
|
numpy.min
|
import argparse
import numpy as np
from getimage.__main__ import getimage
from PIL import Image, ImageOps
def append_to_filename_with_ext(filename, s, rename = False):
if rename:
f_new = filename
else:
temp = filename.split('.')
f_new = '.'.join(temp[0:-1])+s+'.'+temp[-1]
return f_new
def transparentizeBG(**kwargs):
filename = kwargs['filename']
rename = kwargs['rename']
image, grayscale = getimage(filename)
f_new = append_to_filename_with_ext(filename, '_transparentBG', rename)
image = Image.fromarray(image)
image = image.convert("RGBA")
datas = image.getdata()
newData = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newData.append((255, 255, 255, 0))
else:
newData.append(item)
image.putdata(newData)
image.save(f_new, "PNG")
def auto_invert_image(image, tol, N_allowable_misses, grayscale):
if grayscale:
removeable_rows, removeable_cols = auto_invert_image_bw(image, tol, N_allowable_misses)
else:
removeable_rows = []
removeable_cols = []
for k in range(0,3):
r_rows, r_cols = auto_invert_image_bw(image[:,:,k], tol, N_allowable_misses)
removeable_rows += r_rows
removeable_cols += r_cols
removeable_rows = sorted(list(set(removeable_rows)))
removeable_cols = sorted(list(set(removeable_cols)))
image = np.delete(image, removeable_rows, axis=0)
image = np.delete(image, removeable_cols, axis=1)
return image
def auto_invert_image_bw(image, tol, N_allowable_misses):
S = image.shape
removeable_cols = []
N_misses = 0
MIN =
|
np.min(image)
|
numpy.min
|
"""
Distance statistics for planar point patterns
"""
__author__ = "<NAME> <EMAIL>"
__all__ = ['DStatistic', 'G', 'F', 'J', 'K', 'L', 'Envelopes', 'Genv', 'Fenv', 'Jenv', 'Kenv', 'Lenv']
from .process import PoissonPointProcess as csr
import numpy as np
from matplotlib import pyplot as plt
class DStatistic(object):
"""
Abstract Base Class for distance statistics.
Parameters
----------
name : string
Name of the function. ("G", "F", "J", "K" or "L")
Attributes
----------
d : array
The distance domain sequence.
"""
def __init__(self, name):
self.name = name
def plot(self, qq=False):
"""
Plot the distance function
Parameters
----------
qq: Boolean
If False the statistic is plotted against distance. If True, the
quantile-quantile plot is generated, observed vs. CSR.
"""
# assuming mpl
x = self.d
if qq:
plt.plot(self.ev, self._stat)
plt.plot(self.ev, self.ev)
else:
plt.plot(x, self._stat, label='{}'.format(self.name))
plt.ylabel("{}(d)".format(self.name))
plt.xlabel('d')
plt.plot(x, self.ev, label='CSR')
plt.title("{} distance function".format(self.name))
class G(DStatistic):
"""
Estimates the nearest neighbor distance distribution function G for a
point pattern.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
The length of distance domain sequence.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
Attributes
----------
name : string
Name of the function. ("G", "F", "J", "K" or "L")
d : array
The distance domain sequence.
G : array
The cumulative nearest neighbor distance distribution over d.
Notes
-----
In the analysis of planar point processes, the estimate of :math:`G` is
typically compared to the value expected from a completely spatial
random (CSR) process given as:
.. math::
G(d) = 1 - e^{-\lambda \pi d^2}
where :math:`\lambda` is the intensity (points per unit area) of the point
process and :math:`d` is distance.
For a clustered pattern, the empirical function will be above the
expectation, while for a uniform pattern the empirical function falls below
the expectation.
"""
def __init__(self, pp, intervals=10, dmin=0.0, dmax=None, d=None):
res = _g(pp, intervals, dmin, dmax, d)
self.d = res[:, 0]
self.G = self._stat = res[:, 1]
self.ev = 1 - np.exp(-pp.lambda_window * np.pi * self.d * self.d)
self.pp = pp
super(G, self).__init__(name="G")
class F(DStatistic):
"""
Estimates the empty space distribution function for a point pattern: F(d).
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
n : int
Number of empty space points (random points).
intervals : int
The length of distance domain sequence.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
Attributes
----------
d : array
The distance domain sequence.
F : array
The cumulative empty space nearest event distance distribution
over d.
Notes
-----
In the analysis of planar point processes, the estimate of :math:`F` is
typically compared to the value expected from a process that displays
complete spatial randomness (CSR):
.. math::
F(d) = 1 - e^{-\lambda \pi d^2}
where :math:`\lambda` is the intensity (points per unit area) of the point
process and :math:`d` is distance.
The expectation is identical to the expectation for the :class:`G` function
for a CSR process. However, for a clustered pattern, the empirical F
function will be below the expectation, while for a uniform pattern the
empirical function falls above the expectation.
"""
def __init__(self, pp, n=100, intervals=10, dmin=0.0, dmax=None, d=None):
res = _f(pp, n, intervals, dmin, dmax, d)
self.d = res[:, 0]
self.F = self._stat = res[:, 1]
self.ev = 1 - np.exp(-pp.lambda_window * np.pi * self.d * self.d)
super(F, self).__init__(name="F")
class J(DStatistic):
"""
Estimates the J function for a point pattern :cite:`VanLieshout1996`
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
n : int
Number of empty space points (random points).
intervals : int
The length of distance domain sequence.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
Attributes
----------
d : array
The distance domain sequence.
j : array
J function over d.
Notes
-----
The :math:`J` function is a ratio of the hazard functions defined for
:math:`G` and :math:`F`:
.. math::
J(d) = \\frac{1-G(d) }{1-F(d)}
where :math:`G(d)` is the nearest neighbor distance distribution function
(see :class:`G`)
and :math:`F(d)` is the empty space function (see :class:`F`).
For a CSR process the J function equals 1. Empirical values larger than 1
are indicative of uniformity, while values below 1 suggest clustering.
"""
def __init__(self, pp, n=100, intervals=10, dmin=0.0, dmax=None, d=None):
res = _j(pp, n, intervals, dmin, dmax, d)
self.d = res[:, 0]
self.j = self._stat = res[:, 1]
self.ev = self.j / self.j
super(J, self).__init__(name="J")
class K(DStatistic):
"""
Estimates the K function for a point pattern.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
The length of distance domain sequence.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
Attributes
----------
d : array
The distance domain sequence.
k : array
K function over d.
"""
def __init__(self, pp, intervals=10, dmin=0.0, dmax=None, d=None):
res = _k(pp, intervals, dmin, dmax, d)
self.d = res[:, 0]
self.k = self._stat = res[:, 1]
self.ev = np.pi * self.d * self.d
super(K, self).__init__(name="K")
class L(DStatistic):
"""
Estimates the l function for a point pattern.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
The length of distance domain sequence.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
Attributes
----------
d : array
The distance domain sequence.
l : array
L function over d.
"""
def __init__(self, pp, intervals=10, dmin=0.0, dmax=None, d=None):
res = _l(pp, intervals, dmin, dmax, d)
self.d = res[:, 0]
self.l = self._stat = res[:, 1]
super(L, self).__init__(name="L")
def plot(self):
# assuming mpl
x = self.d
plt.plot(x, self._stat, label='{}'.format(self.name))
plt.ylabel("{}(d)".format(self.name))
plt.xlabel('d')
plt.title("{} distance function".format(self.name))
def _g(pp, intervals=10, dmin=0.0, dmax=None, d=None):
"""
Estimate the nearest neighbor distances function G.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
Number of intervals to evaluate G over.
dmin : float
Lower limit of distance range.
dmax : float
Upper limit of distance range. If dmax is None, dmax will be set
to maximum nearest neighbor distance.
d : sequence
The distance domain sequence. If d is specified, intervals, dmin
and dmax are ignored.
Returns
-------
: array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the point pattern. The second
column is the cumulative nearest neighbor distance distribution.
Notes
-----
See :class:`G`.
"""
if d is None:
w = pp.max_nnd/intervals
if dmax:
w = dmax/intervals
d = [w*i for i in range(intervals + 2)]
cdf = [0] * len(d)
for i, d_i in enumerate(d):
smaller = [nndi for nndi in pp.nnd if nndi <= d_i]
cdf[i] = len(smaller)*1./pp.n
return np.vstack((d, cdf)).T
def _f(pp, n=100, intervals=10, dmin=0.0, dmax=None, d=None):
"""
F empty space function.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
n : int
Number of empty space points (random points).
intervals : int
Number of intervals to evaluate F over.
dmin : float
Lower limit of distance range.
dmax : float
Upper limit of distance range. If dmax is None, dmax will be set
to maximum nearest neighbor distance.
d : sequence
The distance domain sequence. If d is specified, intervals, dmin
and dmax are ignored.
Returns
-------
: array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the point pattern. The second
column is corresponding F function.
Notes
-----
See :class:`.F`
"""
# get a csr pattern in window of pp
c = csr(pp.window, n, 1, asPP=True).realizations[0]
# for each point in csr pattern find the closest point in pp and the
# associated distance
nnids, nnds = pp.knn_other(c, k=1)
if d is None:
w = pp.max_nnd/intervals
if dmax:
w = dmax/intervals
d = [w*i for i in range(intervals + 2)]
cdf = [0] * len(d)
for i, d_i in enumerate(d):
smaller = [nndi for nndi in nnds if nndi <= d_i]
cdf[i] = len(smaller)*1./n
return np.vstack((d, cdf)).T
def _j(pp, n=100, intervals=10, dmin=0.0, dmax=None, d=None):
"""
J function: Ratio of hazard functions for F and G.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
n : int
Number of empty space points (random points).
intervals : int
Number of intervals to evaluate J over.
dmin : float
Lower limit of distance range.
dmax : float
Upper limit of distance range. If dmax is None, dmax will be set
to maximum nearest neighbor distance.
d : sequence
The distance domain sequence. If d is specified, intervals, dmin
and dmax are ignored.
Returns
-------
: array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the point pattern. The second
column is corresponding J function.
Notes
-----
See :class:`.J`
"""
F = _f(pp, n, intervals=intervals, dmin=dmin, dmax=dmax, d=d)
G = _g(pp, intervals=intervals, dmin=dmin, dmax=dmax, d=d)
FC = 1 - F[:, 1]
GC = 1 - G[:, 1]
last_id = len(GC) + 1
if np.any(FC == 0):
last_id = np.where(FC == 0)[0][0]
return np.vstack((F[:last_id, 0], GC[:last_id]/FC[:last_id])).T
def _k(pp, intervals=10, dmin=0.0, dmax=None, d=None):
"""
Interevent K function.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
Number of intervals to evaluate K over.
dmin : float
Lower limit of distance range.
dmax : float
Upper limit of distance range. If dmax is None, dmax will be set
to length of bounding box diagonal.
d : sequence
The distance domain sequence. If d is specified, intervals, dmin
and dmax are ignored.
Returns
-------
kcdf : array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the point pattern. The second
column is corresponding K function.
Notes
-----
See :class:`.K`
"""
if d is None:
# use length of bounding box diagonal as max distance
bb = pp.mbb
dbb = np.sqrt((bb[0]-bb[2])**2 + (bb[1]-bb[3])**2)
w = dbb/intervals
if dmax:
w = dmax/intervals
d = [w*i for i in range(intervals + 2)]
den = pp.lambda_window * pp.n * 2.
kcdf = np.asarray([(di, len(pp.tree.query_pairs(di))/den) for di in d])
return kcdf
def _l(pp, intervals=10, dmin=0.0, dmax=None, d=None):
"""
Interevent L function.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
Number of intervals to evaluate L over.
dmin : float
Lower limit of distance range.
dmax : float
Upper limit of distance range. If dmax is None, dmax will be set
to length of bounding box diagonal.
d : sequence
The distance domain sequence. If d is specified, intervals, dmin
and dmax are ignored.
Returns
-------
kf : array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the point pattern. The second
column is corresponding L function.
Notes
-----
See :class:`.L`
"""
kf = _k(pp, intervals, dmin, dmax, d)
kf[:, 1] = np.sqrt(kf[:, 1] / np.pi) - kf[:, 0]
return kf
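# (added note) The transform above is the variance-stabilised form of Ripley's K:
# L(d) = sqrt(K(d)/pi) - d, so a CSR process has expectation L(d) = 0 at every distance,
# values above zero suggest clustering and values below zero suggest regularity at that
# scale.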
class Envelopes(object):
"""
Abstract base class for simulation envelopes.
Parameters
----------
pp : :class:`.PointPattern`
Point Pattern instance.
intervals : int
The length of distance domain sequence. Default is 10.
dmin : float
The minimum of the distance domain.
dmax : float
The maximum of the distance domain.
d : sequence
The distance domain sequence.
If d is specified, intervals, dmin and dmax are ignored.
pct : float
1-alpha, alpha is the significance level. Default is 0.05,
1-alpha is the confidence level for the envelope.
realizations: :class:`.PointProcess`
Point process instance with more than 1 realizations.
Attributes
----------
name : string
Name of the function. ("G", "F", "J", "K" or "L")
observed : array
A 2-dimensional numpy array of 2 columns. The first column is
the distance domain sequence for the observed point pattern.
The second column is the specific function ("G", "F", "J",
"K" or "L") over the distance domain sequence for the
observed point pattern.
low : array
A 1-dimensional numpy array. Lower bound of the simulation
envelope.
high : array
A 1-dimensional numpy array. Higher bound of the simulation
envelope.
mean : array
A 1-dimensional numpy array. Mean values of the simulation
envelope.
"""
def __init__(self, *args, **kwargs):
# setup arguments
self.name = kwargs['name']
# calculate observed function
self.pp = args[0]
self.observed = self.calc(*args, **kwargs)
self.d = self.observed[:, 0] # domain to be used in all realizations
# do realizations
self.mapper(kwargs['realizations'])
def mapper(self, realizations):
reals = realizations.realizations
res = np.asarray([self.calc(reals[p]) for p in reals])
# When calculating the J function for all the simulations, the length
# of the returned interval domains might be different.
if self.name == "J":
res = []
for p in reals:
j = self.calc(reals[p])
if j.shape[0] < self.d.shape[0]:
diff = self.d.shape[0]-j.shape[0]
for i in range(diff):
j =
|
np.append(j, [[self.d[i+diff], np.inf]], axis=0)
|
numpy.append
|
# class FrequencyDomain:
# def simple_resonance_detector()
from scipy.optimize import curve_fit, least_squares
import matplotlib.pyplot as plt
import numpy as np
from .helper_functions import *
def lorentzian_fit_func(f, f0, gamma, a, b):
omega, omega0 = 2 * np.pi * f, 2 * np.pi * f0
return a + b / np.pi * (gamma / 2) / ((omega - omega0) ** 2 + (gamma / 2) ** 2)
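# (added note) In the form above, `a` is a constant baseline, `gamma` is the full width
# at half maximum in angular-frequency units, and `b` scales the area under the peak:
# the peak height above baseline is 2*b/(pi*gamma), which is why the initial guess in
# analyze_lorentzian uses b = pi*gamma/2 * (peak - baseline).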
def analyze_lorentzian(f, sig, p0=None):
sig_mag = sig
if sig.dtype == complex:
sig_mag = np.abs(sig) ** 2
if p0 is None:
if (np.max(sig_mag) - np.mean(sig_mag)) > (np.mean(sig_mag) - np.min(sig_mag)):
# peak detected case
f0 = f[np.argmax(sig_mag)]
a = np.mean(sig_mag[np.argsort(sig_mag)[:int(len(sig_mag) // 10)]]) # baseline (average of smallest 10% samples)
# linewidth is extracted from sample closest to half-max
gamma = 2 * np.abs(f[np.argmin(np.abs(sig_mag - 0.5 * (np.max(sig_mag) + a)))] - f0)
b = np.pi * gamma / 2 * (np.max(sig_mag) - a)
p0 = [f0, gamma, a, b]
elif (np.max(sig_mag) - np.mean(sig_mag)) < (np.mean(sig_mag) - np.min(sig_mag)):
# valley detected case
f0 = f[np.argmin(sig_mag)]
a = np.mean(sig_mag[np.argsort(-sig_mag)[:int(len(sig) // 10)]]) # baseline (average of largest 10% samples)
# linewidth is extracted from sample closest to half-max
gamma = 2 * np.abs(f[np.argmin(np.abs(sig_mag - 0.5 * (np.min(sig_mag) + a)))] - f0)
b = np.pi * gamma / 2 * (np.min(sig_mag) - a)
p0 = [f0, gamma, a, b]
fit = curve_fit(lorentzian_fit_func, f, sig_mag, p0=p0,
bounds=([p0[0] * 0.5, p0[1] * 0.5, 0, p0[3] * 0.1],
[p0[0] * 1.5, p0[1] * 1.5, np.inf, p0[3] * 10]))
return fit
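# A minimal usage sketch (illustrative only): given a frequency sweep `f` in Hz and a
# complex transmission trace `s21`, the call below fits |s21|**2 to the Lorentzian and
# returns the usual scipy curve_fit tuple:
#
# (f0, gamma, a, b), pcov = analyze_lorentzian(f, s21)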
def gaussian_fit_func(f, f0, a, c, d):
return a * np.exp(-(f - f0)**2 / (2 * c**2)) + d
def analyze_gaussian(f, sig, p0=None):
sig_mag = sig
if sig.dtype == complex:
sig_mag = np.abs(sig) ** 2
if p0 is None:
if (np.max(sig_mag) - np.mean(sig_mag)) > (np.mean(sig_mag) - np.min(sig_mag)):
# peak detected case
f0 = f[np.argmax(sig_mag)]
d = np.mean(sig_mag[np.argsort(sig_mag)[:int(len(sig_mag) // 10)]]) # baseline (average of smallest 10% samples)
# width is extracted from the sample closest to the 1/e point of the peak
c = 1 / np.sqrt(2) * np.abs(f[np.argmin(np.abs(sig_mag - ((np.max(sig_mag) - d) / np.exp(1) + d)))] - f0)
a = (np.max(sig_mag) - d)
p0 = [f0, a, c, d]
elif (np.max(sig_mag) - np.mean(sig_mag)) < (np.mean(sig_mag) - np.min(sig_mag)):
# valley detected case
f0 = f[np.argmin(sig_mag)]
d = np.mean(sig_mag[np.argsort(-sig_mag)[:int(len(sig) // 10)]]) # baseline (average of largest 10% samples)
# width is extracted from the sample closest to the 1/e point of the dip
c = 1 / np.sqrt(2) * np.abs(f[np.argmin(np.abs(sig_mag - ((np.min(sig_mag) - d) / np.exp(1) + d)))] - f0)
a = (np.min(sig_mag) - d)
p0 = [f0, a, c, d]
fit = curve_fit(gaussian_fit_func, f, sig_mag, p0=p0,
bounds=([p0[0] * 0.5, p0[1] * 0.5, p0[2] * 0.1, 0],
[p0[0] * 1.5, p0[1] * 1.5, p0[2] * 10, np.inf]))
return fit
# class DispersiveShift:
class FrequencyDomain:
def __init__(self, freq, signal):
# initialize parameters
self.frequency = freq
self.signal = signal
self.n_pts = len(self.signal)
self.is_analyzed = False
self.p0 = None
self.popt = None
self.pcov = None
def _guess_init_params(self):
"""
Guess initial parameters from data. Will be overwritten in subclass
"""
def _set_init_params(self, p0):
if p0 is None:
self._guess_init_params()
else:
self.p0 = p0
def _save_fit_results(self, popt, pcov):
self.popt = popt
self.pcov = pcov
def analyze(self, p0=None, plot=True, **kwargs):
"""
Analyze the data with initial parameter `p0`.
"""
# set initial fit parameters
self._set_init_params(p0)
# perform fitting
popt, pcov = curve_fit(self.fit_func, self.frequency, self.signal,
p0=self.p0, **kwargs)
self.is_analyzed = True
# save fit results
self._save_fit_results(popt, pcov)
if plot:
self.plot_result()
def _plot_base(self):
fig = plt.figure()
# plot data
_, self.frequency_prefix = number_with_si_prefix(np.max(np.abs(self.frequency)))
self.frequency_scaler = si_prefix_to_scaler(self.frequency_prefix)
plt.plot(self.frequency / self.frequency_scaler,
self.signal, '.', label="Data", color="black")
plt.xlabel("Frequency (" + self.frequency_prefix + "Hz)")
plt.ylabel("Signal")
plt.legend(loc=0, fontsize=14)
fig.tight_layout()
return fig
def plot_result(self):
"""
Will be overwritten in subclass
"""
if not self.is_analyzed:
raise ValueError("The data must be analyzed before plotting")
def _get_const_baseline(self, baseline_portion=0.2,
baseline_ref='symmetric'):
samples = self.signal
N = len(samples)
if baseline_ref == 'left':
bs = np.mean(samples[:int(baseline_portion * N)])
elif baseline_ref == 'right':
bs = np.mean(samples[int(-baseline_portion * N):])
elif baseline_ref == 'symmetric':
bs_left = np.mean(samples[:int(baseline_portion * N / 2)])
bs_right = np.mean(samples[int(-baseline_portion * N / 2):])
bs = np.mean([bs_left, bs_right])
return bs
class LorentzianFit(FrequencyDomain):
"""
"""
def fit_func(self, f, f0, df, a, b):
"""
Lorentzian fit function
Parameters
----------
f : array_like
Frequency points at which the function is evaluated.
f0 : float
Resonant frequency.
df : float
Full-width half-maximum linewidth.
a : float
Amplitude numerator of the Lorentzian term.
b : float
Constant baseline offset.
Returns
-------
array_like
Lorentzian evaluated at ``f``.
"""
return a / ((f - f0) ** 2 + (df / 2) ** 2) + b
def _guess_init_params(self):
"""
Guess initial parameters from data.
"""
signal = self.signal
f = self.frequency
b0 = self._get_const_baseline()
peak_A0, dip_A0 = np.max(signal) - b0, np.min(signal) - b0
if peak_A0 > - dip_A0: # peak detected case
A0 = peak_A0
f0 = f[np.argmax(signal)]
else: # valley detected case
A0 = dip_A0
f0 = f[np.argmin(signal)]
# linewidth is extracted from sample closest to half-max
df0 = 2 * np.abs(f[np.argmin(
|
np.abs(signal - (0.5 * A0 + b0))
|
numpy.abs
|
import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.tests.generator.mock_generator import MockGenerator
from distancematrix.generator.filter_generator import _invalid_data_to_invalid_subseq
from distancematrix.generator.filter_generator import FilterGenerator
from distancematrix.generator.filter_generator import is_not_finite
class TestFilterGenerator(TestCase):
def test_data_points_are_filtered_for_different_query_and_series(self):
mock_gen = MockGenerator(np.arange(12).reshape((3, 4)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite)
filter_gen.prepare(
3,
np.array([1, np.inf, 3, 4, 5, np.inf]),
np.array([np.inf, 2, 3, 4, np.inf])
)
npt.assert_equal(mock_gen.series, [1, 0, 3, 4, 5, 0])
npt.assert_equal(mock_gen.query, [0, 2, 3, 4, 0])
def test_data_points_are_filtered_for_self_join(self):
mock_gen = MockGenerator(np.arange(9).reshape((3, 3)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite)
data = np.array([np.inf, 2, 3, 4, np.inf])
filter_gen.prepare(3, data)
npt.assert_equal(mock_gen.series, [0, 2, 3, 4, 0])
self.assertIsNone(mock_gen.query)
def test_calc_column_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(12, dtype=np.float).reshape((3, 4)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare(
3,
np.array([1, np.inf, 3, 4, 5, 6], dtype=np.float),
np.array([1, 2, 3, 4, np.inf], dtype=np.float)
)
npt.assert_equal(filter_gen.calc_column(0), [np.inf, np.inf, np.inf])
npt.assert_equal(filter_gen.calc_column(1), [np.inf, np.inf, np.inf])
npt.assert_equal(filter_gen.calc_column(2), [2, 6, np.inf])
npt.assert_equal(filter_gen.calc_column(3), [3, 7, np.inf])
def test_calc_diag_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(12, dtype=np.float).reshape((3, 4)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare(
3,
np.array([1, np.inf, 3, 4, 5, 6], dtype=np.float),
np.array([1, 2, 3, 4, np.inf], dtype=np.float)
)
# i i 2 3
# i i 6 7
# i i i i
npt.assert_equal(filter_gen.calc_diagonal(-2), [np.inf])
npt.assert_equal(filter_gen.calc_diagonal(-1), [np.inf, np.inf])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.inf, np.inf, np.inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [np.inf, 6, np.inf])
npt.assert_equal(filter_gen.calc_diagonal(2), [2, 7])
npt.assert_equal(filter_gen.calc_diagonal(3), [3])
class TestStreamingFilterGenerator(TestCase):
def test_streaming_data_points_are_filtered_for_different_query_and_series(self):
mock_gen = MockGenerator(np.arange(12).reshape((3, 4)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6, 5)
npt.assert_equal(mock_gen.bound_gen.appended_series, [])
npt.assert_equal(mock_gen.bound_gen.appended_query, [])
filter_gen.append_series(np.array([0, np.inf, 1, 2]))
filter_gen.append_query(np.array([np.inf]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 0, 1, 2])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0])
filter_gen.append_series(np.array([3, 4, 5]))
filter_gen.append_query(np.array([0, 1, 2, 3, 4, 5]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 0, 1, 2, 3, 4, 5])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 0, 1, 2, 3, 4, 5])
filter_gen.append_series(np.array([6, 7, np.nan]))
filter_gen.append_query(np.array([6, 7]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 0, 1, 2, 3, 4, 5, 6, 7, 0])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 0, 1, 2, 3, 4, 5, 6, 7])
def test_streaming_data_points_are_filtered_for_self_join(self):
mock_gen = MockGenerator(np.arange(9).reshape((3, 3)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6)
npt.assert_equal(mock_gen.bound_gen.appended_series, [])
filter_gen.append_series(np.array([0, 1, 2, 3]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3])
npt.assert_equal(mock_gen.bound_gen.appended_query, [])
filter_gen.append_series(np.array([np.nan, np.inf, 4, 5, 6]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 0, 4, 5, 6])
npt.assert_equal(mock_gen.bound_gen.appended_query, [])
filter_gen.append_series(np.array([7, 8, 9]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 0, 4, 5, 6, 7, 8, 9])
npt.assert_equal(mock_gen.bound_gen.appended_query, [])
def test_streaming_calc_column_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(100, dtype=np.float).reshape((10, 10)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6, 5)
filter_gen.append_series(np.array([0, 1, 2, 3, np.Inf]))
filter_gen.append_query(np.array([0, 1, 2]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2])
npt.assert_equal(filter_gen.calc_column(0), [0])
npt.assert_equal(filter_gen.calc_column(1), [1])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf])
filter_gen.append_series(np.array([4, 5, 6]))
filter_gen.append_query(np.array([3]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(3), [5, 15])
filter_gen.append_series(np.array([7]))
filter_gen.append_query(np.array([np.Inf, 4]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6, 7])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 0, 4])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(2), [15, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(3), [16, np.Inf, np.Inf])
filter_gen.append_series(np.array([8]))
filter_gen.append_query(np.array([5, 6]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6, 7, 8])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 0, 4, 5, 6])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, np.Inf, 55])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, np.Inf, 56])
npt.assert_equal(filter_gen.calc_column(3), [np.Inf, np.Inf, 57])
filter_gen.append_query(np.array([7]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6, 7, 8])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 0, 4, 5, 6, 7])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, 55, 65])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, 56, 66])
npt.assert_equal(filter_gen.calc_column(3), [np.Inf, 57, 67])
def test_streaming_self_join_calc_column_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(100, dtype=np.float).reshape((10, 10)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6)
filter_gen.append_series(np.array([0, 1, 2, 3, np.Inf]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0])
npt.assert_equal(filter_gen.calc_column(0), [0, 10, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [1, 11, np.Inf])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, np.Inf, np.Inf])
filter_gen.append_series(np.array([4, 5, 6]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(3), [np.Inf, np.Inf, np.Inf, 55])
filter_gen.append_series(np.array([7]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6, 7])
npt.assert_equal(filter_gen.calc_column(0), [np.Inf, np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(1), [np.Inf, np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_column(2), [np.Inf, np.Inf, 55, 65])
npt.assert_equal(filter_gen.calc_column(3), [np.Inf, np.Inf, 56, 66])
filter_gen.append_series(np.array([8, 9]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 0, 4, 5, 6, 7, 8, 9])
npt.assert_equal(filter_gen.calc_column(0), [55, 65, 75, 85])
npt.assert_equal(filter_gen.calc_column(1), [56, 66, 76, 86])
npt.assert_equal(filter_gen.calc_column(2), [57, 67, 77, 87])
npt.assert_equal(filter_gen.calc_column(3), [58, 68, 78, 88])
def test_streaming_calc_diag_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(100, dtype=np.float).reshape((10, 10)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6, 5)
filter_gen.append_series(np.array([0, 1, 2]))
filter_gen.append_query(np.array([np.Inf, 1, 2]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.inf])
filter_gen.append_query(np.array([3, 4, 5]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 4, 5])
npt.assert_equal(filter_gen.calc_diagonal(0), [10])
npt.assert_equal(filter_gen.calc_diagonal(-1), [20])
npt.assert_equal(filter_gen.calc_diagonal(-2), [30])
filter_gen.append_series(np.array([3, 4, np.nan]))
filter_gen.append_query(np.array([np.Inf, 6, 7]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 4, 0])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 4, 5, 0, 6, 7])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-1), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-2), [np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [np.Inf, np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(2), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(3), [np.Inf])
filter_gen.append_series(np.array([5, 6, 7, 8]))
filter_gen.append_query(np.array([8]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 4, 0, 5, 6, 7, 8])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 4, 5, 0, 6, 7, 8])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.Inf, np.Inf, 76])
npt.assert_equal(filter_gen.calc_diagonal(-1), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-2), [np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [np.Inf, np.Inf, 77])
npt.assert_equal(filter_gen.calc_diagonal(2), [np.Inf, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(3), [np.Inf])
# i i i i
# i i i i
# i i . .
filter_gen.append_series(np.array([9]))
filter_gen.append_query(np.array([9]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 4, 0, 5, 6, 7, 8, 9])
npt.assert_equal(mock_gen.bound_gen.appended_query, [0, 1, 2, 3, 4, 5, 0, 6, 7, 8, 9])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.Inf, 76, 87])
npt.assert_equal(filter_gen.calc_diagonal(-1), [np.Inf, 86])
npt.assert_equal(filter_gen.calc_diagonal(-2), [np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [np.Inf, 77, 88])
npt.assert_equal(filter_gen.calc_diagonal(2), [np.Inf, 78])
npt.assert_equal(filter_gen.calc_diagonal(3), [np.Inf])
# i i i i
# i . . .
# i . . .
def test_streaming_self_join_calc_diag_with_invalid_data(self):
mock_gen = MockGenerator(np.arange(100, dtype=np.float).reshape((10, 10)))
filter_gen = FilterGenerator(mock_gen,
invalid_data_function=is_not_finite).prepare_streaming(3, 6)
filter_gen.append_series(np.array([np.Inf, 1, 2]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.inf])
filter_gen.append_series(np.array([3, 4, 5]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 4, 5])
npt.assert_equal(filter_gen.calc_diagonal(0), [np.Inf, 11, 22, 33])
npt.assert_equal(filter_gen.calc_diagonal(-1), [np.Inf, 21, 32])
npt.assert_equal(filter_gen.calc_diagonal(-2), [np.Inf, 31])
npt.assert_equal(filter_gen.calc_diagonal(-3), [np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [np.Inf, 12, 23])
npt.assert_equal(filter_gen.calc_diagonal(2), [np.Inf, 13])
npt.assert_equal(filter_gen.calc_diagonal(3), [np.Inf])
filter_gen.append_series(np.array([6, 7, np.nan]))
npt.assert_equal(mock_gen.bound_gen.appended_series, [0, 1, 2, 3, 4, 5, 6, 7, 0])
npt.assert_equal(filter_gen.calc_diagonal(0), [33, 44, 55, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-1), [43, 54, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-2), [53, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(-3), [np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(1), [34, 45, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(2), [35, np.Inf])
npt.assert_equal(filter_gen.calc_diagonal(3), [np.Inf])
class TestHelperMethods(TestCase):
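# _invalid_data_to_invalid_subseq maps a boolean "invalid data point" mask of
# length n to a mask over the n - m + 1 subsequences of length m (m = 3 in
# these tests): a subsequence is marked invalid as soon as it contains at
# least one invalid data point.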
def test_invalid_data_to_invalid_subseq(self):
data = np.array([0, 0, 0, 0, 0, 0], dtype=np.bool)
corr = np.array([0, 0, 0, 0], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([1, 0, 0, 0, 0, 0], dtype=np.bool)
corr = np.array([1, 0, 0, 0], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([0, 1, 0, 0, 0, 0], dtype=np.bool)
corr = np.array([1, 1, 0, 0], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([0, 0, 1, 0, 0, 0], dtype=np.bool)
corr = np.array([1, 1, 1, 0], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([0, 0, 0, 1, 0, 0], dtype=np.bool)
corr = np.array([0, 1, 1, 1], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([0, 0, 0, 0, 1, 0], dtype=np.bool)
corr = np.array([0, 0, 1, 1], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([0, 0, 0, 0, 0, 1], dtype=np.bool)
corr = np.array([0, 0, 0, 1], dtype=np.bool)
npt.assert_equal(_invalid_data_to_invalid_subseq(data, 3), corr)
data = np.array([1, 0, 1, 0, 0, 0], dtype=np.bool)
corr =
|
np.array([1, 1, 1, 0], dtype=np.bool)
|
numpy.array
|
import numpy as np
import pandas
import random
import math
import matplotlib.pyplot as plt
#creating a perceptron class
class perceptron:
def __init__(self,input_size,output_size,learning_rate):
self.input_size = input_size
self.output_size = output_size
self.learning_rate = learning_rate
self.weight = self.init_weight(self.input_size)
self.bias = self.init_bias()
self.loss_list = []
self.dw = None
self.db = None
def forward_pass(self,x,y):
x = np.array(x,dtype = np.float64).reshape((self.input_size,1))
y = np.array(y,dtype = np.float64).reshape((self.output_size,1))
self.dw = x
self.db = None
x = self.linear_cal(self.weight,self.bias,x)
x1 = self.sigmoid_cal(x)
x = self.loss_cal(x1,y)
print("new loss = ",x)
self.loss_list.append(x)
x2 = 2 * (y-x1)
self.dw = np.dot(self.dw,(x1*x2))
self.db = x1*x2
def backward_pass(self,x,y):
self.weight -= self.dw * self.learning_rate
self.bias -= self.db * self.learning_rate
def eval(self,data_x,data_y):
for x,y in zip(data_x,data_y):
self.forward_pass(x,y)
self.backward_pass(x,y)
@classmethod
def standardization(self,x):
mean = np.mean(x,axis = 0)
sd = np.var(x,axis = 0) ** .5
for i in range(x.shape[1]):
x[:, i] = x[:, i] - mean[i]
x[:, i] = x[:, i] / sd[i]
return x
@classmethod
def linear_cal(self,weight,bias,x):
return np.dot(weight.T,x)+bias
@classmethod
def sigmoid_cal(self,x):
return (1+np.exp(-x)) ** -1
@classmethod
def loss_cal(self,prob,y):
return (y-prob) ** 2
@classmethod
def init_weight(self,size):
w = []
for x in range(size):
w.append(np.random.randn())
return
|
np.array(w)
|
numpy.array
|
""""
Author : <NAME>
Contact : https://adityajain.me
"""
import numpy as np
import matplotlib.pyplot as plt
class SGDClassifier():
"""
SGD classifier model that optimizes using gradient descent.
Note: this implementation is restricted to the binary classification task.
Parameters
----------
learning_rate : float, default 0.01, learning rate used while updating weights
tol : float, default 0.01, stopping criterion
seed : integer, random seed
normalize : boolean, normalize X in fit method
Attributes
----------
coef_ : Estimated coefficients (weights) of the classifier
intercept_ : float, bias term of the classifier
"""
def __init__(self, learning_rate=0.01, tol=0.01, seed=None, normalize=False):
np.random.seed(seed if seed is not None else np.random.randint(100))
self.W = None
self.b = None
self.__lr = learning_rate
self.__tol = tol
self.__length = None
self.__normalize = normalize
self.__m = None
self.__costs = []
self.__iterations = []
def __sigmoid(self,z): return 1/(1+np.exp(-z))
def __initialize_weights_and_bais(self):
self.W =
|
np.random.randn(self.__length)
|
numpy.random.randn
|
import h5py
import numpy as np
|
np.set_printoptions(threshold=np.nan)
|
numpy.set_printoptions
|
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
class crimemap:
def __init__(self, map_dir):
self.shapefile0 = gpd.read_file(map_dir)
self.shapefile0.plot(figsize=(10, 10))
def mapshow(self, x_min=-73.59, x_max=-73.55, y_min=45.49, y_max=45.53, grid_size=0.002):
self.extent = [x_min, x_max, y_min, y_max]
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
self.x_sticks =
|
np.arange(x_min, x_max + grid_size/2, grid_size)
|
numpy.arange
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a function for generating generalized parameter shift rules."""
import functools
import itertools
import warnings
import numpy as np
import pennylane as qml
def _process_shifts(rule, tol=1e-10):
"""Utility function to process gradient rules.
Args:
rule (array): a ``(N, M)`` array corresponding to ``M`` terms
with ``N-1`` simultaneous function shifts. The first row of
the array corresponds to the linear combination coefficients;
subsequent rows correspond to parameter shifts for these coefficients.
tol (float): floating point tolerance used when comparing shifts/coefficients
This utility function accepts coefficients and shift values, and performs the following
processing:
- Removes all small (within absolute tolerance ``tol``) coefficients and shifts
- Removes terms whose coefficients are 0
- Terms with the same shift value are combined into a single term.
- Finally, the terms are sorted according to the absolute value of ``shift``,
ensuring that, if there is a zero-shift term, this is returned first.
"""
# remove all small coefficients and shifts
rule[np.abs(rule) < tol] = 0
# remove columns where the coefficients are 0
rule = rule[:, ~(rule[0] == 0)]
# sort columns according to abs(shift)
rule = rule[:, np.argsort(np.abs(rule)[-1])]
# determine unique shifts
round_decimals = int(-np.log10(tol))
rounded_rule = np.round(rule[-1], round_decimals)
unique_shifts = np.unique(rounded_rule)
if rule.shape[-1] != len(unique_shifts):
# sum columns that have the same shift value
coeffs = [
np.sum(rule[:, np.nonzero(rounded_rule == s)[0]], axis=1)[0] for s in unique_shifts
]
rule = np.stack([np.stack(coeffs), unique_shifts])
# sort columns according to abs(shift)
rule = rule[:, np.argsort(np.abs(rule)[-1])]
return rule
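# Hedged illustration (not part of the original module): _process_shifts on a
# made-up rule with a negligible coefficient and a duplicated shift value.
# >>> rule = np.array([[0.5, -0.5, 1e-12, 0.25, 0.25],
# ...                  [0.0,  1.0,  2.0,   0.5,  0.5]])
# >>> _process_shifts(rule)
# array([[ 0.5,  0.5, -0.5],
#        [ 0. ,  0.5,  1. ]])
# The 1e-12 term is dropped, the two 0.5-shift columns are summed into one,
# and the columns come back sorted by abs(shift), zero-shift term first.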
@functools.lru_cache(maxsize=None)
def eigvals_to_frequencies(eigvals):
r"""Convert an eigenvalue spectrum to frequency values, defined
as the the set of positive, unique differences of the eigenvalues in the spectrum.
Args:
eigvals (tuple[int, float]): eigenvalue spectra
Returns:
tuple[int, float]: frequencies
**Example**
>>> eigvals = (-0.5, 0, 0, 0.5)
>>> eigvals_to_frequencies(eigvals)
(0.5, 1.0)
"""
unique_eigvals = sorted(set(eigvals))
return tuple({j - i for i, j in itertools.combinations(unique_eigvals, 2)})
@functools.lru_cache(maxsize=None)
def frequencies_to_period(frequencies, decimals=5):
r"""Returns the period of a Fourier series as defined
by a set of frequencies.
The period is simply :math:`2\pi/gcd(frequencies)`,
where :math:`\text{gcd}` is the greatest common divisor.
Args:
frequencies (tuple[int, float]): frequency spectra
decimals (int): Number of decimal places to round to
if there are non-integral frequencies.
Returns:
float: period of the Fourier series
**Example**
>>> frequencies = (0.5, 1.0)
>>> frequencies_to_period(frequencies)
12.566370614359172
"""
try:
gcd = np.gcd.reduce(frequencies)
except TypeError:
# np.gcd only supports integer frequencies
exponent = 10 ** decimals
frequencies = np.round(frequencies, decimals) * exponent
gcd = np.gcd.reduce(np.int64(frequencies)) / exponent
return 2 * np.pi / gcd
@functools.lru_cache(maxsize=None)
def _get_shift_rule(frequencies, shifts=None):
n_freqs = len(frequencies)
frequencies = qml.math.sort(qml.math.stack(frequencies))
freq_min = frequencies[0]
if len(set(frequencies)) != n_freqs or freq_min <= 0:
raise ValueError(
f"Expected frequencies to be a list of unique positive values, instead got {frequencies}."
)
mu = np.arange(1, n_freqs + 1)
if shifts is None: # assume equidistant shifts
shifts = (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min)
equ_shifts = True
else:
shifts = qml.math.sort(qml.math.stack(shifts))
if len(shifts) != n_freqs:
raise ValueError(
f"Expected number of shifts to equal the number of frequencies ({n_freqs}), instead got {shifts}."
)
if len(set(shifts)) != n_freqs:
raise ValueError(f"Shift values must be unique, instead got {shifts}")
equ_shifts = all(np.isclose(shifts, (2 * mu - 1) * np.pi / (2 * n_freqs * freq_min)))
if len(set(np.round(np.diff(frequencies), 10))) <= 1 and equ_shifts: # equidistant case
coeffs = (
freq_min
* (-1) ** (mu - 1)
/ (4 * n_freqs * np.sin(np.pi * (2 * mu - 1) / (4 * n_freqs)) ** 2)
)
else: # non-equidistant case
sin_matr = -4 * np.sin(np.outer(shifts, frequencies))
det_sin_matr =
|
np.linalg.det(sin_matr)
|
numpy.linalg.det
|
import numpy as np
from astropy.table import Table
from astropy.io import fits
import astropy.units as u
from scipy import interpolate
from scipy.fft import fft,ifft,fftshift,fftfreq
from specutils import Spectrum1D
from specutils.fitting import fit_continuum
from specutils.manipulation import SplineInterpolatedResampler
import warnings
import glob
import pickle
from .data import data_loader
from .grid import grid_loader
warnings.filterwarnings('ignore')
class PyXCSAO:
def __init__(self,st_lambda=None,end_lambda=None,ncols=8192,low_bin=0,top_low=20,top_nrun=125,nrun=255,bell_window=0.05,minvel=-500,maxvel=500,from_spectrum=None,spectrum_num=0,data_class='boss'):
self.ncols=ncols
self.bell_window=bell_window
self.spline = SplineInterpolatedResampler()
self.taper=self.taper_spectrum()
self.taperf_med=self.taper_FFT(low_bin,top_low,top_nrun,nrun)
self.taperf_low=self.taper_FFT(20,50,top_nrun,nrun)
self.taperf=self.taperf_med
self.minvel=minvel
self.maxvel=maxvel
if st_lambda is not None:
self.st_lambda=st_lambda
else:
self.st_lambda=None
if end_lambda is not None:
self.end_lambda=end_lambda
else:
self.end_lambda=None
if (self.st_lambda is not None) & (self.end_lambda is not None):
self.la,self.lag=self.set_lambda()
if (from_spectrum is not None):
self.add_spectrum(from_spectrum,i=spectrum_num,data_class=data_class)
if (self.st_lambda is None) | (self.end_lambda is None):
raise RuntimeError('Please specify st_lambda & end_lambda, or provide a data spectrum to automatically determine the range.')
def add_spectrum(self,name,i=0,laname=None,data_class='boss',meta=None,emission=False,clip=True):
flux,la,meta=data_loader(name,i=i,data_class=data_class,laname=laname,meta=meta)
if self.st_lambda is None:
self.st_lambda=np.ceil(min(la))
if self.end_lambda is not None:
self.la,self.lag=self.set_lambda()
if self.end_lambda is None:
self.end_lambda=np.floor(max(la))
self.la,self.lag=self.set_lambda()
self.data=self.format_spectrum(flux,la,clip=clip,emission=emission)
self.meta=meta
self.best_r=None
self.best_grid_index=None
self.best_ccf=None
self.best_rv=None
self.grid_r=None
self.best_teff=None
self.best_logg=None
self.best_feh=None
self.best_alpha=None
return
def add_grid(self,grid_pickle=None,grid_path=None,grid_class=None,laname=None,silent=False):
if grid_path is None and grid_pickle is not None:
try:
self.grid,self.grid_teff,self.grid_logg,self.grid_feh,self.grid_alpha,grid_la,self.grid_class=pickle.load( open( grid_pickle, "rb" ) )
except ValueError:
print("Cannot load this grid, recompute and make sure it has been properly formated.")
if (np.abs(grid_la[0].value-self.st_lambda)>0.1) | (np.abs(grid_la[-1].value-self.end_lambda)>0.1) | (len(grid_la)!=self.ncols):
raise RuntimeError('The grid has an incompatible wavelength range of [{st},{ed},{bn}] with the data. Specify grid_path and recompute again.'.format(st=str(grid_la[0]),ed=str(grid_la[-1]),bn=str(len(grid_la))))
elif grid_pickle is None:
raise RuntimeError('Please provide a path to a pickle file to either load or save the grid/templates.')
else:
if grid_class is None:
raise RuntimeError('Please provide the grid type or an appropriate data loader')
self.grid,self.grid_teff,self.grid_logg,self.grid_feh,self.grid_alpha,self.grid_class=self.add_new_grid(grid_pickle,grid_path,grid_class,laname=laname,silent=silent)
self.grid_teff_num=len(np.unique(self.grid_teff))
self.grid_teff_min=np.min(self.grid_teff)
self.grid_teff_max=np.max(self.grid_teff)
self.grid_logg_num=len(np.unique(self.grid_logg))
self.grid_logg_min=np.min(self.grid_logg)
self.grid_logg_max=np.max(self.grid_logg)
self.grid_feh_num=len(np.unique(self.grid_feh))
self.grid_feh_min=np.min(self.grid_feh)
self.grid_feh_max=np.max(self.grid_feh)
self.grid_alpha_num=len(np.unique(self.grid_alpha))
self.grid_alpha_min=np.min(self.grid_alpha)
self.grid_alpha_max=np.max(self.grid_alpha)
return
def add_new_grid(self,grid_pickle,grid_path,grid_class,laname=None,silent=False):
path= glob.glob(grid_path)
if len(path)>1:
a=np.argsort(path)
path=np.array(path)[a]
else:
path=np.array(path)
teffs=[]
loggs=[]
fehs=[]
alphas=[]
temps=[]
for i in path:
if not silent: print(i)
#try:
temp,la,teff,logg,feh,alpha=grid_loader(i,grid_class,laname=laname)
temps.append(self.format_spectrum(temp,la))
teffs.append(teff)
loggs.append(logg)
fehs.append(feh)
alphas.append(alpha)
#except:
# pass
pickle.dump( [np.array(temps),np.array(teffs),np.array(loggs),np.array(fehs),np.array(alphas),self.la,grid_class], open(grid_pickle, "wb" ) )
return np.array(temps),np.array(teffs),np.array(loggs),np.array(fehs),np.array(alphas),grid_class
def format_spectrum(self,flux,la,clip=True,emission=False):
mx=np.nanmax(flux)
if mx==0.: mx=1.
spec = Spectrum1D(spectral_axis=la*u.AA, flux=np.nan_to_num(flux)/mx*u.Jy)
#rebin
if (min(la)>self.st_lambda) | (max(la)<self.end_lambda):
raise RuntimeError('st_lambda {st} or end_lambda {ed} are outside of the input spectrum range of {mn} to {mx}'.format(st=str(self.st_lambda),ed=str(self.end_lambda),mn=str(min(la)),mx=str(max(la))))
spec=self.spline(spec,self.la)
#continuum correct
spec_fit = fit_continuum(spec)
spec_cont = spec_fit(self.la)
spec=spec/spec_cont
spec=spec.flux.value
if clip:
a=np.where((spec>2) | (spec<-0.5))[0]
spec[a]=1
if emission:
w=15
a=np.where(((self.la.value>6562.79-w) & (self.la.value<6562.79+w)) | ((self.la.value>4861.35-w) & (self.la.value<4861.35+w)) | ((self.la.value>4340.472-w) & (self.la.value<4340.472+w)) | ((self.la.value>4101.734-w) & (self.la.value<4101.734+w)))[0]
spec[a]=1
return spec
def small_spectrum(self):
a=np.where((self.la>6400*u.AA) & (self.la<6800*u.AA))[0]
if len(a)>0:
spec = Spectrum1D(spectral_axis=self.la[a], flux=self.data[a]*u.Jy)
spec_fit = fit_continuum(spec)
spec_cont = spec_fit(self.la[a])
spec=spec/spec_cont
spec = Spectrum1D(spectral_axis=self.la[a], flux=spec.flux.value*u.Jy)
return spec
else:
return None
#taper function to bring ends of the rebinned spectra & template to zero within bell_window fraction
def taper_spectrum(self):
taper=np.ones(self.ncols)
off=int(np.around(self.ncols*self.bell_window))
taper[:off]=taper[:off]*np.sin(np.arange(off)*np.pi/2/off)
taper[-off:]=taper[-off:]*np.cos(np.arange(off)*np.pi/2/off)
return taper
#taper function for the cross correlation in FFT space
def taper_FFT(self,low_bin=0,top_low=20,top_nrun=125,nrun=255):
k=fftfreq(self.ncols)*self.ncols/2/np.pi
taperf=np.ones(self.ncols)
a=np.where((np.abs(k)>=nrun) | (np.abs(k)<=low_bin))[0]
taperf[a]=0
a=np.where((np.abs(k)>low_bin) & (np.abs(k)<=top_low))[0]
taperf[a]=np.sin((np.abs(k[a])-low_bin)*np.pi/2/(top_low-low_bin))
a=np.where((np.abs(k)>=top_nrun) & (np.abs(k)<nrun))[0]
taperf[a]=np.cos((np.abs(k[a])-top_nrun)*np.pi/2/(nrun-top_nrun))
return taperf
#sets up the rebinned loglam & creates lag in km/s
def set_lambda(self):
i=int(self.ncols/2)
new_la=10**(np.linspace(np.log10(self.st_lambda),np.log10(self.end_lambda),self.ncols))*u.AA
lagmult=(new_la[i+1]-new_la[i-1])/new_la[i]/2*299792.458
lag=np.arange(-self.ncols/2,self.ncols/2)*lagmult
return new_la,lag
#calculates r value of cross correlation
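# Concretely (following the Tonry & Davis 1979 r statistic): sigmaA below is
# the RMS of the antisymmetric part of the CCF about its peak, and the
# returned value is peak_height / (sqrt(2) * sigmaA); larger r means a more
# significant correlation peak.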
def calcR(self,x,pm=None):
if pm is None:
pm=int(self.ncols/2)
a=np.where((self.lag>self.minvel) & (self.lag<self.maxvel))[0]
peak_loc=a[np.argmax(x[a])]
if peak_loc<pm: pm=peak_loc
if peak_loc>len(x)-pm: pm=len(x)-peak_loc
if peak_loc==0:
return -1000
endpoint=peak_loc+pm
startpoint=peak_loc-pm
mirror=np.flip(x[peak_loc:endpoint])
sigmaA=np.sqrt(1./2/len(mirror)*np.sum((x[startpoint:peak_loc]-mirror)**2))
return x[peak_loc]/sigmaA/np.sqrt(2)
def run_XCSAO_optimized(self,run_subgrid=True,m=1.5,resample_teff=None,resample_logg=None,resample_feh=None,resample_alpha=None,optimized_for_boss=False):
self.run_XCSAO(run_subgrid=False,loggrange=[4.5,4.5],fehrange=[0,0],alpharange=[0,0])
goodteff=self.get_par(self.best_teff_sparse)
teffrange=[goodteff[0]-goodteff[1]*m,goodteff[0]+goodteff[1]*m]
if optimized_for_boss==False:
teffrangemin=np.where(np.array(teffrange)>=3500)[0]
if len(teffrangemin)==0:
teffrange=[goodteff[0]-goodteff[1]*m,3500]
self.run_XCSAO(run_subgrid=False,teffrange=teffrange,fehrange=[0,0],alpharange=[0,0],new=False)
goodlogg=self.get_par(self.best_logg_sparse)
loggrange=[goodlogg[0]-goodlogg[1]*m,goodlogg[0]+goodlogg[1]*m]
self.run_XCSAO(run_subgrid=False,teffrange=teffrange,loggrange=loggrange,alpharange=[0,0],new=False)
goodfeh=self.get_par(self.best_feh_sparse)
fehrange=[goodfeh[0]-goodfeh[1]*m,goodfeh[0]+goodfeh[1]*m]
if optimized_for_boss==True:
if self.best_teff<3500:
self.taperf=self.taperf_low
else:
self.taperf=self.taperf_med
x= self.run_XCSAO(run_subgrid=run_subgrid,teffrange=teffrange,loggrange=loggrange,fehrange=fehrange,new=False,resample_teff=resample_teff,resample_logg=resample_logg,resample_feh=resample_feh,resample_alpha=resample_alpha,min_teff_for_rv=3500)
self.taperf=self.taperf_med
return x
else:
return self.run_XCSAO(run_subgrid=run_subgrid,teffrange=teffrange,loggrange=loggrange,fehrange=fehrange,new=False,resample_teff=resample_teff,resample_logg=resample_logg,resample_feh=resample_feh,resample_alpha=resample_alpha)
def run_XCSAO(self,run_subgrid=True,teffrange=[],loggrange=[],fehrange=[],alpharange=[],new=True,resample_teff=None,resample_logg=None,resample_feh=None,resample_alpha=None,min_teff_for_rv=None):
if self.data is None:
raise RuntimeError('Please add a data spectrum.')
if self.grid is None:
raise RuntimeError('Please add a template grid/spectrum.')
if len(teffrange)==0:
teffrange=[self.grid_teff_min,self.grid_teff_max]
if len(loggrange)==0:
loggrange=[self.grid_logg_min,self.grid_logg_max]
if len(fehrange)==0:
fehrange=[self.grid_feh_min,self.grid_feh_max]
if len(alpharange)==0:
alpharange=[self.grid_alpha_min,self.grid_alpha_max]
if new:
self.grid_r=np.zeros(len(self.grid))
ind=np.where((self.grid_r ==0) & (self.grid_teff>=teffrange[0]) & (self.grid_teff<=teffrange[1]) & (self.grid_logg>=loggrange[0]) & (self.grid_logg<=loggrange[1]) & (self.grid_feh>=fehrange[0]) & (self.grid_feh<=fehrange[1]) & (self.grid_alpha>=alpharange[0]) & (self.grid_alpha<=alpharange[1]))[0]
self.grid_r[ind]=self.get_r_for_grid(self.grid[ind])
try:
a=np.where(self.grid_r==max(self.grid_r))[0][0]
except:
a=0
self.best_grid_index=a
if (self.grid_teff_num>2) & (resample_teff is not None):
self.best_teff=self.get_par(self.best_teff_subgrid,resample_teff)
elif (self.grid_teff_num>2) & (run_subgrid):
self.best_teff=self.get_par(self.best_teff_sparse)
else:
self.best_teff=self.grid_teff[self.best_grid_index]
if (self.grid_logg_num>2) & (resample_logg is not None):
self.best_logg=self.get_par(self.best_logg_subgrid,resample_logg)
elif (self.grid_logg_num>2) & (run_subgrid):
self.best_logg=self.get_par(self.best_logg_sparse)
else:
self.best_logg=self.grid_logg[self.best_grid_index]
if (self.grid_feh_num>2) & (resample_feh is not None):
self.best_feh=self.get_par(self.best_feh_subgrid,resample_feh)
elif (self.grid_feh_num>2) & (run_subgrid):
self.best_feh=self.get_par(self.best_feh_sparse)
else:
self.best_feh=self.grid_feh[self.best_grid_index]
if (self.grid_alpha_num>2) & (resample_alpha is not None):
self.best_alpha=self.get_par(self.best_alpha_subgrid,resample_alpha)
elif (self.grid_alpha_num>2) & (run_subgrid):
self.best_alpha=self.get_par(self.best_alpha_sparse)
else:
self.best_alpha=self.grid_alpha[self.best_grid_index]
if not min_teff_for_rv is None:
x=np.where(self.grid_teff>=min_teff_for_rv)[0]
try:
a=x[np.where(self.grid_r[x]==max(self.grid_r[x]))[0][0]]
except:
a=0
self.best_r=self.grid_r[a]
self.best_ccf=self.getCCF(self.data,self.grid[a])
if np.isfinite(self.best_r):
self.get_rv()
else:
self.best_rv=[np.nan,np.nan]
return self.best_template()
def compare_sparse(self):
print(self.get_par(self.best_teff_sparse))
print(self.get_par(self.best_logg_sparse))
print(self.get_par(self.best_feh_sparse))
return
def get_r_for_grid(self,grid):
rr=[]
for g in grid:
out=self.getCCF(self.data,g)
rr.append(self.calcR(out))
return np.array(rr)
def get_rv(self):
a=np.where((self.lag>self.minvel) & (self.lag<self.maxvel))[0]
peak_loc=a[np.argmax(self.best_ccf[a])]
left,right=peak_loc,peak_loc
while self.best_ccf[peak_loc]<self.best_ccf[left]*2:
left=left-1
while self.best_ccf[peak_loc]<self.best_ccf[right]*2:
right=right+1
z=np.polyfit(self.lag[left:right],self.best_ccf[left:right], 2)
rv=(-z[1]/2/z[0])
rve=3*(self.lag[right]-self.lag[left])/8/(1+self.best_r)
self.best_rv=[rv,rve.value]
return self.best_rv
def get_par(self,func,subscale=None):
if subscale is None:
par,rr=func()
else:
par,rr=func(subscale=subscale)
weight=np.exp(rr)
weight=10**(rr)
average=np.average(par,weights=weight)
variance = np.average((par-average)**2, weights=weight)
return average,
|
np.sqrt(variance)
|
numpy.sqrt
|
import numpy as np
__author__ = 'hikaru'
import cv2
import urllib
hsv_min = np.array([0, 0, 127], np.uint8)
hsv_max = np.array([255, 255, 255], np.uint8)
def HSVChange(x):
global hsv_min, hsv_max
hn = cv2.getTrackbarPos('Hn', 'GUI')
sn = cv2.getTrackbarPos('Sn', 'GUI')
vn = cv2.getTrackbarPos('Vn', 'GUI')
hx = cv2.getTrackbarPos('Hx', 'GUI')
sx = cv2.getTrackbarPos('Sx', 'GUI')
vx = cv2.getTrackbarPos('Vx', 'GUI')
hsv_min = np.array([hn, sn, vn], np.uint8)
hsv_max = np.array([hx, sx, vx], np.uint8)
def initValues():
global hsv_min, hsv_max
# Create a black image, a window
img = np.zeros((128, 256, 3), np.uint8)
cv2.namedWindow('GUI')
# create trackbars for color change
cv2.createTrackbar('Hn', 'GUI', 0, 255, HSVChange)
cv2.createTrackbar('Sn', 'GUI', 0, 255, HSVChange)
cv2.createTrackbar('Vn', 'GUI', 0, 255, HSVChange)
cv2.createTrackbar('Hx', 'GUI', 255, 255, HSVChange)
cv2.createTrackbar('Sx', 'GUI', 255, 255, HSVChange)
cv2.createTrackbar('Vx', 'GUI', 255, 255, HSVChange)
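# usb(): grab frames from the default camera, threshold them in HSV with the
# trackbar values, clean the mask with a morphological opening, run Canny on
# the mask and overlay the detected edges on the original frame; the overlay
# and the raw threshold mask are stacked vertically in the "vis" window.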
def usb():
global hsv_min, hsv_max
initValues()
kernel = np.ones((9, 9), np.uint8)
alpha = 0.5
beta = 1.0 - alpha
gamma = 0.5
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 360)
while True:
ret, frame = cap.read()
if ret:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# ret, thresh = cv2.threshold(hsv, (h, s, v), 255, cv2.THRESH_BINARY)
thresh = cv2.inRange(hsv, hsv_min, hsv_max)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
canny = cv2.Canny(opening, 100, 200)
canny3 = cv2.cvtColor(canny, cv2.COLOR_GRAY2RGB)
mask_inv = cv2.bitwise_not(canny)
final = cv2.bitwise_and(canny3, canny3, mask=canny)
dst = cv2.add(frame, final)
# cv2.imshow("dst", dst)
# ret, thresh2 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
# canny2 = cv2.Canny(thresh2, 100, 200)
# final2 = cv2.bitwise_and(thresh2, thresh2, mask=canny2)
# drawing = cv2.add(gray, final2)
# drawing = cv2.cvtColor(drawing, cv2.COLOR_GRAY2RGB)
# cv2.imshow("gray", drawing)
# cv2.imshow("frame", frame)
# cv2.imshow("hsv", hsv)
# cv2.imshow("thresh", thresh)
# cv2.imshow("opening", opening)
# cv2.imshow("canny", canny)
# mask = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
thresh3 = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
vis = np.concatenate((dst, thresh3), axis=0)
cv2.imshow("vis", vis)
if cv2.waitKey(1) & 0xff == 27:
break
cv2.destroyAllWindows()
def ipcam(url):
global hsv_min, hsv_max
initValues()
stream = urllib.urlopen(url)
bytes = ''
# cap = cv2.VideoCapture(0)
# cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
# cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 360)
'''Variables'''
kernel =
|
np.ones((9, 9), np.uint8)
|
numpy.ones
|
import numpy as np
from pymatgen.core import Lattice
from pymatgen.electronic_structure.bandstructure import BandStructure, Kpoint
from pymatgen.electronic_structure.core import Spin
def read_wavecar(filename):
"""
Information is extracted from the given WAVECAR
Args:
filename (str): path to input file
"""
# c = 0.26246582250210965422
# 2m/hbar^2 in agreement with VASP
_C = 0.262465832
with open(filename, 'rb') as f:
# read the header information
recl, spin, rtag = np.fromfile(
f, dtype=np.float64, count=3).astype(np.int)
recl8 = int(recl / 8)
# check to make sure we have precision correct
if rtag != 45200 and rtag != 45210:
raise ValueError('invalid rtag of {}'.format(rtag))
# padding
np.fromfile(f, dtype=np.float64, count=(recl8 - 3))
# extract kpoint, bands, energy, and lattice information
nk, nb, encut = np.fromfile(
f, dtype=np.float64, count=3).astype(np.int)
a = Lattice(np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3)))
efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
# calculate reciprocal lattice
b = a.reciprocal_lattice._matrix
# calculate maximum number of b vectors in each direction
bmag = np.linalg.norm(b, axis=1)
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = (np.dot(b[2, :], np.cross(b[0, :], b[1, :])) /
(bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :]))))
nbmaxA = np.sqrt(encut * _C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = (np.dot(b[1, :], np.cross(b[0, :], b[2, :])) /
(bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :]))))
nbmaxB = np.sqrt(encut * _C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = (np.dot(b[0, :], np.cross(b[1, :], b[2, :])) /
(bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :]))))
nbmaxC = np.sqrt(encut * _C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
_nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0).astype(np.int)
# padding
np.fromfile(f, dtype=np.float64, count=recl8 - 13)
# reading records
# np.set_printoptions(precision=7, suppress=True)
Gpoints = [None for _ in range(nk)]
kpoints = []
band_energy = {Spin.up: [[None for i in range(nk)] for j in range(nb)]}
if spin == 2:
coeffs = [[[None for i in range(nb)]
for j in range(nk)] for _ in range(spin)]
band_energy[Spin.down] = [
[None for i in range(nk)] for j in range(nb)]
else:
coeffs = [[None for i in range(nb)] for j in range(nk)]
for ispin in range(spin):
for ink in range(nk):
# information for this kpoint
nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
kpoint = np.fromfile(f, dtype=np.float64, count=3)
if ispin == 0:
kpoints.append(kpoint)
else:
assert np.allclose(kpoints[ink], kpoint)
# energy and occupation information
enocc = np.fromfile(f, dtype=np.float64,
count=3 * nb).reshape((nb, 3))
if ispin == 1:
for iband, eig in enumerate(enocc):
band_energy[Spin.down][iband][ink] = eig[0]
else:
for iband, eig in enumerate(enocc):
band_energy[Spin.up][iband][ink] = eig[0]
# padding
np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * nb))
# generate G integers
gpoints = []
for i in range(2 * _nbmax[2] + 1):
i3 = i - 2 * _nbmax[2] - 1 if i > _nbmax[2] else i
for j in range(2 * _nbmax[1] + 1):
j2 = j - 2 * _nbmax[1] - 1 if j > _nbmax[1] else j
for k in range(2 * _nbmax[0] + 1):
k1 = k - 2 * _nbmax[0] - 1 if k > _nbmax[0] else k
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, b))
E = g ** 2 / _C
if E < encut:
gpoints.append(G)
Gpoints[ink] = np.array(gpoints, dtype=np.float64)
Gflag = False
if (len(Gpoints[ink]) + 1)/2 == nplane:
Gflag = True
gptemp = []
for i in range(2 * _nbmax[2] + 1):
i3 = i - 2 * _nbmax[2] - 1 if i > _nbmax[2] else i
for j in range(2 * _nbmax[1] + 1):
j2 = j - 2 * _nbmax[1] - 1 if j > _nbmax[1] else j
for k in range(_nbmax[0] + 1):
if k == 0 and j2 < 0:
pass
elif k == 0 and j2 == 0 and i3 < 0:
pass
else:
G = np.array([k, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, b))
E = g ** 2 / _C
if E < encut:
gptemp.append(G)
Gptemp = np.array(gptemp, dtype=np.float64)
elif len(Gpoints[ink]) != nplane:
print(len(Gpoints[ink]), nplane)
raise ValueError(
'failed to generate the correct number of G points')
# extract coefficients
for inb in range(nb):
if rtag == 45200:
data = np.fromfile(f, dtype=np.complex64, count=nplane)
buf = np.fromfile(f, dtype=np.float64,
count=recl8 - nplane)
elif rtag == 45210:
# this should handle double precision coefficients
# but I don't have a WAVECAR to test it with
data = np.fromfile(
f, dtype=np.complex128, count=nplane)
np.fromfile(f, dtype=np.float64,
count=recl8 - 2 * nplane)
if Gflag:
"""
This is confusing, but bear with me: the occupations for most bands are
twice as large, so a factor of sqrt(1/2) is needed. This does not seem to
hold for the first band, which appears to carry a slightly different
factor close to the one used here.
"""
data = data * \
|
np.sqrt(0.5)
|
numpy.sqrt
|
# from gym import wrappers
import make_env
import numpy as np
import random
from ReplayMemory import ReplayMemory
from ExplorationNoise import OrnsteinUhlenbeckActionNoise as OUNoise
from actorcritic_dis import ActorNetwork,CriticNetwork
import argparse
import os
import multiprocessing as mp
import tensorflow as tf
from mpi4py import MPI
import time
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def prioritized_batch(replay, critics, m_size, n_size):
"""
1. sample an m_size batch from replay memory
2. calculate the TD loss for each transition
3. rank the transitions by TD loss
4. select the n_size transitions with the highest TD loss
5. use the selected transitions to train the critic and actor
"""
s_batch,a_batch,r_batch,d_batch,s2_batch = replay.miniBatch(m_size)
def build_summaries(n):
"""
Tensorboard summary for losses or rewards
"""
losses = [tf.Variable(0.) for i in range(n)]
for i in range(n):
tf.summary.scalar("Reward_Agent" + str(i), losses[i])
summary_vars = losses
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
def saveWeights(actor, i, pathToSave):
"""
Save model weights
"""
actor.mainModel.save_weights(pathToSave + str(i) + "_weights.h5")
def showReward(episode_reward, n, ep, start):
reward_string = ""
for re in episode_reward:
reward_string += " {:5.2f} ".format(re)
print ('|Episode: {:4d} | Time: {:2d} | Rewards: {:s}'.format(ep, int(time.time() - start), reward_string))
def distributed_train(sess, env, args, actors, critics, noise, ave_n):
"""
1. replay memory
- for each timestep
2. async batch data
3.
"""
summary_ops,summary_vars = build_summaries(env.n)
writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)
replayMemory = ReplayMemory(int(args['buffer_size']),int(args['random_seed']))
start_time = 0.0
end_time = 0.0
for ep in range(int(args['max_episodes'])):
# collecting reward
s = env.reset()
episode_reward = np.zeros((env.n,))
start = time.time()
for step in range(int(args['max_episode_len'])):
action_dims_done = 0
a = []
for i in range(env.n):
actor = actors[i]
state_input = np.reshape(s[i],(-1, actor.state_dim))
a.append(actor.act(state_input, noise[i]()).reshape(actor.action_dim,))
s2, r, done, _ = env.step(a) # a is a list with each element being an array
episode_reward += r
if replayMemory.size() > int(args["minibatch_size"]):
# MADDPG Adversary Agent
for i in range(ave_n):
actor = actors[i]
critic = critics[i]
if replayMemory.size() > int(args['m_size']):
s_batch, a_batch, r_batch, d_batch, s2_batch = replayMemory.miniBatch(int(args['m_size']))
a = []
for j in range(ave_n):
state_batch_j = np.asarray([x for x in s_batch[:,j]]) #batch processing will be much more efficient even though reshaping will have to be done
a.append(actors[j].predict_target(state_batch_j))
a_temp = np.transpose(np.asarray(a),(1,0,2))
a_for_critic = np.asarray([x.flatten() for x in a_temp])
s2_batch_i = np.asarray([x for x in s2_batch[:,i]])
targetQ = critic.predict_target(s2_batch_i,a_for_critic)
yi = []
for k in range(int(args['m_size'])):
if d_batch[:,i][k]:
yi.append(r_batch[:,i][k])
else:
yi.append(r_batch[:,i][k] + critic.gamma*targetQ[k])
# a2 = actor.predict_target(s_batch)
# Q_target = critic.predict_target(s2_batch, a2)
# y = r + gamma * Q_target
# TD loss = yi - critic.predict(s_batch, a_batch)
s_batch_i = np.asarray([x for x in s_batch[:,i]])
a_batch_data = np.asarray([x.flatten() for x in a_batch[:, 0: ave_n, :]])
target_q = np.asarray(yi)
#############################################
## prioritized_batch
#############################################
# split the m_size batch into contiguous n_size chunks and score each chunk by its critic loss
losses = []
# clip
index = 0
# number of losses
loss_num = int(int(args['m_size']) / int(args['n_size']))
for i in range(loss_num):
loss = critic.get_loss(s_batch_i[index:index+int(args["n_size"])],
a_batch_data[index:index+int(args["n_size"])],
target_q[index:index+int(args["n_size"])])
losses.append(loss)
index += int(args["n_size"])
# which has max loss
sorted_index = np.argsort(losses).tolist()
max_index = sorted_index[-1]
# clip index
head = max_index * int(args["n_size"])
tail = head + int(args["n_size"])
# clipped batch data with higher losses
prioritized_a_batch = a_batch_data[head: tail]
prioritized_s_batch = s_batch_i[head: tail]
prioritized_target_q = target_q[head: tail]
#############################################
## prioritized_batch
#############################################
# critic train
critic.train(prioritized_s_batch, prioritized_a_batch, prioritized_target_q)
actions_pred = []
# for j in range(ave_n):
for j in range(ave_n):
state_batch_j = np.asarray([x for x in s2_batch[:,j]])
actions_pred.append(actors[j].predict(state_batch_j[head: tail]))
a_temp = np.transpose(np.asarray(actions_pred),(1,0,2))
a_for_critic_pred = np.asarray([x.flatten() for x in a_temp])
grads = critic.action_gradients(prioritized_s_batch, a_for_critic_pred)[:,action_dims_done:action_dims_done + actor.action_dim]
# actor train
actor.train(prioritized_s_batch, grads)
action_dims_done = action_dims_done + actor.action_dim
# Only DDPG agent
for i in range(ave_n, env.n):
actor = actors[i]
critic = critics[i]
if replayMemory.size() > int(args["minibatch_size"]):
s_batch, a_batch, r_batch, d_batch, s2_batch = replayMemory.miniBatch(int(args["minibatch_size"]))
s_batch_i = np.asarray([x for x in s_batch[:,i]])
action = np.asarray(actor.predict_target(s_batch_i))
action_for_critic = np.asarray([x.flatten() for x in action])
s2_batch_i = np.asarray([x for x in s2_batch[:, i]])
targetQ = critic.predict_target(s2_batch_i, action_for_critic)
y_i = []
for k in range(int(args['minibatch_size'])):
if d_batch[:, i][k]:
y_i.append(r_batch[:, i][k])
else:
y_i.append(r_batch[:, i][k] + critic.gamma * targetQ[k])
s_batch_i= np.asarray([x for x in s_batch[:, i]])
critic.train(s_batch_i, np.asarray([x.flatten() for x in a_batch[:, i]]), np.asarray(y_i))
action_for_critic_pred = actor.predict(s2_batch_i)
gradients = critic.action_gradients(s_batch_i, action_for_critic_pred)[:, :]
actor.train(s_batch_i, gradients)
for i in range(0, env.n):
actor = actors[i]
critic = critics[i]
actor.update_target()
critic.update_target()
if step == int(args["max_episode_len"])-1 or np.all(done):
#############################################
## Record reward data into tensorboard
#############################################
ave_reward = 0.0
good_reward = 0.0
for i in range(env.n):
if i < ave_n:
ave_reward += episode_reward[i]
else:
good_reward += episode_reward[i]
#summary_str = sess.run(summary_ops, feed_dict = {summary_vars[0]: episode_reward, summary_vars[1]: episode_av_max_q/float(stp)})
summary_str = sess.run(summary_ops, feed_dict = {summary_vars[0]: ave_reward, summary_vars[1]: good_reward})
# summary_str = sess.run(summary_ops, feed_dict = {summary_vars[i]: losses[i] for i in range(len(losses))})
writer.add_summary(summary_str, ep)
writer.flush()
showReward(episode_reward, env.n, ep, start)
break
if ep % 50 == 0 and ep != 0:
print("Starting saving model weights every 50 episodes")
for i in range(env.n):
saveWeights(actors[i], i, args["modelFolder"])
print("Model weights saved")
if ep % 100 == 0 and ep != 0:
directory = args["modelFolder"] + "ep" + str(ep) + "/"
if not os.path.exists(directory):
os.makedirs(directory)
print("Starting saving model weights to folder every 100 episodes")
for i in range(env.n):
saveWeights(actors[i], i, directory)
print("Model weights saved to folder")
# receive batch data from workers
batch_data = [comm.recv(source=i, tag=i) for i in range(1, size)]
for batch in batch_data:
for item in batch:
(s, a, r, d, s2) = item
replayMemory.add(s, a, r, d, s2)
# send weights to workers
actor_weights = [actor.mainModel.get_weights() for actor in actors]
for i in range(1, size):
comm.send(actor_weights, dest=i, tag=i)
def distributed_train_every_step(sess, env, args, actors, critics, noise, ave_n):
"""
1. replay memory
- for each timestep
2. async batch data
3.
"""
summary_ops,summary_vars = build_summaries(env.n)
writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)
replayMemory_predator = ReplayMemory(int(args['buffer_size']),int(args['random_seed']))
replayMemory_prey = ReplayMemory(int(args['buffer_size']),int(args['random_seed']))
# split_dis = int(int(args['max_episode_len']) / size)
# batch_index_count = split_dis
start_time = 0.0
end_time = 0.0
for ep in range(int(args['max_episodes'])):
# collecting reward
#batch_index_count = split_dis
s = env.reset()
episode_reward = np.zeros((env.n,))
# weights_data = []
start = time.time()
for step in range(int(args['max_episode_len'])):
action_dims_done = 0
a = []
for i in range(env.n):
actor = actors[i]
state_input =
|
np.reshape(s[i],(-1, actor.state_dim))
|
numpy.reshape
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for telluride_decoding.infer_decoder."""
import io
import os
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import matplotlib
# pylint: disable=g-import-not-at-top
matplotlib.use('Agg') # Needed for plotting to a file, before the next import
import matplotlib.pyplot as plt
import mock
import numpy as np
from telluride_decoding import brain_data
from telluride_decoding import infer_decoder
from telluride_decoding import ingest
import tensorflow.compat.v2 as tf
flags.DEFINE_string(
'tmp_dir', os.environ.get('TMPDIR') or '/tmp',
'Temporary directory location.')
FLAGS = flags.FLAGS
@tf.function
def _linear_model(input_dict):
"""The simplest possible linear model for testing.
Args:
input_dict: A TF dataset, only one field needed (input_1) containing the
EEG data from which we predict intensity.
Returns:
The predicted intensity
"""
eeg = input_dict['input_1']
return _eeg_to_intensity(eeg)
@tf.function
def _cca_model(input_dict, cca_dims=2):
"""The simplest possible CCA model for testing.
Args:
input_dict: A TF dataset with two fields that are rotated via CCA.
cca_dims: How many CCA dimensions to compute.
Returns:
A concatenated pair of arrays with the best correlation.
"""
return tf.concat((input_dict['input_1'][:, 0:cca_dims], # EEG data
input_dict['input_2'][:, 0:cca_dims]), # Intensity data
axis=1)
def _eeg_to_intensity(eeg):
"""Intensity is uniform random between [0, 1], eeg is [-1, 1]."""
return eeg/2.0 + 0.5
def _intensity_to_eeg(intensity):
"""Intensity is uniform random between [0, 1], eeg is [-1, 1]."""
return (intensity - 0.5)*2.0
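# Added note (illustration, not from the original file): the two maps above are inverses of
# each other, e.g. _intensity_to_eeg(0.25) == -0.5 and _eeg_to_intensity(-0.5) == 0.25.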
_NUM_TEST_POINTS = 1000 # Arbitrary for testing.
class InferDecoderTest(parameterized.TestCase):
def setUp(self):
"""Stores and prepares tf.dataset tests with three kinds of test data.
These data are:
Plain training data,
More training data, but with input and output mixed up for null test,
Test data which switches attention periodically.
"""
super(InferDecoderTest, self).setUp()
params = self.get_default_params()
attended_speaker = 'intensity1'
self._train_filename = self.create_sample_data_file(with_noise=False,
with_switches=False)
self._train_data = infer_decoder.create_dataset(self._train_filename,
params,
attended_speaker)
self._mixed_data = infer_decoder.create_dataset(self._train_filename,
params,
attended_speaker,
mixup_batch=True)
self._test_filename = self.create_sample_data_file(with_noise=False,
with_switches=True)
self._test_data = infer_decoder.create_dataset(self._test_filename, params,
attended_speaker)
def create_sample_data_file(self, test_name='test',
num_dimensions=4,
with_switches=False, with_noise=False):
"""Create a TFRecord data file with two intensity profiles and EEG data."""
intensity1 = np.random.rand(_NUM_TEST_POINTS, num_dimensions)
intensity2 = np.random.rand(_NUM_TEST_POINTS, num_dimensions)
speaker_flag = np.zeros((_NUM_TEST_POINTS), dtype=np.int32)
if with_switches:
# Switch to speaker 2 for second half
speaker_flag[_NUM_TEST_POINTS//2:] = 1
eeg = np.zeros((_NUM_TEST_POINTS, num_dimensions))
eeg[speaker_flag == 0, :] = _intensity_to_eeg(
intensity1[speaker_flag == 0, :])
eeg[speaker_flag == 1, :] = _intensity_to_eeg(
intensity2[speaker_flag == 1, :])
if with_noise:
for i in range(num_dimensions):
frac = i/float(num_dimensions)
eeg[:, i] = (1-frac)*eeg[:, i] + frac*np.random.rand(_NUM_TEST_POINTS,)
data_dict = {'intensity1': intensity1,
'intensity2': intensity2,
'attended_speaker': speaker_flag.astype(np.float32),
'eeg': eeg,
}
brain_trial = ingest.BrainTrial(test_name)
brain_trial.model_features = data_dict
data_dir = self.create_tempdir().full_path
brain_trial.write_data_as_tfrecords(data_dir)
return os.path.join(data_dir, test_name + '.tfrecords')
def get_default_params(self):
return {'input_field': ['eeg'],
'pre_context': 0,
'post_context': 0,
'input2_pre_context': 0,
'input2_post_context': 0,
}
def test_sample_data_file(self):
"""Basic test to make sure we can create the data file and it has data."""
num_dimensions = 4
features = brain_data.discover_feature_shapes(self._train_filename)
print('sample_data_file features are:', features)
self.assertEqual(features['eeg'].shape, [num_dimensions])
self.assertEqual(features['intensity1'].shape, [num_dimensions])
self.assertEqual(features['intensity2'].shape, [num_dimensions])
count, error = brain_data.count_tfrecords(self._train_filename)
self.assertEqual(count, _NUM_TEST_POINTS)
self.assertFalse(error)
def test_conversions(self):
"""Makes sure that the model mapping is invertable."""
data = np.random.rand(1000)
converted = _eeg_to_intensity(_intensity_to_eeg(data))
np.testing.assert_allclose(data, converted, rtol=1e-5)
def test_create_dataset(self):
"""Test to see if we can create the right data file for testing a model."""
num_batches = 0
for input_data, output_data in self._test_data.take(1):
predicted_intensity = _eeg_to_intensity(input_data['input_1'].numpy())
print('Types:', predicted_intensity.dtype, output_data.numpy().dtype)
print('Shapes:', predicted_intensity.shape, output_data.numpy().shape)
np.testing.assert_allclose(predicted_intensity,
output_data.numpy(), atol=1e-7, rtol=1e-4)
num_batches += 1
self.assertGreater(num_batches, 0)
def test_correlation_calculation(self):
num_batches = 50 # Arbitrary
batch_size = 3400 # Arbitrary
total_points = num_batches * batch_size
x = np.random.randn(total_points, 3) + 1.2
y = x*3 + 3.1
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
for i in range(num_batches):
s = i*batch_size
e = s + batch_size
decoder.add_data_correlator(x[s:e, :], y[s:e, :])
r = decoder.compute_correlation(x, y)
np.testing.assert_allclose(np.mean(r), 1, rtol=1e-5)
def test_correlation_save_model(self):
num_batches = 50 # Arbitrary
batch_size = 340 # Arbitrary
total_points = num_batches * batch_size
x = np.random.randn(total_points, 3) + 1.2
y = x*3 + 3.1
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder.add_data_correlator(x, y)
x_new = np.random.randn(total_points, 3) + 1.2
y_new = x_new*3 + 3.1
r = decoder.compute_correlation(x_new, y_new)
tmp_dir = flags.FLAGS.test_tmpdir or '/tmp'
corr_save_dir = os.path.join(tmp_dir, 'corr_params.json')
decoder.save_parameters(corr_save_dir)
decoder_loaded = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder_loaded.restore_parameters(corr_save_dir)
r_loaded = decoder_loaded.compute_correlation(x_new, y_new)
np.testing.assert_equal(r_loaded, r)
def test_linear_model(self):
"""Makes sure our sample TF model performs as expected."""
intensity = np.arange(10) - 5.1  # Arbitrary non-integer values, negative and positive
eeg = _intensity_to_eeg(intensity)
prediction = _linear_model({'input_1': eeg})
np.testing.assert_allclose(intensity, prediction)
def test_cca_data(self):
"""Checks the data is being loaded into the input_dict correctly for CCA."""
def pearson_correlation(x, y):
"""Computes the Pearson correlation coefficient between tensors of data.
This routine computes a vector correlation (ala cosine distance).
Args:
x: one of two input arrays.
y: second of two input arrays.
Returns:
scalar correlation coefficient.
"""
# From: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
x_m = x - tf.math.reduce_mean(x, axis=0)
y_m = y - tf.math.reduce_mean(y, axis=0)
return tf.divide(
tf.math.reduce_sum(tf.multiply(x_m, y_m), axis=0),
tf.multiply(tf.math.sqrt(tf.math.reduce_sum(tf.math.square(x_m),
axis=0)),
tf.math.sqrt(tf.math.reduce_sum(tf.math.square(y_m),
axis=0))))
for input_dict, _ in self._test_data.take(1):
self.assertGreater(np.mean(np.abs(input_dict['input_1'] -
input_dict['input_2'])), 0.1)
r = pearson_correlation(input_dict['input_1'], input_dict['input_2'])
np.testing.assert_allclose(r, 1.0, rtol=1e-5)
@parameterized.named_parameters(
('lda', 'lda'),
('first', 'first'),
('mean', 'mean'),
('mean-squared', 'mean-squared'),
)
def test_inference(self, reduction):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
decoder.train(self._mixed_data, self._train_data)
speaker, labels = decoder.test_all(self._test_data)
plt.clf()
plt.plot(labels)
plt.plot(speaker)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'inference_%s.png' % reduction))
print('test_inference_%s:' % reduction, speaker.shape, labels.shape)
self.assertGreater(np.mean(speaker[labels == 0]), 0.5)
self.assertLess(np.mean(speaker[labels == 1]), 0.5)
@parameterized.named_parameters(
('lda', 'lda', 0.85),
('first', 'first', 0.6),
('mean', 'mean', 0.85),
('mean-squared', 'mean-squared', 0.85),
)
def test_windowed_inference(self, reduction, expected_mean):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
decoder.train(self._mixed_data, self._train_data)
speaker, _ = decoder.test_all(self._test_data)
window_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256]
windowed_means = np.zeros(len(window_sizes))
windowed_stds = np.zeros(len(window_sizes))
for i, window_size in enumerate(window_sizes):
results = []
# Evaluate performance on first half of the training data
for window_start in range(0, _NUM_TEST_POINTS//2,
window_size):
window_end = window_start + window_size
results.append(np.mean(speaker[window_start:window_end] > 0.5))
windowed_means[i] = np.mean(results)
windowed_stds[i] = np.std(results)
plt.clf()
plt.errorbar(window_sizes, windowed_means, windowed_stds)
plt.gca().set_xscale('log')
plt.title('Test_windowed_inference with %s' % reduction)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'windowed_inference_%s.png' % reduction))
plt.xlabel('Window size (frames)')
self.assertAlmostEqual(np.mean(windowed_means), expected_mean, delta=0.05)
def test_one_window(self):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
decoder.train(self._mixed_data, self._train_data)
batch_size = 101
for speaker, label in decoder.test_by_window(self._test_data, batch_size):
self.assertEqual(speaker.shape, (batch_size, 1))
self.assertEqual(label.shape, (batch_size, 1))
def test_train_no_switches(self):
"""Tests the training and inference stages with a linear model."""
# Create the basic decoder class, with a simple TF model.
decoder = infer_decoder.LinearRegressionDecoder(_linear_model)
empty_dataset = tf.data.Dataset.from_tensor_slices(({'input_1': [],
'input_2': []},
[]))
with self.assertRaisesRegex(ValueError, 'No data for class 0'):
decoder.train(empty_dataset, self._mixed_data)
with self.assertRaisesRegex(ValueError, 'No data for class 1'):
decoder.train(self._mixed_data, empty_dataset)
def test_windowing(self):
data = np.reshape(np.arange(12), (6, 2))
ave = infer_decoder.average_data(data, window_size=3)
expected = [[2, 3], [8, 9]]
np.testing.assert_array_equal(ave, expected)
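# Added note (illustration, not from the original file): the expected values above are what a
# plain reshape-and-mean windowing would produce, e.g.
# np.reshape(np.arange(12), (2, 3, 2)).mean(axis=1) -> [[2., 3.], [8., 9.]].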
@parameterized.named_parameters(
('linear_first', 'linear', 'first', 0.1, 1),
('linear_lda', 'linear', 'lda', 0.1, 1),
('linear_mean_squared', 'linear', 'mean-squared', 0.1, 1),
('CCA_first', 'CCA', 'first', 0.1, 1),
('CCA_lda', 'CCA', 'lda', 0.16, 1),
('CCA_mean_squared', 'CCA', 'mean-squared', 0.1, 1),
('linear_first-100', 'linear', 'first', 0.15, 100),
('linear_lda-100', 'linear', 'lda', 0.1, 100),
('linear_mean_squared-100', 'linear', 'mean-squared', 0.1, 100),
('CCA_first-100', 'CCA', 'first', 0.1, 100),
('CCA_lda-100', 'CCA', 'lda', 0.16, 100),
('CCA_mean_squared-100', 'CCA', 'mean-squared', 0.1, 100),
)
def test_training_and_inference(self, regressor_name, reduction,
tolerance=0.1,
window_size=1):
"""Tests the training and inference stages with a linear model."""
print('Training the %s regressor.' % regressor_name)
# Create the desired decoder class.
if regressor_name == 'linear':
decoder = infer_decoder.LinearRegressionDecoder(_linear_model,
reduction=reduction)
elif regressor_name == 'CCA':
decoder = infer_decoder.CCADecoder(_cca_model, reduction=reduction)
else:
raise ValueError('Unknown decoder name: %s' % regressor_name)
dprime = decoder.train(self._mixed_data, self._train_data,
window_size=window_size)
logging.info('Infer training of %s data via %s gave a dprime of %g.',
regressor_name, reduction, dprime)
speaker, _ = decoder.test_all(self._test_data)
plt.clf()
plt.plot(speaker)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'inference_train_%s_%s.png' % (regressor_name,
reduction)))
self.assertGreater(np.mean(speaker[:(_NUM_TEST_POINTS//2)]),
1.0 - tolerance)
self.assertLess(np.mean(speaker[(_NUM_TEST_POINTS//2):]),
tolerance)
# Make sure we can retrieve and save parameters (without errors)
decoder.decoding_model_params = decoder.decoding_model_params
def test_two_dimensional_data(self):
"""A copy of the easiest test from scaled_lda_test. Just to verify function.
"""
num_dims = 2
mean_vectors = np.array([[-2, 12], [2, -1]])
d1 = np.matmul(np.random.randn(_NUM_TEST_POINTS, num_dims),
[[2, 0], [0, 0.5]]) + mean_vectors[0, :]
d2 = np.matmul(np.random.randn(_NUM_TEST_POINTS, num_dims),
[[2, 0], [0, 0.5]]) + mean_vectors[1, :]
# Plot the original data.
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(d1[:, 0], d1[:, 1], 'rx')
plt.plot(d2[:, 0], d2[:, 1], 'bo')
plt.title('Original Data')
x = np.concatenate((d1, d2), axis=0)
labels = [42, -12]
y = np.concatenate((np.ones(d1.shape[0])*labels[0],
np.ones(d2.shape[0])*labels[1]))
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
dprime = decoder.compute_lda_model(d1, d2)
logging.info('test_two_dimensional_data dprime is: %g', dprime)
self.assertAlmostEqual(dprime, 26.3253, delta=2.0)
x_lda = decoder.reduce_with_lda(x)
# Plot the transformed data.
plt.subplot(2, 1, 2)
plt.plot(x_lda[y == labels[0], 0], x_lda[y == labels[0], 1], 'rx')
plt.plot(x_lda[y == labels[1], 0], x_lda[y == labels[1], 1], 'bo')
plt.title('Transfomed Data')
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'scaled_lda.png'))
# Make sure the transformed centers are symmetric on the first (x) axis.
centers = decoder.reduce_with_lda(mean_vectors)
logging.info('Transformed centers are: %s', (centers,))
self.assertAlmostEqual(centers[0, 0], 0., delta=0.1)
self.assertAlmostEqual(centers[1, 0], 1., delta=0.1)
def generate_dprime_data(self):
dims = 10
# Create two datasets, with coupled dimensions (decreasing with dim. index)
d1 = np.random.randn(_NUM_TEST_POINTS, dims)
d2 = np.random.randn(_NUM_TEST_POINTS, dims)
for i in range(dims):
p = 2**(-i)
d2[:, i] = p*d1[:, i] + (1-p)*d2[:, i]
d2 += np.ones(d2.shape)
return d1, d2
def test_lda(self):
d1, d2 = self.generate_dprime_data()
# Build and transform the sample data.
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
with self.assertRaisesRegex(
ValueError, 'Must compute the LDA model before reducing data.'):
decoder.reduce_with_lda(24)
dprime = decoder.compute_lda_model(d1, d2)
self.assertAlmostEqual(dprime, 3.31, delta=.1)
all_data = np.concatenate((d1, d2), axis=0)
with self.assertRaisesRegex(
TypeError, 'Input data must be an numpy array, not'):
decoder.reduce_with_lda(24)
transformed_data = decoder.reduce_with_lda(all_data)
self.assertEqual(transformed_data.shape, (2*_NUM_TEST_POINTS,
2))
dprime = infer_decoder.calculate_dprime(decoder.reduce_with_lda(d1)[:, 0],
decoder.reduce_with_lda(d2)[:, 0])
self.assertAlmostEqual(dprime, 3.28, delta=.1)
def test_lda_save_model(self):
d1, d2 = self.generate_dprime_data()
# Build and transform the sample data.
decoder = infer_decoder.Decoder(lambda x: x) # Dummy model for testing
_ = decoder.compute_lda_model(d1, d2)
all_data = np.concatenate((d1, d2), axis=0)
transformed_data = decoder.reduce_with_lda(all_data)
dprime = infer_decoder.calculate_dprime(decoder.reduce_with_lda(d1)[:, 0],
decoder.reduce_with_lda(d2)[:, 0])
print(decoder.model_params)
tmp_dir = flags.FLAGS.test_tmpdir or '/tmp'
save_lda_dir = os.path.join(tmp_dir, 'lda_params.json')
decoder.save_parameters(save_lda_dir)
decoder_loaded = infer_decoder.Decoder(lambda x: x)
decoder_loaded.restore_parameters(save_lda_dir)
transformed_data_loaded = decoder_loaded.reduce_with_lda(all_data)
dprime_loaded = infer_decoder.calculate_dprime(
decoder_loaded.reduce_with_lda(d1)[:, 0],
decoder_loaded.reduce_with_lda(d2)[:, 0])
np.testing.assert_array_equal(transformed_data, transformed_data_loaded)
np.testing.assert_array_equal(dprime, dprime_loaded)
def test_dprime(self):
"""Makes sure our d' calculation is correct."""
num = 1000
np.random.seed(0)
d1 =
|
np.random.randn(num)
|
numpy.random.randn
|
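# Illustrative sketch (added, not part of the original rows): one common definition of the
# d-prime statistic that test_dprime above appears to exercise. Whether
# infer_decoder.calculate_dprime uses exactly this formula is an assumption here.
import numpy as np

def dprime_sketch(a, b):
    """d' = |mean(a) - mean(b)| / sqrt((var(a) + var(b)) / 2)."""
    pooled_std = np.sqrt((np.var(a) + np.var(b)) / 2.0)
    return abs(np.mean(a) - np.mean(b)) / pooled_std

# Two unit-variance normals whose means differ by 2 give d' close to 2.
rng = np.random.default_rng(0)
print(dprime_sketch(rng.normal(0, 1, 10000), rng.normal(2, 1, 10000)))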
# -*- coding: utf-8 -*-
import math
import numpy as np
import scipy.interpolate as interp
import scipy.optimize as sciopt
def change_x_to_ibias(mos_db, xmat, num_samp=200):
ib_mat = mos_db.get_function('ibias')(xmat)
min_ibias = np.max(np.min(ib_mat, axis=0))
max_ibias = np.min(np.max(ib_mat, axis=0))
ib_vec =
|
np.linspace(min_ibias, max_ibias, num_samp)
|
numpy.linspace
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
HitRatioAtK, used for calculating the hit ratio of results.
The HitRatioAtK class contains the implementation of the hit ratio metric.
Its function is to evaluate results obtained using a certain model.
"""
import numpy as np
from metric.metric_at_k import MetricAtK
from metric.top_selector import TopSelector
class HitRatioAtK(MetricAtK):
"""HitRatioAtK class. Inherits the MetricAtK class.
The HitRatioAtK is used to calculate the hit ratio metric.
Attributes:
_top_selector: A class used to extract top results used in hit ratio
calculations.
"""
def __init__(self, k):
"""Inits HitRatioAtK with its k value.
k must be greater than 0.
Raises:
TypeError: The k value is not an integer or is not set.
ValueError: The k value is smaller than 1.
"""
super().__init__('Hit Ratio', k)
self._top_selector = TopSelector()
def evaluate(self, y_true, y_pred):
"""Evaluates the given predictions with the hit ratio metric.
Calculates the hit ratio on the passed predicted and true values at k.
Args:
y_true: A PyTorch tensor of true values. Only one value per row
can be > 0!
y_pred: A PyTorch tensor of predicted values.
Returns:
Will return a float with the calculated hit ratio value. The hit
ratio is defined as follows:
math::
HR@K = \\frac{Number of Hits @ K}{Number of Ground Truth Items(=1)}
This is then averaged over all sets of predictions/ground truths
(users).
From:
https://www.comp.nus.edu.sg/~kanmy/papers/cikm15-trirank-cr.pdf
Raises:
TypeError: An error occurred while accessing the arguments -
one of the arguments is NoneType.
ValueError: An error occurred when checking the dimensions of the
y_pred and y_true arguments. One or both are not a 2D arrays,
or they are 2D but of different sizes along those dimensions.
If y_true has more than one true value per row this error
is raised. This is also raised if the output is not in [0,1].
"""
self._check_input(y_true, y_pred)
y_true = y_true.cpu().numpy()
y_pred = y_pred.cpu().numpy()
self._check_args_numpy(y_pred, y_true)
# Check only one ground truth value = 1 per row in y_true.
y_true[y_true > 0] = 1
y_true[y_true < 0] = 0
for x in np.sum(y_true, axis=1):
if x != 1:
raise ValueError('Incorrect format of argument: y_true. \
Input must have only one true value \
per row.')
y_pred_binary = self._top_selector.find_top_k_binary(y_pred, self._k)
y_true_binary = (y_true > 0)
result = (
|
np.logical_and(y_true_binary, y_pred_binary)
|
numpy.logical_and
|
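# Illustrative sketch (added, not part of the original rows): HR@K from the docstring above,
# computed with plain numpy for the one-true-item-per-row case. The TopSelector helper is
# replaced by np.argsort here; hit_ratio_at_k_sketch is a hypothetical name.
import numpy as np

def hit_ratio_at_k_sketch(y_true, y_pred, k):
    top_k = np.argsort(-y_pred, axis=1)[:, :k]      # indices of the top-k predictions per row
    true_items = np.argmax(y_true, axis=1)          # exactly one ground-truth item per row
    hits = np.any(top_k == true_items[:, None], axis=1)
    return hits.mean()                              # number of hits / number of rows

y_true = np.array([[0, 1, 0], [1, 0, 0]])
y_pred = np.array([[0.1, 0.8, 0.3], [0.2, 0.9, 0.4]])
print(hit_ratio_at_k_sketch(y_true, y_pred, k=2))   # 0.5: the second row's true item misses the top-2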
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings, os, pickle, argparse, multiprocessing, logging
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import yaml
import recommender_config
warnings.simplefilter('ignore', ConvergenceWarning)
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', UserWarning)
from recommender.functions import Theta_forecast, Theta_forecast_sktime, Naive_forecast, lr_forecast, autoarima_forecast, KNN_forecast, DT_forecast, VPA_forecast
from recommender.functions import perform_tests
def pando_recommender(y_segment, tree, window, limit = 5):
forecasters = {'theta':Theta_forecast, 'theta-sktime': Theta_forecast_sktime, 'naive': Naive_forecast, 'linear': lr_forecast,'arima': autoarima_forecast, 'kn': KNN_forecast, 'dt': DT_forecast, "vpa": VPA_forecast}
# Check y_segment length
if len(y_segment) < window:
forecast = np.zeros(window)
forecast[-len(y_segment):] = y_segment
prov = np.percentile(forecast, recommender_config.TARGET_PERCENTILE)
return forecast, prov, "warmup"
# get label for segment
tests = perform_tests(y_segment, recommender_config.STAT_THRESHOLD, recommender_config.THETA_THRESHOLD, recommender_config.MAX_CHANGEPOINTS)
label = int("".join(str(int(i)) for i in tests.values()), 2)
print("Trace Behavior Label: {}".format(label))
# get forecaster
rec_name = tree[label]
if type(rec_name) == float and
|
np.isnan(rec_name)
|
numpy.isnan
|
import time
import numpy as np
import scipy as sp
from tqdm import tqdm
import jax.numpy as jnp
from jax import jit
from jax.scipy.stats import norm
import pandas as pd
#import from cb package
from run_scripts.load_data import gen_data_hier,load_traintest_hier
from conformal_bayes import conformal_Bayes_functions as cb
from conformal_bayes import Bayes_MCMC_functions as bmcmc
#Conformalized Bayes for grouped data
def run_hier_conformal(dataset,misspec):
#Compute intervals
#Initialize
if dataset == 'sim':
seed = 100
K = 5
p = 1
n = 10
n_test_pergrp = 10
rep = 50
B = 4*2000
y,x,y_test,x_test,beta_true,sigma_true,y_plot = gen_data_hier(n,p,n_test_pergrp,seed,K,misspec = misspec)
elif dataset =='radon':
train_frac = 1.
x,y,x_test,y_test,y_plot,n,d = load_traintest_hier(1,dataset,100)
K = np.shape(np.unique(x[:,1]))[0]
rep = 1
B = 4*2000
x,y,x_test,y_test,y_plot,n,d = load_traintest_hier(train_frac,dataset,100)
#Load all possible x_test and group assignments
x_test = np.zeros((2*K,2))
for k in range(K):
x_test[2*k:2*k + 2,1] = k
x_test[2*k,0] = 0
x_test[2*k+1,0]= 1
n_test = np.shape(x_test)[0]
y_test = np.zeros(n_test) #Place holder
#Compute intervals
alpha = 0.2
dy = y_plot[1]- y_plot[0]
n_test = np.shape(x_test)[0]
coverage_cb =
|
np.zeros((rep,n_test))
|
numpy.zeros
|
import numpy as np
class MolBuilder:
def __init__(self, molecule):
"""
Helper class to decide what can bind where
Args:
molecule: rdkit Mol object that will be the seed
"""
self.mol = molecule
self.bound = False
def initialize_binders(
self, list_of_molecules, molecule_atom_numbers, binder_atoms_numbers
):
"""
Store what molecules can bind where and how
Args:
list_of_molecules: list of mols
molecule_atom_numbers: what atom of the seed they can bind to
binder_atoms_numbers: what atom of the seed binds to the mol
Returns: None
"""
assert len(list_of_molecules) == len(molecule_atom_numbers)
assert len(list_of_molecules) == len(binder_atoms_numbers)
self.binders = list_of_molecules
self.molecule_atom_numbers = molecule_atom_numbers
self.binder_atoms_numbers = binder_atoms_numbers
def setup_recursive_binders(self, binder_objects):
"""
Enables recursion for easier generations
Args:
binder_objects: other Mol wrappers
Returns: None
"""
self.binder_objects = binder_objects
def sample_binder(self):
"""
Randomly sample an atom to bind to each open index of the seed
Returns: List that details how to bind if an atom is available, None if no open spots
"""
if not self.bound:
(
chosen_binders,
chosen_mol_nums,
chosen_binder_atoms,
chosen_binder_objects,
) = ([], [], [], [])
self.bound = True
for unique_node_atom in np.unique(np.array(self.molecule_atom_numbers).T):
possible_linker_indices = np.arange(len(self.molecule_atom_numbers))[
np.where(
|
np.array(self.molecule_atom_numbers)
|
numpy.array
|
# You are at the top. Notice there are no bats hanging from the ceiling
# If there are weird bind errors like the mesh is not deforming correctly, compare
# the oct version of closest triangles to the one without oct
bl_info = {
"name": "Surface Follow",
"author": "<NAME> (<EMAIL>), <NAME> (@ucupumar)",
"version": (1, 0),
"blender": (2, 79, 0),
"location": "View3D > Extended Tools > Surface Follow",
"description": "Doforms an object as the surface of another object changes",
"warning": "Do not use if you are pregnant or have ever met someone who was pregnant",
"wiki_url": "",
"category": '3D View'}
import bpy
import numpy as np
np.seterr(all='ignore')
from bpy.props import *
from bpy.app.handlers import persistent
import bmesh
import time
def rotate_around_axis(coords, Q, origin='empty'):
'''Uses standard quaternion to rotate a vector. Q requires
a 4-dimensional vector. coords is the 3d location of the point.
coords can also be an N x 3 array of vectors. Happens to work
with Q as a tuple or a np array shape 4'''
if origin == 'empty':
vcV = np.cross(Q[1:], coords)
RV = np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)
else:
coords -= origin
vcV = np.cross(Q[1:],coords)
RV = (np.nan_to_num(coords + vcV * (2*Q[0]) + np.cross(Q[1:],vcV)*2)) + origin
coords += origin #undo in-place offset
return RV
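# Illustrative sketch (added for clarity, not in the original add-on): rotating a vector 90
# degrees about the Z axis with rotate_around_axis above. The quaternion for a rotation by
# angle theta about a unit axis n is (cos(theta/2), sin(theta/2) * n).
def _example_rotate_z90():
    q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])  # 90 deg about Z
    v = np.array([1.0, 0.0, 0.0])
    return rotate_around_axis(v, q)  # approximately [0, 1, 0]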
def transform_matrix(V, ob='empty', back=False):
'''Takes a vector and returns it with the
object transforms applied. Also works
on N x 3 array of vectors'''
if ob == 'empty':
ob = bpy.context.object
ob.rotation_mode = 'QUATERNION'
if back:
rot = np.array(ob.rotation_quaternion)
rot[1:] *= -1
V -= np.array(ob.location)
rotated = rotate_around_axis(V, rot)
rotated /= np.array(ob.scale)
return rotated
rot = np.array(ob.rotation_quaternion)
rotated = rotate_around_axis(V, rot)
return np.array(ob.location) + rotated * np.array(ob.scale)
def set_key_coords(coords, key, ob):
"""Writes a flattened array to one of the object's shape keys."""
ob.data.shape_keys.key_blocks[key].data.foreach_set("co", coords.ravel())
ob.data.update()
# Workaround for dependency graph issue
ob.data.shape_keys.key_blocks[key].mute = True
ob.data.shape_keys.key_blocks[key].mute = False
def get_triangle_normals(tri_coords):
'''Returns the (unnormalized) face normals of an
N x 3 x 3 array of triangle coordinates.'''
t0 = tri_coords[:, 0]
t1 = tri_coords[:, 1]
t2 = tri_coords[:, 2]
return
|
np.cross(t1 - t0, t2 - t0)
|
numpy.cross
|
r"""@package motsfinder.axisym.curve.expcalc
Computation class storing interim results of expansion calculations.
The implementation here uses the formulas derived in
\ref thornburg2003_1 "[1]". Specifically, we make heavy use of the quantities
`A, B, C, D` defined in \ref thornburg2003_1 "[1]" in equation (12) to compute
the expansion \f$ \Theta \f$ using equation (11). See also
\ref pookkolb2018 "[2]" and the docstrings of the individual procedures.
In the base class ExpansionCalc defined in this module, we do not consider how
the used quantities \f$ s_i \f$ and \f$ \partial_i s_j \f$ are obtained. This
depends on how the surfaces are represented and hence is the responsibility of
subclasses to implement. Additionally, subclasses also need to supply surface
parameter derivatives defined in \ref thornburg2003_1 "[1]" as
\f$ X^u_i = \partial_i y^u \f$ and
\f$ X^u_{ij} = \partial_i\partial_j y^u \f$.
In the axisymmetric case considered here, we have only one parameter,
\f$ y^u = \lambda \f$ along the curve, and hence drop the `u` superscript.
Note that in this code, we call the covector field \f$ X_i \f$ simply `X` and
the 2nd rank tensor field \f$ X_{ij} \f$ simply `Y` (Python cannot
differentiate between objects based on how many indices you use).
@b Examples
See implementations starshapedcurve._StarShapedExpansionCalc and
refparamcurve._RefParamExpansionCalc.
@b References
\anchor thornburg2003_1 [1] <NAME>. "A fast apparent horizon finder
for three-dimensional Cartesian grids in numerical relativity." Classical
and quantum gravity 21.2 (2003): 743.
\anchor pookkolb2018 [2] <NAME>, <NAME>, <NAME> and <NAME>, "The existence and stability of marginally trapped surfaces."
arXiv:1811.10405 [gr-qc].
"""
from abc import ABCMeta, abstractmethod
from math import fsum
from six import add_metaclass
import numpy as np
from scipy import linalg
from scipy.misc import derivative
from ...utils import cache_method_results
from ...numutils import inverse_2x2_matrix_derivative
from ...metric import christoffel_symbols, christoffel_deriv
from ...metric import riemann_components
__all__ = []
@add_metaclass(ABCMeta)
class ExpansionCalc(object):
r"""Abstract base class for computing the expansion at one point.
This class serves as coordinator for computing the expansion and
functional derivatives w.r.t. the horizon function. Sub classes need only
implement a small number of computational methods.
The purpose of having a separate class hierarchy for computing the
expansion (as opposed to doing all the computations inside the curve
classes) is to be able to store a number of interim results that are valid
only at one point of the surface. Including these as `cache` in the
curve classes would in principle be possible. To ease management of cache
invalidation (when computing at a different point), the complete cache
should live on one object. The ExpansionCalc class and its sub classes can
be interpreted as such a cache, with added functionality to do the
necessary computations using the cached values.
"""
def __init__(self, curve, h_fun, param, metric):
r"""Create a "calc" object for certain point of a curve.
The curve represents an axisymmetric surface.
@param curve (expcurve.ExpansionCurve)
The curve representing the (trial) surface on which to compute the
expansion and other quantities.
@param h_fun (exprs.numexpr.NumericExpression)
The (1D) "horizon" function. The subclasses implementing this
ExpansionCalc class are free to interpret it as they wish.
@param param (float)
The parameter value along the `curve` at which the quantities
should be computed.
@param metric
The Riemannian 3-metric defining the geometry of the surrounding
space.
"""
## Step sizes for FD numerical differentiation of the expansion
## \wrt `h`, `h'`, ``h''``, respectively.
self.dx_hdiffs = (1e-6, 1e-6, 1e-3)
## Finite difference differentiation order.
self.fd_order = 3
## The curve representing the (trial) surface.
self.curve = curve
## Horizon function (in case we need higher derivatives than ``h''``).
self.h_fun = h_fun
## Value of horizon function `h` at the given parameter.
self.h = h_fun(param)
## Value of `h'` at the given parameter.
self.dh = h_fun.diff(param, n=1)
## Value of ``h''`` at the given parameter.
self.ddh = h_fun.diff(param, n=2)
## Parameter on the curve at which to do the computations.
self.param = param
point = curve(param, xyz=True)
## 3D point in `x`,`y`,`z` coordinates.
self.point = point
## Metric (tensor field).
self.metric = metric
## Metric tensor at the point to do computations at.
self.g = metric.at(point)
if curve.extr_curvature is None:
## Extrinsic curvature at the point to do computations at.
self.K = None
else:
self.K = curve.extr_curvature(point)
# Cached metric derivatives (computed on-demand).
self._dg = None
self._dg_inv = None
self._ddg = None
self._ddg_inv = None
## Derivatives \f$ \partial_i \ln\sqrt{g} \f$
self.dlnsqrtg = np.asarray(metric.diff_lnsqrtg(point))
s, ds, X, Y = self._compute_s_ds_X_Y()
## Normal covector (not normalized).
self.s = np.asarray(s)
## Derivative matrix \f$ \partial_i s_j \f$ of normal vector.
self.ds = np.asarray(ds)
## Derivative covector \f$ X_i := \partial_i \lambda(\vec x) \f$.
self.X = np.asarray(X)
## Second derivatives \f$ Y := X_{ij} := \partial_i\partial_j\lambda\f$.
self.Y = np.asarray(Y)
## Contravariant normal vector (not normalized).
self.s_up = self.g.raise_idx(s)
## Contravariant parameter derivative \f$ X^i := g^{ij}X_j \f$.
self.X_up = self.g.raise_idx(X)
ABCD, trK = self._compute_ABCDtrK()
## A, B, C, D terms of the Thornburg expansion formula.
self.ABCD = ABCD
## Trace of the extrinsic curvature.
self.trK = trK
## Cached expansion result.
self._Th = None
@property
def dg(self):
r"""Derivative of 3-metric components \wrt x,y,z."""
if self._dg is None:
self._dg = np.asarray(self.metric.diff(self.point, diff=1))
return self._dg
@property
def dg_inv(self):
r"""Derivative of inverse 3-metric components.
This is computed using
\f$0 = \partial_i \delta^a_b = \partial_i(g^{ac}g_{cb})\f$
from which we get
\f[
\partial_i g^{-1} = -g^{-1} (\partial_i g) g^{-1}.
\f]
"""
if self._dg_inv is None:
g_inv = self.g.inv
dg = self.dg
# explanation:
# X = g_inv.dot(dg) == g^ad partial_i g_db
# Y = X.dot(g_inv) == X^a_ib g^be
# => Y has indices Y[a,i,e] == (g^-1 partial_i g g^-1)^ae
# we want "i" to be the first axis => swapaxes(0, 1)
# equivalent to: -np.einsum('ic,acd,dj', _g_inv, _dg, _g_inv)
self._dg_inv = -(
g_inv.dot(dg).dot(g_inv).swapaxes(0, 1)
)
return self._dg_inv
@property
def ddg(self):
r"""Second derivatives of 3-metric components."""
if self._ddg is None:
self._ddg = np.asarray(self.metric.diff(self.point, diff=2))
return self._ddg
@property
def ddg_inv(self):
r"""Second derivatives of inverse 3-metric components.
As for `dg_inv`, using
\f$0 = \partial_i \partial_j \delta^a_b
= \partial_i \partial_j (g^{ac}g_{cb})\f$
we get
\f[
\partial_i \partial_j g^{-1}
= -g^{-1}\big[
(\partial_i \partial_j g) g^{-1}
+ (\partial_j g) (\partial_i g^{-1})
+ (\partial_i g) (\partial_j g^{-1})
\big].
\f]
"""
if self._ddg_inv is None:
g_inv = self.g.inv
dg = self.dg
dg_inv = self.dg_inv
ddg = self.ddg
# equivalent to:
# -(
# + np.einsum('ij,abjk,kl', g_inv, ddg, g_inv)
# + np.einsum('ij,bjk,akl', g_inv, dg, dg_inv)
# + np.einsum('ij,ajk,bkl', g_inv, dg, dg_inv)
# )
tmp = g_inv.dot(dg).dot(dg_inv)
self._ddg_inv = -(
+ np.moveaxis(g_inv.dot(ddg).dot(g_inv), [1,2,0], [0,1,2])
+ np.moveaxis(tmp, [2,1,0], [0,1,2])
+ np.moveaxis(tmp, [1,2,0], [0,1,2])
)
return self._ddg_inv
def _compute_ABCDtrK(self):
r"""Compute the A, B, C, D and trace(K) terms.
The computation only uses the cached covariant normal `s` and its
derivatives `ds` (in addition to the metric and extrinsic curvature,
of course). This means that any subclass only needs to implement
computing `s` and `ds` in order to use this function.
This computes the terms as defined in equation (12) in
\ref thornburg2003_1 "[1]".
"""
s, s_up, ds = self.s, self.s_up, self.ds
g, dg_inv, dlnsqrtg = self.g, self.dg_inv, self.dlnsqrtg
A = (
- ds.dot(s_up).dot(s_up)
- 0.5 * dg_inv.dot(s).dot(s).dot(s_up)
)
B = (
dg_inv.dot(s).diagonal().sum()
+ g.inv.dot(ds).diagonal().sum()
+ dlnsqrtg.dot(s_up)
)
if self.K is None:
trK = 0.0
C = 0.0
else:
trK = g.inv.dot(self.K).diagonal().sum()
C = self.K.dot(s_up).dot(s_up)
D = s.dot(s_up)
return (A, B, C, D), trK
def expansion(self, ingoing=False):
r"""Compute the expansion at the configured point.
This implements equation (11) in \ref thornburg2003_1 "[1]".
"""
if ingoing:
A, B, C, D = self.ABCD
return -A/D**1.5 - B/D**0.5 + C/D - self.trK
if self._Th is None:
A, B, C, D = self.ABCD
self._Th = A/D**1.5 + B/D**0.5 + C/D - self.trK
return self._Th
def diff(self, hdiff=0):
r"""Compute derivative of expansion \wrt `h`, `h'`, or ``h''``.
The argument `hdiff` controls the derivative order of `h` with
respect to which to differentiate the expansion, i.e. `hdiff=0` will
compute \f$ \partial_{h}\Theta \f$, while for `hdiff=2` we
compute \f$ \partial_{h''}\Theta \f$.
Numerical FD differentiation is performed if a `NotImplementedError`
is raised in one of the subroutines.
"""
try:
return self._diff(hdiff=hdiff)
except NotImplementedError:
return self._diff_FD(hdiff=hdiff)
def _diff_FD(self, hdiff):
r"""Compute derivatives of the expansion using finite differencing.
Since the expansion depends on `h` and its derivatives only
ultra-locally, a reasonable approximation to the variational
derivative of the expansion w.r.t. `h` can be obtained by varying `h`
(or derivatives) point-wise, i.e. compute the usual partial derivative
of the expansion w.r.t. `h`. This can be approximated using a finite
difference differentiation, which is done in this function. Note that
irrespective of the accuracy of this approximation, the test whether
the expansion has the desired value (e.g. 0.0 for a MOTS) is
independent of the results computed here.
"""
h_orig = self.curve.h
Th0 = self.expansion()
param = self.param
h_plus_eps = _FuncVariation(h_orig.evaluator(), diff=hdiff)
with self.curve.override_evaluator(h_plus_eps):
def f(eps):
if eps == 0:
return Th0
h_plus_eps.eps = eps
with self.curve.suspend_calc_obj():
return self.curve.expansion(param)
dx = self.dx_hdiffs[hdiff]
return derivative(f, x0=0.0, n=1, dx=dx, order=self.fd_order)
def _diff(self, hdiff):
r"""Compute analytical functional derivatives of the expansion.
This may raise a `NotImplementedError`, indicating that FD
differentiation needs to be performed.
@param hdiff
Derivative order of `h` to differentiate the expansion by (see
below). E.g., a value of `0` will compute \f$\partial_h \Theta\f$.
@b Notes
In general, due to the ultra-local dependency of the expansion on `h`
and its first two derivatives, we can treat the variational
differentiation like a simple partial differentiation. This can also
be seen by taking the definition
\f[
(\delta\Theta)(h)\Delta
:= \frac{d}{d\varepsilon}\Big|_{\varepsilon=0}
\Theta(h+\varepsilon\Delta)
\f]
and separating the terms based on the derivative order of
\f$\Delta\f$. The result will be of the form
\f[
(\delta\Theta)(h)\Delta =
\partial_h\Theta \Delta
+ \partial_{h'}\Theta \Delta'
+ \partial_{h''}\Theta \Delta''.
\f]
These three terms are computed here using
\f[
\partial_f \Theta =
\frac{A_f}{D^{3/2}}
- \frac{3}{2} \frac{A D_f}{D^{5/2}}
+ \frac{B_f}{D^{1/2}}
- \frac{1}{2} \frac{B D_f}{D^{3/2}}
+ \frac{C_f}{D}
- \frac{C D_f}{D^2}
- \partial_f \,\mathrm{tr} K,
\f]
where `f` is one of ``h, h', h''``.
The terms `A`, `B`, `C`, and `D` are defined in [1], but here we
repeat them for convenience:
\f{eqnarray*}{
A &:=& -s^i s^j \partial_i s_j - \frac{1}{2} s^i (\partial_i g^{kl}) s_k s_l \\
B &:=& (\partial_i g^{ij}) s_j + g^{ij} \partial_i s_j + (\partial_i \ln\sqrt{g}) s^i \\
C &:=& K^{ij} s_i s_j \\
D &:=& s_i s^i.
\f}
@b References
[1] <NAME>. "A fast apparent horizon finder for
three-dimensional Cartesian grids in numerical relativity."
Classical and quantum gravity 21.2 (2003): 743.
"""
if hdiff == 0: # del_h H
A, B, C, D = self.ABCD
dhA, dhB, dhC, dhD, dhtrK = self.get_dh_ABCDtrK()
return (
- 3 * A * dhD / (2*D**2.5) - B * dhD / (2*D**1.5)
- C/D**2 * dhD
+ dhC / D + dhB / np.sqrt(D) + dhA / D**1.5
- dhtrK
)
if hdiff == 1: # del_h' H
A, B, C, D = self.ABCD
dhpA, dhpB, dhpC, dhpD = self.get_dhp_ABCD()
return (
- 3 * A * dhpD / (2*D**2.5) - B * dhpD / (2*D**1.5)
- C/D**2 * dhpD
+ dhpC / D + dhpB / np.sqrt(D) + dhpA / D**1.5
)
if hdiff == 2: # del_h'' H
D = self.ABCD[-1]
dhppA, dhppB = self.get_dhpp_AB()
return (D * dhppB + dhppA) / D**1.5
raise NotImplementedError
def get_dh_ABCDtrK(self):
r"""Compute the derivative of A, B, C, D, tr(K) \wrt `h`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
Refer to the definition of `A,B,C,D` in the documentation of _diff().
The terms computed here are:
\f[
\partial_h A = -2(\partial_h s^i) s^j \partial_i s_j
- s^i s^j \partial_h \partial_i s_j
- \frac{1}{2} (\partial_h s^i) (\partial_i g^{kl}) s_k s_l
- \frac{1}{2} s^i (\partial_h \partial_i g^{kl}) s_k s_l
- s^i (\partial_i g^{kl}) s_k \partial_h s_l
\f]
\f[
\partial_h B =
(\partial_h \partial_i g^{ij}) s_j
+ (\partial_i g^{ij}) \partial_h s_j
+ (\partial_h g^{ij}) \partial_i s_j
+ g^{ij} \partial_h \partial_i s_j
+ (\partial_h \partial_i \ln\sqrt{g}) s^i
+ (\partial_i \ln\sqrt{g}) \partial_h s^i
\f]
\f[
\partial_h C =
\big[(\partial_h g^{ik}) g^{jl} + g^{ik}(\partial_h g^{jl})\big]
K_{kl} s_i s_j
+ g^{ik} g^{jl} (\partial_h K_{kl}) s_i s_j
+ 2 g^{ik} g^{jl} K_{kl} s_i \partial_h s_j
\f]
\f[
\partial_h D =
(\partial_h g^{ij}) s_i s_j + 2 g^{ij} s_i \partial_h s_j
\f]
\f[
\partial_h \mathrm{tr}K =
(\partial_h g^{ij}) K_{ij} + g^{ij} \partial_h K_{ij}
\f]
The individual terms are computed by simply applying the chain rule.
We obtain for any quantity `f` which depends on the coordinates
`x,y,z`:
\f[
\partial_h f = (\partial_i f) (\partial_h\gamma)^i,
\f]
where \f$\gamma\f$ is the curve along which the computation takes
place.
"""
dh_gamma = self.curve.h_diff(self.param)
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
dg = self.dg
ddg = self.ddg
ddg_inv = self.ddg_inv
s, s_up, ds = self.s, self.s_up, self.ds
dds = self.compute_dds()
dhs = ds.dot(dh_gamma)
dhg_inv = np.einsum('aij,a', dg_inv, dh_gamma)
dhs_up = dhg_inv.dot(s) + g_inv.dot(dhs)
dhdg_inv = np.einsum('aikl,a', ddg_inv, dh_gamma)
dhds = dds.dot(dh_gamma)
dhdlnsqrtg = (
0.5 * np.einsum('icd,acd,a', dg_inv, dg, dh_gamma)
+ 0.5 * np.einsum('cd,iacd,a', g_inv, ddg, dh_gamma)
)
dhA = (
- 2 * np.einsum('i,j,ij', dhs_up, s_up, ds)
- np.einsum('i,j,ij', s_up, s_up, dhds)
- 0.5 * np.einsum('i,ikl,k,l', dhs_up, dg_inv, s, s)
- 0.5 * np.einsum('i,ikl,k,l', s_up, dhdg_inv, s, s)
- np.einsum('i,ikl,k,l', s_up, dg_inv, s, dhs)
)
dhB = (
np.einsum('iij,j', dhdg_inv, s)
+ np.einsum('iij,j', dg_inv, dhs)
+ dhg_inv.dot(ds).diagonal().sum()
+ g_inv.dot(dhds).diagonal().sum()
+ dhdlnsqrtg.dot(s_up)
+ dlnsqrtg.dot(dhs_up)
)
dhD = (
np.einsum('ij,i,j', dhg_inv, s, s)
+ 2 * np.einsum('ij,i,j', g_inv, s, dhs)
)
if self.K is None:
dhC = 0.0
dhtrK = 0.0
else:
K = self.K
dK = self.curve.extr_curvature(self.point, diff=1)
dhK = np.einsum('aij,a', dK, dh_gamma)
dhC = (
np.einsum('ik,jl,kl,i,j', dhg_inv, g_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, dhg_inv, K, s, s)
+ np.einsum('ik,jl,kl,i,j', g_inv, g_inv, dhK, s, s)
+ 2 * np.einsum('ik,jl,kl,i,j', g_inv, g_inv, K, s, dhs)
)
dhtrK = (
np.einsum('ij,ij', dhg_inv, K)
+ np.einsum('ij,ij', g_inv, dhK)
)
return dhA, dhB, dhC, dhD, dhtrK
def get_dhp_ABCD(self):
r"""Compute the derivative of A, B, C, D \wrt `h'`.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h'} s_i &=& - X_i\\
\partial_{h'} \partial_i s_j &=& - X_{ij},
\f}
where \f$X_i := \partial_i \lambda\f$ and
\f$X_{ij} := \partial_i \partial_j \lambda\f$.
The terms computed here then become (refer to _diff()):
\f{eqnarray*}{
\partial_{h'} A &=&
2 X^i s^j \partial_i s_j + s^i s^j X_{ij}
+ \frac{1}{2} (\partial_i g^{kl}) (X^i s_k s_l + 2 s^i X_k s_l)
\\
\partial_{h'} B &=&
-(\partial_i g^{ij}) X_j - g^{ij} X_{ij} - (\partial_i\ln\sqrt{g}) X^i
\\
\partial_{h'} C &=& -2 K_{ij} X^i s^j
\\
\partial_{h'} D &=& -2 X_i s^i
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
g_inv, dg_inv, dlnsqrtg = self.g.inv, self.dg_inv, self.dlnsqrtg
s, s_up, ds = self.s, self.s_up, self.ds
X, X_up, Y = self.X, self.X_up, self.Y
dhpA = (
2 * ds.dot(X_up).dot(s_up)
+ Y.dot(s_up).dot(s_up)
+ 0.5 * dg_inv.dot(s).dot(s).dot(X_up)
+ dg_inv.dot(X).dot(s).dot(s_up)
)
dhpB = (
- dg_inv.dot(X).diagonal().sum()
- g_inv.dot(Y).diagonal().sum()
- dlnsqrtg.dot(X_up)
)
if self.K is None:
dhpC = 0.0
else:
dhpC = - 2 * self.K.dot(X_up).dot(s_up)
dhpD = - 2 * X.dot(s_up)
return dhpA, dhpB, dhpC, dhpD
def get_dhpp_AB(self):
r"""Compute the derivative of A and B \wrt ``h''``.
May raise `NotImplementedError` to indicate numerical differentiation
should be done.
This implementation is correct iff
\f{eqnarray*}{
\partial_{h''} s_i &=& 0\\
\partial_{h''} \partial_i s_j &=& - X_i X_j.
\f}
We compute here (see also _diff()):
\f{eqnarray*}{
\partial_{h''} A &=& s^i s^j X_i X_j \\
\partial_{h''} B &=& -X^i X_i \\
\partial_{h''} C &=& \partial_{h''} D = 0
\f}
This method is agnostic as to how the surfaces are represented as long
as the quantities \f$s_i\f$, \f$\partial_i s_j\f$, \f$X_i\f$, and
\f$X_{ij}\f$ are available.
"""
X, X_up = self.X, self.X_up
s_up = self.s_up
dhppA = np.outer(X, X).dot(s_up).dot(s_up)
dhppB = - X_up.dot(X)
return dhppA, dhppB
@abstractmethod
def _compute_s_ds_X_Y(self):
r"""Compute the terms we need to compute the expansion.
Subclasses need to interpret the horizon function and compute the
covariant normal (not normalized), its derivatives, and the parameter
first (`X = del_i lambda`) and second (`Y = del_i del_j lambda`)
derivatives.
"""
pass
def _compute_dds_Z(self):
r"""Compute second derivatives of the normal and third ones of lambda.
This computes \f$\partial_i\partial_j s_k\f$ and
\f$Z := X_{ijk} = \partial_i\partial_j\partial_k \lambda\f$.
@return Two elements, the first containing the derivatives of the
non-normalized covariant normal `s` and the second those of the
parameter \f$\lambda\f$.
"""
raise NotImplementedError
def _compute_d2_Y(self):
r"""Compute second derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def _compute_d3_Z(self):
r"""Compute third derivatives of xi and lambda \wrt x,y,z."""
raise NotImplementedError
def ricci_scalar(self):
r"""Compute the Ricci scalar of the surface represented by the curve.
The Ricci scalar of a 2-surface is defined as (see e.g. [1])
\f$R = q^{AB}R_{AB}\f$, where `q` is the induced metric
\f$q_{ab} = g_{ab} - \nu_a \nu_b\f$, \f$R_{AB}\f$ is the Ricci tensor
\f$R_{AB} = R^C_{\ A\,CB}\f$ and \f$\nu\f$ the covariant outward unit
normal of the surface.
Here, \f$R^A_{\ B\,CD}\f$ is the Riemann tensor.
Note that `A,B` run over the coordinates \f$(\lambda,\varphi)\f$ on
the surface and `a,b` over `x,y,z`.
See induced_metric() for a bit more details on the induced metric `q`
and the coordinate transformation to get the components \f$q_{AB}\f$
we need here.
It is convenient to compute the Ricci scalar from the purely covariant
Riemann tensor \f$R_{AB\,CD} = q_{AE}R^E_{\ B\,CD}\f$ as this is
antisymmetric in the first and last two index pairs, i.e. it has only
one independent component \f$R_{\lambda\varphi\,\lambda\varphi}\f$ in
two dimensions.
A short calculation reveals
\f[
R = q^{AB}R_{AB}
= 2 R_{\lambda\varphi\,\lambda\varphi}
(q^{\lambda\lambda}q^{\varphi\varphi} - (q^{\lambda\varphi})^2).
\f]
@b References
[1] <NAME>. General relativity. Springer Science &
Business Media, 2004.
"""
R_0101 = self.covariant_riemann()
q_inv = self.induced_metric(inverse=True)
return 2 * R_0101 * (q_inv[0,0]*q_inv[1,1] - q_inv[0,1]**2)
def induced_metric(self, diff=0, inverse=False):
r"""Compute the induced metric on the surface.
This method computes the components of the induced metric in
\f$(\lambda,\varphi)\f$ coordinates as well as the components of the
inverse (i.e. indices upstairs) and derivatives of these components.
Since this class assumes axisymmetry throughout, this method requires
(without loss of generality) that the point at which the metric is to
be returned is located at `phi=0`, i.e. `y=0` and `x>0`.
@param diff
Derivative order to compute. Default is `0`.
@param inverse
Whether to return the (derivatives of the) inverse of the induced
metric. Default is `False`.
@return NumPy array with ``2+diff`` axes, such that the indices
``[A1,A2,...,B,C]`` correspond to
\f$\partial_{A_1}\partial_{A_2}\ldots q_{BC}\f$ for
`inverse==False` and with upstairs indices for `inverse==True`.
@b Notes
The induced 2-metric `q` on the surface \f$\sigma\f$ is formally given
by
\f[
q = \Pi_\sigma g = g\big|_\sigma - \underline{\nu} \otimes \underline{\nu},
\qquad
q_{ab} = g_{ab} - \nu_a \nu_b,
\f]
where \f$\nu\f$ is the outward pointing normal of \f$\sigma\f$ and
\f$\underline{\nu} = g(\nu,\,\cdot\,)\f$.
The induced metric can easily be expressed in terms of the components
of the 3-metric `g` by expanding these into the cobasis fields of the
coordinates \f$\lambda, \varphi\f$ on the 2-surface (and thereby
dropping any transversal components). As a result, we get the simple
formula
\f[
q_{AB} = g_{ij}\ (\partial_A x^i)\ (\partial_B x^j),
\f]
where `A,B = 1,2` and
\f$(\partial_A) = (\partial_\lambda, \partial_\varphi)\f$.
The derivatives of the Cartesian coordinates `x,y,z` are computed in
diff_xyz_wrt_laph().
From this, we easily get the first and second derivatives by applying
the chain and product rule:
\f{eqnarray*}{
\partial_A q_{CD} &=&
(\partial_A g_{ij}) x_C^i x_D^j
+ g_{ij} (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\
\partial_A\partial_B q_{CD} &=&
(\partial_A\partial_B g_{ij}) x_C^i x_D^j
+ (\partial_A g_{ij}) (x_{CB}^i x_D^j + x_C^i x_{DB}^j)
+ (\partial_B g_{ij}) (x_{CA}^i x_D^j + x_C^i x_{DA}^j)
\\&&
+ g_{ij} (x_{CAB}^i x_D^j + x_{CA}^i x_{DB}^j
+ x_{CB}^i x_{DA}^j + x_C^i x_{DAB}^j).
\f}
Here, \f$x_{A}^i := \partial_A x^i\f$, etc.
"""
return self._induced_metric(diff, bool(inverse))
@cache_method_results()
def _induced_metric(self, diff, inverse):
if inverse:
q = self.induced_metric(diff=0)
if diff == 0:
return linalg.inv(q)
dq = self.induced_metric(diff=1)
if diff == 1:
dq_inv = inverse_2x2_matrix_derivative(q, dq, diff=1)
return dq_inv
ddq = self.induced_metric(diff=2)
if diff == 2:
ddq_inv = inverse_2x2_matrix_derivative(q, dq, ddq, diff=2)
return ddq_inv
raise NotImplementedError
dx = self.diff_xyz_wrt_laph(diff=1)
g = self.g.mat
if diff == 0:
q = np.einsum('ij,ai,bj', g, dx, dx)
return q
ddx = self.diff_xyz_wrt_laph(diff=2)
dg = self.dg
dg_laph = np.einsum('ak,kij', dx, dg)
if diff == 1:
dq = (
np.einsum('aij,bi,cj', dg_laph, dx, dx)
+ np.einsum('ij,bai,cj', g, ddx, dx)
+ np.einsum('ij,bi,caj', g, dx, ddx)
)
return dq
d3x = self.diff_xyz_wrt_laph(diff=3)
ddg = self.ddg
ddg_laph = (
np.einsum('abk,kij', ddx, dg)
+ np.einsum('ak,bl,klij', dx, dx, ddg)
)
ddq = (
np.einsum('abij,ci,dj', ddg_laph, dx, dx)
+ np.einsum('aij,cbi,dj', dg_laph, ddx, dx)
+ np.einsum('aij,ci,dbj', dg_laph, dx, ddx)
+ np.einsum('bij,cai,dj', dg_laph, ddx, dx)
+ np.einsum('bij,ci,daj', dg_laph, dx, ddx)
+ np.einsum('ij,cabi,dj', g, d3x, dx)
+ np.einsum('ij,cai,dbj', g, ddx, ddx)
+ np.einsum('ij,cbi,daj', g, ddx, ddx)
+
|
np.einsum('ij,ci,dabj', g, dx, d3x)
|
numpy.einsum
|
image_modality = 'rgb'
augmented = True
if augmented is True:
amount_data = '/augmented_data/'
else:
amount_data = '/original_data/'
analyze_validation_set = False
evaluate_train_dir = False
import time
import numpy as np
import cv2
from glob import glob
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, TensorBoard
from tqdm import tqdm
import tensorflow as tf
import keras.backend as K
from tensorflow.keras.backend import sum as suma
from tensorflow.keras.backend import mean
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from keras.utils import CustomObjectScope
import copy
from os import listdir
from os.path import isfile, join
from datetime import datetime
import csv
import matplotlib.pyplot as plt
from sklearn.metrics import average_precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
def load_data(path):
print(path)
path_images = ''.join([path, 'image/', image_modality, "/*"])
path_labels = ''.join([path, "label/*"])
images = sorted(glob(path_images))
masks = sorted(glob(path_labels))
total_size_images = len(images)
total_size_labels = len(masks)
print('total size images:', total_size_images, path_images)
print('total size labels:', total_size_labels, path_labels)
return (images, masks)
def load_data_only_imgs(path):
print(path)
path_images = ''.join([path, "/*"])
images = sorted(glob(path_images))
total_size_images = len(images)
print('total size images:', total_size_images, path_images)
return (images, images)
def read_image_test(path):
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x / 255.0
return x
def read_mask_test(path):
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = np.expand_dims(x, axis=-1)
return x
def read_image(path):
path = path.decode()
x = cv2.imread(path, 1)
x = cv2.resize(x, (256, 256))
x = x/255.0
return x
def read_mask(path):
path = path.decode()
x = cv2.imread(path)
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
x = cv2.resize(x, (256, 256))
x = x/255.0
x = np.expand_dims(x, axis=-1)
return x
def tf_parse(x, y):
def _parse(x, y):
x = read_image(x)
y = read_mask(y)
return x, y
x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])
x.set_shape([256, 256, 3])
y.set_shape([256, 256, 1])
return x, y
def tf_dataset(x, y, batch=8):
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(tf_parse)
dataset = dataset.batch(batch)
dataset = dataset.repeat()
return dataset
def iou(y_true, y_pred, smooth=1e-15):
def f(y_true, y_pred):
intersection = (y_true * y_pred).sum()
union = y_true.sum() + y_pred.sum() - intersection
x = (intersection + smooth) / (union + smooth)
x = x.astype(np.float32)
return x
return tf.numpy_function(f, [y_true, y_pred], tf.float32)
"""def dice_coef(y_true, y_pred, smooth=1):
def f (y_true, y_pred):
intersection = suma(y_true * y_pred, axis=[1,2,3])
union = suma(y_true, axis=[1,2,3]) + suma(y_pred, axis=[1,2,3])
x = mean( (2. * intersection + smooth) / (union + smooth), axis=0)
#x = x.astype(np.float32)
return x
return tf.numpy_function(f, [y_true, y_pred], tf.float32)"""
def dice_coef(y_true, y_pred, smooth=1):
intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
def dice_coef_loss(y_true, y_pred):
return 1 - dice_coef(y_true, y_pred)
def conv_block(x, num_filters):
x = Conv2D(num_filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
skip = Conv2D(num_filters, (3, 3), padding="same")(x)
skip = Activation("relu")(skip)
skip = BatchNormalization()(skip)
x = Conv2D(num_filters, (3, 3), padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = tf.math.add_n([x, skip])
x = Activation("relu")(x)
return x
def build_model():
size = 256
num_filters = [16, 32, 48, 64]
# num_filters = [64, 48, 32, 16]
# num_filters = [64, 128, 256, 512]
inputs = Input((3, size, size, 3))
skip_x = []
x = inputs
for f in num_filters:
x = conv_block(x, f)
print(str(x.shape.as_list()))
skip_x.append(x)
x = MaxPool2D((2, 2))(x)
## Bridge
x = conv_block(x, num_filters[-1])
num_filters.reverse()
skip_x.reverse()
## Decoder
for i, f in enumerate(num_filters):
x = UpSampling2D((2, 2))(x)
xs = skip_x[i]
x = Concatenate()([x, xs])
x = conv_block(x, f)
## Output
x = Conv2D(1, (1, 1), padding="same")(x)
x = Activation("sigmoid")(x)
return Model(inputs, x)
def mask_parse(mask):
mask = np.squeeze(mask)
mask = [mask, mask, mask]
mask = np.transpose(mask, (1, 2, 0))
return mask
def read_image_test(path):
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x / 255.0
return x
def read_mask_test(path):
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = np.expand_dims(x, axis=-1)
return x
def get_mcc(groundtruth_list, predicted_list):
"""Return mcc covering edge cases"""
tn, fp, fn, tp = get_confusion_matrix_elements(groundtruth_list, predicted_list)
if _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:
mcc = 1
elif _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:
mcc = 1
elif _all_class_1_predicted_as_class_0(groundtruth_list, predicted_list) is True:
mcc = -1
elif _all_class_0_predicted_as_class_1(groundtruth_list, predicted_list) is True:
mcc = -1
elif _mcc_denominator_zero(tn, fp, fn, tp) is True:
mcc = -1
# Finally calculate MCC
else:
mcc = ((tp * tn) - (fp * fn)) / (
np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
return mcc
def get_confusion_matrix_intersection_mats(groundtruth, predicted):
""" Returns dict of 4 boolean numpy arrays with True at TP, FP, FN, TN
"""
confusion_matrix_arrs = {}
groundtruth_inverse = np.logical_not(groundtruth)
predicted_inverse = np.logical_not(predicted)
confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)
confusion_matrix_arrs['tn'] = np.logical_and(groundtruth_inverse, predicted_inverse)
confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)
confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)
return confusion_matrix_arrs
def get_confusion_matrix_overlaid_mask(image, groundtruth, predicted, alpha, colors):
"""
Returns the 'image' overlaid with a color mask where TP, FP, FN, TN are
each a color given by the 'colors' dictionary
"""
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
masks = get_confusion_matrix_intersection_mats(groundtruth, predicted)
color_mask = np.zeros_like(image)
for label, mask in masks.items():
color = colors[label]
mask_rgb = np.zeros_like(image)
mask_rgb[mask != 0] = color
color_mask += mask_rgb
return cv2.addWeighted(image, alpha, color_mask, 1 - alpha, 0)
def calculate_rates(image_1, image_2):
image_1 = np.asarray(image_1).astype(np.bool)
image_2 = np.asarray(image_2).astype(np.bool)
image_1 = image_1.flatten()
image_2 = image_2.flatten()
if image_1.shape != image_2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
accuracy_value = accuracy_score(image_1, image_2)
if (np.unique(image_1) == [False]).all() and (np.unique(image_2) == [False]).all():
recall_value = 1.
precision_value = 1.
else:
recall_value = recall_score(image_1, image_2)
precision_value = average_precision_score(image_1, image_2)
return precision_value, recall_value, accuracy_value
def dice(im1, im2, smooth=0.001):
im1 = np.asarray(im1).astype(np.bool)
im2 = np.asarray(im2).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
# Compute Dice coefficient
intersection = np.logical_and(im1, im2)
if (np.unique(im1) == [False]).all() and (np.unique(im2) == [False]).all():
dsc = 1.
else:
dsc = 2. * (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)
return dsc
# return 2. * (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)
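# Worked illustration of the Dice coefficient above (added note, not from the original file):
# for binary masks with 40 overlapping pixels, 50 pixels set in im1 and 60 in im2,
# DSC = 2 * 40 / (50 + 60) ~= 0.727; the smooth term only matters when both masks are empty.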
def read_img(dir_image):
original_img = cv2.imread(dir_image)
img = cv2.resize(original_img, (256, 256))
img = img / 255
return img
def read_results_csv(file_path, row_id=0):
dice_values = []
with open(file_path, 'r') as file:
reader = csv.reader(file)
for row in reader:
dice_values.append(float(row[row_id]))
return dice_values
def evaluate_and_predict(model, directory_to_evaluate, results_directory, output_name):
output_directory = 'predictions/' + output_name + '/'
batch_size = 8
(test_x, test_y) = load_data(directory_to_evaluate)
test_dataset = tf_dataset(test_x, test_y, batch=batch_size)
test_steps = (len(test_x)//batch_size)
if len(test_x) % batch_size != 0:
test_steps += 1
# evaluate the model in the test dataset
model.evaluate(test_dataset, steps=test_steps)
times = []
for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):
#print(i, x)
directory_image = x
x = read_image_test(x)
#y = read_mask_test(y)
init_time = time.time()
y_pred = model.predict(np.expand_dims(x, axis=0))[0] > 0.5
delta = time.time() - init_time
times.append(delta)
name_original_file = directory_image.replace(''.join([directory_to_evaluate, 'image/', image_modality, '/']), '')
results_name = ''.join([results_directory, output_directory, name_original_file])
cv2.imwrite(results_name, y_pred * 255.0)
# save the results of the test dataset in a CSV file
    ground_truth_imgs_dir = directory_to_evaluate + 'image/' + image_modality + '/'
    ground_truth_labels_dir = directory_to_evaluate + 'label/'
    result_mask_dir = results_directory + output_directory
    ground_truth_image_list = [file for file in listdir(ground_truth_imgs_dir) if
                               isfile(join(ground_truth_imgs_dir, file))]
    results_image_list = [file for file in listdir(result_mask_dir) if isfile(join(result_mask_dir, file))]
    results_dice = []
    results_sensitivity = []
    results_specificity = []
    results_accuracy = []
for image in ground_truth_image_list[:]:
        result_image = next((name for name in results_image_list if image[-12:] == name[-12:]), None)
        if result_image is not None:
original_mask = read_img(''.join([ground_truth_labels_dir, image]))
predicted_mask = read_img(''.join([result_mask_dir, result_image]))
dice_val = dice(original_mask, predicted_mask)
results_dice.append(dice_val)
sensitivity, specificity, accuracy = calculate_rates(original_mask, predicted_mask)
results_sensitivity.append(sensitivity)
results_specificity.append(specificity)
results_accuracy.append(accuracy)
else:
print(image, 'not found in results list')
name_test_csv_file = ''.join([results_directory, 'results_evaluation_',
output_name,
'_',
new_results_id,
'_.csv'])
with open(name_test_csv_file, mode='w') as results_file:
results_file_writer = csv.writer(results_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, file in enumerate(ground_truth_image_list):
results_file_writer.writerow(
[str(i), file, results_dice[i],
results_sensitivity[i],
results_specificity[i],
results_accuracy[i]])
print(times)
print(np.average(times), np.std(times))
return name_test_csv_file
def predict_mask(model, input_image):
y_pred = model.predict(np.expand_dims(input_image, axis=0))[0] >= 0.5
return y_pred
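# Minimal usage sketch for predict_mask(): 'model' is assumed to be a trained Keras
# segmentation model and 'image_path' an existing image file; both are placeholders,
# not defined in this script.
def _example_predict_and_parse(model, image_path):
    x = read_image_test(image_path)      # BGR image resized to 256x256, scaled to [0, 1]
    y_pred = predict_mask(model, x)      # boolean mask of shape (256, 256, 1)
    return mask_parse(y_pred)            # stacked to 3 channels, e.g. for plotting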
def paint_imgs(img, mask):
if np.shape(img) != np.shape(mask):
img = cv2.resize(img, (np.shape(mask)[0], np.shape(mask)[1]))
for i in range(np.shape(mask)[0]):
for j in range(np.shape(mask)[1]):
if mask[i, j, 0] == True:
img[i, j, 1] = 100
return img
def build_contours(array_of_points):
contours = []
for i, y_points in enumerate(array_of_points[0]):
point = (array_of_points[1][i], y_points)
point = np.asarray(point)
contours.append([point])
return contours
def calc_histograms_and_center(mask, image):
if not (np.all(mask == 0)):
percentage = 0.6
#grayscale = cv2.cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
grayscale = image[:, :, 2]
grayscale = np.multiply(grayscale, mask)
#list_values_grayscale = [value for row in grayscale for value in row if value != 0]
# create the histogram plot, with three lines, one for
# each color
max_grays = ((np.where(grayscale >= int(percentage * np.amax(grayscale)))))
gray_contours = np.asarray([build_contours(max_grays)])
gray_convex_hull, gray_x, gray_y = determine_convex_hull(gray_contours)
points_x = []
points_y = []
for hull in gray_convex_hull:
for i, point in enumerate(hull):
points_x.append(point[0][0])
points_y.append(point[0][1])
else:
gray_x = 'nAN'
gray_y = 'nAN'
return gray_x, gray_y
def detect_dark_region(mask, image):
w_image, h_image, d_image =
|
np.shape(image)
|
numpy.shape
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
Frequency Generator with logspaced freqencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
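# Minimal usage sketch for freq_gen(): sweep from 100 kHz down to 0.1 Hz with the
# default 7 points per decade (values chosen only as an example).
def _example_freq_range():
    f_range, w_range = freq_gen(f_start=10**5, f_stop=0.1)
    return f_range, w_range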
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
n = Constant phase elelment exponent [-]
'''
return 1/(Q*(w*1j)**n)
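# Minimal usage sketch for the element functions: evaluate -L-, -C- and -Q- over an
# example frequency range (all parameter values below are arbitrary).
def _example_elements():
    _, w = freq_gen(10**5, 0.1)
    Z_L = elem_L(w, L=1e-6)          # inductor
    Z_C = elem_C(w, C=1e-5)          # capacitor
    Z_Q = elem_Q(w, Q=1e-5, n=0.9)   # constant phase element
    return Z_L, Z_C, Z_Q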
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase elelment exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase elelment exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase elelment exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
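# Minimal usage sketch for cir_RsRQ(): give R, n and the summit frequency fs and let
# the function back-calculate Q (parameter values are arbitrary examples).
def _example_RsRQ_spectrum():
    _, w = freq_gen(10**5, 0.01)
    Z = cir_RsRQ(w, Rs=20, R=100, n=0.85, fs=10**3)
    return Z.real, -Z.imag   # ready for a Nyquist plot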
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
Returns the impedance of an RC circuit, using RQ definations where n=1. see cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase elelment exponent in (RQ) circuit [-]
fs1 = Summit frequency of RQ circuit [Hz]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase elelment exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase elelment exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
C = Constant phase element of series Q [s^n/ohm]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
C = Capacitance of series C [s^n/ohm]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase elelment exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
Two cases: a) ox and red are both present in solution here both Cred and Dred are defined, b) In the particular case where initially
only Ox species are present in the solution with bulk concentration C*_ox, the surface concentrations may be calculated as function
of the electrode potential following Nernst equation. Here C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
C_ox = Bulk concetration of oxidized specie [mol/cm3]
C_red = Bulk concetration of reduced specie [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faradays consntat [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
print('define E and E0')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
C_ox = Concetration of oxidized specie [mol/cm3]
C_red = Concetration of reduced specie [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faradays consntat [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
n = expononent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with a simplified Warburg element defined by sigma
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
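# Minimal usage sketch for cir_Randles_simplified(): the Warburg coefficient sigma is
# supplied directly (all parameter values below are arbitrary examples).
def _example_Randles_simplified():
    _, w = freq_gen(10**5, 0.1)
    return cir_Randles_simplified(w, Rs=15, R=250, n=0.9, sigma=80, fs=10**2)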
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibts a immobile ionic species in bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuits that can be used if electrodes and bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when the argument of tanh becomes very large, above 10^250, this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
This circuit has been used to study non-blocking electrodes with an ioniocally conducting electrolyte with a mobile and immobile ionic specie in bulk, this is mixed with a
ionically conducting salt. This behavior yields in a impedance response, that consists of the interfacial impendaces -(RC_e)-, the ionically conducitng polymer -(RC_e)-,
and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives errors when sinh becomes very large, above 10^250, this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives errors when coth becomes very large, above 10^250, this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
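# Quick numerical check of the overflow-safe helpers: for large real arguments
# np.cosh/np.sinh overflow, while coth() above stays finite and tends to 1.
def _example_coth_stability():
    x = np.array([1.0, 50.0, 800.0])
    return coth(x)   # approx. [1.313, 1.0, 1.0], with no overflow warnings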
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponenet for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponenet for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
n = Exponenet for interfacial RQ element [-]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = Electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
n2 = exponenet for interfacial RQ element [-]
Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flodded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
Q = Summit frequency peak of RQ element in the modified randles element of a particle [Hz]
n = exponenet for internal RQ element in the modified randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
fs1 = max frequency peak of the interfacial RQ element[Hz]
n1 = exponenet for interfacial RQ element
R2 = particle charge transfer resistance [ohm*cm^2]
Q2 = Summit frequency peak of RQ element in the modified randles element of a particle [Hz]
n2 = exponenet for internal RQ element in the modified randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
    '''
    Fit Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1
    '''
    if str(params.keys())[10:].find("R") == -1: #if R == 'none':
        C = params['C']
        fs = params['fs']
        R = (1/(C*(2*np.pi*fs)))
    if str(params.keys())[10:].find("C") == -1: #elif C == 'none':
        R = params['R']
        fs = params['fs']
        C = (1/(R*(2*np.pi*fs)))
    if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
        R = params['R']
        C = params['C']
        fs = 'none'
    return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
Z(w) = R / (1+ R*Q * (2w)^n)
See Explanation of equations under cir_RQ()
The params.keys()[10:] finds the names of the user defined parameters that should be interated over if X == -1, if the paramter is not given, it becomes equal to 'none'
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
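# Minimal fitting sketch: how a *_fit function plugs into lmfit. Parameters R, Q and n
# are supplied (fs omitted); w_meas and Z_meas are placeholders for measured angular
# frequencies and complex impedances.
def _example_fit_RQ(w_meas, Z_meas):
    params = Parameters()
    params.add('R', value=100, min=0)
    params.add('Q', value=1e-5, min=0)
    params.add('n', value=0.9, min=0, max=1)

    def residual(p, w, Z):
        Z_fit = cir_RQ_fit(p, w)
        return np.concatenate([Z_fit.real - Z.real, Z_fit.imag - Z.imag])

    return minimize(residual, params, args=(w_meas, Z_meas))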
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RsRQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
# Bulk impendance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impendace
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
        Re = params['Re']
        fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
# Bulk impendance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
# Mass transport impendance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TLQ- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
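#All of the full-complexity transmission-line functions in this group evaluate the same closed-form
#impedance, with lambda = (Phi/(Ri+Rel))**(1/2) and x = L/lambda:
#
#   Z_TL = (Rel*Ri)/(Rel+Ri) * (L + 2*lambda/sinh(x)) + lambda * (Rel**2 + Ri**2)/(Rel+Ri) * coth(x)
#
#Only the interfacial impedance Phi differs between them: a Q element (-TLQ-), an -(RQ)- element (-TL-),
#or a Randles element containing a 1D solid-state Warburg (-TL1Dsolid-).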
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
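#The 1D solid-state Warburg element above evaluates R_w*coth(x)/x with x = ((radius**2/D)*w*1j)**n_w
#through mpmath, element by element, because numpy provides no coth and the argument can overflow at
#extreme frequencies. The function below is an illustrative sketch of that pattern only (not part of
#the original API); all argument names are placeholders.
def _warburg_coth_sketch(w, radius, D, R_w, n_w):
    '''Illustrative only: finite-length 1D solid-state Warburg impedance.'''
    time_const = (radius**2)/D                  #diffusion time constant [s]
    x = (time_const*w*1j)**n_w
    x_mp = mp.matrix(x)                         #mpmath handles very large/small arguments of coth
    coth_vals = [float(mp.coth(x_mp[i]).real) + float(mp.coth(x_mp[i]).imag)*1j for i in range(len(w))]
    return R_w * np.array(coth_vals)/x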
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific to 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting function (lmfit) will use this function to iterate
until the total sum of errors is minimized.
During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- w: angular frequency
- re: real part of the impedance
- im: imaginary part of the impedance
- circuit:
The available circuits are shown below; this parameter must be passed as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different Weighing options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of 1's, so that the weighting is == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
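#Hedged usage sketch (not part of the original API): leastsq_errorfunc() is intended to be handed to
#lmfit's minimize() together with the measured spectrum; real fits are normally run through
#EIS_exp.EIS_fit() further down. The parameter names and start values below are placeholders for an
#-R-RQ- circuit.
def _example_cnls_fit(w, re, im):
    '''Illustrative only: CNLS fit of an -R-RQ- circuit with modulus weighting.'''
    from lmfit import Parameters, minimize as lm_minimize
    params = Parameters()
    params.add('Rs', value=10, min=0)           #series resistance [ohm]
    params.add('R', value=100, min=0)           #polarization resistance [ohm]
    params.add('Q', value=1e-3, min=0)          #constant phase element [s^n/ohm]
    params.add('n', value=0.9, min=0, max=1)    #CPE exponent
    return lm_minimize(leastsq_errorfunc, params, method='leastsq',
                       args=(w, re, im, 'R-RQ', 'modulus'))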
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
- cycle: Specific cycle numbers can be extracted with this parameter. Default is 'off', which includes all cycle numbers.
Specific cycles are extracted by passing a list of cycle numbers, e.g. cycle=[1,4,6] extracts cycles 1, 4, and 6.
- mask: ['high frequency', 'low frequency'], if only a high- or low-frequency cut-off is desired use 'none' for the other, e.g. mask=[10**4,'none']
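Example (illustrative only - the path and file names below are placeholders)
-----------
ex = EIS_exp(path='C:/experiments/', data=['EIS_data1.mpt', 'EIS_data2.mpt'], cycle='off', mask=['none','none'])
ex.Lin_KK(num_RC='auto', plot='residuals')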
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]):
if j > 0: #corrects cycle_number except for the first data file
self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
self.df_raw = pd.concat(self.df_raw0, axis=0) #merges all imported data files into a single dataframe
self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited2[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited2[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
- Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
The function performs the KK analysis and by default plots the relative residuals in each subplot.
Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
- 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
- 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals together with the experimental data, in a Nyquist and Bode plot if desired (see 'bode =' below)
- nyq_xlim/nyq_ylim: Change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state as [min,max] values
- legend:
- 'on' = displays cycle number
- 'potential' = displays the average potential at which the spectra were measured
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
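Note on num_RC = 'auto' (summary of the routine below): the fit starts with M = 2 RC-elements and evaluates
u = 1 - |sum of negative R's|/|sum of positive R's|; the number of RC-elements is then increased until
0.75 < u < 0.88, following Schönleber et al., so that neither under- nor over-fitting occurs.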
'''
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
if int(self.number_RC[i]) == 2:
self.KK_circuit_fit.append(KK_RC2(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 3:
self.KK_circuit_fit.append(KK_RC3(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 4:
self.KK_circuit_fit.append(KK_RC4(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 5:
self.KK_circuit_fit.append(KK_RC5(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 6:
self.KK_circuit_fit.append(KK_RC6(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 7:
self.KK_circuit_fit.append(KK_RC7(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 8:
self.KK_circuit_fit.append(KK_RC8(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 9:
self.KK_circuit_fit.append(KK_RC9(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 10:
self.KK_circuit_fit.append(KK_RC10(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 11:
self.KK_circuit_fit.append(KK_RC11(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 12:
self.KK_circuit_fit.append(KK_RC12(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 13:
self.KK_circuit_fit.append(KK_RC13(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 14:
self.KK_circuit_fit.append(KK_RC14(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 15:
self.KK_circuit_fit.append(KK_RC15(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 16:
self.KK_circuit_fit.append(KK_RC16(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 17:
self.KK_circuit_fit.append(KK_RC17(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 18:
self.KK_circuit_fit.append(KK_RC18(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 19:
self.KK_circuit_fit.append(KK_RC19(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 20:
self.KK_circuit_fit.append(KK_RC20(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 21:
self.KK_circuit_fit.append(KK_RC21(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 22:
self.KK_circuit_fit.append(KK_RC22(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 23:
self.KK_circuit_fit.append(KK_RC23(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 24:
self.KK_circuit_fit.append(KK_RC24(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 25:
self.KK_circuit_fit.append(KK_RC25(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 26:
self.KK_circuit_fit.append(KK_RC26(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 27:
self.KK_circuit_fit.append(KK_RC27(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 28:
self.KK_circuit_fit.append(KK_RC28(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 29:
self.KK_circuit_fit.append(KK_RC29(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 30:
self.KK_circuit_fit.append(KK_RC30(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 31:
self.KK_circuit_fit.append(KK_RC31(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 32:
self.KK_circuit_fit.append(KK_RC32(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 33:
self.KK_circuit_fit.append(KK_RC33(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 34:
self.KK_circuit_fit.append(KK_RC34(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 35:
self.KK_circuit_fit.append(KK_RC35(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 36:
self.KK_circuit_fit.append(KK_RC36(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 37:
self.KK_circuit_fit.append(KK_RC37(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 38:
self.KK_circuit_fit.append(KK_RC38(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 39:
self.KK_circuit_fit.append(KK_RC39(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 40:
self.KK_circuit_fit.append(KK_RC40(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 41:
self.KK_circuit_fit.append(KK_RC41(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 42:
self.KK_circuit_fit.append(KK_RC42(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 43:
self.KK_circuit_fit.append(KK_RC43(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 44:
self.KK_circuit_fit.append(KK_RC44(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 45:
self.KK_circuit_fit.append(KK_RC45(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 46:
self.KK_circuit_fit.append(KK_RC46(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 47:
self.KK_circuit_fit.append(KK_RC47(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 48:
self.KK_circuit_fit.append(KK_RC48(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 49:
self.KK_circuit_fit.append(KK_RC49(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 50:
self.KK_circuit_fit.append(KK_RC50(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 51:
self.KK_circuit_fit.append(KK_RC51(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 52:
self.KK_circuit_fit.append(KK_RC52(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 53:
self.KK_circuit_fit.append(KK_RC53(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 54:
self.KK_circuit_fit.append(KK_RC54(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 55:
self.KK_circuit_fit.append(KK_RC55(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 56:
self.KK_circuit_fit.append(KK_RC56(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 57:
self.KK_circuit_fit.append(KK_RC57(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 58:
self.KK_circuit_fit.append(KK_RC58(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 59:
self.KK_circuit_fit.append(KK_RC59(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 60:
self.KK_circuit_fit.append(KK_RC60(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 61:
self.KK_circuit_fit.append(KK_RC61(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 62:
self.KK_circuit_fit.append(KK_RC62(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 63:
self.KK_circuit_fit.append(KK_RC63(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 64:
self.KK_circuit_fit.append(KK_RC64(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 65:
self.KK_circuit_fit.append(KK_RC65(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 66:
self.KK_circuit_fit.append(KK_RC66(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 67:
self.KK_circuit_fit.append(KK_RC67(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 68:
self.KK_circuit_fit.append(KK_RC68(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 69:
self.KK_circuit_fit.append(KK_RC69(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 70:
self.KK_circuit_fit.append(KK_RC70(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 71:
self.KK_circuit_fit.append(KK_RC71(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 72:
self.KK_circuit_fit.append(KK_RC72(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 73:
self.KK_circuit_fit.append(KK_RC73(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 74:
self.KK_circuit_fit.append(KK_RC74(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 75:
self.KK_circuit_fit.append(KK_RC75(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 76:
self.KK_circuit_fit.append(KK_RC76(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 77:
self.KK_circuit_fit.append(KK_RC77(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 78:
self.KK_circuit_fit.append(KK_RC78(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 79:
self.KK_circuit_fit.append(KK_RC79(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 80:
self.KK_circuit_fit.append(KK_RC80(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
else:
print('RC simulation circuit not defined')
print(' Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
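                # The loop above records, for each cycle, the largest positive and negative relative
                # residuals; the branches below set symmetric y-limits at 1.5x that extreme value and
                # place the 'Lin-KK' annotation at 1.3x of it, so the label stays inside the axes.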
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
                        ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
import pyscipopt
from pyscipopt import Model
import ecole
import numpy as np
import random
import pathlib
import gzip
import pickle
import json
import matplotlib.pyplot as plt
from geco.mips.loading.miplib import Loader
from utility import lbconstraint_modes, instancetypes, instancesizes, generator_switcher, binary_support, copy_sol, mean_filter,mean_forward_filter, imitation_accuracy, haming_distance_solutions, haming_distance_solutions_asym
from localbranching import addLBConstraint, addLBConstraintAsymmetric
from ecole_extend.environment_extend import SimpleConfiguring, SimpleConfiguringEnablecuts, SimpleConfiguringEnableheuristics
from models import GraphDataset, GNNPolicy, BipartiteNodeData
import torch.nn.functional as F
import torch_geometric
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, ConcatDataset
from scipy.interpolate import interp1d
from localbranching import LocalBranching
import gc
import sys
from memory_profiler import profile
from models_rl import SimplePolicy, ImitationLbDataset, AgentReinforce
from dataset import InstanceDataset, custom_collate
class MlLocalbranch:
def __init__(self, instance_type, instance_size, lbconstraint_mode, incumbent_mode='firstsol', seed=100):
self.instance_type = instance_type
self.instance_size = instance_size
self.incumbent_mode = incumbent_mode
self.lbconstraint_mode = lbconstraint_mode
self.seed = seed
self.directory = './result/generated_instances/' + self.instance_type + '/' + self.instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
# self.generator = generator_switcher(self.instance_type + self.instance_size)
self.initialize_ecole_env()
self.env.seed(self.seed) # environment (SCIP)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def initialize_ecole_env(self):
if self.incumbent_mode == 'firstsol':
self.env = ecole.environment.Configuring(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
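            # MilpBipartite observations expose the instance as a bipartite graph
            # (constraint_features, variable_features, and sparse edge_features linking them);
            # this is what is later wrapped into BipartiteNodeData and fed to the GNN.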
elif self.incumbent_mode == 'rootsol':
if self.instance_type == 'independentset':
self.env = SimpleConfiguring(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
else:
self.env = SimpleConfiguringEnablecuts(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
# elif self.instance_type == 'capacitedfacility':
# self.env = SimpleConfiguringEnableheuristics(
#
# # set up a few SCIP parameters
# scip_params={
# "presolving/maxrounds": 0, # deactivate presolving
# "presolving/maxrestarts": 0,
# },
#
# observation_function=ecole.observation.MilpBipartite(),
#
# reward_function=None,
#
# # collect additional metrics for information purposes
# information_function={
# 'time': ecole.reward.SolvingTime().cumsum(),
# }
# )
def set_and_optimize_MIP(self, MIP_model, incumbent_mode):
preprocess_off = True
if incumbent_mode == 'firstsol':
heuristics_off = False
cuts_off = False
elif incumbent_mode == 'rootsol':
if self.instance_type == 'independentset':
heuristics_off = True
cuts_off = True
else:
heuristics_off = True
cuts_off = False
# elif self.instance_type == 'capacitedfacility':
# heuristics_off = False
# cuts_off = True
if preprocess_off:
MIP_model.setParam('presolving/maxrounds', 0)
MIP_model.setParam('presolving/maxrestarts', 0)
if heuristics_off:
MIP_model.setHeuristics(pyscipopt.SCIP_PARAMSETTING.OFF)
if cuts_off:
MIP_model.setSeparating(pyscipopt.SCIP_PARAMSETTING.OFF)
if incumbent_mode == 'firstsol':
MIP_model.setParam('limits/solutions', 1)
elif incumbent_mode == 'rootsol':
MIP_model.setParam("limits/nodes", 1)
MIP_model.optimize()
t = MIP_model.getSolvingTime()
status = MIP_model.getStatus()
lp_status = MIP_model.getLPSolstat()
stage = MIP_model.getStage()
n_sols = MIP_model.getNSols()
# print("* Model status: %s" % status)
# print("* LP status: %s" % lp_status)
# print("* Solve stage: %s" % stage)
# print("* Solving time: %s" % t)
# print('* number of sol : ', n_sols)
incumbent_solution = MIP_model.getBestSol()
feasible = MIP_model.checkSol(solution=incumbent_solution)
return status, feasible, MIP_model, incumbent_solution
def initialize_MIP(self, MIP_model):
MIP_model_2, MIP_2_vars, success = MIP_model.createCopy(origcopy=True)
incumbent_mode = self.incumbent_mode
if self.incumbent_mode == 'firstsol':
incumbent_mode_2 = 'rootsol'
elif self.incumbent_mode == 'rootsol':
incumbent_mode_2 = 'firstsol'
status, feasible, MIP_model, incumbent_solution = self.set_and_optimize_MIP(MIP_model, incumbent_mode)
status_2, feasible_2, MIP_model_2, incumbent_solution_2 = self.set_and_optimize_MIP(MIP_model_2,
incumbent_mode_2)
feasible = (feasible and feasible_2)
if (not status == 'optimal') and (not status_2 == 'optimal'):
not_optimal = True
else:
not_optimal = False
if not_optimal and feasible:
valid = True
else:
valid = False
return valid, MIP_model, incumbent_solution
def generate_instances(self, instance_type, instance_size):
directory = './data/generated_instances/' + instance_type + '/' + instance_size + '/'
directory_transformedmodel = directory + 'transformedmodel' + '/'
directory_firstsol = directory +'firstsol' + '/'
directory_rootsol = directory + 'rootsol' + '/'
pathlib.Path(directory_transformedmodel).mkdir(parents=True, exist_ok=True)
pathlib.Path(directory_firstsol).mkdir(parents=True, exist_ok=True)
pathlib.Path(directory_rootsol).mkdir(parents=True, exist_ok=True)
generator = generator_switcher(instance_type + instance_size)
generator.seed(self.seed)
index_instance = 0
while index_instance < 200:
instance = next(generator)
MIP_model = instance.as_pyscipopt()
MIP_model.setProbName(instance_type + '-' + str(index_instance))
instance_name = MIP_model.getProbName()
print('\n')
print(instance_name)
# initialize MIP
MIP_model_2, MIP_2_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=True)
# MIP_model_orig, MIP_vars_orig, success = MIP_model.createCopy(
# problemName='Baseline', origcopy=True)
incumbent_mode = 'firstsol'
incumbent_mode_2 = 'rootsol'
status, feasible, MIP_model, incumbent_solution = self.set_and_optimize_MIP(MIP_model, incumbent_mode)
status_2, feasible_2, MIP_model_2, incumbent_solution_2 = self.set_and_optimize_MIP(MIP_model_2,
incumbent_mode_2)
feasible = feasible and feasible_2
if (not status == 'optimal') and (not status_2 == 'optimal'):
not_optimal = True
else:
not_optimal = False
if not_optimal and feasible:
valid = True
else:
valid = False
if valid:
MIP_model.resetParams()
MIP_model_transformed, MIP_copy_vars, success = MIP_model.createCopy(
problemName='transformed', origcopy=False)
MIP_model_transformed, sol_MIP_first = copy_sol(MIP_model, MIP_model_transformed, incumbent_solution,
MIP_copy_vars)
MIP_model_transformed, sol_MIP_root = copy_sol(MIP_model_2, MIP_model_transformed, incumbent_solution_2,
MIP_copy_vars)
transformed_model_name = MIP_model_transformed.getProbName()
filename = f'{directory_transformedmodel}{transformed_model_name}.cip'
MIP_model_transformed.writeProblem(filename=filename, trans=False)
firstsol_filename = f'{directory_firstsol}firstsol-{transformed_model_name}.sol'
MIP_model_transformed.writeSol(solution=sol_MIP_first, filename=firstsol_filename)
rootsol_filename = f'{directory_rootsol}rootsol-{transformed_model_name}.sol'
MIP_model_transformed.writeSol(solution=sol_MIP_root, filename=rootsol_filename)
model = Model()
model.readProblem(filename)
sol = model.readSolFile(rootsol_filename)
feas = model.checkSol(sol)
if not feas:
                    print('the root solution of ' + model.getProbName() + ' is not feasible!')
model.addSol(sol, False)
print(model.getSolObjVal(sol))
instance = ecole.scip.Model.from_pyscipopt(model)
scipMIP = instance.as_pyscipopt()
sol2 = scipMIP.getBestSol()
print(scipMIP.getSolObjVal(sol2))
# MIP_model_2.resetParams()
# MIP_model_copy2, MIP_copy_vars2, success2 = MIP_model_2.createCopy(
# problemName='rootsol',
# origcopy=False)
# MIP_model_copy2, sol_MIP_copy2 = copy_sol(MIP_model_2, MIP_model_copy2, incumbent_solution_2,
# MIP_copy_vars2)
MIP_model.freeProb()
MIP_model_2.freeProb()
MIP_model_transformed.freeProb()
model.freeProb()
del MIP_model
del MIP_model_2
del MIP_model_transformed
del model
index_instance += 1
def evaluate_lb_per_instance(self, node_time_limit, total_time_limit, index_instance, reset_k_at_2nditeration=False, policy=None,
):
"""
evaluate a single MIP instance by two algorithms: lb-baseline and lb-pred_k
:param node_time_limit:
:param total_time_limit:
:param index_instance:
:return:
"""
device = self.device
instance = next(self.generator)
MIP_model = instance.as_pyscipopt()
MIP_model.setProbName(self.instance_type + '-' + str(index_instance))
instance_name = MIP_model.getProbName()
print('\n')
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
valid, MIP_model, incumbent_solution = self.initialize_MIP(MIP_model)
conti =99
# if self.incumbent_mode == 'rootsol' and self.instance_type == 'independentset':
# conti = 196
if valid:
if index_instance > 99 and index_instance > conti:
gc.collect()
observation, _, _, done, _ = self.env.reset(instance)
del observation
# print(observation)
if self.incumbent_mode == 'firstsol':
action = {'limits/solutions': 1}
elif self.incumbent_mode == 'rootsol':
action = {'limits/nodes': 1} #
sample_observation, _, _, done, _ = self.env.step(action)
# print(sample_observation)
graph = BipartiteNodeData(sample_observation.constraint_features,
sample_observation.edge_features.indices,
sample_observation.edge_features.values,
sample_observation.variable_features)
# We must tell pytorch geometric how many nodes there are, for indexing purposes
graph.num_nodes = sample_observation.constraint_features.shape[0] + \
sample_observation.variable_features.shape[
0]
# instance = Loader().load_instance('b1c1s1' + '.mps.gz')
# MIP_model = instance
# MIP_model.optimize()
# print("Status:", MIP_model.getStatus())
# print("best obj: ", MIP_model.getObjVal())
# print("Solving time: ", MIP_model.getSolvingTime())
initial_obj = MIP_model.getSolObjVal(incumbent_solution)
print("Initial obj before LB: {}".format(initial_obj))
binary_supports = binary_support(MIP_model, incumbent_solution)
print('binary support: ', binary_supports)
model_gnn = GNNPolicy()
model_gnn.load_state_dict(torch.load(
self.saved_gnn_directory + 'trained_params_' + self.regression_dataset + '_' + self.lbconstraint_mode + '_' + self.incumbent_mode + '.pth'))
# model_gnn.load_state_dict(torch.load(
# 'trained_params_' + self.instance_type + '.pth'))
k_model = model_gnn(graph.constraint_features, graph.edge_index, graph.edge_attr,
graph.variable_features)
k_pred = k_model.item() * n_binvars
print('GNN prediction: ', k_model.item())
if self.is_symmetric == False:
k_pred = k_model.item() * binary_supports
k_pred = np.ceil(k_pred)
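                # Illustrative, made-up numbers: a GNN output of 0.15 on an instance with 2000 binary
                # variables gives k_pred = ceil(0.15 * 2000) = 300; in the asymmetric mode the same
                # output is scaled by the binary support of the incumbent instead.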
del k_model
del graph
del sample_observation
del model_gnn
# create a copy of MIP
MIP_model.resetParams()
MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=False)
MIP_model_copy2, MIP_copy_vars2, success2 = MIP_model.createCopy(
problemName='GNN',
origcopy=False)
MIP_model_copy3, MIP_copy_vars3, success3 = MIP_model.createCopy(
problemName='GNN+reset',
origcopy=False)
print('MIP copies are created')
MIP_model_copy, sol_MIP_copy = copy_sol(MIP_model, MIP_model_copy, incumbent_solution,
MIP_copy_vars)
MIP_model_copy2, sol_MIP_copy2 = copy_sol(MIP_model, MIP_model_copy2, incumbent_solution,
MIP_copy_vars2)
MIP_model_copy3, sol_MIP_copy3 = copy_sol(MIP_model, MIP_model_copy3, incumbent_solution,
MIP_copy_vars3)
print('incumbent solution is copied to MIP copies')
MIP_model.freeProb()
del MIP_model
del incumbent_solution
# sol = MIP_model_copy.getBestSol()
# initial_obj = MIP_model_copy.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# # execute local branching baseline heuristic by Fischetti and Lodi
# lb_model = LocalBranching(MIP_model=MIP_model_copy, MIP_sol_bar=sol_MIP_copy, k=self.k_baseline,
# node_time_limit=node_time_limit,
# total_time_limit=total_time_limit)
# status, obj_best, elapsed_time, lb_bits, times, objs = lb_model.search_localbranch(is_symmeric=self.is_symmetric,
# reset_k_at_2nditeration=False)
# print("Instance:", MIP_model_copy.getProbName())
# print("Status of LB: ", status)
# print("Best obj of LB: ", obj_best)
# print("Solving time: ", elapsed_time)
# print('\n')
#
# MIP_model_copy.freeProb()
# del sol_MIP_copy
# del MIP_model_copy
# sol = MIP_model_copy2.getBestSol()
# initial_obj = MIP_model_copy2.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# execute local branching with 1. first k predicted by GNN, 2. for 2nd iteration of lb, reset k to default value of baseline
lb_model3 = LocalBranching(MIP_model=MIP_model_copy3, MIP_sol_bar=sol_MIP_copy3, k=k_pred,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits_pred_reset, times_reset_imitation, objs_reset_imitation, loss_instance, accu_instance = lb_model3.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=reset_k_at_2nditeration,
policy=policy,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy3.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy3.freeProb()
del sol_MIP_copy3
del MIP_model_copy3
# execute local branching with 1. first k predicted by GNN; 2. for 2nd iteration of lb, continue lb algorithm with no further injection
lb_model2 = LocalBranching(MIP_model=MIP_model_copy2, MIP_sol_bar=sol_MIP_copy2, k=k_pred,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits_pred, times_reset_vanilla, objs_reset_vanilla, _, _ = lb_model2.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=True,
policy=None,
optimizer=None,
device=None
)
print("Instance:", MIP_model_copy2.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy2.freeProb()
del sol_MIP_copy2
del MIP_model_copy2
data = [objs_reset_vanilla, times_reset_vanilla, objs_reset_imitation, times_reset_imitation]
filename = f'{self.directory_lb_test}lb-test-{instance_name}.pkl' # instance 100-199
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f)
del data
del lb_model2
del lb_model3
index_instance += 1
del instance
return index_instance
def evaluate_localbranching(self, test_instance_size='-small', total_time_limit=60, node_time_limit=30, reset_k_at_2nditeration=False):
self.regression_dataset = self.instance_type + self.instance_size
self.evaluation_dataset = self.instance_type + test_instance_size
self.generator = generator_switcher(self.evaluation_dataset)
self.generator.seed(self.seed)
self.k_baseline = 20
self.is_symmetric = True
if self.lbconstraint_mode == 'asymmetric':
self.is_symmetric = False
self.k_baseline = self.k_baseline / 2
self.saved_gnn_directory = './result/saved_models/'
directory = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/' + 'rl/'
self.directory_lb_test = directory + 'imitation4lb-from-' + self.incumbent_mode + '-t_node' + str(node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
pathlib.Path(self.directory_lb_test).mkdir(parents=True, exist_ok=True)
rl_policy = SimplePolicy(7, 4)
rl_policy.load_state_dict(torch.load(
self.saved_gnn_directory + 'trained_params_simplepolicy_rl4lb_imitation.pth'))
criterion = nn.CrossEntropyLoss()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
index_instance = 0
while index_instance < 200:
            # Note: evaluate_lb_per_instance does not accept criterion/device keyword arguments
            # (it reads the device from self), so only the supported arguments are passed here.
            index_instance = self.evaluate_lb_per_instance(node_time_limit=node_time_limit,
                                                           total_time_limit=total_time_limit,
                                                           index_instance=index_instance,
                                                           reset_k_at_2nditeration=reset_k_at_2nditeration,
                                                           policy=rl_policy)
def compute_primal_integral(self, times, objs, obj_opt, total_time_limit=60):
# obj_opt = objs.min()
times = np.append(times, total_time_limit)
objs = np.append(objs, objs[-1])
gamma_baseline = np.zeros(len(objs))
for j in range(len(objs)):
if objs[j] == 0 and obj_opt == 0:
gamma_baseline[j] = 0
elif objs[j] * obj_opt < 0:
gamma_baseline[j] = 1
else:
gamma_baseline[j] = np.abs(objs[j] - obj_opt) / np.maximum(np.abs(objs[j]), np.abs(obj_opt)) #
# compute the primal gap of last objective
primal_gap_final = np.abs(objs[-1] - obj_opt) / np.abs(obj_opt)
# create step line
stepline = interp1d(times, gamma_baseline, 'previous')
# compute primal integral
primal_integral = 0
for j in range(len(objs) - 1):
primal_integral += gamma_baseline[j] * (times[j + 1] - times[j])
return primal_integral, primal_gap_final, stepline
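    # Worked example with made-up numbers (not from any experiment): for obj_opt = 10,
    # times = [0, 20, 40], objs = [20, 12, 10] and total_time_limit = 60, the endpoint is
    # appended (times -> [0, 20, 40, 60], objs -> [20, 12, 10, 10]), the primal gaps are
    # gamma = [10/20, 2/12, 0, 0] ~= [0.5, 0.167, 0, 0], the primal integral is
    # 0.5*20 + 0.167*20 + 0*20 ~= 13.3, and primal_gap_final = |10 - 10| / |10| = 0.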
def primal_integral(self, test_instance_size, total_time_limit=60, node_time_limit=30):
directory = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/' + 'rl/'
directory_lb_test = directory + 'imitation4lb-from-' + self.incumbent_mode + '-t_node' + str(
node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
if self.incumbent_mode == 'firstsol':
directory_2 = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + 'rootsol' + '/' + 'rl/'
directory_lb_test_2 = directory_2 + 'imitation4lb-from-' + 'rootsol' + '-t_node' + str(node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
elif self.incumbent_mode == 'rootsol':
directory_2 = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + 'firstsol' + '/' + 'rl/'
directory_lb_test_2 = directory_2 + 'imitation4lb-from-' + 'firstsol' + '-t_node' + str(node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
# primal_int_baselines = []
primal_int_reset_vanillas = []
primal_in_reset_imitations = []
# primal_gap_final_baselines = []
primal_gap_final_reset_vanillas = []
primal_gap_final_reset_imitations = []
# steplines_baseline = []
steplines_reset_vanillas = []
steplines_reset_imitations = []
for i in range(100,200):
instance_name = self.instance_type + '-' + str(i) # instance 100-199
filename = f'{directory_lb_test}lb-test-{instance_name}.pkl'
with gzip.open(filename, 'rb') as f:
data = pickle.load(f)
objs_reset_vanilla, times_reset_vanilla, objs_reset_imitation, times_reset_imitation = data # objs contains objs of a single instance of a lb test
filename_2 = f'{directory_lb_test_2}lb-test-{instance_name}.pkl'
with gzip.open(filename_2, 'rb') as f:
data = pickle.load(f)
objs_reset_vanilla_2, times_reset_vanilla_2, objs_reset_imitation_2, times_reset_imitation_2 = data # objs contains objs of a single instance of a lb test
a = [objs_reset_vanilla.min(), objs_reset_imitation.min(), objs_reset_vanilla_2.min(), objs_reset_imitation_2.min()]
# a = [objs.min(), objs_reset_vanilla.min(), objs_reset_imitation.min()]
obj_opt = np.amin(a)
# # compute primal gap for baseline localbranching run
# # if times[-1] < total_time_limit:
# times = np.append(times, total_time_limit)
# objs = np.append(objs, objs[-1])
#
# gamma_baseline = np.zeros(len(objs))
# for j in range(len(objs)):
# if objs[j] == 0 and obj_opt == 0:
# gamma_baseline[j] = 0
# elif objs[j] * obj_opt < 0:
# gamma_baseline[j] = 1
# else:
# gamma_baseline[j] = np.abs(objs[j] - obj_opt) / np.maximum(np.abs(objs[j]), np.abs(obj_opt)) #
#
# # compute the primal gap of last objective
# primal_gap_final_baseline = np.abs(objs[-1] - obj_opt) / np.abs(obj_opt)
# primal_gap_final_baselines.append(primal_gap_final_baseline)
#
# # create step line
# stepline_baseline = interp1d(times, gamma_baseline, 'previous')
# steplines_baseline.append(stepline_baseline)
#
# # compute primal integral
# primal_int_baseline = 0
# for j in range(len(objs) - 1):
# primal_int_baseline += gamma_baseline[j] * (times[j + 1] - times[j])
# primal_int_baselines.append(primal_int_baseline)
#
# lb-gnn
# if times_reset_vanilla[-1] < total_time_limit:
times_reset_vanilla = np.append(times_reset_vanilla, total_time_limit)
objs_reset_vanilla = np.append(objs_reset_vanilla, objs_reset_vanilla[-1])
gamma_reset_vanilla = np.zeros(len(objs_reset_vanilla))
for j in range(len(objs_reset_vanilla)):
if objs_reset_vanilla[j] == 0 and obj_opt == 0:
gamma_reset_vanilla[j] = 0
elif objs_reset_vanilla[j] * obj_opt < 0:
gamma_reset_vanilla[j] = 1
else:
gamma_reset_vanilla[j] = np.abs(objs_reset_vanilla[j] - obj_opt) / np.maximum(np.abs(objs_reset_vanilla[j]), np.abs(obj_opt)) #
primal_gap_final_vanilla = np.abs(objs_reset_vanilla[-1] - obj_opt) / np.abs(obj_opt)
primal_gap_final_reset_vanillas.append(primal_gap_final_vanilla)
stepline_reset_vanilla = interp1d(times_reset_vanilla, gamma_reset_vanilla, 'previous')
steplines_reset_vanillas.append(stepline_reset_vanilla)
#
# t = np.linspace(start=0.0, stop=total_time_limit, num=1001)
# plt.close('all')
# plt.clf()
# fig, ax = plt.subplots(figsize=(8, 6.4))
# fig.suptitle("Test Result: comparison of primal gap")
# fig.subplots_adjust(top=0.5)
# # ax.set_title(instance_name, loc='right')
# ax.plot(t, stepline_baseline(t), label='lb baseline')
# ax.plot(t, stepline_reset_vanilla(t), label='lb with k predicted')
# ax.set_xlabel('time /s')
# ax.set_ylabel("objective")
# ax.legend()
# plt.show()
            # compute primal integral
primal_int_reset_vanilla = 0
for j in range(len(objs_reset_vanilla) - 1):
primal_int_reset_vanilla += gamma_reset_vanilla[j] * (times_reset_vanilla[j + 1] - times_reset_vanilla[j])
primal_int_reset_vanillas.append(primal_int_reset_vanilla)
# lb-gnn-reset
times_reset_imitation = np.append(times_reset_imitation, total_time_limit)
objs_reset_imitation = np.append(objs_reset_imitation, objs_reset_imitation[-1])
gamma_reset_imitation = np.zeros(len(objs_reset_imitation))
for j in range(len(objs_reset_imitation)):
if objs_reset_imitation[j] == 0 and obj_opt == 0:
gamma_reset_imitation[j] = 0
elif objs_reset_imitation[j] * obj_opt < 0:
gamma_reset_imitation[j] = 1
else:
gamma_reset_imitation[j] = np.abs(objs_reset_imitation[j] - obj_opt) / np.maximum(np.abs(objs_reset_imitation[j]), np.abs(obj_opt)) #
primal_gap_final_imitation = np.abs(objs_reset_imitation[-1] - obj_opt) / np.abs(obj_opt)
primal_gap_final_reset_imitations.append(primal_gap_final_imitation)
stepline_reset_imitation = interp1d(times_reset_imitation, gamma_reset_imitation, 'previous')
steplines_reset_imitations.append(stepline_reset_imitation)
            # compute primal integral
primal_int_reset_imitation = 0
for j in range(len(objs_reset_imitation) - 1):
primal_int_reset_imitation += gamma_reset_imitation[j] * (times_reset_imitation[j + 1] - times_reset_imitation[j])
primal_in_reset_imitations.append(primal_int_reset_imitation)
# plt.close('all')
# plt.clf()
# fig, ax = plt.subplots(figsize=(8, 6.4))
# fig.suptitle("Test Result: comparison of objective")
# fig.subplots_adjust(top=0.5)
# ax.set_title(instance_name, loc='right')
# ax.plot(times, objs, label='lb baseline')
# ax.plot(times_reset_vanilla, objs_reset_vanilla, label='lb with k predicted')
# ax.set_xlabel('time /s')
# ax.set_ylabel("objective")
# ax.legend()
# plt.show()
#
# plt.close('all')
# plt.clf()
# fig, ax = plt.subplots(figsize=(8, 6.4))
# fig.suptitle("Test Result: comparison of primal gap")
# fig.subplots_adjust(top=0.5)
# ax.set_title(instance_name, loc='right')
# ax.plot(times, gamma_baseline, label='lb baseline')
# ax.plot(times_reset_vanilla, gamma_reset_vanilla, label='lb with k predicted')
# ax.set_xlabel('time /s')
# ax.set_ylabel("objective")
# ax.legend()
# plt.show()
# primal_int_baselines = np.array(primal_int_baselines).reshape(-1)
primal_int_reset_vanilla = np.array(primal_int_reset_vanillas).reshape(-1)
primal_in_reset_imitation = np.array(primal_in_reset_imitations).reshape(-1)
# primal_gap_final_baselines = np.array(primal_gap_final_baselines).reshape(-1)
primal_gap_final_reset_vanilla = np.array(primal_gap_final_reset_vanillas).reshape(-1)
primal_gap_final_reset_imitation = np.array(primal_gap_final_reset_imitations).reshape(-1)
        # average primal integral over the test dataset
# primal_int_base_ave = primal_int_baselines.sum() / len(primal_int_baselines)
primal_int_reset_vanilla_ave = primal_int_reset_vanilla.sum() / len(primal_int_reset_vanilla)
primal_int_reset_imitation_ave = primal_in_reset_imitation.sum() / len(primal_in_reset_imitation)
# primal_gap_final_baselines = primal_gap_final_baselines.sum() / len(primal_gap_final_baselines)
primal_gap_final_reset_vanilla = primal_gap_final_reset_vanilla.sum() / len(primal_gap_final_reset_vanilla)
primal_gap_final_reset_imitation = primal_gap_final_reset_imitation.sum() / len(primal_gap_final_reset_imitation)
print(self.instance_type + test_instance_size)
        print(self.incumbent_mode + ' Solution')
# print('baseline primal integral: ', primal_int_base_ave)
print('baseline primal integral: ', primal_int_reset_vanilla_ave)
print('imitation primal integral: ', primal_int_reset_imitation_ave)
print('\n')
# print('baseline primal gap: ',primal_gap_final_baselines)
print('baseline primal gap: ', primal_gap_final_reset_vanilla)
print('imitation primal gap: ', primal_gap_final_reset_imitation)
t = np.linspace(start=0.0, stop=total_time_limit, num=1001)
# primalgaps_baseline = None
# for n, stepline_baseline in enumerate(steplines_baseline):
# primal_gap = stepline_baseline(t)
# if n==0:
# primalgaps_baseline = primal_gap
# else:
# primalgaps_baseline = np.vstack((primalgaps_baseline, primal_gap))
# primalgap_baseline_ave = np.average(primalgaps_baseline, axis=0)
primalgaps_reset_vanilla = None
for n, stepline_reset_vanilla in enumerate(steplines_reset_vanillas):
primal_gap = stepline_reset_vanilla(t)
if n == 0:
primalgaps_reset_vanilla = primal_gap
else:
primalgaps_reset_vanilla = np.vstack((primalgaps_reset_vanilla, primal_gap))
primalgap_reset_vanilla_ave = np.average(primalgaps_reset_vanilla, axis=0)
primalgaps_reset_imitation = None
for n, stepline_reset_imitation in enumerate(steplines_reset_imitations):
primal_gap = stepline_reset_imitation(t)
if n == 0:
primalgaps_reset_imitation = primal_gap
else:
primalgaps_reset_imitation = np.vstack((primalgaps_reset_imitation, primal_gap))
primalgap_reset_imitation_ave = np.average(primalgaps_reset_imitation, axis=0)
plt.close('all')
plt.clf()
fig, ax = plt.subplots(figsize=(6.4, 4.8))
fig.suptitle("Normalized primal gap")
# fig.subplots_adjust(top=0.5)
ax.set_title(self.instance_type + '-' + self.incumbent_mode, loc='right')
# ax.plot(t, primalgap_baseline_ave, label='lb-baseline')
ax.plot(t, primalgap_reset_vanilla_ave, label='lb-gnn-baseline')
ax.plot(t, primalgap_reset_imitation_ave,'--', label='lb-gnn-imitation')
ax.set_xlabel('time /s')
ax.set_ylabel("normalized primal gap")
ax.legend()
plt.show()
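# A minimal usage sketch of MlLocalbranch (hypothetical instance type and sizes; paths, folder
# layout and trained-model files are assumptions, so adjust them before uncommenting):
# if __name__ == '__main__':
#     ml_lb = MlLocalbranch(instance_type='setcovering', instance_size='-small',
#                           lbconstraint_mode='symmetric', incumbent_mode='firstsol', seed=100)
#     ml_lb.evaluate_localbranching(test_instance_size='-small', total_time_limit=60,
#                                   node_time_limit=30, reset_k_at_2nditeration=True)
#     ml_lb.primal_integral(test_instance_size='-small', total_time_limit=60, node_time_limit=30)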
class RegressionInitialK:
def __init__(self, instance_type, instance_size, lbconstraint_mode, incumbent_mode, seed=100):
self.instance_type = instance_type
self.instance_size = instance_size
self.incumbent_mode = incumbent_mode
self.lbconstraint_mode = lbconstraint_mode
self.is_symmetric = True
if self.lbconstraint_mode == 'asymmetric':
self.is_symmetric = False
self.seed = seed
self.directory = './result/generated_instances/' + self.instance_type + '/' + self.instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
# self.generator = generator_switcher(self.instance_type + self.instance_size)
self.initialize_ecole_env()
self.env.seed(self.seed) # environment (SCIP)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def initialize_ecole_env(self):
if self.incumbent_mode == 'firstsol':
self.env = ecole.environment.Configuring(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
elif self.incumbent_mode == 'rootsol':
if self.instance_type == 'independentset':
self.env = SimpleConfiguring(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
else:
self.env = SimpleConfiguringEnablecuts(
# set up a few SCIP parameters
scip_params={
"presolving/maxrounds": 0, # deactivate presolving
"presolving/maxrestarts": 0,
},
observation_function=ecole.observation.MilpBipartite(),
reward_function=None,
# collect additional metrics for information purposes
information_function={
'time': ecole.reward.SolvingTime().cumsum(),
}
)
# elif self.instance_type == 'capacitedfacility':
# self.env = SimpleConfiguringEnableheuristics(
#
# # set up a few SCIP parameters
# scip_params={
# "presolving/maxrounds": 0, # deactivate presolving
# "presolving/maxrestarts": 0,
# },
#
# observation_function=ecole.observation.MilpBipartite(),
#
# reward_function=None,
#
# # collect additional metrics for information purposes
# information_function={
# 'time': ecole.reward.SolvingTime().cumsum(),
# }
# )
def set_and_optimize_MIP(self, MIP_model, incumbent_mode):
preprocess_off = True
if incumbent_mode == 'firstsol':
heuristics_off = False
cuts_off = False
elif incumbent_mode == 'rootsol':
if self.instance_type == 'independentset':
heuristics_off = True
cuts_off = True
else:
heuristics_off = True
cuts_off = False
# elif self.instance_type == 'capacitedfacility':
# heuristics_off = False
# cuts_off = True
if preprocess_off:
MIP_model.setParam('presolving/maxrounds', 0)
MIP_model.setParam('presolving/maxrestarts', 0)
if heuristics_off:
MIP_model.setHeuristics(pyscipopt.SCIP_PARAMSETTING.OFF)
if cuts_off:
MIP_model.setSeparating(pyscipopt.SCIP_PARAMSETTING.OFF)
if incumbent_mode == 'firstsol':
MIP_model.setParam('limits/solutions', 1)
elif incumbent_mode == 'rootsol':
MIP_model.setParam("limits/nodes", 1)
MIP_model.optimize()
t = MIP_model.getSolvingTime()
status = MIP_model.getStatus()
lp_status = MIP_model.getLPSolstat()
stage = MIP_model.getStage()
n_sols = MIP_model.getNSols()
# print("* Model status: %s" % status)
# print("* LP status: %s" % lp_status)
# print("* Solve stage: %s" % stage)
# print("* Solving time: %s" % t)
# print('* number of sol : ', n_sols)
incumbent_solution = MIP_model.getBestSol()
feasible = MIP_model.checkSol(solution=incumbent_solution)
return status, feasible, MIP_model, incumbent_solution
def initialize_MIP(self, MIP_model):
MIP_model_2, MIP_2_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=False)
incumbent_mode = self.incumbent_mode
if self.incumbent_mode == 'firstsol':
incumbent_mode_2 = 'rootsol'
elif self.incumbent_mode == 'rootsol':
incumbent_mode_2 = 'firstsol'
status, feasible, MIP_model, incumbent_solution = self.set_and_optimize_MIP(MIP_model, incumbent_mode)
status_2, feasible_2, MIP_model_2, incumbent_solution_2 = self.set_and_optimize_MIP(MIP_model_2, incumbent_mode_2)
feasible = feasible and feasible_2
if (not status == 'optimal') and (not status_2 == 'optimal'):
not_optimal = True
else:
not_optimal = False
if not_optimal and feasible:
valid = True
else:
valid = False
return valid, MIP_model, incumbent_solution
def solve_lp(self, MIP_model, lp_algo='s'):
# solve the LP relaxation of root node
# MIP_model.freeTransform()
status = MIP_model.getStatus()
print("* Model status: %s" % status)
MIP_model.resetParams()
MIP_model.setPresolve(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setHeuristics(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setSeparating(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setIntParam("lp/solvefreq", 0)
MIP_model.setParam("limits/nodes", 1)
# MIP_model.setParam("limits/solutions", 1)
MIP_model.setParam("display/verblevel", 0)
MIP_model.setParam("lp/disablecutoff", 1)
MIP_model.setParam("lp/initalgorithm", lp_algo)
MIP_model.setParam("lp/resolvealgorithm", lp_algo)
# MIP_model.setParam("limits/solutions", 1)
MIP_model.optimize()
#
status = MIP_model.getStatus()
lp_status = MIP_model.getLPSolstat()
stage = MIP_model.getStage()
n_sols = MIP_model.getNSols()
# root_time = MIP_model.getSolvingTime()
print("* Model status: %s" % status)
print("* Solve stage: %s" % stage)
print("* LP status: %s" % lp_status)
print('* number of sol : ', n_sols)
return MIP_model, lp_status
def compute_k_prime(self, MIP_model, incumbent):
# solve the root node and get the LP solution
# MIP_model.freeTransform()
# solve the LP relaxation of root node
MIP_model, lp_status = self.solve_lp(MIP_model)
if lp_status == 1:
sol_lp = MIP_model.createLPSol()
# sol_relax = MIP_model.createRelaxSol()
k_prime = haming_distance_solutions(MIP_model, incumbent, sol_lp)
if not self.is_symmetric:
k_prime = haming_distance_solutions_asym(MIP_model, incumbent, sol_lp)
k_prime = np.ceil(k_prime)
valid = True
else:
print("Warning: k_prime is not valid! Since LP is not solved to optimal! LP solution status is {}".format(str(lp_status)))
k_prime = 0
valid = False
return k_prime, MIP_model, valid
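    # k_prime is a Hamming-type distance between the incumbent and the root-LP solution, as
    # computed by haming_distance_solutions (or its asymmetric variant in asymmetric mode).
    # Hypothetical example: if the two solutions disagree on 240 of 2000 binaries, k_prime = 240;
    # it later serves as the reference scale for the sampled neighborhood sizes.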
def sample_k_per_instance(self, t_limit, index_instance):
filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
MIP_model = Model()
MIP_model.readProblem(filename)
instance_name = MIP_model.getProbName()
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
incumbent = MIP_model.readSolFile(firstsol_filename)
feas = MIP_model.checkSol(incumbent)
try:
MIP_model.addSol(incumbent, False)
except:
print('Error: the root solution of ' + instance_name + ' is not feasible!')
initial_obj = MIP_model.getSolObjVal(incumbent)
print("Initial obj before LB: {}".format(initial_obj))
n_supportbinvars = binary_support(MIP_model, incumbent)
print('binary support: ', n_supportbinvars)
MIP_model.resetParams()
neigh_sizes = []
objs = []
t = []
n_supportbins = []
statuss = []
relax_grips = []
n_nodes = []
firstlp_times = []
n_lps = []
presolve_times = []
firstlp_times_test = []
solving_times_test = []
nsample = 11 # 101
# create a copy of the MIP to be 'locally branched'
MIP_copy, subMIP_model_vars, success = MIP_model.createCopy(problemName='MIPCopy',
origcopy=False)
MIP_copy.resetParams()
sol_MIP_copy = MIP_copy.createSol()
# create a primal solution for the copy MIP by copying the solution of original MIP
n_vars = MIP_model.getNVars()
subMIP_vars = MIP_model.getVars()
for j in range(n_vars):
val = MIP_model.getSolVal(incumbent, subMIP_vars[j])
MIP_copy.setSolVal(sol_MIP_copy, subMIP_model_vars[j], val)
feasible = MIP_copy.checkSol(solution=sol_MIP_copy)
if feasible:
# print("the trivial solution of subMIP is feasible ")
MIP_copy.addSol(sol_MIP_copy, False)
# print("the feasible solution of subMIP_model is added to subMIP_model")
else:
print("Warn: the trivial solution of subMIP_model is not feasible!")
n_supportbinvars = binary_support(MIP_copy, sol_MIP_copy)
print('binary support: ', n_supportbinvars)
k_base = n_binvars
if self.is_symmetric == False:
k_base = n_supportbinvars
# solve the root node and get the LP solution
k_prime, _, valid = self.compute_k_prime(MIP_model, incumbent)
if valid is True:
phi_prime = k_prime / k_base
n_bins = MIP_model.getNBinVars()
lpcands, lpcandssol, lpcadsfrac, nlpcands, npriolpcands, nfracimplvars = MIP_model.getLPBranchCands()
relax_grip = 1 - nlpcands / n_bins
print('relaxation grip of original problem :', relax_grip)
else:
phi_prime = 1
print('phi_prime :', phi_prime)
# MIP_model.freeProb()
# del MIP_model
for i in range(nsample):
# create a copy of the MIP to be 'locally branched', initialize it by 1. solving the LP 2. adding the incumbent
subMIP_model, subMIP_model_vars, success = MIP_copy.createCopy(problemName='MIPCopy',
origcopy=False)
# solve LP relaxation of root node of original problem
subMIP_model, lp_status = self.solve_lp(subMIP_model)
# create a primal solution for the copy MIP by copying the solution of original MIP
sol_subMIP_model = subMIP_model.createSol()
n_vars = MIP_copy.getNVars()
MIP_copy_vars = MIP_copy.getVars()
for j in range(n_vars):
val = MIP_copy.getSolVal(sol_MIP_copy, MIP_copy_vars[j])
subMIP_model.setSolVal(sol_subMIP_model, subMIP_model_vars[j], val)
feasible = subMIP_model.checkSol(solution=sol_subMIP_model)
if feasible:
# print("the trivial solution of subMIP is feasible ")
subMIP_model.addSol(sol_subMIP_model, False)
# print("the feasible solution of subMIP_model is added to subMIP_model")
else:
print("Warning: the trivial solution of subMIP_model is not feasible!")
# subMIP_model = MIP_copy
# sol_subMIP_model = sol_MIP_copy
# add LB constraint to subMIP model
alpha = 0.1 * (i)
# if nsample == 41:
# if i<11:
# alpha = 0.01*i
# elif i<31:
# alpha = 0.02*(i-5)
# else:
# alpha = 0.05*(i-20)
neigh_size = np.ceil(alpha * k_prime)
if self.lbconstraint_mode == 'asymmetric':
# neigh_size = np.ceil(alpha * n_supportbinvars)
subMIP_model, constraint_lb = addLBConstraintAsymmetric(subMIP_model, sol_subMIP_model, neigh_size)
else:
# neigh_size = np.ceil(alpha * n_binvars)
subMIP_model, constraint_lb = addLBConstraint(subMIP_model, sol_subMIP_model, neigh_size)
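            # The constraint added here limits the search to a Hamming ball of radius neigh_size
            # around sol_subMIP_model. In the classic Fischetti-Lodi form (which addLBConstraint is
            # assumed to implement; see the localbranching module) this reads
            #     sum_{j: xbar_j = 1} (1 - x_j) + sum_{j: xbar_j = 0} x_j <= neigh_size,
            # and the asymmetric variant keeps only the first sum over the incumbent's support.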
print('Neigh size:', alpha)
stage = subMIP_model.getStage()
print("* Solve stage: %s" % stage)
subMIP_model2 = subMIP_model
# subMIP_model2, MIP_copy_vars, success = subMIP_model.createCopy(
# problemName='Baseline', origcopy=True)
# subMIP_model2 = subMIP_model
subMIP_model2, lp_status = self.solve_lp(subMIP_model2, lp_algo='d')
relax_grip = 2
n_bins = subMIP_model2.getNBinVars()
lpcands, lpcandssol, lpcadsfrac, nlpcands, npriolpcands, nfracimplvars = subMIP_model2.getLPBranchCands()
relax_grip = 1 - nlpcands / n_bins
print('relaxation grip of subMIP :', relax_grip)
            firstlp_time_test = subMIP_model2.getFirstLpTime() # time for solving first LP relaxation at the root node
solving_time_test = subMIP_model2.getSolvingTime() # total time used for solving (including presolving) the current problem
print('firstLP time for subMIP test :', firstlp_time_test)
print('root node time for LP subMIP test :', solving_time_test)
subMIP_model.resetParams()
subMIP_model.setParam('limits/time', t_limit)
# subMIP_model.setSeparating(pyscipopt.SCIP_PARAMSETTING.FAST)
# subMIP_model.setPresolve(pyscipopt.SCIP_PARAMSETTING.FAST)
subMIP_model.setParam("display/verblevel", 0)
subMIP_model.optimize()
status_subMIP = subMIP_model.getStatus()
best_obj = subMIP_model.getSolObjVal(subMIP_model.getBestSol())
solving_time = subMIP_model.getSolvingTime() # total time used for solving (including presolving) the current problem
n_node = subMIP_model.getNTotalNodes()
            firstlp_time = subMIP_model.getFirstLpTime() # time for solving first LP relaxation at the root node
presolve_time = subMIP_model.getPresolvingTime()
n_lp = subMIP_model.getNLPs()
best_sol = subMIP_model.getBestSol()
vars_subMIP = subMIP_model.getVars()
n_binvars_subMIP = subMIP_model.getNBinVars()
n_supportbins_subMIP = 0
for i in range(n_binvars_subMIP):
val = subMIP_model.getSolVal(best_sol, vars_subMIP[i])
                assert subMIP_model.isFeasIntegral(val), "Error: Value of a binary variable is not integral!"
if subMIP_model.isFeasEQ(val, 1.0):
n_supportbins_subMIP += 1
# subMIP_model2, MIP_copy_vars, success = subMIP_model.createCopy(
# problemName='Baseline', origcopy=True)
neigh_sizes.append(alpha)
objs.append(best_obj)
t.append(solving_time)
n_supportbins.append(n_supportbins_subMIP)
statuss.append(status_subMIP)
relax_grips.append(relax_grip)
n_nodes.append(n_node)
firstlp_times.append(firstlp_time)
presolve_times.append(presolve_time)
n_lps.append(n_lp)
firstlp_times_test.append(firstlp_time_test)
solving_times_test.append(solving_time_test)
subMIP_model.freeTransform()
subMIP_model.resetParams()
subMIP_model.delCons(constraint_lb)
subMIP_model.releasePyCons(constraint_lb)
subMIP_model.freeProb()
del subMIP_model
del constraint_lb
for i in range(len(t)):
print('Neighsize: {:.4f}'.format(neigh_sizes[i]),
'Best obj: {:.4f}'.format(objs[i]),
'Binary supports:{}'.format(n_supportbins[i]),
'Solving time: {:.4f}'.format(t[i]),
'Presolve_time: {:.4f}'.format(presolve_times[i]),
'FirstLP time: {:.4f}'.format(firstlp_times[i]),
'solved LPs: {:.4f}'.format(n_lps[i]),
'B&B nodes: {:.4f}'.format(n_nodes[i]),
'Relaxation grip: {:.4f}'.format(relax_grips[i]),
                  'Solving time of LP root: {:.4f}'.format(solving_times_test[i]),
                  'FirstLP time of LP root: {:.4f}'.format(firstlp_times_test[i]),
'Status: {}'.format(statuss[i])
)
neigh_sizes = np.array(neigh_sizes).reshape(-1)
t = np.array(t).reshape(-1)
objs = np.array(objs).reshape(-1)
relax_grips = np.array(relax_grips).reshape(-1)
# normalize the objective and solving time
t = t / t_limit
objs_abs = objs
objs = (objs_abs - np.min(objs_abs))
objs = objs / np.max(objs)
t = mean_filter(t, 5)
objs = mean_filter(objs, 5)
# t = mean_forward_filter(t,10)
# objs = mean_forward_filter(objs, 10)
# compute the performance score
alpha = 1 / 2
perf_score = alpha * t + (1 - alpha) * objs
k_bests = neigh_sizes[np.where(perf_score == perf_score.min())]
k_init = k_bests[0]
print('k_0_star:', k_init)
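        # Selection rule with hypothetical values: with alpha = 1/2, a sampled neighborhood whose
        # normalized solving time is 0.2 and normalized objective is 0.1 scores 0.5*0.2 + 0.5*0.1 = 0.15;
        # the neigh_sizes entry with the smallest score is reported as k_0_star (the same rule produces
        # the regression label k_init in generate_regression_samples).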
plt.clf()
fig, ax = plt.subplots(4, 1, figsize=(6.4, 6.4))
fig.suptitle("Evaluation of size of lb neighborhood")
fig.subplots_adjust(top=0.5)
ax[0].plot(neigh_sizes, objs)
ax[0].set_title(instance_name, loc='right')
ax[0].set_xlabel(r'$\ r $ ' + '(Neighborhood size: ' + r'$K = r \times N$)') #
ax[0].set_ylabel("Objective")
ax[1].plot(neigh_sizes, t)
# ax[1].set_ylim([0,31])
ax[1].set_ylabel("Solving time")
ax[2].plot(neigh_sizes, perf_score)
ax[2].set_ylabel("Performance score")
ax[3].plot(neigh_sizes, relax_grips)
ax[3].set_ylabel("Relaxation grip")
plt.show()
# f = self.k_samples_directory + instance_name
# np.savez(f, neigh_sizes=neigh_sizes, objs=objs, t=t)
index_instance += 1
return index_instance
def generate_k_samples(self, t_limit, instance_size='-small'):
"""
For each MIP instance, sample k from [0,1] * n_binary(symmetric) or [0,1] * n_binary_support(asymmetric),
and evaluate the performance of 1st round of local-branching
:param t_limit:
:param k_samples_directory:
:return:
"""
self.k_samples_directory = self.directory + 'k_samples' + '/'
pathlib.Path(self.k_samples_directory).mkdir(parents=True, exist_ok=True)
direc = './data/generated_instances/' + self.instance_type + '/' + instance_size + '/'
self.directory_transformedmodel = direc + 'transformedmodel' + '/'
self.directory_sol = direc + self.incumbent_mode + '/'
index_instance = 0
# while index_instance < 86:
# instance = next(self.generator)
# MIP_model = instance.as_pyscipopt()
# MIP_model.setProbName(self.instance_type + '-' + str(index_instance))
# instance_name = MIP_model.getProbName()
# print(instance_name)
# index_instance += 1
while index_instance < 100:
index_instance = self.sample_k_per_instance(t_limit, index_instance)
# instance = next(self.generator)
# MIP_model = instance.as_pyscipopt()
# MIP_model.setProbName(self.instance_type + '-' + str(index_instance))
# instance_name = MIP_model.getProbName()
# print(instance_name)
#
# n_vars = MIP_model.getNVars()
# n_binvars = MIP_model.getNBinVars()
# print("N of variables: {}".format(n_vars))
# print("N of binary vars: {}".format(n_binvars))
# print("N of constraints: {}".format(MIP_model.getNConss()))
#
# status, feasible, MIP_model, incumbent_solution = self.initialize_MIP(MIP_model)
# if (not status == 'optimal') and feasible:
# initial_obj = MIP_model.getObjVal()
# print("Initial obj before LB: {}".format(initial_obj))
# print('Relative gap: ', MIP_model.getGap())
#
# n_supportbinvars = binary_support(MIP_model, incumbent_solution)
# print('binary support: ', n_supportbinvars)
#
#
# MIP_model.resetParams()
#
# neigh_sizes = []
# objs = []
# t = []
# n_supportbins = []
# statuss = []
# MIP_model.resetParams()
# nsample = 101
# for i in range(nsample):
#
# # create a copy of the MIP to be 'locally branched'
# subMIP_model, subMIP_model_vars, success = MIP_model.createCopy(problemName='subMIPmodelCopy',
# origcopy=False)
# sol_subMIP_model = subMIP_model.createSol()
#
# # create a primal solution for the copy MIP by copying the solution of original MIP
# n_vars = MIP_model.getNVars()
# subMIP_vars = MIP_model.getVars()
#
# for j in range(n_vars):
# val = MIP_model.getSolVal(incumbent_solution, subMIP_vars[j])
# subMIP_model.setSolVal(sol_subMIP_model, subMIP_model_vars[j], val)
# feasible = subMIP_model.checkSol(solution=sol_subMIP_model)
#
# if feasible:
# # print("the trivial solution of subMIP is feasible ")
# subMIP_model.addSol(sol_subMIP_model, False)
# # print("the feasible solution of subMIP_model is added to subMIP_model")
# else:
# print("Warn: the trivial solution of subMIP_model is not feasible!")
#
# # add LB constraint to subMIP model
# alpha = 0.01 * i
# # if nsample == 41:
# # if i<11:
# # alpha = 0.01*i
# # elif i<31:
# # alpha = 0.02*(i-5)
# # else:
# # alpha = 0.05*(i-20)
#
# if self.lbconstraint_mode == 'asymmetric':
# neigh_size = alpha * n_supportbinvars
# subMIP_model = addLBConstraintAsymmetric(subMIP_model, sol_subMIP_model, neigh_size)
# else:
# neigh_size = alpha * n_binvars
# subMIP_model = addLBConstraint(subMIP_model, sol_subMIP_model, neigh_size)
#
# subMIP_model.setParam('limits/time', t_limit)
# subMIP_model.optimize()
#
# status = subMIP_model.getStatus()
# best_obj = subMIP_model.getSolObjVal(subMIP_model.getBestSol())
# solving_time = subMIP_model.getSolvingTime() # total time used for solving (including presolving) the current problem
#
# best_sol = subMIP_model.getBestSol()
#
# vars_subMIP = subMIP_model.getVars()
# n_binvars_subMIP = subMIP_model.getNBinVars()
# n_supportbins_subMIP = 0
# for i in range(n_binvars_subMIP):
# val = subMIP_model.getSolVal(best_sol, vars_subMIP[i])
# assert subMIP_model.isFeasIntegral(val), "Error: Value of a binary varialbe is not integral!"
# if subMIP_model.isFeasEQ(val, 1.0):
# n_supportbins_subMIP += 1
#
# neigh_sizes.append(alpha)
# objs.append(best_obj)
# t.append(solving_time)
# n_supportbins.append(n_supportbins_subMIP)
# statuss.append(status)
#
# for i in range(len(t)):
# print('Neighsize: {:.4f}'.format(neigh_sizes[i]),
# 'Best obj: {:.4f}'.format(objs[i]),
# 'Binary supports:{}'.format(n_supportbins[i]),
# 'Solving time: {:.4f}'.format(t[i]),
# 'Status: {}'.format(statuss[i])
# )
#
# neigh_sizes = np.array(neigh_sizes).reshape(-1).astype('float64')
# t = np.array(t).reshape(-1)
# objs = np.array(objs).reshape(-1)
# f = self.k_samples_directory + instance_name
# np.savez(f, neigh_sizes=neigh_sizes, objs=objs, t=t)
# index_instance += 1
def generate_regression_samples(self, t_limit, instance_size='-small'):
self.k_samples_directory = self.directory + 'k_samples' + '/'
self.regression_samples_directory = self.directory + 'regression_samples' + '/'
pathlib.Path(self.regression_samples_directory).mkdir(parents=True, exist_ok=True)
direc = './data/generated_instances/' + self.instance_type + '/' + instance_size + '/'
self.directory_transformedmodel = direc + 'transformedmodel' + '/'
self.directory_sol = direc + self.incumbent_mode + '/'
index_instance = 0
while index_instance < 100:
filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
MIP_model = Model()
MIP_model.readProblem(filename)
instance_name = MIP_model.getProbName()
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
incumbent = MIP_model.readSolFile(firstsol_filename)
feas = MIP_model.checkSol(incumbent)
try:
MIP_model.addSol(incumbent, False)
except:
print('Error: the root solution of ' + instance_name + ' is not feasible!')
instance = ecole.scip.Model.from_pyscipopt(MIP_model)
instance_name = self.instance_type + '-' + str(index_instance)
data = np.load(self.k_samples_directory + instance_name + '.npz')
k = data['neigh_sizes']
t = data['t']
objs_abs = data['objs']
# normalize the objective and solving time
t = t / t_limit
objs = (objs_abs - np.min(objs_abs))
objs = objs / np.max(objs)
t = mean_filter(t, 5)
objs = mean_filter(objs, 5)
# t = mean_forward_filter(t,10)
# objs = mean_forward_filter(objs, 10)
# compute the performance score
alpha = 1 / 2
perf_score = alpha * t + (1 - alpha) * objs
k_bests = k[np.where(perf_score == perf_score.min())]
k_init = k_bests[0]
# plt.clf()
# fig, ax = plt.subplots(3, 1, figsize=(6.4, 6.4))
# fig.suptitle("Evaluation of size of lb neighborhood")
# fig.subplots_adjust(top=0.5)
# ax[0].plot(k, objs)
# ax[0].set_title(instance_name, loc='right')
# ax[0].set_xlabel(r'$\ r $ ' + '(Neighborhood size: ' + r'$K = r \times N$)') #
# ax[0].set_ylabel("Objective")
# ax[1].plot(k, t)
# # ax[1].set_ylim([0,31])
# ax[1].set_ylabel("Solving time")
# ax[2].plot(k, perf_score)
# ax[2].set_ylabel("Performance score")
# plt.show()
# instance = ecole.scip.Model.from_pyscipopt(MIP_model)
observation, _, _, done, _ = self.env.reset(instance)
data_sample = [observation, k_init]
filename = f'{self.regression_samples_directory}regression-{instance_name}.pkl'
with gzip.open(filename, 'wb') as f:
pickle.dump(data_sample, f)
index_instance += 1
def test_lp(self, t_limit, instance_size='-small'):
self.k_samples_directory = self.directory + 'k_samples' + '/'
self.regression_samples_directory = self.directory + 'regression_samples' + '/'
pathlib.Path(self.regression_samples_directory).mkdir(parents=True, exist_ok=True)
direc = './data/generated_instances/' + self.instance_type + '/' + instance_size + '/'
self.directory_transformedmodel = direc + 'transformedmodel' + '/'
self.directory_sol = direc + self.incumbent_mode + '/'
index_instance = 0
list_phi_prime = []
list_phi_lp2relax = []
list_phi_star = []
count_phi_star_smaller = 0
count_phi_lp_relax_diff = 0
# list_phi_prime_invalid = []
# list_phi_star_invalid = []
while index_instance < 100:
filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
MIP_model = Model()
MIP_model.readProblem(filename)
instance_name = MIP_model.getProbName()
# print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
# print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
# print("N of constraints: {}".format(MIP_model.getNConss()))
incumbent = MIP_model.readSolFile(firstsol_filename)
feas = MIP_model.checkSol(incumbent)
try:
MIP_model.addSol(incumbent, False)
except:
print('Error: the root solution of ' + instance_name + ' is not feasible!')
instance = ecole.scip.Model.from_pyscipopt(MIP_model)
instance_name = self.instance_type + '-' + str(index_instance)
data = np.load(self.k_samples_directory + instance_name + '.npz')
k = data['neigh_sizes']
t = data['t']
objs_abs = data['objs']
# normalize the objective and solving time
t = t / t_limit
objs = (objs_abs - np.min(objs_abs))
objs = objs / np.max(objs)
t = mean_filter(t, 5)
objs = mean_filter(objs, 5)
# t = mean_forward_filter(t,10)
# objs = mean_forward_filter(objs, 10)
# compute the performance score
alpha = 1 / 2
perf_score = alpha * t + (1 - alpha) * objs
k_bests = k[np.where(perf_score == perf_score.min())]
k_init = k_bests[0]
# solve the root node and get the LP solution
MIP_model.freeTransform()
status = MIP_model.getStatus()
print("* Model status: %s" % status)
MIP_model.resetParams()
MIP_model.setPresolve(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setHeuristics(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setSeparating(pyscipopt.SCIP_PARAMSETTING.OFF)
MIP_model.setIntParam("lp/solvefreq", 0)
MIP_model.setParam("limits/nodes", 1)
# MIP_model.setParam("limits/solutions", 1)
MIP_model.setParam("display/verblevel", 0)
MIP_model.setParam("lp/disablecutoff", 1)
# MIP_model.setParam("limits/solutions", 1)
MIP_model.optimize()
#
status = MIP_model.getStatus()
lp_status = MIP_model.getLPSolstat()
stage = MIP_model.getStage()
n_sols = MIP_model.getNSols()
t = MIP_model.getSolvingTime()
print("* Model status: %s" % status)
print("* Solve stage: %s" % stage)
print("* LP status: %s" % lp_status)
print('* number of sol : ', n_sols)
sol_lp = MIP_model.createLPSol()
# sol_relax = MIP_model.createRelaxSol()
k_prime = haming_distance_solutions(MIP_model, incumbent, sol_lp)
# k_lp2relax = haming_distance_solutions(MIP_model, sol_relax, sol_lp)
n_bins = MIP_model.getNBinVars()
k_base = n_bins
# compute relaxation grip
lpcands, lpcandssol, lpcadsfrac, nlpcands, npriolpcands, nfracimplvars = MIP_model.getLPBranchCands()
print('binvars :', n_bins)
print('nbranchingcands :', nlpcands)
print('nfracimplintvars :', nfracimplvars)
print('relaxation grip :', 1 - nlpcands / n_bins )
if self.is_symmetric == False:
k_prime = haming_distance_solutions_asym(MIP_model, incumbent, sol_lp)
binary_supports = binary_support(MIP_model, incumbent)
k_base = binary_supports
phi_prime = k_prime / k_base
# phi_lp2relax = k_lp2relax / k_base
# if phi_lp2relax > 0:
# count_phi_lp_relax_diff += 1
phi_star = k_init
list_phi_prime.append(phi_prime)
list_phi_star.append(phi_star)
# list_phi_lp2relax.append(phi_lp2relax)
if phi_star <= phi_prime:
count_phi_star_smaller += 1
else:
list_phi_prime_invalid = phi_prime
list_phi_star_invalid = list_phi_star
print('instance : ', MIP_model.getProbName())
print('phi_prime = ', phi_prime)
print('phi_star = ', phi_star)
# print('phi_lp2relax = ', phi_lp2relax)
print('valid count: ', count_phi_star_smaller)
# print('lp relax diff count:', count_phi_lp_relax_diff)
# plt.clf()
# fig, ax = plt.subplots(3, 1, figsize=(6.4, 6.4))
# fig.suptitle("Evaluation of size of lb neighborhood")
# fig.subplots_adjust(top=0.5)
# ax[0].plot(k, objs)
# ax[0].set_title(instance_name, loc='right')
# ax[0].set_xlabel(r'$\ r $ ' + '(Neighborhood size: ' + r'$K = r \times N$)') #
# ax[0].set_ylabel("Objective")
# ax[1].plot(k, t)
# # ax[1].set_ylim([0,31])
# ax[1].set_ylabel("Solving time")
# ax[2].plot(k, perf_score)
# ax[2].set_ylabel("Performance score")
# plt.show()
# instance = ecole.scip.Model.from_pyscipopt(MIP_model)
# observation, _, _, done, _ = self.env.reset(instance)
#
# data_sample = [observation, k_init]
# filename = f'{self.regression_samples_directory}regression-{instance_name}.pkl'
# with gzip.open(filename, 'wb') as f:
# pickle.dump(data_sample, f)
index_instance += 1
arr_phi_prime = np.array(list_phi_prime).reshape(-1)
arr_phi_star = np.array(list_phi_star).reshape(-1)
# arr_phi_lp2relax = np.array(list_phi_lp2relax).reshape(-1)
ave_phi_prime = arr_phi_prime.sum() / len(arr_phi_prime)
ave_phi_star = arr_phi_star.sum() / len(arr_phi_star)
# ave_phi_lp2relax = arr_phi_lp2relax.sum() / len(arr_phi_lp2relax)
print(self.instance_type + self.instance_size)
print(self.incumbent_mode + 'Solution')
print('number of valid phi data points: ', count_phi_star_smaller)
print('average phi_star :', ave_phi_star )
print('average phi_prime: ', ave_phi_prime)
# print('average phi_lp2relax: ', ave_phi_lp2relax)
def load_dataset(self, dataset_directory=None):
self.regression_samples_directory = dataset_directory
filename = 'regression-' + self.instance_type + '-*.pkl'
# print(filename)
sample_files = [str(path) for path in pathlib.Path(self.regression_samples_directory).glob(filename)]
train_files = sample_files[:int(0.7 * len(sample_files))]
valid_files = sample_files[int(0.7 * len(sample_files)):int(0.8 * len(sample_files))]
test_files = sample_files[int(0.8 * len(sample_files)):]
train_data = GraphDataset(train_files)
train_loader = torch_geometric.data.DataLoader(train_data, batch_size=1, shuffle=True)
valid_data = GraphDataset(valid_files)
valid_loader = torch_geometric.data.DataLoader(valid_data, batch_size=1, shuffle=False)
test_data = GraphDataset(test_files)
test_loader = torch_geometric.data.DataLoader(test_data, batch_size=1, shuffle=False)
return train_loader, valid_loader, test_loader
def train(self, gnn_model, data_loader, optimizer=None):
"""
training function
:param gnn_model:
:param data_loader:
:param optimizer:
:return:
"""
mean_loss = 0
n_samples_precessed = 0
with torch.set_grad_enabled(optimizer is not None):
for batch in data_loader:
k_model = gnn_model(batch.constraint_features, batch.edge_index, batch.edge_attr, batch.variable_features)
k_init = batch.k_init
loss = F.l1_loss(k_model.float(), k_init.float())
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
mean_loss += loss.item() * batch.num_graphs
n_samples_precessed += batch.num_graphs
mean_loss /= n_samples_precessed
return mean_loss
def test(self, gnn_model, data_loader):
n_samples_precessed = 0
loss_list = []
k_model_list = []
k_init_list = []
graph_index = []
for batch in data_loader:
k_model = gnn_model(batch.constraint_features, batch.edge_index, batch.edge_attr, batch.variable_features)
k_init = batch.k_init
loss = F.l1_loss(k_model, k_init)
if batch.num_graphs == 1:
loss_list.append(loss.item())
k_model_list.append(k_model.item())
k_init_list.append(k_init)
graph_index.append(n_samples_precessed)
n_samples_precessed += 1
else:
for g in range(batch.num_graphs):
                    # F.l1_loss returns the batch-mean loss, so record the same scalar for every graph in the batch
                    loss_list.append(loss.item())
                    k_model_list.append(k_model[g])
                    k_init_list.append(k_init[g])
graph_index.append(n_samples_precessed)
n_samples_precessed += 1
loss_list = np.array(loss_list).reshape(-1)
k_model_list = np.array(k_model_list).reshape(-1)
k_init_list = np.array(k_init_list).reshape(-1)
graph_index = np.array(graph_index).reshape(-1)
loss_ave = loss_list.mean()
k_model_ave = k_model_list.mean()
k_init_ave = k_init_list.mean()
return loss_ave, k_model_ave, k_init_ave
def execute_regression(self, lr=0.0000001, n_epochs=20):
saved_gnn_directory = './result/saved_models/'
pathlib.Path(saved_gnn_directory).mkdir(parents=True, exist_ok=True)
train_loaders = {}
val_loaders = {}
test_loaders = {}
# load the small dataset
small_dataset = self.instance_type + "-small"
small_directory = './result/generated_instances/' + self.instance_type + '/' + '-small' + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
small_regression_samples_directory = small_directory + 'regression_samples' + '/'
train_loader, valid_loader, test_loader = self.load_dataset(dataset_directory=small_regression_samples_directory)
train_loaders[small_dataset] = train_loader
val_loaders[small_dataset] = valid_loader
test_loaders[small_dataset] = test_loader
# large_dataset = self.instance_type + "-large"
# large_directory = './result/generated_instances/' + self.instance_type + '/' + '-large' + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
# test_regression_samples_directory = large_directory + 'regression_samples' + '/'
# train_loader, valid_loader, test_loader = self.load_dataset(dataset_directory=test_regression_samples_directory)
# train_loaders[large_dataset] = train_loader
# val_loaders[large_dataset] = valid_loader
# test_loaders[large_dataset] = test_loader
model_gnn = GNNPolicy()
train_dataset = small_dataset
valid_dataset = small_dataset
test_dataset = small_dataset
# LEARNING_RATE = 0.0000001 # setcovering:0.0000005 cap-loc: 0.00000005 independentset: 0.0000001
optimizer = torch.optim.Adam(model_gnn.parameters(), lr=lr)
k_init = []
k_model = []
loss = []
epochs = []
for epoch in range(n_epochs):
print(f"Epoch {epoch}")
if epoch == 0:
optim = None
else:
optim = optimizer
train_loader = train_loaders[train_dataset]
train_loss = self.train(model_gnn, train_loader, optim)
print(f"Train loss: {train_loss:0.6f}")
# torch.save(model_gnn.state_dict(), 'trained_params_' + train_dataset + '.pth')
# model_gnn2.load_state_dict(torch.load('trained_params_' + train_dataset + '.pth'))
valid_loader = val_loaders[valid_dataset]
valid_loss = self.train(model_gnn, valid_loader, None)
print(f"Valid loss: {valid_loss:0.6f}")
test_loader = test_loaders[test_dataset]
loss_ave, k_model_ave, k_init_ave = self.test(model_gnn, test_loader)
loss.append(loss_ave)
k_model.append(k_model_ave)
k_init.append(k_init_ave)
epochs.append(epoch)
loss_np = np.array(loss).reshape(-1)
k_model_np = np.array(k_model).reshape(-1)
k_init_np = np.array(k_init).reshape(-1)
epochs_np = np.array(epochs).reshape(-1)
plt.close('all')
plt.clf()
fig, ax = plt.subplots(2, 1, figsize=(8, 6.4))
fig.suptitle("Test Result: prediction of initial k")
fig.subplots_adjust(top=0.5)
ax[0].set_title(test_dataset + '-' + self.incumbent_mode, loc='right')
ax[0].plot(epochs_np, loss_np)
ax[0].set_xlabel('epoch')
ax[0].set_ylabel("loss")
ax[1].plot(epochs_np, k_model_np, label='k-prediction')
ax[1].plot(epochs_np, k_init_np, label='k-label')
ax[1].set_xlabel('epoch')
ax[1].set_ylabel("k")
ax[1].set_ylim([0, 1.1])
ax[1].legend()
plt.show()
# torch.save(model_gnn.state_dict(),
# saved_gnn_directory + 'trained_params_mean_' + train_dataset + '_' + self.lbconstraint_mode + '_' + self.incumbent_mode + '.pth')
def execute_regression_k_prime(self, lr=0.0000001, n_epochs=20):
saved_gnn_directory = './result/saved_models/'
pathlib.Path(saved_gnn_directory).mkdir(parents=True, exist_ok=True)
train_loaders = {}
val_loaders = {}
test_loaders = {}
# load the small dataset
small_dataset = self.instance_type + "-small"
small_directory = './result/generated_instances/' + self.instance_type + '/' + '-small' + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
small_regression_samples_directory = small_directory + 'regression_samples_k_prime' + '/'
train_loader, valid_loader, test_loader = self.load_dataset(dataset_directory=small_regression_samples_directory)
train_loaders[small_dataset] = train_loader
val_loaders[small_dataset] = valid_loader
test_loaders[small_dataset] = test_loader
# large_dataset = self.instance_type + "-large"
# large_directory = './result/generated_instances/' + self.instance_type + '/' + '-large' + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
# test_regression_samples_directory = large_directory + 'regression_samples' + '/'
# train_loader, valid_loader, test_loader = self.load_dataset(dataset_directory=test_regression_samples_directory)
# train_loaders[large_dataset] = train_loader
# val_loaders[large_dataset] = valid_loader
# test_loaders[large_dataset] = test_loader
model_gnn = GNNPolicy()
train_dataset = small_dataset
valid_dataset = small_dataset
test_dataset = small_dataset
# LEARNING_RATE = 0.0000001 # setcovering:0.0000005 cap-loc: 0.00000005 independentset: 0.0000001
optimizer = torch.optim.Adam(model_gnn.parameters(), lr=lr)
k_init = []
k_model = []
loss = []
epochs = []
for epoch in range(n_epochs):
print(f"Epoch {epoch}")
if epoch == 0:
optim = None
else:
optim = optimizer
train_loader = train_loaders[train_dataset]
train_loss = self.train(model_gnn, train_loader, optim)
print(f"Train loss: {train_loss:0.6f}")
# torch.save(model_gnn.state_dict(), 'trained_params_' + train_dataset + '.pth')
# model_gnn2.load_state_dict(torch.load('trained_params_' + train_dataset + '.pth'))
valid_loader = val_loaders[valid_dataset]
valid_loss = self.train(model_gnn, valid_loader, None)
print(f"Valid loss: {valid_loss:0.6f}")
test_loader = test_loaders[test_dataset]
loss_ave, k_model_ave, k_init_ave = self.test(model_gnn, test_loader)
loss.append(loss_ave)
k_model.append(k_model_ave)
k_init.append(k_init_ave)
epochs.append(epoch)
loss_np = np.array(loss).reshape(-1)
k_model_np = np.array(k_model).reshape(-1)
k_init_np = np.array(k_init).reshape(-1)
epochs_np = np.array(epochs).reshape(-1)
plt.close('all')
plt.clf()
fig, ax = plt.subplots(2, 1, figsize=(8, 6.4))
fig.suptitle("Test Result: prediction of initial k")
fig.subplots_adjust(top=0.5)
ax[0].set_title(test_dataset + '-' + self.incumbent_mode, loc='right')
ax[0].plot(epochs_np, loss_np)
ax[0].set_xlabel('epoch')
ax[0].set_ylabel("loss")
ax[1].plot(epochs_np, k_model_np, label='k-prediction')
ax[1].plot(epochs_np, k_init_np, label='k-label')
ax[1].set_xlabel('epoch')
ax[1].set_ylabel("k")
ax[1].set_ylim([0, 1.1])
ax[1].legend()
plt.show()
torch.save(model_gnn.state_dict(),
saved_gnn_directory + 'trained_params_mean_' + train_dataset + '_' + self.lbconstraint_mode + '_' + self.incumbent_mode + '_k_prime.pth')
# def evaluate_lb_per_instance(self, node_time_limit, total_time_limit, index_instance, reset_k_at_2nditeration=False):
# """
# evaluate a single MIP instance by two algorithms: lb-baseline and lb-pred_k
# :param node_time_limit:
# :param total_time_limit:
# :param index_instance:
# :return:
# """
# instance = next(self.generator)
# MIP_model = instance.as_pyscipopt()
# MIP_model.setProbName(self.instance_type + '-' + str(index_instance))
# instance_name = MIP_model.getProbName()
# print('\n')
# print(instance_name)
#
# n_vars = MIP_model.getNVars()
# n_binvars = MIP_model.getNBinVars()
# print("N of variables: {}".format(n_vars))
# print("N of binary vars: {}".format(n_binvars))
# print("N of constraints: {}".format(MIP_model.getNConss()))
#
# valid, MIP_model, incumbent_solution = self.initialize_MIP(MIP_model)
# conti = -1
# # if self.incumbent_mode == 'rootsol' and self.instance_type == 'independentset':
# # conti = 196
#
# if valid:
# if index_instance > -1 and index_instance > conti:
# gc.collect()
# observation, _, _, done, _ = self.env.reset(instance)
# del observation
# # print(observation)
#
# if self.incumbent_mode == 'firstsol':
# action = {'limits/solutions': 1}
# elif self.incumbent_mode == 'rootsol':
# action = {'limits/nodes': 1} #
# sample_observation, _, _, done, _ = self.env.step(action)
#
#
# # print(sample_observation)
# graph = BipartiteNodeData(sample_observation.constraint_features,
# sample_observation.edge_features.indices,
# sample_observation.edge_features.values,
# sample_observation.variable_features)
#
# # We must tell pytorch geometric how many nodes there are, for indexing purposes
# graph.num_nodes = sample_observation.constraint_features.shape[0] + \
# sample_observation.variable_features.shape[
# 0]
#
# filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
# firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
#
# model = Model()
# model.readProblem(filename)
# sol = model.readSolFile(firstsol_filename)
#
# feas = model.checkSol(sol)
# try:
# model.addSol(sol, False)
# except:
# print('Error: the root solution of ' + model.getProbName() + ' is not feasible!')
#
# instance2 = ecole.scip.Model.from_pyscipopt(model)
# observation, _, _, done, _ = self.env.reset(instance2)
# graph2 = BipartiteNodeData(observation.constraint_features,
# observation.edge_features.indices,
# observation.edge_features.values,
# observation.variable_features)
#
# # We must tell pytorch geometric how many nodes there are, for indexing purposes
# graph2.num_nodes = observation.constraint_features.shape[0] + \
# observation.variable_features.shape[
# 0]
#
# # instance = Loader().load_instance('b1c1s1' + '.mps.gz')
# # MIP_model = instance
#
# # MIP_model.optimize()
# # print("Status:", MIP_model.getStatus())
# # print("best obj: ", MIP_model.getObjVal())
# # print("Solving time: ", MIP_model.getSolvingTime())
#
# initial_obj = MIP_model.getSolObjVal(incumbent_solution)
# print("Initial obj before LB: {}".format(initial_obj))
#
# binary_supports = binary_support(MIP_model, incumbent_solution)
# print('binary support: ', binary_supports)
#
# model_gnn = GNNPolicy()
#
# model_gnn.load_state_dict(torch.load(
# self.saved_gnn_directory + 'trained_params_mean_' + self.train_dataset + '_' + self.lbconstraint_mode + '_' + self.incumbent_mode + '.pth'))
#
# # model_gnn.load_state_dict(torch.load(
# # 'trained_params_' + self.instance_type + '.pth'))
#
# k_model = model_gnn(graph.constraint_features, graph.edge_index, graph.edge_attr,
# graph.variable_features)
#
# k_pred = k_model.item() * n_binvars
# print('GNN prediction: ', k_model.item())
#
# k_model2 = model_gnn(graph2.constraint_features, graph2.edge_index, graph2.edge_attr,
# graph2.variable_features)
#
# print('GNN prediction of model2: ', k_model2.item())
#
# if self.is_symmetric == False:
# k_pred = k_model.item() * binary_supports
#
# del k_model
# del graph
# del sample_observation
# del model_gnn
#
# # create a copy of MIP
# MIP_model.resetParams()
# MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(
# problemName='Baseline', origcopy=False)
# MIP_model_copy2, MIP_copy_vars2, success2 = MIP_model.createCopy(
# problemName='GNN',
# origcopy=False)
# MIP_model_copy3, MIP_copy_vars3, success3 = MIP_model.createCopy(
# problemName='GNN+reset',
# origcopy=False)
#
# print('MIP copies are created')
#
# MIP_model_copy, sol_MIP_copy = copy_sol(MIP_model, MIP_model_copy, incumbent_solution,
# MIP_copy_vars)
# MIP_model_copy2, sol_MIP_copy2 = copy_sol(MIP_model, MIP_model_copy2, incumbent_solution,
# MIP_copy_vars2)
# MIP_model_copy3, sol_MIP_copy3 = copy_sol(MIP_model, MIP_model_copy3, incumbent_solution,
# MIP_copy_vars3)
#
# print('incumbent solution is copied to MIP copies')
# MIP_model.freeProb()
# del MIP_model
# del incumbent_solution
#
# # sol = MIP_model_copy.getBestSol()
# # initial_obj = MIP_model_copy.getSolObjVal(sol)
# # print("Initial obj before LB: {}".format(initial_obj))
#
    #             # execute local branching baseline heuristic by Fischetti and Lodi
# lb_model = LocalBranching(MIP_model=MIP_model_copy, MIP_sol_bar=sol_MIP_copy, k=self.k_baseline,
# node_time_limit=node_time_limit,
# total_time_limit=total_time_limit)
# status, obj_best, elapsed_time, lb_bits, times, objs = lb_model.search_localbranch(is_symmetric=self.is_symmetric,
# reset_k_at_2nditeration=False)
# print("Instance:", MIP_model_copy.getProbName())
# print("Status of LB: ", status)
# print("Best obj of LB: ", obj_best)
# print("Solving time: ", elapsed_time)
# print('\n')
#
# MIP_model_copy.freeProb()
# del sol_MIP_copy
# del MIP_model_copy
#
# # sol = MIP_model_copy2.getBestSol()
# # initial_obj = MIP_model_copy2.getSolObjVal(sol)
# # print("Initial obj before LB: {}".format(initial_obj))
#
# # execute local branching with 1. first k predicted by GNN, 2. for 2nd iteration of lb, reset k to default value of baseline
# lb_model3 = LocalBranching(MIP_model=MIP_model_copy3, MIP_sol_bar=sol_MIP_copy3, k=k_pred,
# node_time_limit=node_time_limit,
# total_time_limit=total_time_limit)
# status, obj_best, elapsed_time, lb_bits_pred_reset, times_pred_rest, objs_pred_rest = lb_model3.search_localbranch(is_symmetric=self.is_symmetric,
# reset_k_at_2nditeration=reset_k_at_2nditeration)
#
# print("Instance:", MIP_model_copy3.getProbName())
# print("Status of LB: ", status)
# print("Best obj of LB: ", obj_best)
# print("Solving time: ", elapsed_time)
# print('\n')
#
# MIP_model_copy3.freeProb()
# del sol_MIP_copy3
# del MIP_model_copy3
#
# # execute local branching with 1. first k predicted by GNN; 2. from 2nd iteration of lb, continue lb algorithm with no further injection
# lb_model2 = LocalBranching(MIP_model=MIP_model_copy2, MIP_sol_bar=sol_MIP_copy2, k=k_pred,
# node_time_limit=node_time_limit,
# total_time_limit=total_time_limit)
# status, obj_best, elapsed_time, lb_bits_pred, times_pred, objs_pred = lb_model2.search_localbranch(is_symmetric=self.is_symmetric,
# reset_k_at_2nditeration=False)
#
# print("Instance:", MIP_model_copy2.getProbName())
# print("Status of LB: ", status)
# print("Best obj of LB: ", obj_best)
# print("Solving time: ", elapsed_time)
# print('\n')
#
# MIP_model_copy2.freeProb()
# del sol_MIP_copy2
# del MIP_model_copy2
#
# data = [objs, times, objs_pred, times_pred, objs_pred_rest, times_pred_rest]
# filename = f'{self.directory_lb_test}lb-test-{instance_name}.pkl' # instance 100-199
# with gzip.open(filename, 'wb') as f:
# pickle.dump(data, f)
# del data
# del objs
# del times
# del objs_pred
# del times_pred
# del objs_pred_rest
# del times_pred_rest
# del lb_model
# del lb_model2
# del lb_model3
#
# index_instance += 1
# del instance
# return index_instance
#
# def evaluate_localbranching(self, test_instance_size='-small', train_instance_size='-small', total_time_limit=60, node_time_limit=30, reset_k_at_2nditeration=False):
#
# self.train_dataset = self.instance_type + train_instance_size
# self.evaluation_dataset = self.instance_type + test_instance_size
#
# self.generator = generator_switcher(self.evaluation_dataset)
# self.generator.seed(self.seed)
#
# direc = './data/generated_instances/' + self.instance_type + '/' + test_instance_size + '/'
# self.directory_transformedmodel = direc + 'transformedmodel' + '/'
# self.directory_sol = direc + self.incumbent_mode + '/'
#
# self.k_baseline = 20
#
# self.is_symmetric = True
# if self.lbconstraint_mode == 'asymmetric':
# self.is_symmetric = False
# self.k_baseline = self.k_baseline / 2
# total_time_limit = total_time_limit
# node_time_limit = node_time_limit
#
# self.saved_gnn_directory = './result/saved_models/'
#
# directory = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
# self.directory_lb_test = directory + 'lb-from-' + self.incumbent_mode + '-t_node' + str(node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
# pathlib.Path(self.directory_lb_test).mkdir(parents=True, exist_ok=True)
#
# index_instance = 0
# while index_instance < 200:
# index_instance = self.evaluate_lb_per_instance(node_time_limit=node_time_limit, total_time_limit=total_time_limit, index_instance=index_instance, reset_k_at_2nditeration=reset_k_at_2nditeration)
def evaluate_lb_per_instance(self, node_time_limit, total_time_limit, index_instance,
reset_k_at_2nditeration=False):
"""
evaluate a single MIP instance by two algorithms: lb-baseline and lb-pred_k
:param node_time_limit:
:param total_time_limit:
:param index_instance:
:return:
"""
device = self.device
gc.collect()
filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
MIP_model = Model()
MIP_model.readProblem(filename)
instance_name = MIP_model.getProbName()
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
incumbent = MIP_model.readSolFile(firstsol_filename)
feas = MIP_model.checkSol(incumbent)
try:
MIP_model.addSol(incumbent, False)
except:
print('Error: the root solution of ' + instance_name + ' is not feasible!')
instance = ecole.scip.Model.from_pyscipopt(MIP_model)
observation, _, _, done, _ = self.env.reset(instance)
graph = BipartiteNodeData(observation.constraint_features,
observation.edge_features.indices,
observation.edge_features.values,
observation.variable_features)
# We must tell pytorch geometric how many nodes there are, for indexing purposes
graph.num_nodes = observation.constraint_features.shape[0] + \
observation.variable_features.shape[
0]
# instance = Loader().load_instance('b1c1s1' + '.mps.gz')
# MIP_model = instance
# MIP_model.optimize()
# print("Status:", MIP_model.getStatus())
# print("best obj: ", MIP_model.getObjVal())
# print("Solving time: ", MIP_model.getSolvingTime())
initial_obj = MIP_model.getSolObjVal(incumbent)
print("Initial obj before LB: {}".format(initial_obj))
binary_supports = binary_support(MIP_model, incumbent)
print('binary support: ', binary_supports)
k_model = self.regression_model_gnn(graph.constraint_features, graph.edge_index, graph.edge_attr,
graph.variable_features)
k_pred = k_model.item() * n_binvars
print('GNN prediction: ', k_model.item())
if self.is_symmetric == False:
k_pred = k_model.item() * binary_supports
del k_model
del graph
del observation
# create a copy of MIP
MIP_model.resetParams()
MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=False)
MIP_model_copy2, MIP_copy_vars2, success2 = MIP_model.createCopy(
problemName='GNN',
origcopy=False)
MIP_model_copy3, MIP_copy_vars3, success3 = MIP_model.createCopy(
problemName='GNN+reset',
origcopy=False)
print('MIP copies are created')
MIP_model_copy, sol_MIP_copy = copy_sol(MIP_model, MIP_model_copy, incumbent,
MIP_copy_vars)
MIP_model_copy2, sol_MIP_copy2 = copy_sol(MIP_model, MIP_model_copy2, incumbent,
MIP_copy_vars2)
MIP_model_copy3, sol_MIP_copy3 = copy_sol(MIP_model, MIP_model_copy3, incumbent,
MIP_copy_vars3)
print('incumbent solution is copied to MIP copies')
MIP_model.freeProb()
del MIP_model
del incumbent
# sol = MIP_model_copy.getBestSol()
# initial_obj = MIP_model_copy.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# execute local branching baseline heuristic by Fischetti and Lodi
lb_model = LocalBranching(MIP_model=MIP_model_copy, MIP_sol_bar=sol_MIP_copy, k=self.k_baseline,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits, times, objs, _, _ = lb_model.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=False,
policy=None,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy.freeProb()
del sol_MIP_copy
del MIP_model_copy
# sol = MIP_model_copy2.getBestSol()
# initial_obj = MIP_model_copy2.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# execute local branching with 1. first k predicted by GNN, 2. for 2nd iteration of lb, reset k to default value of baseline
lb_model3 = LocalBranching(MIP_model=MIP_model_copy3, MIP_sol_bar=sol_MIP_copy3, k=k_pred,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits_regression_reset, times_regression_reset, objs_regression_reset, _, _ = lb_model3.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=reset_k_at_2nditeration,
policy=None,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy3.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy3.freeProb()
del sol_MIP_copy3
del MIP_model_copy3
# execute local branching with 1. first k predicted by GNN; 2. from 2nd iteration of lb, continue lb algorithm with no further injection
lb_model2 = LocalBranching(MIP_model=MIP_model_copy2, MIP_sol_bar=sol_MIP_copy2, k=k_pred,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits_regression_noreset, times_regression_noreset, objs_regression_noreset, _, _ = lb_model2.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=False,
policy=None,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy2.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy2.freeProb()
del sol_MIP_copy2
del MIP_model_copy2
data = [objs, times, objs_regression_noreset, times_regression_noreset, objs_regression_reset, times_regression_reset]
filename = f'{self.directory_lb_test}lb-test-{instance_name}.pkl' # instance 100-199
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f)
del data
del objs
del times
del objs_regression_noreset
del times_regression_noreset
del objs_regression_reset
del times_regression_reset
del lb_model
del lb_model2
del lb_model3
index_instance += 1
del instance
return index_instance
def evaluate_lb_per_instance_k_prime(self, node_time_limit, total_time_limit, index_instance,
reset_k_at_2nditeration=False):
"""
evaluate a single MIP instance by two algorithms: lb-baseline and lb-pred_k
:param node_time_limit:
:param total_time_limit:
:param index_instance:
:return:
"""
device = self.device
gc.collect()
filename = f'{self.directory_transformedmodel}{self.instance_type}-{str(index_instance)}_transformed.cip'
firstsol_filename = f'{self.directory_sol}{self.incumbent_mode}-{self.instance_type}-{str(index_instance)}_transformed.sol'
MIP_model = Model()
MIP_model.readProblem(filename)
instance_name = MIP_model.getProbName()
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
incumbent = MIP_model.readSolFile(firstsol_filename)
feas = MIP_model.checkSol(incumbent)
try:
MIP_model.addSol(incumbent, False)
except:
print('Error: the root solution of ' + instance_name + ' is not feasible!')
instance = ecole.scip.Model.from_pyscipopt(MIP_model)
observation, _, _, done, _ = self.env.reset(instance)
graph = BipartiteNodeData(observation.constraint_features,
observation.edge_features.indices,
observation.edge_features.values,
observation.variable_features)
# We must tell pytorch geometric how many nodes there are, for indexing purposes
graph.num_nodes = observation.constraint_features.shape[0] + \
observation.variable_features.shape[
0]
# instance = Loader().load_instance('b1c1s1' + '.mps.gz')
# MIP_model = instance
# MIP_model.optimize()
# print("Status:", MIP_model.getStatus())
# print("best obj: ", MIP_model.getObjVal())
# print("Solving time: ", MIP_model.getSolvingTime())
# create a copy of MIP
MIP_model.resetParams()
MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=False)
# MIP_model_copy2, MIP_copy_vars2, success2 = MIP_model.createCopy(
# problemName='GNN',
# origcopy=False)
MIP_model_copy3, MIP_copy_vars3, success3 = MIP_model.createCopy(
problemName='GNN+reset',
origcopy=False)
print('MIP copies are created')
MIP_model_copy, sol_MIP_copy = copy_sol(MIP_model, MIP_model_copy, incumbent,
MIP_copy_vars)
# MIP_model_copy2, sol_MIP_copy2 = copy_sol(MIP_model, MIP_model_copy2, incumbent,
# MIP_copy_vars2)
MIP_model_copy3, sol_MIP_copy3 = copy_sol(MIP_model, MIP_model_copy3, incumbent,
MIP_copy_vars3)
print('incumbent solution is copied to MIP copies')
# solve the root node and get the LP solution, compute k_prime
k_prime = self.compute_k_prime(MIP_model, incumbent)
initial_obj = MIP_model.getSolObjVal(incumbent)
print("Initial obj before LB: {}".format(initial_obj))
binary_supports = binary_support(MIP_model, incumbent)
print('binary support: ', binary_supports)
k_model = self.regression_model_gnn(graph.constraint_features, graph.edge_index, graph.edge_attr,
graph.variable_features)
k_pred = k_model.item() * k_prime
print('GNN prediction: ', k_model.item())
if self.is_symmetric == False:
k_pred = k_model.item() * k_prime
del k_model
del graph
del observation
MIP_model.freeProb()
del MIP_model
del incumbent
# sol = MIP_model_copy.getBestSol()
# initial_obj = MIP_model_copy.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# execute local branching baseline heuristic by Fischetti and Lodi
lb_model = LocalBranching(MIP_model=MIP_model_copy, MIP_sol_bar=sol_MIP_copy, k=self.k_baseline,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits, times, objs, _, _ = lb_model.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=False,
policy=None,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy.freeProb()
del sol_MIP_copy
del MIP_model_copy
# sol = MIP_model_copy2.getBestSol()
# initial_obj = MIP_model_copy2.getSolObjVal(sol)
# print("Initial obj before LB: {}".format(initial_obj))
# execute local branching with 1. first k predicted by GNN, 2. for 2nd iteration of lb, reset k to default value of baseline
lb_model3 = LocalBranching(MIP_model=MIP_model_copy3, MIP_sol_bar=sol_MIP_copy3, k=k_pred,
node_time_limit=node_time_limit,
total_time_limit=total_time_limit)
status, obj_best, elapsed_time, lb_bits_regression_reset, times_regression_reset, objs_regression_reset, _, _ = lb_model3.mdp_localbranch(
is_symmetric=self.is_symmetric,
reset_k_at_2nditeration=reset_k_at_2nditeration,
policy=None,
optimizer=None,
device=device
)
print("Instance:", MIP_model_copy3.getProbName())
print("Status of LB: ", status)
print("Best obj of LB: ", obj_best)
print("Solving time: ", elapsed_time)
print('\n')
MIP_model_copy3.freeProb()
del sol_MIP_copy3
del MIP_model_copy3
# # execute local branching with 1. first k predicted by GNN; 2. from 2nd iteration of lb, continue lb algorithm with no further injection
#
# lb_model2 = LocalBranching(MIP_model=MIP_model_copy2, MIP_sol_bar=sol_MIP_copy2, k=k_pred,
# node_time_limit=node_time_limit,
# total_time_limit=total_time_limit)
# status, obj_best, elapsed_time, lb_bits_regression_noreset, times_regression_noreset, objs_regression_noreset, _, _ = lb_model2.mdp_localbranch(
# is_symmetric=self.is_symmetric,
# reset_k_at_2nditeration=False,
# policy=None,
# optimizer=None,
# device=device
# )
#
# print("Instance:", MIP_model_copy2.getProbName())
# print("Status of LB: ", status)
# print("Best obj of LB: ", obj_best)
# print("Solving time: ", elapsed_time)
# print('\n')
#
# MIP_model_copy2.freeProb()
# del sol_MIP_copy2
# del MIP_model_copy2
data = [objs, times, objs_regression_reset, times_regression_reset]
filename = f'{self.directory_lb_test}lb-test-{instance_name}.pkl' # instance 100-199
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f)
del data
del objs
del times
del objs_regression_reset
del times_regression_reset
del lb_model
del lb_model3
index_instance += 1
del instance
return index_instance
def evaluate_localbranching(self, test_instance_size='-small', train_instance_size='-small', total_time_limit=60,
node_time_limit=30, reset_k_at_2nditeration=False):
self.train_dataset = self.instance_type + train_instance_size
self.evaluation_dataset = self.instance_type + test_instance_size
direc = './data/generated_instances/' + self.instance_type + '/' + test_instance_size + '/'
self.directory_transformedmodel = direc + 'transformedmodel' + '/'
self.directory_sol = direc + self.incumbent_mode + '/'
self.k_baseline = 20
self.is_symmetric = True
if self.lbconstraint_mode == 'asymmetric':
self.is_symmetric = False
self.k_baseline = self.k_baseline / 2
total_time_limit = total_time_limit
node_time_limit = node_time_limit
self.saved_gnn_directory = './result/saved_models/'
self.regression_model_gnn = GNNPolicy()
self.regression_model_gnn.load_state_dict(torch.load(
self.saved_gnn_directory + 'trained_params_mean_' + self.train_dataset + '_' + self.lbconstraint_mode + '_' + self.incumbent_mode + '.pth'))
self.regression_model_gnn.to(self.device)
directory = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
self.directory_lb_test = directory + 'lb-from-' + self.incumbent_mode + '-t_node' + str(
node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
pathlib.Path(self.directory_lb_test).mkdir(parents=True, exist_ok=True)
index_instance = 161
while index_instance < 165:
index_instance = self.evaluate_lb_per_instance(node_time_limit=node_time_limit,
total_time_limit=total_time_limit,
index_instance=index_instance,
reset_k_at_2nditeration=reset_k_at_2nditeration)
def solve2opt_evaluation(self, test_instance_size='-small'):
self.evaluation_dataset = self.instance_type + test_instance_size
directory_opt = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + 'opt_solution' + '/'
pathlib.Path(directory_opt).mkdir(parents=True, exist_ok=True)
self.generator = generator_switcher(self.evaluation_dataset)
self.generator.seed(self.seed)
index_instance = 0
while index_instance < 200:
instance = next(self.generator)
MIP_model = instance.as_pyscipopt()
MIP_model.setProbName(self.instance_type + test_instance_size + '-' + str(index_instance))
instance_name = MIP_model.getProbName()
print(' \n')
print(instance_name)
n_vars = MIP_model.getNVars()
n_binvars = MIP_model.getNBinVars()
print("N of variables: {}".format(n_vars))
print("N of binary vars: {}".format(n_binvars))
print("N of constraints: {}".format(MIP_model.getNConss()))
valid, MIP_model, incumbent_solution = self.initialize_MIP(MIP_model)
if valid:
if index_instance > 99:
MIP_model.resetParams()
MIP_model_copy, MIP_copy_vars, success = MIP_model.createCopy(
problemName='Baseline', origcopy=False)
MIP_model_copy.setParam('presolving/maxrounds', 0)
MIP_model_copy.setParam('presolving/maxrestarts', 0)
MIP_model_copy.setParam("display/verblevel", 0)
MIP_model_copy.optimize()
status = MIP_model_copy.getStatus()
if status == 'optimal':
obj = MIP_model_copy.getObjVal()
time = MIP_model_copy.getSolvingTime()
data = [obj, time]
filename = f'{directory_opt}{instance_name}-optimal-obj-time.pkl'
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f)
del data
else:
print('Warning: solved problem ' + instance_name + ' is not optimal!')
print("instance:", MIP_model_copy.getProbName(),
"status:", MIP_model_copy.getStatus(),
"best obj: ", MIP_model_copy.getObjVal(),
"solving time: ", MIP_model_copy.getSolvingTime())
MIP_model_copy.freeProb()
del MIP_copy_vars
del MIP_model_copy
index_instance += 1
else:
print('This instance is not valid for evaluation')
MIP_model.freeProb()
del MIP_model
del incumbent_solution
del instance
def primal_integral(self, test_instance_size, total_time_limit=60, node_time_limit=30):
directory = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + self.incumbent_mode + '/'
directory_lb_test = directory + 'lb-from-' + self.incumbent_mode + '-t_node' + str(node_time_limit) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
if self.incumbent_mode == 'firstsol':
directory_2 = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + 'rootsol' + '/'
directory_lb_test_2 = directory_2 + 'lb-from-' + 'rootsol' + '-t_node' + str(30) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
elif self.incumbent_mode == 'rootsol':
directory_2 = './result/generated_instances/' + self.instance_type + '/' + test_instance_size + '/' + self.lbconstraint_mode + '/' + 'firstsol' + '/'
directory_lb_test_2 = directory_2 + 'lb-from-' + 'firstsol' + '-t_node' + str(30) + 's' + '-t_total' + str(total_time_limit) + 's' + test_instance_size + '/'
primal_int_baselines = []
primal_int_preds = []
primal_int_preds_reset = []
primal_gap_final_baselines = []
primal_gap_final_preds = []
primal_gap_final_preds_reset = []
steplines_baseline = []
steplines_pred = []
steplines_pred_reset = []
for i in range(100, 200):
instance_name = self.instance_type + '-' + str(i) + '_transformed' # instance 100-199
filename = f'{directory_lb_test}lb-test-{instance_name}.pkl'
with gzip.open(filename, 'rb') as f:
data = pickle.load(f)
objs, times, objs_pred, times_pred, objs_pred_reset, times_pred_reset = data # objs contains objs of a single instance of a lb test
# filename_2 = f'{directory_lb_test_2}lb-test-{instance_name}.pkl'
#
# with gzip.open(filename_2, 'rb') as f:
# data = pickle.load(f)
# objs_2, times_2, objs_pred_2, times_pred_2, objs_pred_reset_2, times_pred_reset_2 = data # objs contains objs of a single instance of a lb test
a = [objs.min(), objs_pred.min(), objs_pred_reset.min()] # objs_2.min(), objs_pred_2.min(), objs_pred_reset_2.min()
# a = [objs.min(), objs_pred.min(), objs_pred_reset.min()]
obj_opt = np.amin(a)
# compute primal gap for baseline localbranching run
# if times[-1] < total_time_limit:
times = np.append(times, total_time_limit)
objs = np.append(objs, objs[-1])
gamma_baseline = np.zeros(len(objs))
for j in range(len(objs)):
if objs[j] == 0 and obj_opt == 0:
gamma_baseline[j] = 0
elif objs[j] * obj_opt < 0:
gamma_baseline[j] = 1
else:
gamma_baseline[j] = np.abs(objs[j] - obj_opt) / np.maximum(np.abs(objs[j]), np.abs(obj_opt)) #
# compute the primal gap of last objective
primal_gap_final_baseline = np.abs(objs[-1] - obj_opt) / np.abs(obj_opt)
primal_gap_final_baselines.append(primal_gap_final_baseline)
# create step line
stepline_baseline = interp1d(times, gamma_baseline, 'previous')
steplines_baseline.append(stepline_baseline)
# compute primal integral
primal_int_baseline = 0
for j in range(len(objs) - 1):
primal_int_baseline += gamma_baseline[j] * (times[j + 1] - times[j])
primal_int_baselines.append(primal_int_baseline)
# lb-gnn
# if times_pred[-1] < total_time_limit:
times_pred = np.append(times_pred, total_time_limit)
objs_pred = np.append(objs_pred, objs_pred[-1])
gamma_pred = np.zeros(len(objs_pred))
for j in range(len(objs_pred)):
if objs_pred[j] == 0 and obj_opt == 0:
gamma_pred[j] = 0
elif objs_pred[j] * obj_opt < 0:
gamma_pred[j] = 1
else:
gamma_pred[j] = np.abs(objs_pred[j] - obj_opt) / np.maximum(np.abs(objs_pred[j]), np.abs(obj_opt)) #
primal_gap_final_pred = np.abs(objs_pred[-1] - obj_opt) / np.abs(obj_opt)
primal_gap_final_preds.append(primal_gap_final_pred)
stepline_pred = interp1d(times_pred, gamma_pred, 'previous')
steplines_pred.append(stepline_pred)
#
# t = np.linspace(start=0.0, stop=total_time_limit, num=1001)
# plt.close('all')
# plt.clf()
# fig, ax = plt.subplots(figsize=(8, 6.4))
# fig.suptitle("Test Result: comparison of primal gap")
# fig.subplots_adjust(top=0.5)
# # ax.set_title(instance_name, loc='right')
# ax.plot(t, stepline_baseline(t), label='lb baseline')
# ax.plot(t, stepline_pred(t), label='lb with k predicted')
# ax.set_xlabel('time /s')
# ax.set_ylabel("objective")
# ax.legend()
# plt.show()
            # compute primal integral
primal_int_pred = 0
for j in range(len(objs_pred) - 1):
primal_int_pred += gamma_pred[j] * (times_pred[j + 1] - times_pred[j])
primal_int_preds.append(primal_int_pred)
# lb-gnn-reset
times_pred_reset = np.append(times_pred_reset, total_time_limit)
objs_pred_reset =
|
np.append(objs_pred_reset, objs_pred_reset[-1])
|
numpy.append
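The completion above pads the regression-reset curves the same way the baseline and no-reset curves were padded a few lines earlier: np.append returns a new array with the extra element, so every step curve ends exactly at the time limit before the primal integral is accumulated. A minimal sketch with made-up numbers (only numpy is assumed):

import numpy as np

total_time_limit = 60
times_pred_reset = np.array([0.0, 12.5, 40.0])     # times at which new incumbents were found
objs_pred_reset = np.array([100.0, 82.0, 75.0])    # corresponding incumbent objectives

# hold the last incumbent objective until the time limit
times_pred_reset = np.append(times_pred_reset, total_time_limit)
objs_pred_reset = np.append(objs_pred_reset, objs_pred_reset[-1])

print(times_pred_reset)   # [ 0.  12.5 40.  60. ]
print(objs_pred_reset)    # [100.  82.  75.  75.]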
|
import re
import numpy as np
from lumicks import pylake
import pytest
from lumicks.pylake.kymotracker.detail.calibrated_images import CalibratedKymographChannel
from lumicks.pylake.kymo import EmptyKymo
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from .data.mock_confocal import generate_kymo
def test_kymo_properties(h5_file):
f = pylake.File.from_h5py(h5_file)
if f.format_version == 2:
kymo = f.kymos["Kymo1"]
reference_timestamps = np.array([[2.006250e+10, 2.109375e+10, 2.206250e+10, 2.309375e+10],
[2.025000e+10, 2.128125e+10, 2.225000e+10, 2.328125e+10],
[2.043750e+10, 2.146875e+10, 2.243750e+10, 2.346875e+10],
[2.062500e+10, 2.165625e+10, 2.262500e+10, 2.365625e+10],
[2.084375e+10, 2.187500e+10, 2.284375e+10, 2.387500e+10]], np.int64)
assert repr(kymo) == "Kymo(pixels=5)"
with pytest.deprecated_call():
kymo.json
with pytest.deprecated_call():
assert kymo.has_fluorescence
with pytest.deprecated_call():
assert not kymo.has_force
assert kymo.pixels_per_line == 5
assert len(kymo.infowave) == 64
assert kymo.rgb_image.shape == (5, 4, 3)
assert kymo.red_image.shape == (5, 4)
assert kymo.blue_image.shape == (5, 4)
assert kymo.green_image.shape == (5, 4)
np.testing.assert_allclose(kymo.timestamps, reference_timestamps)
assert kymo.fast_axis == "X"
np.testing.assert_allclose(kymo.pixelsize_um, 10/1000)
np.testing.assert_allclose(kymo.line_time_seconds, 1.03125)
np.testing.assert_allclose(kymo.center_point_um["x"], 58.075877109272604)
np.testing.assert_allclose(kymo.center_point_um["y"], 31.978375270573267)
np.testing.assert_allclose(kymo.center_point_um["z"], 0)
np.testing.assert_allclose(kymo.size_um, [0.050])
def test_kymo_slicing(h5_file):
f = pylake.File.from_h5py(h5_file)
if f.format_version == 2:
kymo = f.kymos["Kymo1"]
kymo_reference = np.transpose([[2, 0, 0, 0, 2], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]])
assert kymo.red_image.shape == (5, 4)
np.testing.assert_allclose(kymo.red_image.data, kymo_reference)
sliced = kymo[:]
assert sliced.red_image.shape == (5, 4)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference)
sliced = kymo["1s":]
assert sliced.red_image.shape == (5, 3)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, 1:])
sliced = kymo["0s":]
assert sliced.red_image.shape == (5, 4)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference)
sliced = kymo["0s":"2s"]
assert sliced.red_image.shape == (5, 2)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, :2])
sliced = kymo["0s":"-1s"]
assert sliced.red_image.shape == (5, 3)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, :-1])
sliced = kymo["0s":"-2s"]
assert sliced.red_image.shape == (5, 2)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, :-2])
sliced = kymo["0s":"3s"]
assert sliced.red_image.shape == (5, 3)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, :3])
sliced = kymo["1s":"2s"]
assert sliced.red_image.shape == (5, 1)
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, 1:2])
sliced = kymo["0s":"10s"]
assert sliced.red_image.shape == (5, 4)
|
np.testing.assert_allclose(sliced.red_image.data, kymo_reference[:, 0:10])
|
numpy.testing.assert_allclose
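The kymograph slicing assertions above all go through numpy.testing.assert_allclose, which passes silently when two arrays agree within a relative/absolute tolerance and raises AssertionError otherwise. A small self-contained example, independent of the pylake fixtures:

import numpy as np

expected = np.array([[2.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0]])
measured = expected * (1 + 1e-9)        # tiny relative numerical noise

np.testing.assert_allclose(measured, expected)   # passes: within the default rtol

try:
    np.testing.assert_allclose(measured + 0.5, expected)
except AssertionError:
    print('mismatch detected, as expected')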
|
from CTL.causal_tree.ctl.binary_ctl import *
# from CTL.causal_tree.util import *
import numpy as np
class TriggerNode(CTLearnNode):
def __init__(self, trigger=0.0, **kwargs):
# ----------------------------------------------------------------
# Causal tree node
# ----------------------------------------------------------------
super().__init__(**kwargs)
self.trigger = trigger
class TriggerTree(CTLearn):
def __init__(self, quartile=True, old_trigger_code=False, **kwargs):
super().__init__(**kwargs)
self.quartile = quartile
self.old_trigger_code = old_trigger_code
self.root = TriggerNode()
@abstractmethod
def fit(self, x, y, t):
pass
def _eval(self, train_y, train_t, val_y, val_t):
"""Continuous case"""
total_train = train_y.shape[0]
total_val = val_y.shape[0]
return_val = (-np.inf, -np.inf, -np.inf)
if total_train == 0 or total_val == 0:
return return_val
if self.old_trigger_code:
unique_treatment = np.unique(train_t)
if unique_treatment.shape[0] == 1:
return return_val
unique_treatment = (unique_treatment[1:] + unique_treatment[:-1]) / 2
# ignore the first and last
unique_treatment = unique_treatment[1:-1]
if self.quartile:
first_quartile = int(np.floor(unique_treatment.shape[0] / 4))
third_quartile = int(np.ceil(3 * unique_treatment.shape[0] / 4))
unique_treatment = unique_treatment[first_quartile:third_quartile]
# ----------------------------------------------------------------
# Max values done later
# ----------------------------------------------------------------
# if self.max_values < 1:
# idx = np.round(np.linspace(
# 0, len(unique_treatment) - 1, self.max_values * len(unique_treatment))).astype(int)
# unique_treatment = unique_treatment[idx]
# else:
# idx = np.round(np.linspace(
# 0, len(unique_treatment) - 1, self.max_values)).astype(int)
# unique_treatment = unique_treatment[idx]
yyt =
|
np.tile(train_y, (unique_treatment.shape[0], 1))
|
numpy.tile
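The completion builds yyt by stacking one copy of the training outcomes per candidate trigger, so every threshold can later be compared against the same observations in one vectorized operation. The sketch below only illustrates the shapes; the data and the treated-mask step are fabricated for illustration and are not taken from the tree code above:

import numpy as np

train_y = np.array([1.0, 3.0, 2.0, 5.0])           # outcomes
train_t = np.array([0.1, 0.4, 0.7, 0.9])           # continuous treatment values
unique_treatment = np.array([0.25, 0.55, 0.8])     # candidate trigger thresholds

# one row of outcomes (and treatments) per candidate trigger: shape (3, 4)
yyt = np.tile(train_y, (unique_treatment.shape[0], 1))
ttt = np.tile(train_t, (unique_treatment.shape[0], 1))

# which samples count as treated under each trigger
treated = ttt >= unique_treatment[:, np.newaxis]
print(yyt.shape)   # (3, 4)
print(treated)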
|
# Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import os, sys
import pickle
import pdb
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def main():
stats_url = os.path.join('stats', 'stats_' + 'detectability.pickle')
print("generating plots from: {}".format(stats_url))
statistics = pickle.load(open(stats_url, 'rb'), encoding='latin1')
#box plot CH-div vs Rand Index
#We create a list with the 8 boxes
data1 = np.array(statistics['ari_Z'])
fil = np.array([(chd>0.55)&(chd<0.65) for chd in statistics['CH_div']])
fil2 = np.array([n == 250 for n in statistics['N']])
data1 = data1[fil&fil2].flatten()
data2 = np.array(statistics['ari_Z'])
fil = np.array([(chd>0.65)&(chd<0.75) for chd in statistics['CH_div']])
fil2 = np.array([n == 250 for n in statistics['N']])
data2 = data2[fil&fil2].flatten()
data3 = np.array(statistics['ari_Z'])
fil = np.array([(chd>0.75)&(chd< 0.85) for chd in statistics['CH_div']])
fil2 = np.array([n == 250 for n in statistics['N']])
data3 = data3[fil&fil2].flatten()
data4 =
|
np.array(statistics['ari_Z'])
|
numpy.array
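Each data block in this plotting script follows the same pattern: wrap the statistic of interest in np.array, build boolean masks from the CH-divergence bin and the network size, then index and flatten. A compact illustration with fabricated statistics (the real values come from the pickled stats file):

import numpy as np

statistics = {
    'ari_Z': [[0.91, 0.88], [0.40, 0.45], [0.97, 0.99], [0.60, 0.55]],
    'CH_div': [0.60, 0.70, 0.80, 0.62],
    'N': [250, 500, 250, 250],
}

data = np.array(statistics['ari_Z'])                                   # shape (4, 2)
fil = np.array([(chd > 0.55) & (chd < 0.65) for chd in statistics['CH_div']])
fil2 = np.array([n == 250 for n in statistics['N']])

# keep only runs in the 0.55-0.65 CH-divergence bin with N == 250, then flatten
data1 = data[fil & fil2].flatten()
print(data1)   # [0.91 0.88 0.6  0.55]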
|
"""Tests for :mod:`katsdpsigproc.rfi.twodflag`."""
import concurrent.futures
import numpy as np
import scipy.interpolate
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from nose.tools import assert_equal, assert_less, assert_raises
from nose.plugins.skip import SkipTest
from .. import twodflag
class TestAsbool:
def _test(self, dtype, expect_view):
a = np.array([0, 1, 1, 0, 1, 0, 0, 1], dtype)
expected = a.astype(np.bool_)
out = twodflag._asbool(a)
assert_equal(np.bool_, out.dtype)
np.testing.assert_array_equal(expected, out)
if expect_view:
# Change a, out must change because it is a view
a[0] = not a[0]
assert_equal(bool(a[0]), out[0])
def test_uint8(self):
self._test(np.uint8, True)
def test_uint16(self):
self._test(np.uint16, False)
def test_bool(self):
self._test(np.bool_, True)
class TestAverageFreq:
def setup(self):
self.small_data = np.arange(30, dtype=np.float32).reshape(5, 6, 1).repeat(2, axis=2)
self.small_flags = np.zeros(self.small_data.shape, np.bool_)
self.small_flags[3, :, 0] = 1
self.small_flags[:, 4, 0] = 1
self.small_flags[2, 0, :] = 1
self.small_flags[2, 5, :] = 1
def test_one(self):
"""_average_freq with 1 channel must have no effect on unflagged data."""
avg_data, avg_flags = twodflag._average_freq(self.small_data, self.small_flags,
twodflag._as_min_dtype(1))
expected = self.small_data.copy()
expected[self.small_flags] = 0
assert_equal(np.float32, avg_data.dtype)
assert_equal(np.bool_, avg_flags.dtype)
np.testing.assert_array_equal(np.moveaxis(expected, -1, 0), avg_data)
np.testing.assert_array_equal(np.moveaxis(self.small_flags, -1, 0), avg_flags)
def test_divides(self):
"""Test _average_freq when averaging factor divides in exactly."""
expected_data = np.array([
[
[0.5, 2.5, 5.0],
[6.5, 8.5, 11.0],
[13.0, 14.5, 0.0],
[0.0, 0.0, 0.0],
[24.5, 26.5, 29.0]
],
[
[0.5, 2.5, 4.5],
[6.5, 8.5, 10.5],
[13.0, 14.5, 16.0],
[18.5, 20.5, 22.5],
[24.5, 26.5, 28.5]
]], np.float32)
expected_flags = np.array([
[
[False, False, False],
[False, False, False],
[False, False, True],
[True, True, True],
[False, False, False]
],
[[False, False, False]] * 5])
avg_data, avg_flags = twodflag._average_freq(self.small_data, self.small_flags,
twodflag._as_min_dtype(2))
assert_equal(np.float32, avg_data.dtype)
assert_equal(np.bool_, avg_flags.dtype)
np.testing.assert_array_equal(expected_data, avg_data)
np.testing.assert_array_equal(expected_flags, avg_flags)
def test_uneven(self):
"""Test _average_freq when averaging factor does not divide number of channels."""
expected_data = np.array([
[
[1.5, 5.0],
[7.5, 11.0],
[14.0, 0.0],
[0.0, 0.0],
[25.5, 29.0],
],
[
[1.5, 4.5],
[7.5, 10.5],
[14.0, 16.0],
[19.5, 22.5],
[25.5, 28.5]
]], np.float32)
expected_flags = np.array([
[
[False, False],
[False, False],
[False, True],
[True, True],
[False, False]
], [[False, False]] * 5], np.bool_)
avg_data, avg_flags = twodflag._average_freq(self.small_data, self.small_flags,
twodflag._as_min_dtype(4))
assert_equal(np.float32, avg_data.dtype)
assert_equal(np.bool_, avg_flags.dtype)
np.testing.assert_array_equal(expected_data, avg_data)
np.testing.assert_array_equal(expected_flags, avg_flags)
def test_time_median():
"""Test for :func:`katsdpsigproc.rfi.twodflag._time_median`."""
data = np.array([
[2.0, 1.0, 2.0, 5.0],
[3.0, 1.0, 8.0, 6.0],
[4.0, 1.0, 4.0, 7.0],
[5.0, 1.0, 5.0, 6.5],
[1.5, 1.0, 1.5, 5.5]], np.float32)
flags = np.array([
[0, 1, 0, 1],
[0, 1, 1, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 0, 1]], np.bool_)
out_data, out_flags = twodflag._time_median(data, flags)
expected_data = np.array([[3.0, 0.0, 3.0, 6.0]], np.float32)
expected_flags = np.array([[0, 1, 0, 0]], np.bool_)
np.testing.assert_array_equal(expected_data, out_data)
np.testing.assert_array_equal(expected_flags, out_flags)
class TestMedianAbs:
"""Test :func:`.twodflag._median_abs` and :func:`.twodflag._median_abs_axis0`."""
def setup(self):
self.data = np.array([[-2.0, -6.0, 4.5], [1.5, 3.3, 0.5]], np.float32)
self.flags = np.array([[0, 0, 0], [0, 1, 0]], np.uint8)
def test(self):
out = twodflag._median_abs(self.data, self.flags)
assert_equal(2.0, out)
def test_all_flagged(self):
out = twodflag._median_abs(self.data, np.ones_like(self.flags))
assert np.isnan(out)
def test_axis0(self):
out = twodflag._median_abs_axis0(self.data, self.flags)
expected = np.array([[1.75, 6.0, 2.5]])
np.testing.assert_array_equal(expected, out)
def test_axis0_all_flagged(self):
self.flags[:, 1] = True
out = twodflag._median_abs_axis0(self.data, self.flags)
expected = np.array([[1.75, np.nan, 2.5]])
np.testing.assert_array_equal(expected, out)
class TestLinearlyInterpolateNans:
"""Tests for :func:`katsdpsigproc.rfi.twodflag._linearly_interpolate_nans`."""
def setup(self):
self.y = np.array([np.nan, np.nan, 4.0, np.nan, np.nan, 10.0, np.nan, -2.0, np.nan, np.nan])
self.expected = np.array([4.0, 4.0, 4.0, 6.0, 8.0, 10.0, 4.0, -2.0, -2.0, -2.0])
def test_basic(self):
twodflag._linearly_interpolate_nans1d(self.y)
np.testing.assert_allclose(self.expected, self.y)
def test_no_nans(self):
y = self.expected[:]
twodflag._linearly_interpolate_nans1d(y)
np.testing.assert_allclose(self.expected, y)
def test_all_nans(self):
self.y[:] = np.nan
self.expected[:] = 0
twodflag._linearly_interpolate_nans1d(self.y)
np.testing.assert_array_equal(self.expected, self.y)
def test_float32(self):
expected = self.expected.astype(np.float32)
y = self.y.astype(np.float32)
twodflag._linearly_interpolate_nans1d(y)
np.testing.assert_allclose(expected, y, rtol=1e-6)
def test_2d(self):
y = np.zeros((3, self.y.size))
y[0, :] = self.y
y[1, :] = self.expected
y[2, :] = np.nan
expected = np.zeros_like(y)
expected[0, :] = self.expected
expected[1, :] = self.expected
expected[2, :] = 0
twodflag._linearly_interpolate_nans(y)
np.testing.assert_allclose(expected, y)
class TestBoxGaussianFilter:
def test_one_pass(self):
"""Test that _box_gaussian_filter1d places the box correctly."""
a = np.array([50.0, 10.0, 60.0, -70.0, 30.0, 20.0, -15.0], np.float32)
b = np.empty_like(a)
twodflag._box_gaussian_filter1d(a, 2, b, 1)
np.testing.assert_equal(
np.array([24.0, 10.0, 16.0, 10.0, 5.0, -7.0, 7.0], np.float32), b)
def test_width(self):
"""Impulse response must have approximately correct standard deviation, \
and must be symmetric with sum 1."""
a = np.zeros((1, 200), np.float32)
a[:, a.size // 2] = 1.0
sigma = np.array([0.0, 10.0])
b = np.empty_like(a)
twodflag._box_gaussian_filter(a, sigma, b)
x = np.arange(a.size) - a.size // 2
total = np.sum(b)
np.testing.assert_allclose(1.0, total, rtol=1e-5)
mean = np.sum(x * b)
np.testing.assert_allclose(0.0, mean, atol=1e-5)
std = np.sqrt(np.sum(x * x * b))
# Very loose test, because box_gaussian_filter1d quantises
np.testing.assert_allclose(std, sigma[1], atol=1)
def test_bad_sigma_dim(self):
a = np.zeros((50, 50), np.float32)
with assert_raises(ValueError):
twodflag._box_gaussian_filter(a, np.array([3.0]), a)
def test_2d(self):
rs = np.random.RandomState(seed=1)
shape = (77, 53)
sigma = np.array([8, 2.3])
data = rs.uniform(size=shape).astype(np.float32)
expected = gaussian_filter(data, sigma, mode='constant')
actual = np.zeros_like(data)
twodflag._box_gaussian_filter(data, sigma, actual)
np.testing.assert_allclose(expected, actual, rtol=1e-1)
def test_axes(self):
"""Test that the axes are handled consistently."""
rs = np.random.RandomState(seed=1)
shape = (77, 53)
data = rs.uniform(size=shape).astype(np.float32)
out0 = np.zeros_like(data)
out1 = np.zeros_like(data)
twodflag._box_gaussian_filter(data, np.array([8.0, 0.0]), out0)
twodflag._box_gaussian_filter(data.T, np.array([0.0, 8.0]), out1.T)
np.testing.assert_array_equal(out0, out1)
def test_edge(self):
"""Test that values outside the boundary are handled like zeros."""
rs = np.random.RandomState(seed=1)
data = np.zeros((1, 200), np.float32)
core = data[:, 80:120]
core[:] = rs.uniform(size=core.shape)
fdata = np.ones_like(data)
fcore = np.ones_like(core)
twodflag._box_gaussian_filter(data, np.array([0.0, 3.0]), fdata)
twodflag._box_gaussian_filter(core, np.array([0.0, 3.0]), fcore)
np.testing.assert_allclose(fdata[:, 80:120], fcore, rtol=1e-5)
class TestMaskedGaussianFilter:
def setup(self):
self.rs = np.random.RandomState(seed=1)
shape = (77, 53)
self.data = self.rs.uniform(size=shape).astype(np.float32)
self.flags = self.rs.uniform(size=shape) >= 0.5
def _get_expected(self, sigma, truncate):
weight = 1.0 - self.flags
data = self.data * weight
for i, (s, t) in enumerate(zip(sigma, truncate)):
weight = gaussian_filter1d(weight, s, axis=i, mode='constant', truncate=t)
data = gaussian_filter1d(data, s, axis=i, mode='constant', truncate=t)
with np.errstate(invalid='ignore'):
data /= weight
return data
def test_basic(self):
sigma = np.array([5, 2.3])
expected = self._get_expected(sigma, (4.0, 4.0))
actual = np.ones_like(expected)
twodflag.masked_gaussian_filter(self.data, self.flags, sigma, actual)
np.testing.assert_allclose(expected, actual, rtol=1e-1)
def test_nan(self):
# Set a big block of zeros to get NaNs in the result
self.flags[:] = False
self.flags[30:70, 10:40] = True
# To match NaN positions, we need to match the footprint of the kernels
sigma =
|
np.array([3, 3.3])
|
numpy.array
|
"""
Reactor and numerical integration functions
"""
import numpy as np
from scipy.integrate import ode, solve_ivp
from estimator.utils import WeightedRMSE, para_values_to_dict
import timeit
count = 0
# # Assume we have
# # n_rxn reactions and m_spec species
#
# # %% ODE functions for numerical integration
#
# # Older version
# def dcdt_1d(t, concentrations, stoichiometry, rate_expression, para_dict, temperature):
# """
# Compute the derivatives
# """
# cur_rate = rate_expression(concentrations, para_dict, temperature)
# n_spec = len(stoichiometry)
#
# # dC/dt for each species
# cur_dcdt = np.zeros(n_spec)
# for i in range(n_spec):
# cur_dcdt[i] = stoichiometry[i] * cur_rate
#
# return cur_dcdt
#
#
# def ode_solver(func, y0, t0, tf, *func_inputs):
# """
# Set up the ode solver
# Older ode wrapper in scipy
# """
# # Construct the ode solver, ode45 with varying step size
# ans = []
#
# def get_ans(t, y):
# ans.append([t, *y])
#
# solver = ode(func).set_integrator('dopri5', rtol=1e-6, method='bdf')
# solver.set_solout(get_ans)
# # feed in arguments and initial conditions for odes
# solver.set_initial_value(y0, t0).set_f_params(*func_inputs)
# solver.integrate(tf)
# ans = np.array(ans)
#
# return ans
def dcdt(t, concentrations, stoichiometry, rate_expressions, para_dict, names=None, temperature=None, *rate_inputs):
"""
Compute the derivatives for multiple parallel reactions
"""
# stoichiometry matrix is in the shape of n_rxn * m_spec
if not isinstance(stoichiometry[0], list):
stoichiometry = [stoichiometry]
n_rxn = 1
else:
n_rxn = len(stoichiometry)
# expand expressions to a list
if not isinstance(rate_expressions, list):
rate_expressions = n_rxn * [rate_expressions]
if not isinstance(names, list):
names = n_rxn * [names]
if (n_rxn != len(rate_expressions)) or n_rxn != len(names):
raise ValueError("Input stoichiometry matrix must equal to the number of input rate expressions or names")
# rates are in the shape of 1 * n_rxn
cur_rate = np.zeros(n_rxn)
for i in range(n_rxn):
cur_rate[i] = rate_expressions[i](concentrations, para_dict, stoichiometry[i], names[i], temperature,
*rate_inputs)
# dC/dt for each species
# dcdt is in the shape of 1 * m_spec
# use matrix multiplication
cur_dcdt = np.matmul(cur_rate, np.array(stoichiometry))
return cur_dcdt
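# Editor's illustrative sketch (not part of the original module): the matmul in
# dcdt maps a 1 x n_rxn rate vector onto per-species derivatives through the
# n_rxn x m_spec stoichiometry matrix. The helper below uses made-up numbers
# for a hypothetical A -> B -> C system, purely as an example.
def _dcdt_matmul_sketch():
    rates = np.array([2.0, 0.5])                 # hypothetical rates for 2 reactions
    stoich = np.array([[-1.0, 1.0, 0.0],         # A -> B
                       [0.0, -1.0, 1.0]])        # B -> C
    return np.matmul(rates, stoich)              # array([-2. ,  1.5,  0.5])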
def ode_solver_ivp(func, y0, t0, tf, t_eval, method, *func_inputs):
"""
Set up the ode solver
Use solve_ivp
"""
global count
count += 1
sol = solve_ivp(func, t_span=[t0, tf], y0=y0, method=method, t_eval=t_eval, args=(*func_inputs,))
# Extract t and C from sol
t_vec = sol.t
C_vec = sol.y
# ans is a matrix for tC_profile
# 0th column is the time and ith column is the concentration of species i
n_species, n_points = sol.y.shape
ans = np.zeros((n_points, n_species + 1))
ans[:, 0] = t_vec
ans[:, 1:] = C_vec.T
return ans
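# Editor's illustrative sketch (hypothetical values, not original code): calling
# the ode_solver_ivp wrapper above for a single first-order decay dC/dt = -k*C.
# Column 0 of the returned array is time, column 1 is the concentration.
def _ode_solver_ivp_sketch():
    def decay(t, c, k):
        return -k * np.asarray(c)
    t_eval = np.linspace(0.0, 10.0, 11)
    return ode_solver_ivp(decay, [1.0], 0.0, 10.0, t_eval, 'LSODA', 0.3)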
class Reactor():
"""Reaction ODEs class"""
def __init__(self, stoichiometry, tf, P0=None, feed_composition=None, C0=None, names=None, temperature=None):
"""Initialize the constants"""
self.stoichiometry = stoichiometry
self.names = names # names of the reactions
self.P0 = P0
self.feed_composition = feed_composition
self.t0 = 0
self.tf = tf
self.temperature = temperature
if (P0 is not None) and (feed_composition is not None):
self.C0 = P0 *
|
np.array(feed_composition)
|
numpy.array
|
import pyCGM_Single.pyCGM as pyCGM
import pytest
import numpy as np
rounding_precision = 8
class TestUtils():
"""
This class tests the utils functions in pyCGM.py:
findwandmarker
cross
norm2d
norm3d
normDiv
matrixmult
rotmat
"""
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["frame", "thorax", "expected"], [
({'RSHO': [428.88476562, 270.552948, 1500.73010254], 'LSHO': [68.24668121, 269.01049805, 1510.1072998]}, [[[256.23991128535846, 365.30496976939753, 1459.662169500559], rand_coor, rand_coor], [256.149810236564, 364.3090603933987, 1459.6553639290375]], [[255.92550222678443, 364.3226950497605, 1460.6297868417887], [256.42380097331767, 364.27770361353487, 1460.6165849382387]]),
({'RSHO': [0, 0, 1], 'LSHO': [0, 1, 0]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 1, 0], [0, 0, 1]]),
({'RSHO': [0, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.70710678, -0.70710678], [0, -0.70710678, 0.70710678]]),
({'RSHO': [0, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 0], rand_coor, rand_coor], [-1, 0, 0]], [[-1, 0.70710678, -0.70710678], [-1, -0.70710678, 0.70710678]]),
({'RSHO': [1, 2, 1], 'LSHO': [2, 1, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.4472136, -0.89442719], [0, -0.89442719, 0.4472136]]),
({'RSHO': [1, 2, 1], 'LSHO': [2, 2, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.4472136, -0.89442719], [0, -0.70710678, 0.70710678]]),
({'RSHO': [1, 2, 2], 'LSHO': [2, 1, 2]}, [[[1, 0, 0], rand_coor, rand_coor], [0, 0, 0]], [[0, 0.70710678, -0.70710678], [0, -0.89442719, 0.4472136]]),
({'RSHO': [1, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 1], rand_coor, rand_coor], [0, 0, 0]], [[0.70710678, 0, -0.70710678], [-0.70710678, 0, 0.70710678]]),
({'RSHO': [1, 1, 1], 'LSHO': [1, 1, 1]}, [[[1, 0, 1], rand_coor, rand_coor], [0, 0, 1]], [[0, 0, 0], [0, 0, 2]]),
({'RSHO': [0, 1, 0], 'LSHO': [0, 0, -1]}, [[[0, 3, 4], rand_coor, rand_coor], [0, 0, 0]], [[1, 0, 0], [-1, 0, 0]]),
({'RSHO': [1, 0, 0], 'LSHO': [0, 1, 0]}, [[[7, 0, 24], rand_coor, rand_coor], [0, 0, 0]], [[0, -1, 0], [-0.96, 0, 0.28]]),
({'RSHO': [1, 0, 0], 'LSHO': [0, 0, 1]}, [[[8, 0, 6], rand_coor, rand_coor], [8, 0, 0]], [[8, 1, 0], [8, -1, 0]])])
def test_findwandmarker(self, frame, thorax, expected):
"""
This test provides coverage of the findwandmarker function in pyCGM.py, defined as findwandmarker(frame,thorax)
where frame is a dictionary of x, y, z positions and marker names and thorax is the thorax axis and origin.
The function takes in the xyz position of the Right Shoulder and Left Shoulder markers, as well as the thorax
frame, which is a list of [ xyz axis vectors, origin ]. The wand marker position is returned as a 2x3 array
containing the right wand marker x, y, z positions (1x3) followed by the left wand marker x, y, z positions
(1x3). The thorax axis is provided in global coordinates, which are subtracted inside the function to define
the unit vectors.
For the Right and Left wand markers, the function performs the same calculation, with the difference being the
corresponding side's marker. Each wand marker is defined as the cross product between the unit vector of the
x axis of the thorax frame, and the unit vector from the thorax frame origin to the Shoulder marker.
Given a marker SHO, representing the right (RSHO) or left (LSHO) shoulder markers and a thorax axis TH, the
wand marker W is defined as:
.. math::
W_R = (RSHO-TH_o) \times TH_x
W_L = TH_x \times (LSHO-TH_o)
where :math:`TH_o` is the origin of the thorax axis, :math:`TH_x` is the x unit vector of the thorax axis.
From this calculation, it should be clear that changing the thorax y and z vectors should not have an impact
on the results.
This unit test ensures that:
- The right and left markers do not impact the wand marker calculations for one another
- The function requires global positions
- The thorax y and z axis do not change the results
"""
result = pyCGM.findwandmarker(frame, thorax)
np.testing.assert_almost_equal(result, expected, rounding_precision)
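# Editor's sketch (hypothetical helper, not pyCGM code): a direct numpy
# transcription of the docstring formulas W_R = (RSHO - TH_o) x TH_x and
# W_L = TH_x x (LSHO - TH_o), where thorax_x is the unit x axis of the thorax
# frame expressed relative to the thorax origin.
def _wand_formula_sketch(rsho, lsho, thorax_origin, thorax_x):
    r_wand = np.cross(np.subtract(rsho, thorax_origin), thorax_x)
    l_wand = np.cross(thorax_x, np.subtract(lsho, thorax_origin))
    return r_wand, l_wand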
def test_findwandmarker_datatypes(self):
"""
This test provides coverage of the findwandmarker function in pyCGM.py, defined as findwandmarker(frame,thorax)
where frame is a dictionary of x, y, z positions and marker names and thorax is the thorax axis.
This test checks that the resulting output from calling cross is correct when called with ints or floats.
"""
frame_int = {'RSHO': [1, 0, 0], 'LSHO': [0, 0, 1]}
frame_float = {'RSHO': [1.0, 0.0, 0.0], 'LSHO': [0.0, 0.0, 1.0]}
thorax_int = [[[8, 0, 6], [0, 0, 0], [0, 0, 0]], [8, 0, 0]]
thorax_float = [[[8.0, 0.0, 6.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [8.0, 0.0, 0.0]]
expected = [[8, 1, 0], [8, -1, 0]]
# Check that calling findwandmarker yields the expected results when frame and thorax consist of ints
result_int_list = pyCGM.findwandmarker(frame_int, thorax_int)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling findwandmarker yields the expected results when frame and thorax consist of floats
result_float_list = pyCGM.findwandmarker(frame_float, thorax_float)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
@pytest.mark.parametrize(["a", "b", "expected"], [
([0.13232936, 0.98562946, -0.10499292], [-0.99119134, 0.13101088, -0.01938735], [-0.005353527183234709, 0.10663358915485248, 0.994283972218527]),
([0, 0, 0], [0, 0, 0], [0, 0, 0]),
([1, 1, 1], [1, 1, 1], [0, 0, 0]),
([0, 0, -2], [0, 4, 0], [8, 0, 0]),
([0, 0, 4], [-0.5, 0, 0], [0, -2, 0]),
([-1.5, 0, 0], [0, 4, 0], [0, 0, -6]),
([1, 0, 1], [0, 1, 0], [-1, 0, 1]),
([1, 2, 3], [3, 2, 1], [-4, 8, -4]),
([-2, 3, 1], [4, -1, 5], [16, 14, -10])
])
def test_cross(self, a, b, expected):
"""
This test provides coverage of the cross function in pyCGM.py, defined as cross(a, b) where a and b are both 3D vectors.
This test takes 3 parameters:
a: 3D vector
b: 3D vector
expected: the expected result from calling cross on a and b. This result is the cross product of the vectors
a and b.
"""
result = pyCGM.cross(a, b)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_cross_datatypes(self):
"""
This test provides coverage of the cross function in pyCGM.py, defined as cross(a, b) where a and b are both 3D vectors.
This test checks that the resulting output from calling cross is correct when called with a list of ints, a numpy
array of ints, a list of floats, and a numpy array of floats.
"""
A_int = [-2, 3, 1]
A_float = [-2.0, 3.0, 1.0]
B_int = [4, -1, 5]
B_float = [4.0, -1.0, 5.0]
expected = [16, 14, -10]
# Check that calling cross on a list of ints yields the expected results
result_int_list = pyCGM.cross(A_int, B_int)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling cross on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.cross(np.array(A_int, dtype='int'), np.array(B_int, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling cross on a list of floats yields the expected results
result_float_list = pyCGM.cross(A_float, B_float)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling cross on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.cross(np.array(A_float, dtype='float'), np.array(B_float, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["v", "expected"], [
([-9944.089508486479, -20189.20612828088, 150.42955108569652], 22505.812344655435),
([0, 0, 0], 0),
([2, 0, 0], 2),
([0, 0, -1], 1),
([0, 3, 4], 5),
([-3, 0, 4], 5),
([6, -8, 0], 10),
([-5, 0, -12], 13),
([1, -1, np.sqrt(2)], 2)])
def test_norm2d(self, v, expected):
"""
This test provides coverage of the norm2d function in pyCGM.py, defined as norm2d(v) where v is a 3D vector.
This test takes 2 parameters:
v: 3D vector
expected: the expected result from calling norm2d on v. This will be the value of the normalization of vector v,
returned as a float.
Given the vector v, the normalization is defined by:
normalization = :math:`\sqrt{v_x^2 + v_y^2 + v_z^2}`
where :math:`v_x` is the x-coordinate of the vector v
"""
result = pyCGM.norm2d(v)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_norm2d_datatypes(self):
"""
This test provides coverage of the norm2d function in pyCGM.py, defined as norm2d(v) where v is a 3D vector.
This test checks that the resulting output from calling norm2d is correct when called with a list of ints, a
numpy array of ints, a list of floats, and a numpy array of floats.
"""
v_int = [6, 0, -8]
v_float = [6.0, 0, -8.0]
expected = 10
# Check that calling norm2d on a list of ints yields the expected results
result_int_list = pyCGM.norm2d(v_int)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling norm2d on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.norm2d(np.array(v_int, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling norm2d on a list of floats yields the expected results
result_float_list = pyCGM.norm2d(v_float)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling norm2d on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.norm2d(np.array(v_float, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["v", "expected"], [
([-212.5847168, 28.09841919, -4.15808105], np.array(214.47394390603984)),
([0, 0, 0], np.array(0)),
([2, 0, 0], np.array(2)),
([0, 0, -1], np.array(1)),
([0, 3, 4], np.array(5)),
([-3, 0, 4], np.array(5)),
([-6, 8, 0], np.array(10)),
([-5, 0, -12], np.array(13)),
([1, -1, np.sqrt(2)], np.array(2))])
def test_norm3d(self, v, expected):
"""
This test provides coverage of the norm3d function in pyCGM.py, defined as norm3d(v) where v is a 3D vector.
This test takes 2 parameters:
v: 3D vector
expected: the expected result from calling norm3d on v. This will be the normalization of the vector v,
inside of a numpy array.
Given the vector v, the normalization is defined by:
normalization = :math:`\sqrt{v_x^2 + v_y^2 + v_z^2}`
where :math:`v_x` is the x-coordinate of the vector v
"""
result = pyCGM.norm3d(v)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_norm3d_datatypes(self):
"""
This test provides coverage of the norm3d function in pyCGM.py, defined as norm3d(v) where v is a 3D vector.
This test checks that the resulting output from calling norm3d is correct when called with a list of ints, a
numpy array of ints, a list of floats, and a numpy array of floats.
"""
v_int = [-6, 0, 8]
v_float = [-6.0, 0, 8.0]
expected = np.array(10)
# Check that calling norm3d on a list of ints yields the expected results
result_int_list = pyCGM.norm3d(v_int)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling norm3d on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.norm3d(np.array(v_int, dtype='int'))
|
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
|
numpy.testing.assert_almost_equal
|
import numpy
import pandas
import pytest
from scipy.sparse.csgraph import connected_components
from partitions import Graph, Partition
from partitions.tree import (
ReCom,
bipartition_tree,
contract_leaves_until_balanced_or_None,
map_with_boolean_array,
random_cut_edge,
random_spanning_tree,
recursive_partition,
)
class TestRandomSpanningTree:
def test_on_four_cycle(self, four_cycle):
tree = random_spanning_tree(four_cycle)
assert len(tree.nodes) == 4
assert len(tree.edges) == 3
def test_on_nonregular(self, nonregular):
tree = random_spanning_tree(nonregular)
assert len(tree.nodes) == 6
assert len(tree.edges) == 5
# This edge has to be in it, because 0 is a leaf
assert (0, 1) in tree.edges
assert (1, 0) in tree.edges
# One of these must be in it
assert (1, 3) in tree.edges or (3, 5) in tree.edges
# One of these must be in it
assert any(edge in tree.edges for edge in [(2, 4), (2, 5), (2, 1)])
for node in nonregular:
assert any(
(node, neighbor) in tree.edges
for neighbor in nonregular.neighbors[node]
)
class TestContractEdgesUntilBalanced:
def test_on_10x10(self, grid10x10):
graph = grid10x10
population = numpy.ones_like(graph.nodes)
bounds = (10, 90)
assignment = contract_leaves_until_balanced_or_None(graph, population, bounds)
assert len(assignment) == len(graph.nodes)
assert len(numpy.unique(assignment)) == 2
subgraph = graph.subgraph(graph.nodes[assignment])
assert connected_components(subgraph.neighbors.matrix, return_labels=False) == 1
def test_on_small(self):
graph = Graph.from_edges([(0, 1), (1, 2)])
population = numpy.ones_like(graph.nodes)
bounds = (0, 3)
assignment = contract_leaves_until_balanced_or_None(
graph, population, bounds, choice=lambda x: 1
)
assert assignment[0] == assignment[1] or assignment[1] == assignment[2]
assert len(numpy.unique(assignment)) == 2
def test_on_medium(self):
graph = Graph.from_edges(
[(0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (5, 7), (5, 8)]
)
population = numpy.ones_like(graph.nodes)
bounds = (2, 7)
assignment = contract_leaves_until_balanced_or_None(graph, population, bounds)
assert len(numpy.unique(assignment)) == 2
subgraph = graph.subgraph(graph.nodes[assignment])
assert connected_components(subgraph.neighbors.matrix, return_labels=False) == 1
assert (2 <= population[assignment].sum()) and (
population[assignment].sum() <= 7
)
def test_impossible(self):
graph = Graph.from_edges([(0, 1), (1, 2), (2, 3)])
population = numpy.array([1, 5, 8, 5])
bounds = (3, 5)
assignment = contract_leaves_until_balanced_or_None(graph, population, bounds)
assert assignment is None
class TestBipartitionTree:
def test_on_10x10(self, grid10x10):
graph = grid10x10
population = numpy.ones_like(graph.nodes)
bounds = (30, 70)
assignment = bipartition_tree(graph, population, bounds)
partition = Partition.from_assignment(graph, assignment)
assert len(partition) == 2
assert set(node for part in partition for node in part.image) == set(
graph.nodes
)
for part in partition:
assert 30 <= len(part.nodes) and len(part.nodes) <= 70
for part in partition:
assert connected_components(part.neighbors.matrix, return_labels=False) == 1
class TestRecursivePartition:
def test_on_10x10(self, grid10x10):
graph = grid10x10
population = numpy.ones_like(graph.nodes)
ideal_pop = population.sum() / 5
bounds = (ideal_pop * 0.8, ideal_pop * 1.2)
assignment = recursive_partition(graph, 5, population, bounds)
partition = Partition.from_assignment(graph, assignment)
assert len(partition) == 5
assert set(node for part in partition for node in part.image) == set(
graph.nodes
)
# The 0th part made up of the left-over nodes often has too much population,
# so we are only sure that parts >= 1 have the right population.
for part in list(partition)[1:]:
assert bounds[0] < len(part.nodes)
assert len(part.nodes) < bounds[1]
for part in partition:
assert connected_components(part.neighbors.matrix, return_labels=False) == 1
def test_random_cut_edge(partition):
assert len(partition) == 2
i, j = random_cut_edge(partition)
assert (i in partition[0].image and j in partition[1].image) or (
i in partition[1].image and j in partition[0].image
)
class TestReCom:
def test_gives_a_partition(self, k8):
k8.data = pandas.DataFrame({"pop": [1] * 8})
# Allow 1-3 nodes per part
bounds = (0.9, 3.1)
partition = Partition.from_assignment(
k8, dict(enumerate([0, 0, 1, 1, 2, 2, 3, 3]))
)
recom = ReCom("pop", bounds)
assert len(partition) == 4
new_partition = recom(partition)
assert len(new_partition) == 4
assert all(len(part) in {1, 2, 3} for part in new_partition)
nodes = list(node for part in new_partition for node in part.image)
assert len(nodes) == 8
assert set(nodes) == set(k8.nodes)
def test_validates_bounds(self):
with pytest.raises(TypeError):
ReCom("pop", (4,))
with pytest.raises(TypeError):
ReCom("pop", 4)
with pytest.raises(TypeError):
ReCom("pop", (4, 5, 6))
with pytest.raises(TypeError):
ReCom("pop", ("a", "b"))
with pytest.raises(TypeError):
ReCom("pop", (100, 1))
def test_validates_pop_col(self):
with pytest.raises(TypeError):
ReCom(4, (10, 100))
with pytest.raises(TypeError):
ReCom(None, (10, 100))
def test_validates_method(self):
with pytest.raises(TypeError):
ReCom("pop", (10, 100), 1000)
with pytest.raises(TypeError):
ReCom("pop", (10, 100), False)
def test_map_with_boolean_array():
array =
|
numpy.arange(10)
|
numpy.arange
|
# --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by <NAME> (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
from __future__ import division
from torch.utils.data import Dataset
import numpy as np
import json
import random
import logging
from os.path import join
from utils.bbox_helper import *
from utils.anchors import Anchors
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import math
import sys
pyv = sys.version[0]
import cv2
if pyv == '3':
cv2.ocl.setUseOpenCL(False)
logger = logging.getLogger('global')
sample_random = random.Random()
sample_random.seed(123456)
class SubDataSet(object):
def __init__(self, cfg):
for string in ['root', 'anno']:
if string not in cfg:
raise Exception('SubDataSet needs "{}"'.format(string))
with open(cfg['anno']) as fin:
logger.info("loading " + cfg['anno'])
self.labels = self.filter_zero(json.load(fin), cfg)
def isint(x):
try:
int(x)
return True
except:
return False
# add frames args into labels
to_del = []
for video in self.labels:
for track in self.labels[video]:
frames = self.labels[video][track]
frames = list(map(int, filter(lambda x: isint(x), frames.keys())))
frames.sort()
self.labels[video][track]['frames'] = frames
if len(frames) <= 0:
logger.info("warning {}/{} has no frames.".format(video, track))
to_del.append((video, track))
# delete tracks with no frames
for video, track in to_del:
del self.labels[video][track]
# delete videos with no valid track
to_del = []
for video in self.labels:
if len(self.labels[video]) <= 0:
logger.info("warning {} has no tracks".format(video))
to_del.append(video)
for video in to_del:
del self.labels[video]
self.videos = list(self.labels.keys())
logger.info(cfg['anno'] + " loaded.")
# default args
self.root = "/"
self.start = 0
self.num = len(self.labels)
self.num_use = self.num
self.frame_range = 100
self.mark = "vid"
self.path_format = "{}.{}.{}.jpg"
self.pick = []
# input args
self.__dict__.update(cfg)
self.num_use = int(self.num_use)
# shuffle
self.shuffle()
def filter_zero(self, anno, cfg):
name = cfg.get('mark', '')
out = {}
tot = 0
new = 0
zero = 0
for video, tracks in anno.items():
new_tracks = {}
for trk, frames in tracks.items():
new_frames = {}
for frm, bbox in frames.items():
tot += 1
if 'kp' in frm:
new_frames[frm] = bbox
else:
if len(bbox) == 4:
x1, y1, x2, y2 = bbox
w, h = x2 - x1, y2 - y1
else:
w, h = bbox
if w == 0 or h == 0:
logger.info('Error, {name} {video} {trk} {bbox}'.format(**locals()))
zero += 1
continue
new += 1
new_frames[frm] = bbox
if len(new_frames) > 0:
new_tracks[trk] = new_frames
if len(new_tracks) > 0:
out[video] = new_tracks
return out
def log(self):
logger.info('SubDataSet {name} start-index {start} select [{select}/{num}] path {format}'.format(
name=self.mark, start=self.start, select=self.num_use, num=self.num, format=self.path_format
))
def shuffle(self):
lists = list(range(self.start, self.start + self.num))
m = 0
pick = []
while m < self.num_use:
sample_random.shuffle(lists)
pick += lists
m += self.num
self.pick = pick[:self.num_use]
return self.pick
def get_image_anno(self, video, track, frame):
frame = "{:06d}".format(frame)
image_path = join(self.root, video, self.path_format.format(frame, track, 'x'))
image_anno = self.labels[video][track][frame]
image_kp = self.labels[video][track]['kp_'+frame]
return image_path, image_anno, image_kp
def get_positive_pair(self, index):
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
if 'hard' not in track_info:
template_frame = random.randint(0, len(frames)-1)
left = max(template_frame - self.frame_range, 0)
right = min(template_frame + self.frame_range, len(frames)-1) + 1
search_range = frames[left:right]
template_frame = frames[template_frame]
search_frame = random.choice(search_range)
else:
search_frame = random.choice(track_info['hard'])
left = max(search_frame - self.frame_range, 0)
right = min(search_frame + self.frame_range, len(frames)-1) + 1 # python [left:right+1) = [left:right]
template_range = frames[left:right]
template_frame = random.choice(template_range)
search_frame = frames[search_frame]
return self.get_image_anno(video_name, track, template_frame), \
self.get_image_anno(video_name, track, search_frame)
def get_random_target(self, index=-1):
if index == -1:
index = random.randint(0, self.num-1)
video_name = self.videos[index]
video = self.labels[video_name]
track = random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
frame = random.choice(frames)
return self.get_image_anno(video_name, track, frame)
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
bbox = [float(x) for x in bbox]
a = (out_sz-1) / (bbox[2]-bbox[0])
b = (out_sz-1) / (bbox[3]-bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float64)
crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
return crop
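# Editor's note (illustrative, made-up numbers): the affine mapping built in
# crop_hwc sends bbox[0] -> 0 and bbox[2] -> out_sz - 1 along x (and likewise
# for y). For example, with bbox = [10, 20, 110, 220] and out_sz = 255:
#   a = 254 / 100 = 2.54, c = -a * 10 = -25.4,
#   so x = 10 maps to 0 and x = 110 maps to 254 before warpAffine resamples.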
class Augmentation:
def __init__(self, cfg):
# default args
self.shift = 0
self.scale = 0
self.blur = 0 #False
self.resize = False
self.rgbVar = np.array([[-0.55919361, 0.98062831, - 0.41940627],
[1.72091413, 0.19879334, - 1.82968581],
[4.64467907, 4.73710203, 4.88324118]], dtype=np.float32)
self.flip = 0
self.eig_vec = np.array([
[0.4009, 0.7192, -0.5675],
[-0.8140, -0.0045, -0.5808],
[0.4203, -0.6948, -0.5836],
], dtype=np.float32)
self.eig_val = np.array([[0.2175, 0.0188, 0.0045]], np.float32)
self.__dict__.update(cfg)
@staticmethod
def random():
return random.random() * 2 - 1.0
def blur_image(self, image):
def rand_kernel():
size = np.random.randn(1)
size = int(np.round(size)) * 2 + 1
if size < 0: return None
if random.random() < 0.5: return None
size = min(size, 45)
kernel = np.zeros((size, size))
c = int(size/2)
wx = random.random()
kernel[:, c] += 1. / size * wx
kernel[c, :] += 1. / size * (1-wx)
return kernel
kernel = rand_kernel()
if kernel is not None:
image = cv2.filter2D(image, -1, kernel)
return image
def __call__(self, image, bbox, size, gray=False):
if gray:
grayed = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = np.zeros((grayed.shape[0], grayed.shape[1], 3), np.uint8)
image[:, :, 0] = image[:, :, 1] = image[:, :, 2] = grayed
shape = image.shape
crop_bbox = center2corner((shape[0]//2, shape[1]//2, size-1, size-1))
param = {}
if self.shift:
param['shift'] = (Augmentation.random() * self.shift, Augmentation.random() * self.shift)
if self.scale:
param['scale'] = ((1.0 + Augmentation.random() * self.scale), (1.0 + Augmentation.random() * self.scale))
crop_bbox, _ = aug_apply(Corner(*crop_bbox), param, shape)
x1 = crop_bbox.x1
y1 = crop_bbox.y1
bbox = BBox(bbox.x1 - x1, bbox.y1 - y1,
bbox.x2 - x1, bbox.y2 - y1)
if self.scale:
scale_x, scale_y = param['scale']
bbox = Corner(bbox.x1 / scale_x, bbox.y1 / scale_y, bbox.x2 / scale_x, bbox.y2 / scale_y)
image = crop_hwc(image, crop_bbox, size)
offset = np.dot(self.rgbVar, np.random.randn(3, 1))
offset = offset[::-1] # bgr 2 rgb
offset = offset.reshape(3)
image = image - offset
if self.blur > random.random():
image = self.blur_image(image)
if self.resize:
imageSize = image.shape[:2]
ratio = max(math.pow(random.random(), 0.5), 0.2) # 25 ~ 255
rand_size = (int(round(ratio*imageSize[0])), int(round(ratio*imageSize[1])))
image = cv2.resize(image, rand_size)
image = cv2.resize(image, tuple(imageSize))
if self.flip and self.flip > Augmentation.random():
image = cv2.flip(image, 1)
width = image.shape[1]
bbox = Corner(width - 1 - bbox.x2, bbox.y1, width - 1 - bbox.x1, bbox.y2)
return image, bbox
class AnchorTargetLayer:
def __init__(self, cfg):
self.thr_high = 0.6
self.thr_low = 0.3
self.negative = 16
self.rpn_batch = 64
self.positive = 16
self.__dict__.update(cfg)
def __call__(self, anchor, target, size, neg=False, need_iou=False):
anchor_num = anchor.anchors.shape[0]
cls = np.zeros((anchor_num, size, size), dtype=np.int64)
cls[...] = -1 # -1 ignore 0 negative 1 positive
delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)
delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)
def select(position, keep_num=16):
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
if neg:
l = size // 2 - 3
r = size // 2 + 3 + 1
cls[:, l:r, l:r] = 0
neg, neg_num = select(np.where(cls == 0), self.negative)
cls[:] = -1
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
overlap = np.zeros((anchor_num, size, size), dtype=np.float32)
return cls, delta, delta_weight, overlap
tcx, tcy, tw, th = corner2center(target)
# print('tcx shape: ', tcx)
anchor_box = anchor.all_anchors[0]
anchor_center = anchor.all_anchors[1]
# print('anchor_center: ', anchor_center.shape)
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
# delta
delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / w)
delta[3] = np.log(th / h)
# IoU
overlap = IoU([x1, y1, x2, y2], target)
pos = np.where(overlap > self.thr_high)
neg = np.where(overlap < self.thr_low)
pos, pos_num = select(pos, self.positive)
neg, neg_num = select(neg, self.rpn_batch - pos_num)
# print('pos: ', pos)
cls[pos] = 1
delta_weight[pos] = 1. / (pos_num + 1e-6)
cls[neg] = 0
if not need_iou:
return cls, delta, delta_weight
else:
return cls, delta, delta_weight, overlap
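# Editor's sketch (hypothetical numbers, not part of the original layer): the
# delta encoding used above, i.e. centre offsets normalised by the anchor size
# and log-scaled width/height ratios for one anchor/target pair.
def _delta_encoding_sketch():
    cx, cy, w, h = 64.0, 64.0, 32.0, 32.0        # hypothetical anchor centre and size
    tcx, tcy, tw, th = 70.0, 60.0, 40.0, 24.0    # hypothetical target box (centre form)
    return np.array([(tcx - cx) / w, (tcy - cy) / h, np.log(tw / w), np.log(th / h)])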
class AnchorTargetWithKPLayer:
def __init__(self, cfg):
self.thr_high = 0.6
self.thr_low = 0.3
self.negative = 16
self.rpn_batch = 64
self.positive = 16
self.__dict__.update(cfg)
def __call__(self, anchor, target, kp, size, neg=False, need_iou=False):
anchor_num = anchor.anchors.shape[0]
# kp shape [17, 3]
cls = np.zeros((anchor_num, size, size), dtype=np.int64)
cls[...] = -1 # -1 ignore 0 negative 1 positive
delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)
delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)
kp_delta = np.zeros((3, 17, size, size), dtype=np.float32)
def select(position, keep_num=16):
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
if neg:
l = size // 2 - 3
r = size // 2 + 3 + 1
cls[:, l:r, l:r] = 0
neg, neg_num = select(np.where(cls == 0), self.negative)
cls[:] = -1
cls[neg] = 0
if not need_iou:
return cls, delta, kp_delta, delta_weight
else:
overlap = np.zeros((anchor_num, size, size), dtype=np.float32)
return cls, delta, kp_delta, delta_weight, overlap
tcx, tcy, tw, th = corner2center(target)
anchor_box = anchor.all_anchors[0]
anchor_center = anchor.all_anchors[1]
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], anchor_center[2], anchor_center[3]
# cx shape: [anchor_num, size, size] -> [size, size]
# kp shape: [17, 3]
# kp delta shape: [17*2, size, size]
# kp_delta_x target shape: [17, size, size]
# kp_x [17, 1, 1] and cx_kp [1, size, size]
cx_kp = np.expand_dims(cx[0, :, :], 0)
cy_kp = np.expand_dims(cy[0, :, :], 0)
kp_x = np.expand_dims(np.expand_dims(kp[:, 0], -1), -1)
kp_y = np.expand_dims(np.expand_dims(kp[:, 1], -1), -1)
kp_vis = np.expand_dims(np.expand_dims(kp[:, 2], -1), -1)
kp_vis = np.repeat(kp_vis, size, axis=1)
kp_vis = np.repeat(kp_vis, size, axis=2)
kp_delta_x = (kp_x - cx_kp) # / w[0, ...] # (17, size, size)
kp_delta_y = (kp_y - cy_kp) # / h[0, ...]
kp_delta = np.stack([kp_delta_x, kp_delta_y, kp_vis], axis=0) # (3, 17, size, size)
kp_delta = kp_delta.astype(np.float32)
# delta
delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / w)
delta[3] = np.log(th / h)
# IoU
overlap = IoU([x1, y1, x2, y2], target)
pos = np.where(overlap > self.thr_high)
neg = np.where(overlap < self.thr_low)
pos, pos_num = select(pos, self.positive)
neg, neg_num = select(neg, self.rpn_batch - pos_num)
cls[pos] = 1
delta_weight[pos] = 1. / (pos_num + 1e-6)
cls[neg] = 0
if not need_iou:
return cls, delta, kp_delta, delta_weight
else:
return cls, delta, kp_delta, delta_weight, overlap
class DataSets(Dataset):
def __init__(self, cfg, anchor_cfg, num_epoch=1):
super(DataSets, self).__init__()
global logger
logger = logging.getLogger('global')
# anchors
self.anchors = Anchors(anchor_cfg)
# size
self.template_size = 127
self.origin_size = 127
self.search_size = 255
self.heatmap_size = (255, 255)
self.image_size = 255
self.size = 17
self.sigma = 4
self.base_size = 0
self.crop_size = 0
self.target_type = 'gaussian'
self.single_heatmap = False
self.output_res = 68
self.num_joints = 17 # added
self.mse_loss = False
self.hm_gauss = 5
if 'template_size' in cfg:
self.template_size = cfg['template_size']
if 'origin_size' in cfg:
self.origin_size = cfg['origin_size']
if 'search_size' in cfg:
self.search_size = cfg['search_size']
if 'base_size' in cfg:
self.base_size = cfg['base_size']
if 'size' in cfg:
self.size = cfg['size']
if 'single_heatmap' in cfg:
self.single_heatmap = cfg['single_heatmap']
if (self.search_size - self.template_size) / self.anchors.stride + 1 + self.base_size != self.size:
raise Exception("size not match!") # TODO: calculate size online
if 'crop_size' in cfg:
self.crop_size = cfg['crop_size']
self.template_small = False
if 'template_small' in cfg and cfg['template_small']:
self.template_small = True
self.anchors.generate_all_anchors(im_c=self.search_size//2, size=self.size)
if 'anchor_target' not in cfg:
cfg['anchor_target'] = {}
if 'kp_anchor' not in anchor_cfg:
self.anchor_target = AnchorTargetLayer(cfg['anchor_target'])
self.kp_anchor = False
else:
self.anchor_target = AnchorTargetWithKPLayer(cfg['anchor_target'])
self.kp_anchor = True
# data sets
if 'datasets' not in cfg:
raise Exception('DataSet needs "{}"'.format('datasets'))
self.all_data = []
start = 0
self.num = 0
for name in cfg['datasets']:
dataset = cfg['datasets'][name]
dataset['mark'] = name
dataset['start'] = start
dataset = SubDataSet(dataset)
dataset.log()
self.all_data.append(dataset)
start += dataset.num # real video number
self.num += dataset.num_use # the number used for subset shuffle
# data augmentation
aug_cfg = cfg['augmentation']
self.template_aug = Augmentation(aug_cfg['template'])
self.search_aug = Augmentation(aug_cfg['search'])
self.gray = aug_cfg['gray']
self.neg = aug_cfg['neg']
self.inner_neg = 0 if 'inner_neg' not in aug_cfg else aug_cfg['inner_neg']
self.pick = None # list to save id for each img
if 'num' in cfg: # number used in training for all dataset
self.num = int(cfg['num'])
self.num *= num_epoch
self.shuffle()
self.infos = {
'template': self.template_size,
'search': self.search_size,
'template_small': self.template_small,
'gray': self.gray,
'neg': self.neg,
'inner_neg': self.inner_neg,
'crop_size': self.crop_size,
'anchor_target': self.anchor_target.__dict__,
'num': self.num // num_epoch
}
logger.info('dataset informations: \n{}'.format(json.dumps(self.infos, indent=4)))
def generate_target(self, joints, joints_vis):
'''
:param joints: [num_joints, 3]
:param joints_vis: [num_joints, 3]
:return: target, target_weight(1: visible, 0: invisible)
'''
target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_vis[:, 0]
assert self.target_type == 'gaussian', \
'Only gaussian target maps are supported!'
if self.target_type == 'gaussian':
target = np.zeros((self.num_joints,
self.heatmap_size[1],
self.heatmap_size[0]),
dtype=np.float32)
tmp_size = self.sigma * 3
for joint_id in range(self.num_joints):
feat_stride = [self.image_size / self.heatmap_size[0], self.image_size / self.heatmap_size[1]]
mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
# Check that any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
# # Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight
def generate_target_in_single_map(self, joints, joints_vis):
'''
:param joints: [num_joints, 3]
:return: target, target_weight(1: visible, 0: invisible)
'''
target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_vis[:, 0]
assert self.target_type == 'gaussian', \
'Only gaussian target maps are supported!'
if self.target_type == 'gaussian':
target = np.zeros((1,
self.heatmap_size[1],
self.heatmap_size[0]),
dtype=np.float32)
masked_gaussian = np.zeros((1,
self.heatmap_size[1],
self.heatmap_size[0]),
dtype=np.float32)
tmp_size = self.sigma * 3
for joint_id in range(self.num_joints):
feat_stride = [self.image_size / self.heatmap_size[0], self.image_size / self.heatmap_size[1]]
mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
# Check that any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
# # Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
masked_gaussian[:, img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
np.maximum(target, masked_gaussian, out=target)
return target, target_weight
def imread(self, path):
img = cv2.imread(path)
if self.origin_size == self.template_size:
return img, 1.0
def map_size(exe, size):
return int(round(((exe + 1) / (self.origin_size + 1) * (size+1) - 1)))
osize = img.shape[1]  # original width, before resizing
nsize = map_size(self.template_size, osize)
img = cv2.resize(img, (nsize, nsize))
return img, nsize / osize
def shuffle(self):
pick = []
m = 0
while m < self.num:
p = []
for subset in self.all_data:
sub_p = subset.shuffle()
p += sub_p
sample_random.shuffle(p)
pick += p
m = len(pick)
self.pick = pick
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
def __len__(self):
return self.num
def find_dataset(self, index):
for dataset in self.all_data:
if dataset.start + dataset.num > index:
return dataset, index - dataset.start
def __getitem__(self, index, debug=False):
index = self.pick[index]
dataset, index = self.find_dataset(index)
gray = self.gray and self.gray > random.random()
neg = self.neg and self.neg > random.random()
if neg:
template = dataset.get_random_target(index)
if self.inner_neg and self.inner_neg > random.random():
search = dataset.get_random_target()
else:
search = random.choice(self.all_data).get_random_target()
else:
template, search = dataset.get_positive_pair(index)
def center_crop(img, size):
shape = img.shape[1]
if shape == size: return img
c = shape // 2
l = c - size // 2
r = c + size // 2 + 1
return img[l:r, l:r]
template_image, scale_z = self.imread(template[0])
if self.template_small:
template_image = center_crop(template_image, self.template_size)
search_image, scale_x = self.imread(search[0])
if not neg:
search_kp = np.array(search[2], dtype=np.float32)
else:
search_kp = np.zeros(51, dtype=np.float32)
if self.crop_size > 0:
search_image = center_crop(search_image, self.crop_size)
def toBBox(image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = self.template_size # 127
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox
template_box = toBBox(template_image, template[1])
search_box = toBBox(search_image, search[1])
# bbox = search_box
template, _ = self.template_aug(template_image, template_box, self.template_size, gray=gray)
search, bbox = self.search_aug(search_image, search_box, self.search_size, gray=gray)
def draw(image, box, name):
image = image.copy()
x1, y1, x2, y2 = map(lambda x: int(round(x)), box)
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))
cv2.imwrite(name, image)
def crop_hwc(bbox, out_sz=255):
a = (out_sz - 1) / (bbox[2] - bbox[0])
b = (out_sz - 1) / (bbox[3] - bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float64)
# crop = cv2.warpAffine(image, mapping, (out_sz, out_sz),
# borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
return mapping
def crop_hwc1(image, bbox, out_sz, padding=(0, 0, 0)):
a = (out_sz - 1) / (bbox[2] - bbox[0])
b = (out_sz - 1) / (bbox[3] - bbox[1])
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float64)
crop = cv2.warpAffine(image, mapping, (out_sz, out_sz))
return crop
def pos_s_2_bbox(pos, s):
bbox = [pos[0] - s / 2, pos[1] - s / 2, pos[0] + s / 2, pos[1] + s / 2]
return bbox
def crop_like_SiamFCx(bbox, exemplar_size=127, context_amount=0.5, search_size=255):
target_pos = [(bbox[2] + bbox[0]) / 2., (bbox[3] + bbox[1]) / 2.]
target_size = [bbox[2] - bbox[0] + 1, bbox[3] - bbox[1] + 1]
wc_z = target_size[1] + context_amount * sum(target_size)
hc_z = target_size[0] + context_amount * sum(target_size)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
d_search = (search_size - exemplar_size) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
# x = crop_hwc1(image, pos_s_2_bbox(target_pos, s_x), search_size, padding)
return target_pos, s_x
def kp_conversion(KeyPoints, matrix):
key_points = []
kps_conversion = []
skeleton = [0, 0]
Skeleton = []
for i in range(0, int(len(KeyPoints) / 3)):
skeleton[0] = KeyPoints[i * 3 + 0]
skeleton[1] = KeyPoints[i * 3 + 1]
Skeleton.append(skeleton[:])
lis = Skeleton[i]
lis.append(1)
key_points.append(lis)
key_points = np.array(key_points)
for i in range(0, int(len(KeyPoints) / 3)):
if KeyPoints[i * 3 + 2] != 0:
ky_conversion = np.matmul(matrix, key_points[i, :]).tolist()
kps_conversion.append(ky_conversion[0])
kps_conversion.append(ky_conversion[1])
kps_conversion.append(KeyPoints[i * 3 + 2])
else:
kps_conversion.append(0)
kps_conversion.append(0)
kps_conversion.append(0)
return kps_conversion
if debug:
draw(template_image, template_box, "debug/{:06d}_ot.jpg".format(index))
draw(search_image, search_box, "debug/{:06d}_os.jpg".format(index))
draw(template, _, "debug/{:06d}_t.jpg".format(index))
draw(search, bbox, "debug/{:06d}_s.jpg".format(index))
pos, s = crop_like_SiamFCx(search_box, exemplar_size=127, context_amount=0.5, search_size=255)
mapping_bbox = pos_s_2_bbox(pos, s)
mapping = crop_hwc(mapping_bbox, out_sz=255)
keypoints = kp_conversion(search_kp.tolist(), mapping)
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float64)
joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float64)
for ipt in range(self.num_joints):
joints_3d[ipt, 0] = keypoints[ipt * 3 + 0]
joints_3d[ipt, 1] = keypoints[ipt * 3 + 1]
joints_3d[ipt, 2] = keypoints[ipt * 3 + 2]
t_vis = search_kp[ipt * 3 + 2]
if t_vis > 1:
t_vis = 1
joints_3d_vis[ipt, 0] = t_vis
joints_3d_vis[ipt, 1] = t_vis
joints_3d_vis[ipt, 2] = 0
img = search.copy()
template, search = map(lambda x: np.transpose(x, (2, 0, 1)).astype(np.float32), [template, search])
# joints_3d = joints_3d / 255
if self.kp_anchor is False:
cls, delta, delta_weight = self.anchor_target(self.anchors, bbox, self.size, neg)
else:
cls, delta, kp_delta, delta_weight = self.anchor_target(self.anchors, bbox, joints_3d, self.size, neg)
# template = template_image # .astype(np.int16) # np.array(template_image, dtype=np.int16)
# search = search_image # .astype(np.int16) # np.array(search_image, dtype=np.int16)
# search = crop_like_SiamFCx1(search, bbox, exemplar_size=127, context_amount=0.5, search_size=255,
# padding=avg_chans)
if not neg:
kp_weight = cls.max(axis=0, keepdims=True)
else:
kp_weight = np.zeros([1, cls.shape[1], cls.shape[2]], dtype=np.float32)
# now process the ct part
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
output_res = self.output_res
num_joints = self.num_joints
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
ind = np.zeros(1, dtype=np.int64)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
kps = np.zeros(num_joints * 2, dtype=np.float32)
kps_mask = np.zeros((self.num_joints * 2), dtype=np.uint8)
hp_offset = np.zeros((num_joints, 2), dtype=np.float32)
hp_ind =
|
np.zeros(num_joints, dtype=np.int64)
|
numpy.zeros
|
import ML.modelselection as modelselection
import ML.regression as regression
import data
import numpy as np
def test_best_subset():
X, y = data.tall_matrix_data_2()
error_measure = modelselection.Error.mse
model = regression.LinearRegression
subset = modelselection.best_subset(X, y, model, 2, error_measure)
assert 0 in subset
assert 2 not in subset
def test_best_subset_forward():
X, y = data.tall_matrix_data_2()
error_measure = modelselection.Error.mse
model = regression.LinearRegression
subset = modelselection.best_subset(X, y, model, 2, error_measure,
direction='forward')
assert 0 in subset
assert 2 not in subset
def test_best_subset_backward():
X, y = data.tall_matrix_data_2()
error_measure = modelselection.Error.mse
model = regression.LinearRegression
subset = modelselection.best_subset(X, y, model, 2, error_measure,
direction='backward')
assert 0 in subset
assert 2 not in subset
def test_best_subset_combinatorial():
X, y = data.tall_matrix_data_2()
error_measure = modelselection.Error.mse
model = regression.LinearRegression
subset = modelselection.best_subset(X, y, model, 2, error_measure,
direction='combinatorial')
assert 0 in subset
assert 2 not in subset
def test_error_mse():
y = np.array([1, 2, 3, 4, 5])
predictions =
|
np.array([1.1, 2, 3.2, 4, 5])
|
numpy.array
|
import torch.utils.data as data
import numpy as np
import pandas as pd
import torch
from imblearn.over_sampling import SMOTE
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PowerTransformer, StandardScaler
from torchvision import transforms
from sklearn.preprocessing import LabelEncoder
from datasets.dataset_setup import DatasetSetup
from my_utils.utils import train_val_split
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
class BcwDataset(data.Dataset):
def __init__(self, csv_path, train=True):
"""
Args:
csv_path (string): Path to the csv file.
"""
self.train = train
self.df = pd.read_csv(csv_path)
self.train_data = []
self.train_labels = []
self.test_data = []
self.test_labels = []
self.area_mean = None
self.df = self.df.drop('Unnamed: 32', axis=1)
self.df = self.df.drop('id', axis=1)
# sequence adjustment
radius_mean = self.df['radius_mean']
self.df = self.df.drop('radius_mean', axis=1)
self.df['radius_mean'] = radius_mean
perimeter_mean = self.df['perimeter_mean']
self.df = self.df.drop('perimeter_mean', axis=1)
self.df['perimeter_mean'] = perimeter_mean
self.area_mean = self.df['area_mean']
self.df = self.df.drop('area_mean', axis=1)
le = LabelEncoder()
self.df['diagnosis'] = le.fit_transform(self.df['diagnosis'])
x = self.df.iloc[:, 1:]
y = self.df.iloc[:, 0]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=16)
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)  # reuse the scaler fitted on the training split
self.train_data = x_train # numpy array
self.test_data = x_test
self.train_labels = y_train.tolist()
self.test_labels = y_test.tolist()
print(csv_path, "train", len(self.train_data), "test", len(self.test_data))
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def __getitem__(self, index):
if self.train:
data, label = self.train_data[index], self.train_labels[index]
else:
data, label = self.test_data[index], self.test_labels[index]
return data, label
class BcwSetup(DatasetSetup):
def __init__(self):
super().__init__()
self.num_classes = 2
self.size_bottom_out = 2
def set_datasets_for_ssl(self, file_path, n_labeled, party_num=None):
base_dataset = BcwDataset(file_path)
train_labeled_idxs, train_unlabeled_idxs = train_val_split(base_dataset.train_labels,
int(n_labeled / self.num_classes),
self.num_classes)
train_labeled_dataset = BcwLabeled(file_path, train_labeled_idxs, train=True)
train_unlabeled_dataset = BcwUnlabeled(file_path, train_unlabeled_idxs, train=True)
train_complete_dataset = BcwLabeled(file_path, None, train=True)
test_dataset = BcwLabeled(file_path, train=False)
print("#Labeled:", len(train_labeled_idxs), "#Unlabeled:", len(train_unlabeled_idxs))
return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_dataset
def get_transforms(self):
transforms_ = transforms.Compose([
transforms.ToTensor(),
])
return transforms_
def get_transformed_dataset(self, file_path, party_num=None, train=True):
_liver_dataset = BcwDataset(file_path, train)
return _liver_dataset
def clip_one_party_data(self, x, half):
x = x[:, :half]
return x
class BcwLabeled(BcwDataset):
def __init__(self, file_path, indexs=None, train=True):
super(BcwLabeled, self).__init__(file_path, train=train)
if indexs is not None:
self.train_data = self.train_data[indexs]
self.train_labels =
|
np.array(self.train_labels)
|
numpy.array
|
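# Illustrative usage sketch for the BcwDataset defined above; the csv file name is a hypothetical
# placeholder, and only the standard torch.utils.data.DataLoader API is relied on:
from torch.utils.data import DataLoader

train_set = BcwDataset('breast_cancer_wisconsin.csv', train=True)    # hypothetical path
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
features, labels = next(iter(train_loader))    # one mini-batch of standardized features and labels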
"""GLD module."""
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize, special, stats
class GLD:
r"""Univariate Generalized Lambda Distribution class.
    GLD is a flexible family of continuous probability distributions with a wide variety of shapes.
    GLD has 4 parameters and is defined by its quantile function. The probability density function and cumulative distribution function
    are not available in closed form and can be calculated only with the help of numerical methods.
This tool implements three different parameterization types of GLD: 'RS' (introduced by <NAME> Schmeiser, 1974),
'FMKL' (introduced by Freimer, Mudholkar, Kollia and Lin, 1988) and 'VSL' (introduced by <NAME> Loots, 2009).
    It provides methods for calculating different characteristics of GLD, estimating parameters, generating random variables, and so on.
Attributes:
----------
param_type : str
Parameterization type of Generalized Lambda Distributions, should be 'RS', 'FMKL' or 'VSL'.
Notes:
-----
    Different parameterization types of GLD are not equivalent and specify similar but different distribution families;
    there is no one-to-one correspondence between their parameters.
GLD of 'RS' type is characterized by quantile function :math:`Q(y)` and density quantile function :math:`f(y)`:
.. math::
Q(y) = \lambda_1 + \frac{y^{\lambda_3} - (1-y)^{\lambda_4}}{\lambda_2},
.. math::
f(y) = \frac{\lambda_2}{\lambda_3 y^{\lambda_3-1} - \lambda_4 (1-y)^{\lambda_4-1}},
where :math:`\lambda_1` - location parameter, :math:`\lambda_2` - inverse scale parameter,
:math:`\lambda_3, \lambda_4` - shape parameters.
    GLD of 'RS' type is defined only for certain values of the shape parameters which provide a
    non-negative density function, and there is a complex series of rules determining which parameters
    specify a valid statistical distribution.
    The 'FMKL' parameterization removes these restrictions. GLD of 'FMKL' type is defined for all values of the
    shape parameters and is described by the following quantile function :math:`Q(y)` and density quantile function :math:`f(y)`:
.. math::
Q(y) = \lambda_1 + \frac{(y^{\lambda_3}-1)/\lambda_3 - ((1-y)^{\lambda_4}-1)/\lambda_4}{\lambda_2},
.. math::
f(y) = \frac{\lambda_2}{y^{\lambda_3-1} - (1-y)^{\lambda_4-1}}.
    The 'VSL' parameterization was introduced for simple closed-form parameter estimation using L-moments. Its quantile function :math:`Q(y)` and density quantile function :math:`f(y)` are:
.. math::
Q(y) = \alpha + \beta \Big((1-\delta)\frac{y^\lambda - 1}{\lambda} - \delta\frac{(1-y)^\lambda - 1}{\lambda}\Big),
.. math::
f(y) = \frac{1}{\beta ((1-\delta)y^{\lambda-1}+\delta(1-y)^{\lambda-1})},
where parameters have a different designation: :math:`\alpha` - location parameter, :math:`\beta` - scale parameter,
:math:`\delta` - skewness parameter (should be in the interval [0,1]), :math:`\lambda` - shape parameter.
References:
----------
.. [1] <NAME>., & <NAME>. 1974. An approximate method for generating asymmetric random variables.
Communications of the ACM, 17(2), 78–82
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>. 1988. A study of the
generalized Tukey lambda family. Communications in Statistics-Theory and Methods, 17, 3547–3567.
.. [3] <NAME>, <NAME>., & <NAME>. 2009. Method of L-moment estimation for generalized lambda distribution.
Third Annual ASEARC Conference. Newcastle, Australia.
"""
def __init__(self, param_type):
"""Create a new GLD with given parameterization type.
Parameters
----------
param_type : str
Parameterization type. Should be 'RS','FMKL' or 'VSL'.
Raises
------
ValueError
If param_type is not one of 'RS','FMKL' or 'VSL'.
"""
if param_type not in ['RS','FMKL','VSL']:
raise ValueError('Unknown parameterisation \'%s\' . Use \'RS\',\'FMKL\' or \'VSL\'' %param_type)
else:
self.param_type = param_type
def check_param(self,param):
"""Check if parameters specify a valid distribution with non-negative density function.
Parameters
----------
param : array-like
Parameters of GLD
Raises
------
ValueError
If number of parameters is not equal to 4.
Returns
-------
bool
True for valid parameters and False for invalid.
"""
if len(param)!=4:
raise ValueError('GLD has 4 parameters')
if not np.isfinite(param).all():
return False
else:
if self.param_type == 'RS':
r1 = (param[1]<0) and (param[2]<=-1) and (param[3]>=1)
r2 = (param[1]<0) and (param[2]>=1) and (param[3]<=-1)
r3 = (param[1]>0) and (param[2]>=0) and (param[3]>=0) and (param[2]!=0 or param[3]!=0)
r4 = (param[1]<0) and (param[2]<=0) and (param[3]<=0) and (param[2]!=0 or param[3]!=0)
r5 = (param[1]<0) and (param[2]<=0 and param[2]>=-1) and (param[3]>=1)
r6 = (param[1]<0) and (param[2]>=1) and (param[3]>=-1 and param[3]<=0)
if r5:
r5 = r5 and (1-param[2])**(1-param[2])*(param[3]-1)**(param[3]-1)/(param[3] - param[2])**(param[3]- param[2])<=-param[2]/param[3]
if r6:
r6 = r6 and (1-param[3])**(1-param[3])*(param[2]-1)**(param[2]-1)/(param[2] - param[3])**(param[2]- param[3])<=-param[3]/param[2]
return r1 or r2 or r3 or r4 or r5 or r6
if self.param_type == 'FMKL':
return param[1]>0
if self.param_type == 'VSL':
return np.logical_and(param[1]>0, np.logical_and(param[2]>=0, param[2]<=1))
def Q(self, y, param):
"""Calculate quantile function of GLD at `y` for given parameters.
Parameters
----------
y : array-like
Lower tail probability, must be between 0 and 1.
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of quantile function evaluated at `y`.
"""
y = np.array(y).astype(float)
param = np.array(param)
if np.logical_or(y>1, y<0).any():
raise ValueError('y should be in range [0,1]')
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if self.param_type == 'RS':
return param[0] + (y**param[2] - (1-y)**param[3])/param[1]
if self.param_type == 'FMKL':
f1 = (y**param[2]-1)/param[2] if param[2]!=0 else np.log(y)
f2 = ((1-y)**param[3] - 1)/param[3] if param[3]!=0 else np.log(1-y)
return param[0] + (f1 - f2)/param[1]
if self.param_type == 'VSL':
if param[3]!=0:
return param[0] + ((1 - param[2])*(y**param[3] - 1)/param[3] - param[2]*((1-y)**param[3] - 1)/param[3])*param[1]
else:
return param[0] + param[1]*np.log(y**(1-param[2])/(1-y)**param[2])
def PDF_Q(self, y, param):
"""Calculate density quantile function of GLD at `y` for given parameters.
Parameters
----------
y : array-like
Lower tail probability, must be between 0 and 1.
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of density quantile function evaluated at `y`.
"""
y = np.array(y).astype(float)
if np.logical_or(y>1, y<0).any():
raise ValueError('y should be in range [0,1]')
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if self.param_type == 'RS':
return param[1]/((param[2]*y**(param[2]-1) + param[3]*(1-y)**(param[3]-1)))
if self.param_type == 'FMKL':
return param[1]/((y**(param[2]-1) + (1-y)**(param[3]-1)))
if self.param_type == 'VSL':
return 1/((1 - param[2])*y**(param[3] - 1) + param[2]*(1-y)**(param[3] - 1))/param[1]
def CDF_num(self, x, param, xtol = 1e-05):
"""Calculate cumulative distribution function of GLD numerically at `x` for given parameters.
Parameters
----------
x : array-like
Argument of CDF.
param : array-like
Parameters of GLD.
xtol : float, optional
Absolute error parameter for optimization procedure. The default is 1e-05.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of cumulative distribution function evaluated at `x`.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
x = np.array([x]).ravel()
ans = x*np.nan
a,b = self.supp(param)
ans[x<a] = 0
ans[x>b] = 1
def for_calc_F(y):
"""Auxiliary function for optimization."""
return (self.Q(y,param) - x_arg)**2
ind = np.nonzero(np.isnan(ans))[0]
for i in ind:
x_arg = x[i]
ans[i] = optimize.fminbound(for_calc_F,0,1, xtol = xtol)
return ans
def PDF_num(self, x, param, xtol = 1e-05):
"""Calculate probability density function of GLD numerically at `x` for given parameters.
Parameters
----------
x : array-like
Argument of PDF.
param : array-like
Parameters of GLD.
xtol : float, optional
Absolute error parameter for optimization procedure. The default is 1e-05.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Value of probability density function evaluated at `x`.
"""
y = self.CDF_num(x, param, xtol)
ans = self.PDF_Q(y,param)
a,b = self.supp(param)
ans[np.logical_or(x<a, x>b)] = 0
return ans
def supp(self,param):
"""Return support of GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
array-like
Support of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
return self.Q(0,param), self.Q(1,param)
def rand(self, param, size = 1, random_state = None):
"""Generate random variables of GLD.
Parameters
----------
param : array-like
Parameters of GLD.
size : int, optional
Number of random variables. The default is 1.
random_state : None or int, optional
The seed of the pseudo random number generator. The default is None.
Returns
-------
array-like
Sample of GLD random variables of given size.
"""
if random_state:
np.random.seed(random_state)
alpha = np.random.random(size)
return self.Q(alpha,param)
def correct_supp(self, data, param, eps = 0.0001):
"""Correct support of GLD due to data.
In certain cases some data points can be outside of finite support of GLD.
This method corrects parameters of location and scale to fit support to data.
It is used as a component of some parameter estimation methods.
Parameters
----------
data : array-like
Input data.
param : array-like
Parameters of GLD.
eps : float, optional
Parameter of support fitting. Tail probability of minimum and maximum data points. The default is 0.0001.
Returns
-------
array-like
Corrected parameters of GLD.
"""
data = data.ravel()
def fun_opt(x):
"""Auxiliary function for optimization."""
A = np.min([np.min(data), self.Q(eps,param)])
B = np.max([np.max(data), self.Q(1-eps,param)])
par = np.hstack([x,param[2:]])
if not self.check_param(par):
return np.inf
return np.max([np.abs(self.Q(eps,par) - A), np.abs(self.Q(1-eps,par) - B)])
x = optimize.fmin(fun_opt,param[:2], disp=False)
param[:2] = x
return param
def GoF_Q_metric(self,data,param):
"""Calculate Goodness-of-Fit metric based on discrepancy between empirical and theoretical quantile functions.
It can be used for simple comparison of different fitted distributions.
Parameters
----------
data : array-like
Input data.
param : array-like
Parameters of GLD.
Returns
-------
float
Mean square deviation of empirical and theoretical quantiles.
"""
data = data.ravel()
return np.mean((np.sort(data) - self.Q((np.arange(len(data))+0.5)/len(data),param))**2)
def GoF_tests(self,param, data, bins_gof = 8):
"""Perform two Goodness-of_Fit tests: Kolmogorov-Smirnov test and one-way chi-square test from scipy.stats.
Parameters
----------
param : array-like
Parameters of GLD.
data : array-like
Input data.
bins_gof : int, optional
Number of bins for chi-square test. The default is 8.
Returns
-------
scipy.stats.stats.KstestResult
Result of Kolmogorov-Smirnov test including statistic and p-value.
scipy.stats.stats.Power_divergenceResult
Result of chi-square test including statistic and p-value.
"""
def cdf(x):
"""Auxiliary function for GoF test."""
return self.CDF_num(x,param)
ks = stats.kstest(data, cdf)
chi2 = stats.chisquare(np.histogram(data,self.Q(np.linspace(0, 1, bins_gof + 1),param))[0],[len(data)/bins_gof]*bins_gof )
return ks, chi2
def plot_cdf(self, param_list, data = None, ymin = 0.01, ymax = 0.99, n_points = 100, names = None, color_emp = 'lightgrey', colors = None):
"""Plot cumulative distribution functions of GLD.
        This makes it possible to compare GLD cumulative distribution functions with different parameters.
        It is also possible to add an empirical CDF to the plot.
Parameters
----------
param_list : array-like or list of array-like
List of GLD parameters for plotting.
data : array-like, optional
If not None empirical CDF estimated by data will be added to the plot. The default is None.
ymin : float, optional
Minimal lower tail probability for plotting. The default is 0.01.
ymax : float, optional
Maximal lower tail probability for plotting. The default is 0.99.
n_points : int, optional
Number of points for plotting. The default is 100.
names : list of str, optional
Names of labels for the legend. Length of the list should be equal to the length of param_list.
color_emp : str, optional
Line color of empirical CDF. It's ignored if data is None. The default is 'lightgrey'.
colors : list of str, optional
Line colors of CDFs. Length of the list should be equal to the length of param_list.
        See also: plot_fitting(data, param, bins=None).
"""
param_list = np.array(param_list)
if param_list.ndim==1:
param_list = param_list.reshape(1,-1)
if names is None:
names = [str(x) for x in param_list]
if colors is None:
colors = [None]*len(param_list)
plt.figure()
plt.grid()
if not (data is None):
data = data.ravel()
plt.plot(np.sort(data), np.arange(len(data))/len(data),color = color_emp,lw = 2)
names = np.hstack(['empirical data', names ])
y = np.linspace(ymin,ymax,n_points)
for i in range(param_list.shape[0]):
param = param_list[i]
plt.plot(self.Q(y,param), y, color = colors[i])
plt.ylim(ymin = 0)
plt.legend(names,bbox_to_anchor=(1.0, 1.0 ))
plt.title('CDF')
def plot_pdf(self, param_list, data = None, ymin = 0.01, ymax = 0.99, n_points = 100, bins = None, names = None, color_emp = 'lightgrey', colors = None):
"""Plot probability density functions of GLD.
        This makes it possible to compare GLD probability density functions with different parameters.
        It is also possible to add a data histogram to the plot.
Parameters
----------
param_list : array-like or list of array-like
List of GLD parameters for plotting.
data : array-like, optional
If not None empirical CDF estimated by data will be added to the plot.
ymin : float, optional
Minimal lower tail probability for plotting. The default is 0.01.
ymax : float, optional
Maximal lower tail probability for plotting. The default is 0.99.
n_points : int, optional
Number of points for plotting. The default is 100.
bins : int, optional
Number of bins for histogram. It's ignored if data is None.
names : list of str, optional
Names of labels for the legend. Length of the list should be equal to the length of param_list. The default is None.
color_emp : str, optional
Color of the histogram. It's ignored if data is None. The default is 'lightgrey'.
colors : list of str, optional
Line colors of PDFs. Length of the list should be equal to the length of param_list.
"""
param_list = np.array(param_list)
if param_list.ndim==1:
param_list = param_list.reshape(1,-1)
if names is None:
names = [str(x) for x in param_list]
plt.figure()
plt.grid()
pdf_max = 0
if not data is None:
data = data.ravel()
p = plt.hist(data, bins = bins, color = color_emp, density = True)
pdf_max = np.max(p[0])
if colors is None:
colors = [None]*len(param_list)
y = np.linspace(ymin,ymax,n_points)
for i in range(param_list.shape[0]):
param = param_list[i]
plt.plot(self.Q(y,param), self.PDF_Q(y,param),color = colors[i])
pdf_max = np.max([pdf_max,np.max(self.PDF_Q(y,param))])
plt.ylim(ymin = 0,ymax = pdf_max * 1.05)
plt.legend(names,bbox_to_anchor=(1.0, 1.0 ))
plt.title('PDF')
def plot_fitting(self,data,param, bins = None):
"""Construct plots for comparing fitted GLD with data.
        It compares the data histogram with the PDF of the fitted GLD on the first plot,
        empirical and theoretical CDFs on the second plot, and
        theoretical and empirical quantiles plotted against each other (a Q-Q plot) on the third plot.
Parameters
----------
data : array-like
Input data.
param : array-like
Parameters of GLD.
bins : int, optional
Number of bins for histogram.
"""
data = data.ravel()
fig,ax = plt.subplots(1,3,figsize = (15,3))
ax[0].hist(data,bins = bins,density = True,color = 'skyblue')
y = np.linspace(0.001,0.999,100)
ax[0].plot(self.Q(y,param),self.PDF_Q(y,param),lw = 2,color = 'r')
ax[0].set_title('PDF')
ax[0].grid()
ax[1].plot(np.sort(data), np.arange(len(data))/len(data))
ax[1].plot(self.Q(y,param), y)
ax[1].grid()
ax[1].set_title('CDF')
x = np.sort(data)
y = (np.arange(len(data))+0.5)/len(data)
ax[2].plot(self.Q(y,param), x,'bo',ms = 3)
m1 = np.min([x,self.Q(y,param)])
m2 = np.max([x,self.Q(y,param)])
ax[2].plot([m1,m2], [m1,m2],'r')
ax[2].grid()
ax[2].set_title('Q-Q-plot')
def __sum_Ez(self,k,p3,p4):
"""Auxiliary function for moments calculation."""
s = 0
p3 = np.array(p3)
p4 = np.array(p4)
if self.param_type == 'RS':
for i in range(0,k+1):
s+=special.binom(k,i)*(-1)**i *special.beta(p3*(k-i)+1, p4*i+1)
if self.param_type == 'FMKL':
for i in range(0,k+1):
for j in range(0, k-i+1):
s+=(p3-p4)**i/(p3*p4)**k * special.binom(k,i)*special.binom(k-i,j)*(-1)**j*p4**(k-i-j)*p3**j*special.beta(p3*(k-i-j)+1,p4*j+1)
if self.param_type=='VSL':
for i in range(0,k+1):
for j in range(0, k-i+1):
s+=(2*p3-1)**i/p4**k*special.binom(k,i)*special.binom(k-i,j)*(-1)**j*(1-p3)**(k-i-j)*p3**j*special.beta(p4*(k-i-j)+1,p4*j+1)
return s
def mean(self, param):
"""Calculate mean of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Mean of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1 and param[3]>-1:
A = self.__sum_Ez(1,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
return A/L + param[0]
else:
return np.nan
def var(self, param):
"""Calculate variance of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Variance of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/2 and param[3]>-1/2:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
return (B-A**2)/L**2
else:
return np.nan
def std(self, param):
"""Calculate standard deviation of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Standard deviation of GLD.
"""
return np.sqrt(self.var(param))
def skewness(self, param):
"""Calculate skewness of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Skewness of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/3 and param[3]>-1/3:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
C = self.__sum_Ez(3,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
a2 = (B-A**2)/L**2
return (C-3*A*B+2*A**3)/L**3/a2**1.5
else:
return np.nan
def kurtosis(self, param):
"""Calculate kurtosis of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Kurtosis of GLD.
"""
if not self.check_param(param):
raise ValueError('Parameters are not valid')
if param[2]>-1/4 and param[3]>-1/4:
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
C = self.__sum_Ez(3,param[2], param[3])
D = self.__sum_Ez(4,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
a2 = (B-A**2)/L**2
return (D-4*A*C+6*A**2*B-3*A**4)/L**4/a2**2
else:
return np.nan
def median(self,param):
"""Calculate median of the GLD for given parameters.
Parameters
----------
param : array-like
Parameters of GLD.
Raises
------
ValueError
If input parameters are not valid.
Returns
-------
float
Median of GLD.
"""
return self.Q(0.5,param)
def fit_MM(self,data, initial_guess, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of moments.
It estimates parameters of GLD by setting first four sample moments equal to their GLD counterparts.
        The resulting system of equations is solved numerically for a given initial guess.
        The method has some restrictions related to the existence of moments and to computational difficulties.
Parameters
----------
data : array-like
Input data.
initial_guess : array-like
Initial guess for third and fourth parameters.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If length of initial guess is incorrect.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] <NAME>., <NAME>. 2000. Fitting statistical distributions: the generalized
lambda distribution and generalized bootstrap methods. Chapman and Hall/CRC.
"""
initial_guess = np.array(initial_guess)
data = data.ravel()
def sample_moments(data):
"""Calculate first four sample moments."""
a1 = np.mean(data)
a2 = np.mean((data - a1)**2)
a3 = np.mean((data - a1)**3)/a2**1.5
a4 = np.mean((data - a1)**4)/a2**2
return a1,a2,a3,a4
def moments( param):
"""Calculate first four GLD moments."""
A = self.__sum_Ez(1,param[2], param[3])
B = self.__sum_Ez(2,param[2], param[3])
C = self.__sum_Ez(3,param[2], param[3])
D = self.__sum_Ez(4,param[2], param[3])
L = 1/param[1] if self.param_type=='VSL' else param[1]
a1 = A/L + param[0]
a2 = (B-A**2)/L**2
a3 = (C-3*A*B+2*A**3)/L**3/a2**1.5
a4 = (D-4*A*C+6*A**2*B-3*A**4)/L**4/a2**2
return a1,a2,a3,a4
def fun_VSL(x):
"""Auxiliary function for optimization."""
if x[0]<0 or x[0] >1 or x[1]<-0.25:
return np.inf
A = self.__sum_Ez(1,x[0],x[1])
B = self.__sum_Ez(2,x[0],x[1])
C = self.__sum_Ez(3,x[0],x[1])
D = self.__sum_Ez(4,x[0],x[1])
return np.max([np.abs((C-3*A*B+2*A**3)/(B-A**2)**1.5 - a3), np.abs( (D-4*A*C+6*A**2*B-3*A**4)/(B-A**2)**2 - a4)])
def fun_RS_FMKL(x):
"""Auxiliary function for optimization."""
if x[0] <-0.25 or x[1]<-0.25:
return np.inf
A = self.__sum_Ez(1,x[0],x[1])
B = self.__sum_Ez(2,x[0],x[1])
C = self.__sum_Ez(3,x[0],x[1])
D = self.__sum_Ez(4,x[0],x[1])
return np.max([np.abs((C-3*A*B+2*A**3)/(B-A**2)**1.5 - a3), np.abs( (D-4*A*C+6*A**2*B-3*A**4)/(B-A**2)**2 - a4)])
fun_opt = fun_VSL if self.param_type=='VSL' else fun_RS_FMKL
if initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
a1,a2,a3,a4 = sample_moments(data)
[p3,p4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
A = self.__sum_Ez(1,p3,p4)
B = self.__sum_Ez(2,p3,p4)
C = self.__sum_Ez(3,p3,p4)
D = self.__sum_Ez(4,p3,p4)
p2 = (((B-A**2)/a2)**0.5)**(-1 if self.param_type=='VSL' else 1)
p1 = a1 - A/(p2**(-1 if self.param_type=='VSL' else 1))
param = [p1,p2,p3,p4]
if self.param_type=='RS' and not self.check_param(param):
p3, p4 = p4,p3
p2 = p2* (-1)
p1 = a1 + A/(p2)
param = [p1,p2,p3,p4]
if disp_fit:
print('')
print('Sample moments: ', sample_moments(data))
print('Fitted moments: ', moments(param))
print('')
print('Parameters: ', param)
if not self.check_param(param):
print('')
print('Parameters are not valid. Try another initial guess.')
else:
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
def fit_PM(self,data, initial_guess, u = 0.1, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of percentiles.
It estimates parameters of GLD by setting four percentile-based sample statistics equal to their corresponding GLD statistics.
        To calculate these statistics it is necessary to specify the parameter u (a number between 0 and 0.25).
        The resulting system of equations is solved numerically for a given initial guess.
Parameters
----------
data : array-like
Input data.
initial_guess : array-like
Initial guess for third and fourth parameters if parameterization type is 'RS' or 'FMKL'
and for only fourth parameter if parameterization type is 'VSL'.
u : float, optional
Parameter for calculating percentile-based statistics. Arbitrary number between 0 and 0.25. The default is 0.1.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If length of initial guess is incorrect or parameter u is out of range [0,0.25].
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] <NAME>., <NAME>. 2000. Fitting statistical distributions: the generalized
lambda distribution and generalized bootstrap methods. Chapman and Hall/CRC.
"""
initial_guess = np.array(initial_guess)
data = data.ravel()
if u<0 or u>0.25:
raise ValueError('u should be in interval [0,0.25]')
def sample_statistics(data, u):
"""Calculate four sample percentile-based statistics."""
p1 = np.quantile(data, 0.5)
p2 = np.quantile(data, 1-u) - np.quantile(data, u)
p3 = (np.quantile(data, 0.5) - np.quantile(data, u))/(np.quantile(data, 1-u) - np.quantile(data, 0.5))
p4 = (np.quantile(data, 0.75) - np.quantile(data, 0.25))/p2
return p1,p2,p3,p4
a1,a2,a3,a4 = sample_statistics(data,u)
if self.param_type=='RS':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[l1,l2,l3,l4] = param
p1 = l1+(0.5**l3 - 0.5**l4)/l2
p2 = ((1-u)**l3 - u**l4 - u**l3+(1-u)**l4)/l2
p3 = (0.5**l3 - 0.5**l4 - u**l3 +(1-u)**l4)/((1-u)**l3 - u**l4 - 0.5**l3 +0.5**l4)
p4 = (0.75**l3 - 0.25**l4 - 0.25**l3 +0.75**l4)/((1-u)**l3-u**l4 - u**l3+(1-u)**l4)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
l3 = x[0]
l4 = x[1]
return np.max([( (0.75**l3 - 0.25**l4 - 0.25**l3 +0.75**l4)/((1-u)**l3-u**l4 - u**l3+(1-u)**l4) - a4),
np.abs((0.5**l3 - 0.5**l4 - u**l3 +(1-u)**l4)/((1-u)**l3 - u**l4 - 0.5**l3 +0.5**l4) - a3)])
if initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = 1/a2*((1-u)**l3-u**l3 + (1-u)**l4 - u**l4)
l1 = a1 - 1/l2*(0.5**l3 - 0.5**l4)
param = np.array([l1,l2,l3,l4]).ravel()
theor_stat = theor_statistics(param,u)
if self.param_type == 'FMKL':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[l1,l2,l3,l4] = param
p1 = l1+((0.5**l3-1)/l3 - (0.5**l4-1)/l4)/l2
p2 = (((1-u)**l3 - u**l3)/l3 +((1-u)**l4- u**l4)/l4)/l2
p3 = ((0.5**l3 - u**l3 )/l3 +((1-u)**l4- 0.5**l4)/l4) / (((1-u)**l3 - 0.5**l3)/l3 +(0.5**l4- u**l4)/l4)
p4 = ((0.75**l3 - 0.25**l3 )/l3 +(0.75**l4- 0.25**l4)/l4)/(((1-u)**l3 - u**l3)/l3 + ((1-u)**l4- u**l4)/l4)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
l3 = x[0]
l4 = x[1]
return np.max([np.abs(((0.75**l3 - 0.25**l3 )/l3 +(0.75**l4- 0.25**l4)/l4)/(((1-u)**l3 - u**l3)/l3 + ((1-u)**l4- u**l4)/l4) - a4),
np.abs(((0.5**l3 - u**l3 )/l3 +((1-u)**l4- 0.5**l4)/l4) / (((1-u)**l3 - 0.5**l3)/l3 +(0.5**l4- u**l4)/l4) - a3)])
if initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = 1/a2*(((1-u)**l3-u**l3)/l3 + ((1-u)**l4 - u**l4)/l4)
l1 = a1 - 1/l2*((0.5**l3 - 1)/l3 - (0.5**l4 - 1)/l4)
param = np.array([l1,l2,l3,l4]).ravel()
theor_stat = theor_statistics(param,u)
if self.param_type == 'VSL':
def theor_statistics(param,u):
"""Calculate four GLD percentile-based statistics."""
[a,b,d,l] = param
p1 = a+b*(0.5**l - 1)*(1-2*d)/l
p2 = b*((1-u)**l - u**l)/l
p3 = ((1-d)*(0.5**l - u**l)+d*((1-u)**l - 0.5**l))/((1-d)*((1-u)**l - 0.5**l)+d*(0.5**l - u**l))
p4 = (0.75**l - 0.25**l)/((1-u)**l - u**l)
return p1,p2,p3,p4
def fun_opt(x):
"""Auxiliary function for optimization."""
return np.abs((0.75**x - 0.25**x)/((1-u)**x - u**x) - a4)
if initial_guess.ndim!=0 and len(initial_guess)!=1:
raise ValueError('Specify initial guess for one parameter')
l = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)[0]
d = (a3*((1-u)**l - 0.5**l) - 0.5**l +u**l)/(a3+1)/((1-u)**l - 2*0.5**l+u**l)
d = np.max([0,np.min([1,d])])
b = a2*l/((1-u)**l - u**l)
a = a1 - b*(0.5**l - 1)*(1-2*d)/l
param = np.array([a,b,d,l]).ravel()
theor_stat = theor_statistics(param,u)
if disp_fit:
print('')
print('Sample statistics: ', sample_statistics(data,u))
print('Fitted statistics: ', theor_stat)
print('')
print('Parameters: ', param)
if not self.check_param(param):
print('')
print('Parameters are not valid. Try another initial guess.')
else:
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
def fit_LMM(self,data, initial_guess = None, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of L-moments.
It estimates parameters of GLD by equating four sample L-moments and L-moments ratios and their GLD counterparts.
L-moments are linear combinations of order statistics analogous to conventional moments.
        The resulting system of equations for the 'RS' and 'FMKL' parameterizations is solved numerically for a given initial guess.
        For the 'VSL' parameterization there is an exact analytical solution of the equations.
        In the general case there are two different sets of parameters which give the same values of the L-moments.
        The method returns the solution which is closest to the initial guess.
If initial_guess is None the best solution is chosen using GLD.GoF_Q_metric.
Parameters
----------
data : array-like
Input data.
initial_guess : array-like
Initial guess for third and fourth parameters. It's ignored for 'VSL' parameterization type.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001. It's ignored for 'VSL' parameterization type.
maxiter : int, optional
Maximum number of iterations for optimization procedure. It's ignored for 'VSL' parameterization type.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure. It's ignored for 'VSL' parameterization type.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True. It's ignored for 'VSL' parameterization type.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If length of initial guess is incorrect.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] <NAME>. and <NAME>. 2008. Characterizing the generalized lambda distribution by L-moments.
Computational Statistics & Data Analysis, 52(4):1971–1983.
.. [2] <NAME>, <NAME>., & <NAME>. 2009. Method of L-moment estimation for generalized lambda distribution.
Third Annual ASEARC Conference. Newcastle, Australia.
"""
if not initial_guess is None:
initial_guess = np.array(initial_guess)
data = data.ravel()
def sample_lm(data):
"""Calculate four sample L-moments and L-moment ratios."""
x = np.sort(data)
n = len(data)
l1 = np.mean(x)
l2 = np.sum(np.array([2*i-n - 1 for i in range(1,n+1)])*x)/2/special.binom(n,2)
l3 = np.sum(np.array([special.binom(i-1,2) - 2*(i-1)*(n-i)+special.binom(n-i,2) for i in range(1,n+1)])*x)/3/special.binom(n,3)
l4 = np.sum(np.array([special.binom(i-1,3) - 3*special.binom(i-1,2)*(n-i)+3*(i-1)*special.binom(n-i,2)-special.binom(n-i,3) for i in range(1,n+1)])*x)/4/special.binom(n,4)
return l1,l2,l3/l2,l4/l2
a1,a2,a3,a4 = sample_lm(data)
def lm(param):
"""Calculate four GLD L-moments and L-moment ratios."""
def lr(r,param):
"""Auxiliary function for L-moments calculation."""
if self.param_type=='VSL':
[a,b,d,l] = param
s = 0
for k in range(r):
s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*((1-d-(-1)**(r-1)*d)/l/(l+k+1))
if r==1:
s = s*b+a+b*(2*d-1)/l
else:
s = s*b
return s
if self.param_type=='RS':
[l1,l2,l3,l4] = param
s = 0
for k in range(r):
s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*(1/(l3+k+1) - (-1)**(r-1)/(l4+k+1))
if r==1:
s = s/l2+l1
else:
s = s/l2
return s
if self.param_type=='FMKL':
[l1,l2,l3,l4] = param
s = 0
for k in range(r):
s+=(-1)**(r-k-1)*special.binom(r-1,k)*special.binom(r+k-1,k)*(1/(l3+k+1)/l3 - (-1)**(r-1)/(l4+k+1)/l4)
if r==1:
s = s/l2+l1 - 1/l2/l3 +1/l2/l4
else:
s = s/l2
return s
l1 = lr(1,param)
l2 = lr(2,param)
l3 = lr(3,param)
l4 = lr(4,param)
return l1,l2,l3/l2, l4/l2
if self.param_type=='RS':
def fun_opt(x):
"""Auxiliary function for optimization."""
[l3, l4] = x
L2 = -1/(1+l3)+2/(2+l3)-1/(1+l4)+2/(2+l4)
return np.max([np.abs((1/(l3+1) - 6/(2+l3) + 6/(3+l3) - 1/(l4+1) + 6/(2+l4) - 6/(3+l4))/L2 - a3),
np.abs((-1/(1+l3) + 12/(2+l3) - 30/(3+l3) + 20/(4+l3)-1/(1+l4) + 12/(2+l4) - 30/(3+l4) + 20/(4+l4))/L2 - a4)])
if initial_guess is None or initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = (-1/(1+l3)+2/(2+l3)-1/(1+l4)+2/(2+l4))/a2
l1 = a1 + 1/l2*(1/(1+l4) - 1/(1+l3))
param = np.array([l1,l2,l3,l4]).ravel()
if self.param_type == 'FMKL':
def fun_opt(x):
"""Auxiliary function for optimization."""
[l3, l4] = x
L2 = -1/(1+l3)/l3+2/(2+l3)/l3-1/(1+l4)/l4+2/(2+l4)/l4
return np.max([np.abs((1/(l3+1)/l3 - 6/(2+l3)/l3 + 6/(3+l3)/l3 - 1/(l4+1)/l4 + 6/(2+l4)/l4 - 6/(3+l4)/l4)/L2 - a3),
np.abs((-1/(1+l3)/l3 + 12/(2+l3)/l3 - 30/(3+l3)/l3 + 20/(4+l3)/l3-1/(1+l4)/l4 + 12/(2+l4)/l4 - 30/(3+l4)/l4 + 20/(4+l4)/l4)/L2 - a4)])
if initial_guess is None or initial_guess.ndim==0 or len(initial_guess)!=2:
raise ValueError('Specify initial guess for two parameters')
[l3,l4] = optimize.fmin(fun_opt,initial_guess,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
l2 = (-1/(1+l3)/l3+2/(2+l3)/l3-1/(1+l4)/l4+2/(2+l4)/l4)/a2
l1 = a1 + 1/l2*(1/(1+l4)/l4 - 1/(1+l3)/l3)+1/l2/l3 - 1/l2/l4
param = np.array([l1,l2,l3,l4]).ravel()
if self.param_type == 'VSL':
if a4**2+98*a4 +1 <0:
a4 = (-98+(98**2 - 4)**0.5)/2+10**(-10)
p4 = np.array([(3+7*a4 + np.sqrt(a4**2+98*a4 +1))/(2*(1-a4)), (3+7*a4 - np.sqrt(a4**2+98*a4 +1))/(2*(1-a4))])
p3 = 0.5*(1-a3*(p4+3)/(p4-1))
p3[p4==1] = 0.5
p3[p3<0] = 0
p3[p3>1] = 1
p2 = a2*(p4+1)*(p4+2)
p1 = a1+p2*(1-2*p3)/(p4+1)
param1 = [p1[0], p2[0],p3[0],p4[0]]
param2 = [p1[1], p2[1],p3[1],p4[1]]
if initial_guess is None:
best = [self.check_param(param1)*1,self.check_param(param2)*1]
if np.sum(best)==2:
GoF = [self.GoF_Q_metric(data,param1),self.GoF_Q_metric(data,param2)]
best = (GoF == np.min(GoF))*1
param = np.array([param1,param2][np.argmax(best)]).ravel()
else:
if initial_guess.ndim!=0 and len(initial_guess)!=1:
raise ValueError('Specify initial guess for one parameter')
if np.abs(initial_guess - param1[3]) <= np.abs(initial_guess - param2[3]):
param = np.array(param1)
else:
param = np.array(param2)
if disp_fit:
print('')
print('Sample L-moments: ', sample_lm(data))
print('Fitted L-moments: ', lm(param))
print('')
print('Parameters: ', param)
if not self.check_param(param):
print('')
print('Parameters are not valid. Try another initial guess.')
else:
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
def grid_search(self, data, fun_min, grid_min = -3, grid_max = 3, n_grid = 10):
"""Find parameters of GLD by grid search procedure.
        It does a grid search over the third and fourth parameters. The first two parameters are calculated by fitting the
        support to the data. It returns the parameters with the minimum value of `fun_min`.
Parameters
----------
data : array-like
Input data.
fun_min : function
Function of parameters to minimize for choosing the best parameters. For example, negative log-likelihood function.
grid_min : float, optional
Minimum value of shape parameters for the grid. The default is -3.
grid_max : float, optional
            Maximum value of shape parameters for the grid. The default is 3.
n_grid : int, optional
Number of grid points for each parameter. The default is 10.
Returns
-------
array-like
Parameters of GLD.
"""
eps = 0.01
def fun_opt_supp(x):
"""Auxiliary function for estimation of first two parameters by fitting support to data."""
A = np.min(data)
B = np.max(data)
par = np.hstack([x,param[2:]])
if not self.check_param(par):
return np.inf
return np.max([np.abs(self.Q(eps,par) - A), np.abs(self.Q(1-eps,par) - B)])
if self.param_type == 'VSL':
p3_list = np.linspace(0,1,n_grid)
p4_list = np.linspace(grid_min,grid_max,n_grid)
else:
p3_list = np.linspace(grid_min,grid_max,n_grid)
p4_list = np.linspace(grid_min,grid_max,n_grid)
res = np.zeros((n_grid, n_grid))
for i in range(n_grid):
for j in range(n_grid):
param = [np.mean(data),1,p3_list[i], p4_list[j]]
if self.param_type == 'RS' and not self.check_param(param):
param[1] = -1
x = optimize.fmin(fun_opt_supp,param[:2], disp=False, xtol = 10**(-8))
param[:2] = x
res[i,j] = fun_min(param)
ind = np.unravel_index(np.argmin(res, axis=None), res.shape)
p3,p4 = p3_list[ind[0]], p4_list[ind[1]]
param = np.hstack([np.mean(data),1,p3,p4])
x = optimize.fmin(fun_opt_supp,param[:2], disp=False, xtol = 10**(-8))
return np.hstack([x,p3,p4])
def fit_MPS(self,data, initial_guess = None, method = 'grid', u = 0.1,grid_min = -3, grid_max = 3, n_grid = 10, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of maximum product of spacing.
It estimates parameters of GLD by maximization of the geometric mean of spacings in the data,
which are the differences between the values of the cumulative distribution function at neighbouring data points.
This consists of two steps. The first step is finding initial values of parameters for maximization procedure
using method of moments, method of percentiles, method of L-moments or grid search procedure.
The second step is maximization of the geometric mean of spacings using numerical methods.
The optimization procedure is quite difficult and requires some time (especially for large samples).
Parameters
----------
data : array-like
Input data.
initial_guess : array-like, optional
Initial guess for the first step. Length of initial_guess depends on the method used at the first step.
It's ignored if method is 'grid'.
method : str, optional
Method used for finding initial parameters at the first step. Should be 'MM' for method of moments,
'PM' for method of percentiles, 'LMM' for method of L-moments or 'grid' for grid search procedure.
The default is 'grid'.
u : float, optional
Parameter for calculating percentile-based statistics for method of percentiles.
Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
grid_min : float, optional
Minimum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
grid_max : float, optional
            Maximum value of shape parameters for the grid search. The default is 3. It's ignored if method is not 'grid'.
n_grid : int, optional
Number of grid points for the grid search. The default is 10. It's ignored if method is not 'grid'.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If input parameters are incorrect or parameters of GLD from the first step are not valid.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] <NAME>., & <NAME>. 1983. Estimating parameters in continuous univariate
distributions with a shifted origin. Journal of the Royal Statistical Society: Series B
(Methodological), 45(3), 394–403.
.. [2] <NAME>. 1984. The maximum spacing method. an estimation method related to the
maximum likelihood method. Scandinavian Journal of Statistics, 93–112.
.. [3] <NAME>., <NAME>., & <NAME>. 2012. Flexible distribution modeling with the
generalized lambda distribution.
"""
data = np.sort(data.ravel())
unique, counts = np.unique(data, return_counts=True)
delta = np.min(np.diff(unique))/2
ind = np.nonzero(counts>1)[0]
ind1 = np.nonzero(np.isin(data, unique[ind]))[0]
data[ind1] = data[ind1] + stats.norm.rvs(0,delta/3,len(ind1))
def S(param):
"""Spacing function for optimization."""
if not self.check_param(param):
return np.inf
return -np.mean(np.log(np.abs(np.diff(self.CDF_num(np.sort((data)),param)))))
if method not in ['MM','LMM','PM','grid']:
raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\' , \'PM\' or \'grid\'' %method)
if method=='MM':
param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='PM':
param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='LMM':
param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='grid':
param1 = self.grid_search(data, fun_min = S, grid_min = grid_min, grid_max = grid_max, n_grid = n_grid)
if not self.check_param(param1):
raise ValueError('Parameters are not valid. Try another initial guess.')
if np.min(data)<self.supp(param1)[0] or np.max(data)>self.supp(param1)[1]:
param1 = self.correct_supp(data, param1)
param = optimize.fmin(S,param1,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
if disp_fit:
print('')
print('Initial point for Maximum Product of Spacing Method: ', param1)
print('Estimated by ', method)
print('')
print('Initial negative logarithm of mean spacing: ', S(param1))
print('Optimized negative logarithm of mean spacing: ', S(param))
print('')
print('Parameters: ', param)
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return np.array(param)
def fit_ML(self,data, initial_guess = None, method = 'grid', u = 0.1, grid_min = -3, grid_max = 3, n_grid = 10, xtol=0.0001, maxiter=None, maxfun=None , disp_optimizer=True, disp_fit = True, bins_hist = None, test_gof = True, bins_gof = 8):
"""Fit GLD to data using method of maximum likelihood.
It estimates parameters of GLD by maximizing a likelihood function.
This consists of two steps. The first step is finding initial values of parameters for maximization procedure
using method of moments, method of percentiles, method of L-moments or grid search procedure.
The second step is maximization of likelihood function using numerical methods.
The optimization procedure is quite difficult and requires some time (especially for large samples).
Parameters
----------
data : array-like
Input data.
initial_guess : array-like, optional
Initial guess for the first step. Length of initial_guess depends on the method used at the first step.
It's ignored if method is 'grid'.
method : str, optional
Method used for finding initial parameters at the first step. Should be 'MM' for method of moments,
'PM' for method of percentiles, 'LMM' for method of L-moments or 'grid' for grid search procedure.
The default is 'grid'.
u : float, optional
Parameter for calculating percentile-based statistics for method of percentiles.
Arbitrary number between 0 and 0.25. The default is 0.1. It's ignored if method is not 'PM'.
grid_min : float, optional
Minimum value of shape parameters for the grid search. The default is -3. It's ignored if method is not 'grid'.
grid_max : float, optional
            Maximum value of shape parameters for the grid search. The default is 3. It's ignored if method is not 'grid'.
n_grid : int, optional
Number of grid points for the grid search. The default is 10. It's ignored if method is not 'grid'.
xtol : float, optional
Absolute error for optimization procedure. The default is 0.0001.
maxiter : int, optional
Maximum number of iterations for optimization procedure.
maxfun : int, optional
Maximum number of function evaluations for optimization procedure.
disp_optimizer : bool, optional
Set True to display information about optimization procedure. The default is True.
disp_fit : bool, optional
Set True to display information about fitting. The default is True.
bins_hist : int, optional
Number of bins for histogram. It's ignored if disp_fit is False.
test_gof : bool, optional
Set True to perform Goodness-of-Fit tests and print results. It's ignored if disp_fit is False. The default is True.
bins_gof : int, optional
Number of bins for chi-square test. It's ignored if test_gof is False. The default is 8.
Raises
------
ValueError
If input parameters are incorrect or parameters of GLD from the first step are not valid.
Returns
-------
array-like
Fitted parameters of GLD.
References:
----------
.. [1] <NAME>. 2007. Numerical maximum log likelihood estimation for generalized lambda distributions.
Computational Statistics & Data Analysis, 51(8), 3983–3998.
"""
data = data.ravel()
def lnL(param):
"""Likelihood function for optimization."""
if not self.check_param(param):
return np.inf
return -np.sum(np.log(self.PDF_num(data,param)))
if method not in ['MM','LMM','PM','grid']:
raise ValueError('Unknown method \'%s\' . Use \'MM\',\'LMM\', \'PM\' or \'grid\'' %method)
if method=='MM':
param1 = self.fit_MM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='PM':
param1 = self.fit_PM(data, initial_guess, u = u, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='LMM':
param1 = self.fit_LMM(data, initial_guess, xtol=xtol, maxiter=maxiter, maxfun=maxfun , disp_optimizer=False, disp_fit = False, test_gof = False)
if method=='grid':
param1 = self.grid_search(data, fun_min = lnL, grid_min = grid_min, grid_max = grid_max, n_grid = n_grid)
if not self.check_param(param1):
raise ValueError('Parameters are not valid. Try another initial guess.')
if np.min(data)<self.supp(param1)[0] or np.max(data)>self.supp(param1)[1]:
param1 = self.correct_supp(data, param1)
param = optimize.fmin(lnL,param1,maxfun = maxfun,xtol=xtol, maxiter=maxiter, disp = disp_optimizer)
if disp_fit:
print('')
            print('Initial point for Maximum Likelihood Method: ', param1)
print('Estimated by ', method)
print('')
print('Initial negative log-likelihood function: ', lnL(param1))
            print('Optimized negative log-likelihood function: ', lnL(param))
print('')
print('Parameters: ', param)
if test_gof:
ks, chi2 = self.GoF_tests(param, data, bins_gof)
print('')
print('Goodness-of-Fit')
print(ks)
print(chi2)
self.plot_fitting(data,param,bins = bins_hist)
return
|
np.array(param)
|
numpy.array
|
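# A short end-to-end sketch of the GLD class above: draw a sample from a 'VSL' distribution and
# recover its parameters with the L-moment fit. The parameter values are arbitrary illustrative choices.
import numpy as np

gld = GLD('VSL')
true_param = [0.0, 1.0, 0.5, 0.2]                        # location, scale, skewness, shape
sample = gld.rand(true_param, size=2000, random_state=1)
fitted = gld.fit_LMM(sample, disp_fit=False)             # closed-form L-moment solution for 'VSL'
print(true_param, fitted)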
# my_cgs_worker.py
from pyrates.utility.grid_search import ClusterWorkerTemplate
import os
from pandas import DataFrame
from pyrates.utility import grid_search, welch
import numpy as np
from copy import deepcopy
class MinimalWorker(ClusterWorkerTemplate):
def worker_postprocessing(self, **kwargs):
self.processed_results = DataFrame(data=None, columns=self.results.columns)
for idx, data in self.results.iteritems():
self.processed_results.loc[:, idx] = data * 1e3
self.processed_results.index = self.results.index * 1e-3
class ExtendedWorker(MinimalWorker):
def worker_gs(self, *args, **kwargs):
kwargs_tmp = deepcopy(kwargs)
conditions = kwargs_tmp.pop('conditions')
model_vars = kwargs_tmp.pop('model_vars')
param_grid = kwargs_tmp.pop('param_grid')
results, gene_ids = [], param_grid.index
for c_dict in conditions:
for key in model_vars:
if key in c_dict and type(c_dict[key]) is float:
c_dict[key] = np.zeros((param_grid.shape[0],)) + c_dict[key]
elif key in param_grid:
c_dict[key] = param_grid[key]
param_grid_tmp = DataFrame.from_dict(c_dict)
f = terminate_at_threshold
f.terminal = True
r, self.result_map, sim_time = grid_search(*args, param_grid=param_grid_tmp, events=f, **deepcopy(kwargs_tmp))
r = r.droplevel(2, axis=1)
if any(r.values[-1, :] > 10.0):
invalid_genes = []
for id in param_grid.index:
if r.loc[r.index[-1], ('r_e', f'circuit_{id}')] > 10.0 or \
r.loc[r.index[-1], ('r_i', f'circuit_{id}')] > 10.0:
invalid_genes.append(id)
param_grid.drop(index=id, inplace=True)
kwargs['param_grid'] = param_grid
sim_time = self.worker_gs(*args, **kwargs)
for r in self.results:
for id in invalid_genes:
r[('r_e', f'circuit_{id}')] = np.zeros((r.shape[0],)) + 1e6
r[('r_i', f'circuit_{id}')] =
|
np.zeros((r.shape[0],))
|
numpy.zeros
|
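# Minimal pandas sketch of the rescaling performed in MinimalWorker.worker_postprocessing above:
# result values are multiplied by 1e3 and the time index by 1e-3 (the intended units are an
# assumption; the original worker does not state them explicitly).
import numpy as np
import pandas as pd

results = pd.DataFrame(np.random.rand(5, 2), columns=['r_e', 'r_i'], index=np.arange(5, dtype=float))
processed = results * 1e3                 # scale every result column
processed.index = results.index * 1e-3    # rescale the time axis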
#-------------------------------------------------------------------------------
# Main concept for testing returned arrays:
# 1). create ground truth e.g. with cross_val_predict
# 2). run vecstack
# 3). compare returned arrays with ground truth
# 4). compare arrays from file with ground truth
#-------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
import unittest
from numpy.testing import assert_array_equal
# from numpy.testing import assert_allclose
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from numpy.testing import assert_warns
import os
import glob
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
# from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.datasets import load_boston
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import make_scorer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from vecstack import stacking
from vecstack.core import model_action
n_folds = 5
temp_dir = 'tmpdw35lg54ms80eb42'
boston = load_boston()
X, y = boston.data, boston.target
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Make train/test split by hand to avoid strange errors probably related to testing suit:
# https://github.com/scikit-learn/scikit-learn/issues/1684
# https://github.com/scikit-learn/scikit-learn/issues/1704
# Note: Python 2.7, 3.4 - OK, but 3.5, 3.6 - error
np.random.seed(0)
ind = np.arange(500)
np.random.shuffle(ind)
ind_train = ind[:400]
ind_test = ind[400:]
X_train = X[ind_train]
X_test = X[ind_test]
y_train = y[ind_train]
y_test = y[ind_test]
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
class MinimalEstimator:
"""Has no get_params attribute"""
def __init__(self, random_state=0):
self.random_state = random_state
def __repr__(self):
return 'Demo string from __repr__'
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
def predict_proba(self, X):
return np.zeros(X.shape[0])
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
class TestFuncRegression(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
os.mkdir(temp_dir)
except:
print('Unable to create temp dir')
@classmethod
def tearDownClass(cls):
try:
os.rmdir(temp_dir)
except:
print('Unable to remove temp dir')
def tearDown(self):
# Remove files after each test
files = glob.glob(os.path.join(temp_dir, '*.npy'))
files.extend(glob.glob(os.path.join(temp_dir, '*.log.txt')))
try:
for file in files:
os.remove(file)
except:
print('Unable to remove temp file')
#---------------------------------------------------------------------------
# Testing returned and saved arrays in each mode
#---------------------------------------------------------------------------
def test_oof_pred_mode(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(X_train, y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
# Normally if cleaning is performed there is only one .npy file at given moment
        # But if we have no cleaning there may be more than one file so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_mode(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
S_test_1 = None
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof', random_state = 0, verbose = 0)
# Load OOF from file
# Normally if cleaning is performed there is only one .npy file at given moment
        # But if we have no cleaning there may be more than one file so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_pred_mode(self):
model = LinearRegression()
S_train_1 = None
_ = model.fit(X_train, y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_pred_bag_mode(self):
S_test_temp = np.zeros((X_test.shape[0], n_folds))
        kf = KFold(n_splits = n_folds, shuffle = False)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = LinearRegression()
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1 = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred_bag', random_state = 0, verbose = 0)
# Load OOF from file
# Normally if cleaning is performed there is only one .npy file at given moment
# But if we have no cleaning there may be more then one file so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_pred_bag_mode(self):
S_test_temp = np.zeros((X_test.shape[0], n_folds))
        kf = KFold(n_splits = n_folds, shuffle = False)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = LinearRegression()
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1 = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
S_train_1 = None
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'pred_bag', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing <sample_weight> all ones
#---------------------------------------------------------------------------
def test_oof_pred_mode_sample_weight_one(self):
sw = np.ones(len(y_train))
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict',
fit_params = {'sample_weight': sw}).reshape(-1, 1)
_ = model.fit(X_train, y_train, sample_weight = sw)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0,
sample_weight = sw)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Test <sample_weight> all random
#---------------------------------------------------------------------------
def test_oof_pred_mode_sample_weight_random(self):
np.random.seed(0)
sw = np.random.rand(len(y_train))
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict',
fit_params = {'sample_weight': sw}).reshape(-1, 1)
_ = model.fit(X_train, y_train, sample_weight = sw)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0,
sample_weight = sw)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing <transform_target> and <transform_pred> parameters
#---------------------------------------------------------------------------
def test_oof_pred_mode_transformations(self):
model = LinearRegression()
S_train_1 = np.expm1(cross_val_predict(model, X_train, y = np.log1p(y_train), cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict')).reshape(-1, 1)
_ = model.fit(X_train, np.log1p(y_train))
S_test_1 = np.expm1(model.predict(X_test)).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0,
transform_target = np.log1p, transform_pred = np.expm1)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing <verbose> parameter
#---------------------------------------------------------------------------
def test_oof_pred_mode_verbose_1(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(X_train, y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
models = [LinearRegression()]
S_train_3, S_test_3 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 1)
models = [LinearRegression()]
S_train_4, S_test_4 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 2)
models = [LinearRegression()]
S_train_5, S_test_5 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False,
mode = 'oof_pred', random_state = 0, verbose = 0)
models = [LinearRegression()]
S_train_6, S_test_6 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False,
mode = 'oof_pred', random_state = 0, verbose = 1)
models = [LinearRegression()]
S_train_7, S_test_7 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False,
mode = 'oof_pred', random_state = 0, verbose = 2)
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
assert_array_equal(S_train_1, S_train_4)
assert_array_equal(S_test_1, S_test_4)
assert_array_equal(S_train_1, S_train_5)
assert_array_equal(S_test_1, S_test_5)
assert_array_equal(S_train_1, S_train_6)
assert_array_equal(S_test_1, S_test_6)
assert_array_equal(S_train_1, S_train_7)
assert_array_equal(S_test_1, S_test_7)
#---------------------------------------------------------------------------
# Test <metric> parameter and its default values depending on <regression> parameter
# Important. We use <greater_is_better = True> in <make_scorer> for any error function
# because we need raw scores (without minus sign)
#---------------------------------------------------------------------------
def test_oof_mode_metric(self):
model = LinearRegression()
scorer = make_scorer(mean_absolute_error)
scores = cross_val_score(model, X_train, y = y_train, cv = n_folds,
scoring = scorer, n_jobs = 1, verbose = 0)
mean_str_1 = '%.8f' % np.mean(scores)
std_str_1 = '%.8f' % np.std(scores)
models = [LinearRegression()]
S_train, S_test = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, save_dir=temp_dir,
mode = 'oof', random_state = 0, verbose = 0)
# Load mean score and std from file
        # Normally if cleaning is performed there is only one .log.txt file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.log.txt')))[-1] # take the latest file
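        # The relevant log line is assumed to look roughly like
        #     MEAN     [0.12345678] + [0.01234567]
        # so the bracketed mean and std are recovered below by stripping the
        # first and last character from the corresponding tokens.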
with open(file_name) as f:
for line in f:
if 'MEAN' in line:
split = line.strip().split()
break
mean_str_2 = split[1][1:-1]
std_str_2 = split[3][1:-1]
assert_equal(mean_str_1, mean_str_2)
assert_equal(std_str_1, std_str_2)
#-------------------------------------------------------------------------------
    # Test several models in one run
#-------------------------------------------------------------------------------
def test_oof_pred_mode_2_models(self):
model = LinearRegression()
S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(X_train, y_train)
S_test_1_a = model.predict(X_test).reshape(-1, 1)
model = Ridge(random_state = 0)
S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(X_train, y_train)
S_test_1_b = model.predict(X_test).reshape(-1, 1)
S_train_1 = np.c_[S_train_1_a, S_train_1_b]
S_test_1 = np.c_[S_test_1_a, S_test_1_b]
models = [LinearRegression(),
Ridge(random_state = 0)]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_pred_bag_mode_2_models(self):
# Model a
S_test_temp = np.zeros((X_test.shape[0], n_folds))
        kf = KFold(n_splits = n_folds, shuffle = False)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = LinearRegression()
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1_a = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
model = LinearRegression()
S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
# Model b
S_test_temp = np.zeros((X_test.shape[0], n_folds))
        kf = KFold(n_splits = n_folds, shuffle = False)
for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):
# Split data and target
X_tr = X_train[tr_index]
y_tr = y_train[tr_index]
X_te = X_train[te_index]
y_te = y_train[te_index]
model = Ridge(random_state = 0)
_ = model.fit(X_tr, y_tr)
S_test_temp[:, fold_counter] = model.predict(X_test)
S_test_1_b = np.mean(S_test_temp, axis = 1).reshape(-1, 1)
model = Ridge(random_state = 0)
S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
S_train_1 = np.c_[S_train_1_a, S_train_1_b]
S_test_1 = np.c_[S_test_1_a, S_test_1_b]
models = [LinearRegression(),
Ridge(random_state = 0)]
S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred_bag', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing sparse types CSR, CSC, COO
#---------------------------------------------------------------------------
def test_oof_pred_mode_sparse_csr(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, csr_matrix(X_train), y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(csr_matrix(X_train), y_train)
S_test_1 = model.predict(csr_matrix(X_test)).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, csr_matrix(X_train), y_train, csr_matrix(X_test),
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_pred_mode_sparse_csc(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, csc_matrix(X_train), y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(csc_matrix(X_train), y_train)
S_test_1 = model.predict(csc_matrix(X_test)).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, csc_matrix(X_train), y_train, csc_matrix(X_test),
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
def test_oof_pred_mode_sparse_coo(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, coo_matrix(X_train), y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(coo_matrix(X_train), y_train)
S_test_1 = model.predict(coo_matrix(X_test)).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, coo_matrix(X_train), y_train, coo_matrix(X_test),
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
    # Testing X_train -> CSR, X_test -> COO
#---------------------------------------------------------------------------
def test_oof_pred_mode_sparse_csr_coo(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, csr_matrix(X_train), y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(csr_matrix(X_train), y_train)
S_test_1 = model.predict(coo_matrix(X_test)).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, csr_matrix(X_train), y_train, coo_matrix(X_test),
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
    # Testing X_train -> CSR, X_test -> Dense
#---------------------------------------------------------------------------
def test_oof_pred_mode_sparse_csr_dense(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, csr_matrix(X_train), y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
_ = model.fit(csr_matrix(X_train), y_train)
S_test_1 = model.predict(X_test).reshape(-1, 1)
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, csr_matrix(X_train), y_train, X_test,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof_pred', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
assert_array_equal(S_train_1, S_train_3)
assert_array_equal(S_test_1, S_test_3)
#---------------------------------------------------------------------------
# Testing X_test=None
#---------------------------------------------------------------------------
def test_oof_mode_xtest_is_none(self):
model = LinearRegression()
S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds,
n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)
S_test_1 = None
models = [LinearRegression()]
S_train_2, S_test_2 = stacking(models, X_train, y_train, None,
regression = True, n_folds = n_folds, shuffle = False, save_dir=temp_dir,
mode = 'oof', random_state = 0, verbose = 0)
# Load OOF from file
        # Normally if cleaning is performed there is only one .npy file at any given moment
        # But if we have no cleaning there may be more than one file, so we take the latest
file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file
S = np.load(file_name)
S_train_3 = S[0]
S_test_3 = S[1]
assert_array_equal(S_train_1, S_train_2)
assert_array_equal(S_test_1, S_test_2)
|
assert_array_equal(S_train_1, S_train_3)
|
numpy.testing.assert_array_equal
|
#!/usr/bin/env python
#
# selection.py - The Selection class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`Selection` class, which represents a
selection of voxels in a 3D :class:`.Image`.
"""
import logging
import collections.abc as abc
import numpy as np
import scipy.ndimage.measurements as ndimeas
import fsl.utils.notifier as notifier
import fsleyes.gl.routines as glroutines
log = logging.getLogger(__name__)
class Selection(notifier.Notifier):
"""The ``Selection`` class represents a selection of voxels in a 3D
:class:`.Image`. The selection is stored as a ``numpy`` mask array,
the same shape as the image. Methods are available to query and update
the selection.
Changes to a ``Selection`` can be made through *blocks*, which are 3D
cuboid regions. The following methods allow a block to be
selected/deselected, where the block is specified by a voxel coordinate,
and a block size:
.. autosummary::
:nosignatures:
selectBlock
deselectBlock
The following methods offer more fine grained control over selection
blocks - with these methods, you pass in a block that you have created
yourself, and an offset into the selection, specifying its location:
.. autosummary::
:nosignatures:
setSelection
addToSelection
removeFromSelection
A third approach to making a selection is provided by the
:meth:`selectByValue` method, which allows a selection to be made
in a manner similar to a *bucket fill* technique found in any image
editor.
The related :meth:`invertRegion` method, given a seed location, will
invert the selected state of all voxels adjacent to that location. This
    allows a *fill holes* style workflow, where a region outline is
delineated, and then the interior inverted to select it.
A ``Selection`` object keeps track of the most recent change made through
any of the above methods. The most recent change can be retrieved through
the :meth:`getLastChange` method. The ``Selection`` class inherits from
the :class:`.Notifier` class - you can be notified whenever the selection
changes by registering as a listener.
Finally, the ``Selection`` class offers a few other methods for
convenience:
.. autosummary::
:nosignatures:
getSelection
getSelectionSize
clearSelection
getBoundedSelection
getIndices
"""
def __init__(self, image, display, selection=None):
"""Create a ``Selection`` instance.
:arg image: The :class:`.Image` instance associated with this
``Selection``.
:arg display: The :class:`.Display` instance for the ``image``.
:arg selection: Selection array. If not provided, one is created.
Must be a ``numpy.uint8`` array with the same shape
as ``image``. This array is *not* copied.
"""
self.__image = image
self.__display = display
self.__opts = display.opts
self.__clear = True
self.__lastChangeOffset = None
self.__lastChangeOldBlock = None
self.__lastChangeNewBlock = None
# We keep track of regions of the selection
# that have been modified, as a sequence of
# (xlo, ylo, zlo, xhi, yhi, zhi) values.
# This is used by the getBoundedSelection
# method,
self.__dirty = None
if selection is None:
selection = np.zeros(image.shape[:3], dtype=np.uint8)
elif selection.shape != image.shape[:3] or \
selection.dtype != np.uint8:
raise ValueError('Incompatible selection array: {} ({})'.format(
selection.shape, selection.dtype))
self.__selection = selection
log.debug('{}.init ({})'.format(type(self).__name__, id(self)))
def __del__(self):
"""Prints a log message."""
if log:
log.debug('{}.del ({})'.format(type(self).__name__, id(self)))
@property
def shape(self):
"""Returns the selection shape. """
return self.__selection.shape
def __getitem__(self, key):
"""Convenience wrapper around ``self.getSelection().__getitem__``. """
return self.__selection.__getitem__(key)
def getSelection(self):
"""Returns the selection array.
.. warning:: Do not modify the selection array directly - use the
``Selection`` instance methods
(e.g. :meth:`setSelection`) instead. If you modify the
selection directly through this attribute, the
:meth:`getLastChange` method, and selection notification,
will break.
"""
return self.__selection
def selectBlock(self,
voxel,
boxSize,
axes=(0, 1, 2),
bias=None,
combine=False):
"""Selects the block (sets all voxels to 1) specified by the given
voxel and block size. See the :func:`.routines.voxelBlock` function
for details on the arguments.
:arg combine: Combine this change with the previous stored change
(see :meth:`__storeChange`).
"""
block, offset = glroutines.voxelBlock(
voxel,
self.__selection.shape,
boxSize,
bias=bias,
axes=axes)
self.addToSelection(block, offset, combine)
def deselectBlock(self,
voxel,
boxSize,
axes=(0, 1, 2),
bias=None,
combine=False):
"""De-selects the block (sets all voxels to 0) specified by the given
voxel and box size. See the :func:`.routines.voxelBlock` function
for details on the arguments.
:arg combine: Combine this change with the previous stored change
(see :meth:`__storeChange`).
"""
block, offset = glroutines.voxelBlock(
voxel,
self.__selection.shape,
boxSize,
bias=bias,
axes=axes)
self.removeFromSelection(block, offset, combine)
def setSelection(self, block, offset, combine=False):
"""Copies the given ``block`` into the selection, starting at
``offset``.
:arg block: A ``numpy.uint8`` array containing a selection.
:arg offset: Voxel coordinates specifying the block location.
:arg combine: Combine this change with the previous stored change (see
:meth:`__storeChange`).
"""
self.__updateSelectionBlock(block, offset, combine)
def addToSelection(self, block, offset, combine=False):
"""Adds the selection (via a boolean OR operation) in the given
``block`` to the current selection, starting at ``offset``.
:arg block: A ``numpy.uint8`` array containing a selection.
:arg offset: Voxel coordinates specifying the block location.
:arg combine: Combine this change with the previous stored change (see
:meth:`__storeChange`).
"""
existing = self.__getSelectionBlock(block.shape, offset)
block = np.logical_or(block, existing)
self.__updateSelectionBlock(block, offset, combine)
def removeFromSelection(self, block, offset, combine=False):
"""Clears all voxels in the selection where the values in ``block``
are non-zero.
:arg block: A ``numpy.uint8`` array containing a selection.
:arg offset: Voxel coordinates specifying the block location.
:arg combine: Combine this change with the previous stored change (see
:meth:`__storeChange`).
"""
existing = self.__getSelectionBlock(block.shape, offset)
existing[block != 0] = False
self.__updateSelectionBlock(existing, offset, combine)
def getSelectionSize(self):
"""Returns the number of voxels that are currently selected. """
return self.__selection.sum()
def getBoundedSelection(self):
"""Extracts the smallest region from the :attr:`selection` which
contains all selected voxels.
Returns a tuple containing the region, as a ``numpy.uint8`` array, and
the coordinates specifying its location in the full :attr:`selection`
array.
"""
# If this method is called when a dirty
# region has not been saved (e.g. after
# a call to clearSelection), we fall
# back to manually calculating it, which
# is quite slow.
if self.__dirty is None:
xs, ys, zs = self.__selection.nonzero()
if len(xs) == 0:
xlo = ylo = zlo = xhi = yhi = zhi = 0
else:
xlo = int(xs.min())
ylo = int(ys.min())
zlo = int(zs.min())
xhi = int(xs.max() + 1)
yhi = int(ys.max() + 1)
zhi = int(zs.max() + 1)
self.__dirty = xlo, ylo, zlo, xhi, yhi, zhi
xlo, ylo, zlo, xhi, yhi, zhi = self.__dirty
selection = self.__selection[xlo:xhi, ylo:yhi, zlo:zhi]
return selection, (xlo, ylo, zlo)
def clearSelection(self, restrict=None, combine=False):
"""Clears (sets to 0) the entire selection, or the selection specified
by the ``restrict`` parameter, if it is given.
.. note:: Calling this method when the selection is already empty
will clear the most recently stored change - see
:meth:`getLastChange`.
:arg restrict: An optional sequence of three ``slice`` objects,
specifying the portion of the selection to clear.
:arg combine: Combine this change with the previous stored change (see
:meth:`__storeChange`).
"""
if self.__clear:
self.setChange(None, None)
return
fRestrict = fixSlices(restrict)
offset = [r.start if r.start is not None else 0 for r in fRestrict]
log.debug('Clearing selection ({}): {}'.format(id(self), fRestrict))
block = np.array(self.__selection[fRestrict])
self.__selection[fRestrict] = False
self.__storeChange(block,
np.array(self.__selection[fRestrict]),
offset,
combine)
# Set the internal clear flag to True,
# when the entire selection has been
# cleared, so we can skip subsequent
# redundant clears.
if restrict is None:
self.__clear = True
# Always clear the dirty region - in
# theory we could adjust the dirty
# region by the restrict slices (if
# provided), but this is awkward to
# do. The getBoundedSelection method
# will resort to np.ndarray.nonzero
# if the dirty region is not set.
self.__dirty = None
self.notify()
def getLastChange(self):
"""Returns the most recent change made to this ``Selection``.
A tuple is returned, containing the following:
- A ``numpy.uint8`` array containing the old block value
- A ``numpy.uint8`` array containing the new block value
- Voxel coordinates denoting the block location in the full
:attr:`selection` array.
If there is no stored change this method will return ``(None, None,
None)`` (see also the note in :meth:`clearSelection`).
"""
return (self.__lastChangeOldBlock,
self.__lastChangeNewBlock,
self.__lastChangeOffset)
def setChange(self, block, offset, oldBlock=None):
"""Sets/overwrites the most recently saved change made to this
``Selection``.
"""
self.__lastChangeOldBlock = oldBlock
self.__lastChangeNewBlock = block
self.__lastChangeOffset = offset
def __storeChange(self, old, new, offset, combine=False):
"""Stores the given selection change.
:arg old: A copy of the portion of the :attr:`selection` that
has changed,
:arg new: The new selection values.
:arg offset: Offset into the full :attr:`selection` array
:arg combine: If ``False`` (the default), the previously stored change
will be replaced by the current change. Otherwise the
previous and current changes will be combined.
"""
# Not combining changes (or there
# is no previously stored change).
# We store the change, replacing
# the previous one.
if (not combine) or (self.__lastChangeNewBlock is None):
if log.getEffectiveLevel() == logging.DEBUG:
log.debug('Replacing previously stored change with: '
'[({}, {}), ({}, {}), ({}, {})] ({} selected)'
.format(offset[0], offset[0] + old.shape[0],
offset[1], offset[1] + old.shape[1],
offset[2], offset[2] + old.shape[2],
new.sum()))
self.__lastChangeOldBlock = old
self.__lastChangeNewBlock = new
self.__lastChangeOffset = offset
return
# Otherwise, we combine the old
# change with the new one.
lcOld = self.__lastChangeOldBlock
lcNew = self.__lastChangeNewBlock
lcOffset = self.__lastChangeOffset
# The old block might be None, which
# implies all zeros
if lcOld is None:
lcOld = np.zeros(lcNew.shape, dtype=lcNew.dtype)
# Calculate/organise low/high indices
# for each change set:
#
# - one for the current change (passed
# in to this method call)
#
# - One for the last stored change
#
# - One for the combination of the above
currIdxs = []
lastIdxs = []
cmbIdxs = []
for ax in range(3):
currLo = offset[ ax]
lastLo = lcOffset[ax]
currHi = offset[ ax] + old .shape[ax]
lastHi = lcOffset[ax] + lcOld.shape[ax]
cmbLo = min(currLo, lastLo)
cmbHi = max(currHi, lastHi)
currIdxs.append((int(currLo), int(currHi)))
lastIdxs.append((int(lastLo), int(lastHi)))
cmbIdxs .append((int(cmbLo), int(cmbHi)))
# Make slice objects for each of the indices,
# to make indexing easier. The last/current
# slice objects are defined relative to the
# combined space of both.
cmbSlices = tuple([slice(lo, hi) for lo, hi in cmbIdxs])
lastSlices = tuple([slice(lLo - cmLo, lHi - cmLo)
for ((lLo, lHi), (cmLo, cmHi))
in zip(lastIdxs, cmbIdxs)])
currSlices = tuple([slice(cuLo - cmLo, cuHi - cmLo)
for ((cuLo, cuHi), (cmLo, cmHi))
in zip(currIdxs, cmbIdxs)])
cmbOld = np.array(self.__selection[cmbSlices])
cmbNew = np.array(cmbOld)
cmbOld[lastSlices] = lcOld
cmbNew[lastSlices] = lcNew
cmbNew[currSlices] = new
if log.getEffectiveLevel() == logging.DEBUG:
log.debug('Combining changes: '
'[({}, {}), ({}, {}), ({}, {})] ({} selected) + '
'[({}, {}), ({}, {}), ({}, {})] ({} selected) = '
'[({}, {}), ({}, {}), ({}, {})] ({} selected)'.format(
lastIdxs[0][0], lastIdxs[0][1],
lastIdxs[1][0], lastIdxs[1][1],
lastIdxs[2][0], lastIdxs[2][1],
lcNew.sum(),
currIdxs[0][0], currIdxs[0][1],
currIdxs[1][0], currIdxs[1][1],
currIdxs[2][0], currIdxs[2][1],
new.sum(),
cmbIdxs[0][0], cmbIdxs[0][1],
cmbIdxs[1][0], cmbIdxs[1][1],
cmbIdxs[2][0], cmbIdxs[2][1],
cmbNew.sum()))
self.__lastChangeOldBlock = cmbOld
self.__lastChangeNewBlock = cmbNew
self.__lastChangeOffset = cmbIdxs[0][0], cmbIdxs[1][0], cmbIdxs[2][0]
def getIndices(self, restrict=None):
"""Returns a :math:`N \\times 3` array which contains the
coordinates of all voxels that are currently selected.
If the ``restrict`` argument is not provided, the entire
selection image is searched.
:arg restrict: A ``slice`` object specifying a sub-set of the
full selection to consider.
"""
restrict = fixSlices(restrict)
xs, ys, zs = np.where(self.__selection[restrict])
result = np.vstack((xs, ys, zs)).T
for ax in range(3):
off = restrict[ax].start
if off is not None:
result[:, ax] += off
return result
def selectByValue(self,
seedLoc,
precision=None,
searchRadius=None,
local=False,
restrict=None,
combine=False):
"""A *bucket fill* style selection routine.
:arg combine: Combine with the previous stored change (see
:meth:`__storeChange`).
See the :func:`selectByValue` function for details on the other
arguments.
:returns: The generated selection array (a ``numpy`` boolean array),
and offset of this array into the full selection image.
"""
data = self.__image[self.__opts.index()]
block, offset = selectByValue(data,
seedLoc,
precision,
searchRadius,
local,
restrict)
self.setSelection(block, offset, combine)
return block, offset
def invertRegion(self, seedLoc, restrict=None):
"""Inverts the selected state of the region adjacent to ``seedLoc``.
See the :func:`selectByValue` function for details on the other
arguments.
"""
data = self.__selection
val = data[tuple(seedLoc)]
block, offset = selectByValue(data,
seedLoc,
0.5,
local=True,
restrict=restrict)
if val == 0: self.addToSelection( block, offset)
else: self.removeFromSelection(block, offset)
return block, offset
def selectLine(self,
from_,
to,
boxSize,
axes=(0, 1, 2),
bias=None,
combine=False):
"""Selects a line from ``from_`` to ``to``.
:arg combine: Combine with the previous stored change (see
:meth:`__storeChange`).
See the :func:`selectLine` function for details on the other arguments.
"""
block, offset = selectLine(self.__selection.shape,
self.__image.pixdim[:3],
from_,
to,
boxSize,
axes,
bias)
self.addToSelection(block, offset, combine)
return block, offset
def deselectLine(self,
from_,
to,
boxSize,
axes=(0, 1, 2),
bias=None,
combine=False):
"""Deselects a line from ``from_`` to ``to``.
:arg combine: Combine with the previous stored change (see
:meth:`__storeChange`).
See the :func:`selectLine` function for details on the other arguments.
"""
block, offset = selectLine(self.__selection.shape,
self.__image.pixdim[:3],
from_,
to,
boxSize,
axes,
bias)
self.removeFromSelection(block, offset, combine)
return block, offset
def transferSelection(self, destImg, destDisplay):
"""Re-samples the current selection into the destination image
space.
Each ``Selection`` instance is in terms of a specific :class:`.Image`
instance, which has a specific dimensionality. In order to apply
        a ``Selection`` which is in terms of one ``Image`` to a different
        ``Image``, the selection array needs to be re-sampled.
:arg destImg: The :class:`.Image` that the selection is to be
transferred to.
:arg destDisplay: The :class:`.Display` instance associated with
``destImg``.
:returns: a new ``numpy.uint8`` array, suitable for creating a new
``Selection`` object for use with the given ``destImg``.
"""
raise NotImplementedError('todo')
def __updateSelectionBlock(self, block, offset, combine=False):
"""Replaces the current selection at the specified ``offset`` with the
given ``block``.
The old values for the block are stored, and can be retrieved via the
:meth:`getLastChange` method.
:arg block: A ``numpy.uint8`` array containing the new selection
values.
:arg offset: Voxel coordinates specifying the location of ``block``.
:arg combine: Combine with the previous stored change (see
:meth:`__storeChange`).
"""
if block.size == 0:
return
if offset is None:
offset = (0, 0, 0)
xlo, ylo, zlo = [int(o) for o in offset]
xhi = int(xlo + block.shape[0])
yhi = int(ylo + block.shape[1])
zhi = int(zlo + block.shape[2])
self.__storeChange(
np.array(self.__selection[xlo:xhi, ylo:yhi, zlo:zhi]),
np.array(block, dtype=np.uint8),
offset,
combine)
log.debug('Updating selection (%i) block [%i:%i, %i:%i, %i:%i]',
id(self), xlo, xhi, ylo, yhi, zlo, zhi)
self.__selection[xlo:xhi, ylo:yhi, zlo:zhi] = block
# Save/update the dirty region - the region
# of the selection that has been modified
# and therefore is potentially non-zero.
if self.__dirty is None:
self.__dirty = [xlo, ylo, zlo, xhi, yhi, zhi]
else:
self.__dirty[:3] = np.min([self.__dirty[:3],
[xlo, ylo, zlo]], axis=0)
self.__dirty[3:] = np.max([self.__dirty[3:],
[xhi, yhi, zhi]], axis=0)
self.__clear = False
self.notify()
def __getSelectionBlock(self, size, offset):
"""Extracts a block from the selection image starting from the
specified ``offset``, and of the specified ``size``.
"""
        xlo, ylo, zlo = [int(o) for o in offset]
        xhi = xlo + int(size[0])
        yhi = ylo + int(size[1])
        zhi = zlo + int(size[2])
return np.array(self.__selection[xlo:xhi, ylo:yhi, zlo:zhi])
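# A minimal usage sketch (illustrative only; ``img`` and ``display`` stand for
# an existing Image/Display pair and are not defined in this module):
#
#     sel = Selection(img, display)
#     sel.selectBlock((10, 10, 10), boxSize=3)       # select a 3x3x3 block
#     block, offset = sel.getBoundedSelection()      # smallest enclosing region
#     oldBlk, newBlk, off = sel.getLastChange()      # most recent change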
def fixSlices(slices):
"""A convenience function used by :meth:`selectByValue`,
:meth:`clearSelection` and :meth:`getIndices`, to sanitise their
``restrict`` parameter.
"""
if slices is None:
slices = [None, None, None]
if len(slices) != 3:
raise ValueError('Three slice objects are required')
for i, s in enumerate(slices):
if s is None:
slices[i] = slice(None)
return tuple(slices)
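# For example (illustrative): fixSlices(None) returns
# (slice(None), slice(None), slice(None)), and
# fixSlices([slice(0, 5), None, None]) returns
# (slice(0, 5), slice(None), slice(None)).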
def selectByValue(data,
seedLoc,
precision=None,
searchRadius=None,
local=False,
restrict=None):
"""A *bucket fill* style selection routine. Given a seed location,
finds all voxels which have a value similar to that of that location.
The current selection is replaced with all voxels that were found.
:arg seedLoc: Voxel coordinates specifying the seed location
:arg precision: Voxels which have a value that is less than
``precision`` from the seed location value will
be selected.
:arg searchRadius: May be either a single value, or a sequence of
three values - one for each axis. If provided, the
search is limited to a sphere (in the voxel
coordinate system), centred on the seed location,
with the specified ``searchRadius`` (in voxels). If
not provided, the search will cover the entire
image space.
:arg local: If ``True``, a voxel will only be selected if it
is adjacent to an already selected voxel (using
8-neighbour connectivity).
    :arg restrict:     An optional sequence of three ``slice`` objects,
specifying a sub-set of the image to search.
:returns: The generated selection array (a ``numpy`` boolean array),
and offset of this array into the data.
"""
if precision is not None and precision < 0:
precision = 0
shape = data.shape
seedLoc = np.array(seedLoc)
value = float(data[seedLoc[0], seedLoc[1], seedLoc[2]])
# Search radius may be either None, a scalar value,
# or a sequence of three values (one for each axis).
# If it is one of the first two options (None/scalar),
# turn it into the third.
if searchRadius is None:
searchRadius = np.array([0, 0, 0])
elif not isinstance(searchRadius, abc.Sequence):
searchRadius = np.array([searchRadius] * 3)
searchRadius = np.ceil(searchRadius)
searchOffset = (0, 0, 0)
# Reduce the data set if
# restrictions have been
# specified
if restrict is not None:
restrict = fixSlices(restrict)
        xs, xe = restrict[0].start, restrict[0].stop
        ys, ye = restrict[1].start, restrict[1].stop
        zs, ze = restrict[2].start, restrict[2].stop
if xs is None: xs = 0
if ys is None: ys = 0
if zs is None: zs = 0
if xe is None: xe = data.shape[0]
if ye is None: ye = data.shape[1]
if ze is None: ze = data.shape[2]
        # The seed location has to be in the sub-set
        # of the image specified by the restrictions
if seedLoc[0] < xs or seedLoc[0] >= xe or \
seedLoc[1] < ys or seedLoc[1] >= ye or \
seedLoc[2] < zs or seedLoc[2] >= ze:
raise ValueError('Seed location ({}) is outside '
'of restrictions ({})'.format(
seedLoc, ((xs, xe), (ys, ye), (zs, ze))))
data = data[restrict]
shape = data.shape
searchOffset = [xs, ys, zs]
seedLoc = [sl - so for sl, so in zip(seedLoc, searchOffset)]
# No search radius - search
# through the entire image
if np.any(searchRadius == 0):
searchSpace = data
searchMask = None
# Search radius specified - limit
# the search space, and specify
# an ellipsoid mask with the
# specified per-axis radii
else:
ranges = [None, None, None]
slices = [None, None, None]
# Calculate xyz indices
# of the search space
for ax in range(3):
idx = seedLoc[ ax]
rad = searchRadius[ax]
lo = int(round(idx - rad))
hi = int(round(idx + rad + 1))
if lo < 0: lo = 0
if hi > shape[ax] - 1: hi = shape[ax]
ranges[ax] = np.arange(lo, hi)
slices[ax] = slice( lo, hi)
xs, ys, zs =
|
np.meshgrid(*ranges, indexing='ij')
|
numpy.meshgrid
|
"""
This script can be used to generate conic problems. Give the script the problem
structure and where to save it, and it will generate a conic problem for you.
You can also specify the problem's format (sparse | dense).
The problem structure parameters have the following format:
{
'sol_dim': [min[, max]], # The solution dimension
'A_dim': [min[, max]], # The number of lines in A
'Q_dim': [min[, max]], # The number of lines in Q matrices
'Q_len': [min[, max]], # The number of conic constraints
'type': 'sparse' | 'dense', # The type of the problem
'density': 0 <= nb <= 1 # The density of the matrices if type == sparse
}
The list items represent a range (min and max). If only one element is found in
a list, the generator is constrained to use that exact number.
"""
import os
import sys
import time
import argparse
import logging
import pprint as p
import json
from enum import Enum
from random import randint, random
import scipy.sparse as ss
import numpy as np
import matrix_ops as mo
from tqdm import trange
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
DEF_PROBLEM_PARAMS = {
'sol_dim': [10],
'A_dim': [10],
'Q_dim': [4],
'Q_len': [10],
'type': 'sparse',
'density': 0.4
}
class ProblemType(Enum):
"""
This enum represents the Problem Type.
"""
SPARSE = 'sparse'
DENSE = 'dense'
def generate_problem(params):
"""
This function generates the matrices that represents the conic problem.
The matrices are returned in the defined format.
Args:
params: A dict that contains the problem specifications. It must
have the format of DEF_PROBLEM_PARAMS
Returns:
A tuple containing the generated matrices
Example:
>>> generate_problem(params)
(b, c, Q_list)
"""
# Randomize the parameters except for the Q_dim
rand_params = dict(params)
for key, value in rand_params.items():
if key != 'Q_dim' and isinstance(value, list):
rand_params[key] = randint(*value)
_logger.info('Auto generating matrices with params:')
p.pprint(rand_params)
problem_type = ProblemType(rand_params['type'])
# Compute the solution, c and A matrices
if problem_type == ProblemType.DENSE:
sol = np.random.rand(rand_params['sol_dim'], 1)
c = np.random.rand(1, rand_params['sol_dim'])
A = np.random.rand(rand_params['A_dim'], rand_params['sol_dim'])
Q_creator_fcn = mo.create_dense_Q
else:
sol = ss.rand(rand_params['sol_dim'], 1, density=rand_params['density'],
format='csr')
c = ss.rand(1, rand_params['sol_dim'], density=rand_params['density'],
format='csc')
A = ss.rand(rand_params['A_dim'], rand_params['sol_dim'],
density=rand_params['density'], format='csr')
Q_creator_fcn = mo.create_sparse_Q
# Compute the linear constraints
b = A.dot(sol)
# Put the linear constraint A matrix into the defined format
Q_list = [Q_creator_fcn(A)]
del A
# Generate conic constraints and put them into the right format
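    # Each conic constraint has the second-order-cone form
    # ||Q x + q||_2 <= f^T x + d. Choosing
    # d = ||Q sol + q||_2 - f^T sol + random() keeps the generated solution
    # feasible for every constraint; this mirrors the computation in both the
    # dense and sparse branches below.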
for _ in trange(rand_params['Q_len'], mininterval=3):
dim = randint(*rand_params['Q_dim'])
if problem_type == ProblemType.DENSE:
Q = np.random.rand(dim, rand_params['sol_dim'])
q = np.random.rand(dim, 1)
f = np.random.rand(1, rand_params['sol_dim'])
# f = np.zeros((1, rand_params['sol_dim']))
d = np.linalg.norm(Q.dot(sol) + q, 2) - f.dot(sol) + random()
else:
Q = ss.rand(dim, rand_params['sol_dim'],
density=rand_params['density'], format='csr')
q = ss.rand(dim, 1, density=rand_params['density'], format='csr')
f = ss.rand(1, rand_params['sol_dim'],
density=rand_params['density'], format='csc')
print(f.dot(sol).data)
print(mo.sparse_vec_norm2(Q.dot(sol) + q, 2))
aux_var = f.dot(sol).data
if aux_var:
d = mo.sparse_vec_norm2(Q.dot(sol) + q, 2) - aux_var[0] + random()
else:
                d = mo.sparse_vec_norm2(Q.dot(sol) + q, 2) + random()
Q_list.append(Q_creator_fcn(Q, q, f, d))
del Q, q, f, d
# Put b and c vectors into the defined format
if problem_type == ProblemType.SPARSE:
b = ss.vstack([b, ss.csr_matrix([[1]])], format='csr')
c = ss.hstack([c, ss.csr_matrix([[0]])], format='csc')
else:
b = np.vstack((b, np.array([[1]])))
c = np.hstack((c,
|
np.array([[0]])
|
numpy.array
|
""" Tests for the fileio.jcampdx submodule """
import os
import numpy as np
import nmrglue as ng
from setup import DATA_DIR
def test_jcampdx1():
'''JCAMP-DX read: format testset'''
cases = []
# these 4 files have exactly the same datapoints
cases.append("BRUKAFFN.DX") # 0
cases.append("BRUKPAC.DX") # 1
cases.append("BRUKSQZ.DX") # 2
cases.append("TEST32.DX") # 3
    # the rest have the same data as the first ones, but slightly
    # different values, probably due to some processing steps
cases.append("BRUKNTUP.DX") # 4
cases.append("BRUKDIF.DX") # 5
cases.append("TESTSPEC.DX") # 6
cases.append("TESTNTUP.DX") # 7
npoints_target = 16384
# target data values are from the BRUKAFFN.DX file
# which is most human-readable
# some values from the beginning:
target_first20 = [
2259260, -5242968, -7176216, -1616072,
10650432, 4373926, -3660824, 2136488,
8055988, 1757248, 3559312, 1108422,
-5575546, -233168, -1099612, -4657542,
-4545530, -1996712, -5429568, -7119772]
# some values from the middle
target_7144_7164 = [
4613558, 9603556, 11823620, 3851634,
14787192, 17047672, 34585306, 69387092,
307952794, 542345870, 143662704, 52472738,
29157730, 12017988, 10142310, -6518692,
11292386, 4692342, 2839598, 4948336]
# some values from the end:
target_last20 = [
-2731004, 2823836, 1542934, 8096410,
1143092, -5356388, 4028632, 121858,
3829486, 5562002, -3851528, 919686,
1060812, -4446420, -716388, 2080534,
7145886, 11400102, 5012684, 1505988]
for i, case in enumerate(cases):
print(case)
# read
casepath = os.path.join(DATA_DIR, "jcampdx", case)
dic, data = ng.jcampdx.read(casepath)
# check some dic entries:
assert "DATATYPE" in dic
assert "DATA TYPE" not in dic
assert "END" not in dic
if "BRUK" in case:
assert dic["DATATYPE"][0] == "NMR Spectrum"
assert dic["$SOLVENT"][0] == "<MeOH>" # single $ is not comment
assert "Bruker" not in dic
assert "Bruker NMR JCAMP-DX V1.0" in dic["_comments"] # comment
# check data point counts:
if "NTUP" not in case: # no ##NPOINTS in NTUPLES format
npoints_dic = int(dic["NPOINTS"][0])
assert npoints_dic == npoints_target
npoints_read = len(data)
else: # NTUPLES has both real & imag arrays
npoints_read = len(data[0]) # R
assert len(data[1]) == npoints_target # I
data = data[0]
assert npoints_read == npoints_target
# check data:
epsilon_e = 1e-9
epsilon_r = 15000
for idx in range(20):
print(target_first20[idx], data[idx])
if i < 4: # exactly same data
assert np.abs(target_first20[idx]-data[idx]) < epsilon_e
else: # roughly same data
assert np.abs(target_first20[idx]-data[idx]) < epsilon_r
for idx in range(20):
dslice = data[7144:7164]
print(target_7144_7164[idx], dslice[idx])
if i < 4: # exactly same data
assert np.abs(target_7144_7164[idx]-dslice[idx]) < epsilon_e
else: # roughly same data
assert np.abs(target_7144_7164[idx]-dslice[idx]) < epsilon_r
for idx in range(20):
dslice = data[-20:]
print(target_last20[idx], dslice[idx])
if i < 4: # exactly same data
assert np.abs(target_last20[idx]-dslice[idx]) < epsilon_e
else: # roughly same data
assert np.abs(target_last20[idx]-dslice[idx]) < epsilon_r
# check udic:
udic = ng.jcampdx.guess_udic(dic, data)
assert np.abs(udic[0]["obs"]-100.4) < epsilon_e
assert np.abs(udic[0]["sw"]-24038.5) < epsilon_e
assert udic[0]["size"] == npoints_target
assert udic[0]["label"] == "13C"
def test_jcampdx2():
'''JCAMP-DX read: miscellaneous files '''
cases = []
# check the following values from each read:
# npoints, first, last, freq, sweep
# note: first and last are raw values from datalines for convenience,
# i.e. not scaled with YFACTORS
cases.append(("TESTFID.DX", 16384, 573, -11584, 100.4, 0.6815317))
cases.append(("bruker1.dx", 16384, -5, -51, 200.13, 4098.3606557377))
cases.append(("bruker2.dx", 16384, 42, 422, 300.13336767, 6024.096385479))
cases.append(("bruker3.dx", 16384, 22, -313, 300.13336729, 6024.096385479))
cases.append(("aug07.dx",
4096, -22288, -25148, 400.13200065, 4006.41025641027))
cases.append(("aug07b.dx",
4096, -324909, -205968, 400.13200065, 4006.41025641027))
cases.append(("aug07c.dx",
4096, -322709, -64216, 400.13200065, 4006.41025641027))
cases.append(("aug07d.dx",
4096, -21208, 3029, 400.13200065, 4006.41025641027))
cases.append(("aug07e.dx",
4096, -501497, 79397, 400.13200065, 4006.41025641027))
epsilon = 1e-9
for case in cases:
print(case[0])
# read
casepath = os.path.join(DATA_DIR, "jcampdx", case[0])
dic, data = ng.jcampdx.read(casepath)
if isinstance(data, list):
data = data[0] # for data with both R&I, check only R
# since first and last are raw values, do yfactor
# back-scaling here
is_ntuples = ng.jcampdx.get_is_ntuples(dic)
if is_ntuples:
yfactor_r, _yfactor_i = ng.jcampdx.find_yfactors(dic)
data = data / yfactor_r
else:
yfactor = float(dic["YFACTOR"][0])
data = data / yfactor
# check data
assert len(data) == case[1]
assert np.abs(data[0]-case[2]) < epsilon
assert np.abs(data[-1]-case[3]) < epsilon
# check udic
udic = ng.jcampdx.guess_udic(dic, data)
assert np.abs(udic[0]["obs"]-case[4]) < epsilon
assert
|
np.abs(udic[0]["sw"]-case[5])
|
numpy.abs
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import Ordination, OrdinationResults
from ._utils import corr, svd_rank, scale
class CCA(Ordination):
r"""Compute constrained (also known as canonical) correspondence
analysis.
Canonical (or constrained) correspondence analysis is a
multivariate ordination technique. It appeared in community
ecology [1]_ and relates community composition to the variation in
the environment (or in other factors). It works from data on
abundances or counts of individuals and environmental variables,
and outputs ordination axes that maximize niche separation among
species.
It is better suited to extract the niches of taxa than linear
multivariate methods because it assumes unimodal response curves
(habitat preferences are often unimodal functions of habitat
variables [2]_).
As more environmental variables are added, the result gets more
similar to unconstrained ordination, so only the variables that
are deemed explanatory should be included in the analysis.
Parameters
----------
    Y : array_like
        Community data matrix of shape (n, m): a contingency table for m
        species at n sites.
    X : array_like
        Constraining matrix of shape (n, q): q quantitative environmental
        variables at n sites.
Notes
-----
The algorithm is based on [3]_, \S 11.2, and is expected to give
the same results as ``cca(Y, X)`` in R's package vegan, except
that this implementation won't drop constraining variables due to
perfect collinearity: the user needs to choose which ones to
input.
Canonical *correspondence* analysis shouldn't be confused with
canonical *correlation* analysis (CCorA, but sometimes called
CCA), a different technique to search for multivariate
relationships between two datasets. Canonical correlation analysis
is a statistical tool that, given two vectors of random variables,
finds linear combinations that have maximum correlation with each
other. In some sense, it assumes linear responses of "species" to
"environmental variables" and is not well suited to analyze
ecological data.
In data analysis, ordination (or multivariate gradient analysis)
complements clustering by arranging objects (species, samples...)
along gradients so that similar ones are closer and dissimilar
ones are further. There's a good overview of the available
techniques in http://ordination.okstate.edu/overview.htm.
See Also
--------
CA
RDA
References
----------
.. [1] <NAME>, "Canonical Correspondence Analysis: A
New Eigenvector Technique for Multivariate Direct Gradient
Analysis", Ecology 67.5 (1986), pp. 1167-1179.
.. [2] <NAME> and <NAME>, "Canonical
correspondence analysis and related multivariate methods in
aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
.. [3] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
short_method_name = 'CCA'
long_method_name = 'Canonical Correspondence Analysis'
def __init__(self, Y, X, site_ids, species_ids):
self.Y = np.asarray(Y, dtype=np.float64)
self.X =
|
np.asarray(X, dtype=np.float64)
|
numpy.asarray
|
import numpy as np
from matplotlib import pyplot as pl
import greensinversion
import greensconvolution
# Simple function returning 0 curvature for everything
# and the Pythagorean theorem for line length
def eval_linelength_avgcurvature_mirroredbox(boxu1,boxv1,boxu2,boxv2,u1,v1,u2,v2):
linelength=np.sqrt((u1-u2)**2.0 + (v1-v2)**2.0)
shape=np.broadcast(u1,v1,u2,v2).shape
avgcurvature=
|
np.zeros(shape,dtype='d')
|
numpy.zeros
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# <License info will go here...>
#
# Written: 10-Apr-2011
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# This module implements Richard Schwartz's design for a time test.
#
# TODO
# 1] In Test 3, sort works in place so the second sort does not need to do
# any work. Need to give it more arrays to sort.
#pylint: disable=W0404
'''Richard Schwartz's time test (equivalent of time_testr.pro)
The tests are
Test 1 - Matrix Multiplication Large Arrays (500,500) 10*scale_factor times
Test 2 - Matrix Multiplication Small Array (50,50) 10000*scale_factor times
Test 3 - Sorting 1 million elements 10*scale_factor times
Test 4 - Moving 1 million elements 1000*scale_factor times
Test 5 - indirect addressing 1 million elements 100*scale_factor times
Test 6 - shifting 1 million elements 1000*scale_factor times
Test 7 - cosine 1 million elements 100*scale_factor times
Test 8 - alog 1 million elements 100*scale_factor times
Test 9 - writing and reading bytarr(1e6) 1000*scale_factor times
'''
import numpy as np
import sys
import benchmark
import os
import sys
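# A minimal, self-contained sketch of Test 1 from the docstring above (large
# 500x500 matrix multiplication repeated 10*scale_factor times). The helper
# name and the use of time.time() are illustrative only and are not part of
# the benchmark harness used below.
def _sketch_test1(scale_factor=1, siz=500):
    import time
    a = np.arange(siz ** 2, dtype=np.float32).reshape(siz, siz)
    start = time.time()
    for _ in range(10 * scale_factor):
        a.dot(a)  # (500, 500) matrix product
    return time.time() - start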
def main():
"""Main application"""
timer = benchmark.BenchmarkTimer()
options = timer.parse_arguments()
timer.print_header("TIME_TESTR(ichard)")
run_tests(timer, options.scale_factor)
timer.print_summary()
def run_tests(timer, scale_factor):
'''Go through each test and print out the results'''
#nofileio = True
siz = 500
a = np.arange(siz**2, dtype=np.float32).reshape(siz, siz)
b =
|
np.arange(siz**2, dtype=np.float32)
|
numpy.arange
|
#!/usr/bin/env python3
#
# JM: 11 Apr 2018
#
# the stafft.f90 adapted for python
# standalone and it shouldn't depend on anything
#
#-------------------------------------------------------------------------------
# Fourier transform module.
# This is not a general purpose transform package but is designed to be
# quick for arrays of length 2^n. It will work if the array length is of
# the form 2^i * 3^j * 4^k * 5^l * 6^m (integer powers obviously).
#
# Minimal error-checking is performed by the code below. The only check is that
# the initial factorisation can be performed.
# Therefore if the transforms are called with an array of length <2, or a trig array
# not matching the length of the array to be transformed the code will fail in a
# spectacular way (eg. Seg. fault or nonsense returned).
# It is up to the calling code to ensure everything is called sensibly.
# The reason for stripping error checking is to speed up the backend by performing
# less if() evaluations - as errors in practice seem to occur very rarely.
# So the good news is this should be a fast library - the bad is that you may have to pick
# around in it if there are failures.
#
# To initialise the routines call init(n,factors,trig,ierr).
# This fills a factorisation array (factors), and a sin/cos array (trig).
# These must be kept in memory by the calling program.
# The init routine can be called multiple times with different arrays if more than
# one length of array is to be transformed.
# If a factorisation of the array length n cannot be found (as specified above)
# then the init routine will exit immediately and the integer ierr will be set to 1.
# If the init returns with ierr=0 then the call was successful.
#
# Top-level subroutines contained in this module are:
# 1) initfft(n,factors,trig) :
# Performs intialisation of the module, by working out the factors of n (the FFT length).
# This will fail if n is not factorised completely by 2,3,4,5,6.
# The trig array contains the necessary cosine and sine values.
# Both arrays passed to init **must** be kept between calls to routines in this module.
# 2) forfft(m,n,x,trig,factors) :
# This performs a FFT of an array x containing m vectors of length n.
# The transform length is n.
# The inverse of this transform is obtained by revfft.
# 3) revfft(m,n,x,trig,factors) :
# This performs an inverse FFT of an array x containing m vectors of length n.
# The transform length is n.
# The inverse of this transform is forfft.
# 4) dct(m,n,x,trig,factors) :
# This performs a discrete cosine transform of an array x containing m vectors of length n.
# The transform length is n.
# This routine calls forfft and performs pre- and post- processing to obtain the transform.
# This transform is its own inverse.
# 5) dst(m,n,x,trig,factors) :
# This performs a discrete sine transform of an array x containing m vectors of length n.
# The transform length is n.
# This routine calls forfft and performs pre- and post- processing to obtain the transform.
# This transform is its own inverse.
#
# The storage of the transformed array is in 'Hermitian form'. This means that, for the jth vector
# the values x(j,1:nw) contain the cosine modes of the transform, while the values x(j,nw+1:n) contain
# the sine modes (in reverse order ie. wave number increasing from n back to nw+1).
# [Here, for even n, nw=n/2, and for odd n, nw=(n-1)/2].
from sys import exit
from numpy import array, zeros, pi, sin, cos, mod, sqrt
#----------------------------
def initfft(n):
"""
Subroutine performs initialisation work for all the transforms.
It calls routines to factorise the array length n and then sets up
a trig array full of sin/cos values used in the transform backend.
Input:
n = an integer
Returns:
factors = factors of n in a length 5 array
trig = array of 2*n sin/cos values used by the transform backend
"""
# First factorise n:
factors, ierr = factorisen(n)
# Return if factorisation unsuccessful:
if (ierr == 1):
# Catastrophic end to run if factorisation fails:
print('****************************')
print(' Factorisation not possible.')
print(' Only factors from 2-6 allowed.')
print(' STOPPING...')
print('****************************')
exit("breaking in stafft/initfft")
# Define list of factors array:
fac = array((6, 4, 2, 3, 5))
# Define constants needed in trig array definition:
ftwopin = 2.0 * pi / n
rem = n
## TO FIX: there is a bug here, the outputs don't agree with the fortran one
m = 0 #?? fortran one starts at 1 but this is going to be an index
trig = zeros(2 * n)
for i in range(5):
for j in range(int(factors[i])):
rem /= fac[i]
for k in range(1, fac[i]):
for l in range(int(rem)):
trig[m] = ftwopin * (k * l)
m += 1
ftwopin *= fac[i]
for i in range(n-1):
trig[n+i] = -sin(trig[i])
trig[i ] = cos(trig[i])
return (factors, trig)
#============================================
def factorisen(n):
"""
Subroutine to factorise factors of n
Input:
n = an integer
Returns:
factors = factors of n in a length 5 array
ierr = 0 if ok otherwise 1 if n has factors other than 2,3,4,5,6
"""
ierr = 0
# Initialiase factors array:
factors = zeros(5)
rem = n
# Find factors of 6:
while (mod(rem, 6) == 0):
factors[0] += 1
rem /= 6
if (rem == 1):
return (factors, ierr)
# Find factors of 4:
while (mod(rem, 4) == 0):
factors[1] += 1
rem /= 4
if (rem == 1):
return (factors, ierr)
# Find factors of 2:
while (mod(rem, 2) == 0):
factors[2] += 1
rem /= 2
if (rem == 1):
return (factors, ierr)
# Find factors of 3:
while (mod(rem, 3) == 0):
factors[3] += 1
rem /= 3
if (rem == 1):
return (factors, ierr)
# Find factors of 5:
while (mod(rem, 5) == 0):
factors[4] += 1
rem /= 5
if (rem == 1):
return (factors, ierr)
# If code reaches this point factorisation has
# failed - return error code in ierr:
ierr = 1
return (factors, ierr)
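# A minimal sketch of the initialisation step described in the module header:
# 96 = 6 * 4 * 4 factorises into the allowed factors, whereas a prime such as
# 97 does not. The chosen lengths are illustrative only.
if __name__ == "__main__":
    factors, trig = initfft(96)
    print(factors)      # one factor of 6 and two of 4: [1. 2. 0. 0. 0.]
    print(len(trig))    # the trig table has length 2 * n = 192
    _, ierr = factorisen(97)
    print(ierr)         # 1 -> 97 has factors other than 2,3,4,5,6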
#============================================
def forfft(m, n, x, trig, factors):
"""
Main physical to spectral (forward) FFT routine.
Performs m transforms of length n in the array x which is dimensioned x(m,n).
The arrays trig and factors are filled by the init routine and
should be kept from call to call.
Backend consists of mixed-radix routines, with 'decimation in time'.
Transform is stored in Hermitian form.
Input:
m = number of vectors to transform
n = length of each transform
x = input array containing m vectors of length n (dimensioned x(m,n))
trig = trig array filled by the init routine
factors = factorisation array filled by the init routine
Returns:
xhat = transformed array, stored in Hermitian form
"""
# #Arguments declarations:
#double precision:: x(0:m*n-1),trig(0:2*n-1)
#integer:: m,n,factors(5)
# #Local declarations:
#double precision:: wk(0:m*n-1),normfac
#integer:: i,rem,cum,iloc
#logical:: orig
# Initialise flip/flop logical and counters
orig = True
rem = n
cum = 1
# Use factors of 5:
for i in range(int(factors[4])):
rem /= 5
iloc = int((rem - 1) * 5 * cum)
if orig:
print("orig")
print(trig[iloc])
print(trig[n + iloc])
x, wk = forrdx5(int(m * rem), cum, trig[iloc], trig[n + iloc])
else:
print("not orig")
print(trig[iloc])
print(trig[n + iloc])
wk, x = forrdx5(int(m * rem), cum, trig[iloc], trig[n + iloc])
orig = not orig
cum *= 5
#do i=1,factors(5)
# rem=rem/5
# iloc=(rem-1)*5*cum
# if (orig) then
# call forrdx5(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
# else
# call forrdx5(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*5
#enddo
# #Use factors of 3:
#do i=1,factors(4)
# rem=rem/3
# iloc=(rem-1)*3*cum
# if (orig) then
# call forrdx3(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
# else
# call forrdx3(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*3
#enddo
# #Use factors of 2:
#do i=1,factors(3)
# rem=rem/2
# iloc=(rem-1)*2*cum
# if (orig) then
# call forrdx2(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
# else
# call forrdx2(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*2
#enddo
# #Use factors of 4:
#do i=1,factors(2)
# rem=rem/4
# iloc=(rem-1)*4*cum
# if (orig) then
# call forrdx4(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
# else
# call forrdx4(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*4
#enddo
# #Use factors of 6:
#do i=1,factors(1)
# rem=rem/6
# iloc=(rem-1)*6*cum
# if (orig) then
# call forrdx6(x,wk,m*rem,cum,trig(iloc),trig(n+iloc))
# else
# call forrdx6(wk,x,m*rem,cum,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*6
#enddo
# Multiply by the normalisation constant and put
# transformed array in the right location:
normfac = 1.0 / sqrt(n)
if (orig):
xhat = x * normfac
else:
xhat = wk * normfac
return xhat
##=====================================================
#subroutine revfft(m,n,x,trig,factors)
## Main spectral to physical (reverse) FFT routine.
## Performs m reverse transforms of length n in the array x which is dimensioned x(m,n).
## The arrays trig and factors are filled by the init routine and
## should be kept from call to call.
## Backend consists of mixed-radix routines, with 'decimation in frequency'.
## Reverse transform starts in Hermitian form.
#implicit none
# #Arguments declarations:
#double precision:: x(0:m*n-1),trig(0:2*n-1)
#integer:: m,n,factors(5)
# #Local declarations:
#double precision:: wk(0:m*n-1),normfac
#integer:: i,k,cum,rem,iloc
#logical:: orig
##----------------------------------------
# #Flip the sign of the sine coefficients:
#do i=(n/2+1)*m,n*m-1
# x(i)=-x(i)
#enddo
# #Scale 0 and Nyquist frequencies:
#do i=0,m-1
# x(i)=0.5d0*x(i)
#enddo
#if (mod(n,2) .eq. 0) then
# k=m*n/2
# do i=0,m-1
# x(k+i)=0.5d0*x(k+i)
# enddo
#endif
# #Initialise flip/flop logical and counters
#orig=.true.
#cum=1
#rem=n
# #Use factors of 6:
#do i=1,factors(1)
# rem=rem/6
# iloc=(cum-1)*6*rem
# if (orig) then
# call revrdx6(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx6(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*6
#enddo
# #Use factors of 4:
#do i=1,factors(2)
# rem=rem/4
# iloc=(cum-1)*4*rem
# if (orig) then
# call revrdx4(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx4(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*4
#enddo
# #Use factors of 2:
#do i=1,factors(3)
# rem=rem/2
# iloc=(cum-1)*2*rem
# if (orig) then
# call revrdx2(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx2(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*2
#enddo
# #Use factors of 3:
#do i=1,factors(4)
# rem=rem/3
# iloc=(cum-1)*3*rem
# if (orig) then
# call revrdx3(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx3(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*3
#enddo
# #Use factors of 5:
#do i=1,factors(5)
# rem=rem/5
# iloc=(cum-1)*5*rem
# if (orig) then
# call revrdx5(x,wk,m*cum,rem,trig(iloc),trig(n+iloc))
# else
# call revrdx5(wk,x,m*cum,rem,trig(iloc),trig(n+iloc))
# endif
# orig=.not. orig
# cum=cum*5
#enddo
# #Multiply by the normalisation constant and put
# #transformed array in the right location:
#normfac=2.0d0/sqrt(dble(n))
#if (orig) then
# do i=0,m*n-1
# x(i)=x(i)*normfac
# enddo
#else
# do i=0,m*n-1
# x(i)=wk(i)*normfac
# enddo
#endif
#return
#end subroutine
##============================================
#subroutine dct(m,n,x,trig,factors)
## This routine computes multiple fourier cosine transforms of sequences
## of doubles using the forfft routine to compute the FFT,
## along with pre- and post-processing steps to extract the dst.
#implicit none
##Argument declarations:
#integer:: m,n,factors(5)
#double precision:: x(m,0:n),trig(2*n)
##Local declarations:
#double precision,parameter:: pi=3.141592653589793238462643383279502884197169399375105820974944592307816d0
#double precision,parameter:: rt2=1.414213562373095048801688724209698078569671875376948073176679737990732d0
#double precision:: wk(1:m,0:n-1),fpin,rtn,rowsum
#integer:: i,j,nd2
##--------------------------------------------------
#fpin=pi/dble(n)
#rtn=sqrt(dble(n))
# #Pre-process the array and store it in wk:
#do i=1,m
# wk(i,0)=0.5d0*(x(i,0)+x(i,n))
#enddo
#do j=1,n-1
# do i=1,m
# wk(i,j)=0.5d0*(x(i,j)+x(i,n-j))-sin(dble(j)*fpin)*(x(i,j)-x(i,n-j))
# enddo
#enddo
# #Get the first element of the transform x(i,1) and store
# #in x(i,n), as this is not overwritten when x is used
# #as a work array in the forfft routine called next:
#do i=1,m
# rowsum=0.0d0
# rowsum=rowsum+0.5d0*x(i,0)
# do j=1,n-1
# rowsum=rowsum+x(i,j)*cos(dble(j)*fpin)
# enddo
# rowsum=rowsum-0.5d0*x(i,n)
# x(i,n)=rt2*rowsum/rtn
#enddo
# #Transform the wk array by use of the general FFT routine:
#call forfft(m,n,wk,trig,factors)
# #Post-process the result of the FFT to get the dst of x and
# #put the result back into the x array:
#do i=1,m
# x(i,0)=rt2*wk(i,0)
#enddo
#do i=1,m
# x(i,1)=x(i,n)
#enddo
#if (mod(n,2) .eq. 0) then
# nd2=n/2
# do j=1,nd2-1
# do i=1,m
# x(i,2*j)=rt2*wk(i,j)
# x(i,2*j+1)=x(i,2*j-1)-rt2*wk(i,n-j)
# enddo
# enddo
# do i=1,m
# x(i,n)=rt2*wk(i,nd2)
# enddo
#else if (mod(n,2) .eq. 1) then
# do j=1,(n-1)/2
# do i=1,m
# x(i,2*j)=rt2*wk(i,j)
# x(i,2*j+1)=x(i,2*j-1)-rt2*wk(i,n-j)
# enddo
# enddo
#endif
#return
#end subroutine
##=============================================================
#subroutine dst(m,n,x,trig,factors)
## This routine computes multiple fourier sine transforms of sequences
## of doubles using the forfft routine to compute the FFT,
## along with pre- and post-processing steps to extract the dst.
#implicit none
##Argument declarations:
#integer:: m,n,factors(5)
#double precision:: x(m,n),trig(2*n)
##Local declarations:
#double precision,parameter:: pi=3.141592653589793238462643383279502884197169399375105820974944592307816d0
#double precision,parameter:: rt2=1.414213562373095048801688724209698078569671875376948073176679737990732d0
#double precision:: wk(1:m,0:n-1),fpin
#integer:: i,j
##------------------------------------------
#fpin=pi/dble(n)
# #Pre-process the array and store it in wk:
# #First set 0 frequency element to zero:
#do i=1,m
# wk(i,0)=0.0d0
#enddo
# #Next set up the rest of the array:
#do j=1,n-1
# do i=1,m
# wk(i,j)=0.5d0*(x(i,j)-x(i,n-j))+sin(dble(j)*fpin)*(x(i,j)+x(i,n-j))
# enddo
#enddo
# #Transform the wk array by use of the general FFT routine:
#call forfft(m,n,wk,trig,factors)
# #Post-process the result of the FFT to get the dst of x and
# #put the result back into the x array:
#do i=1,m
# x(i,1)=wk(i,0)/rt2
#enddo
#if (mod(n,2) .eq. 0) then
# do j=1,n/2-1
# do i=1,m
# x(i,2*j)=-rt2*wk(i,n-j)
# enddo
# do i=1,m
# x(i,2*j+1)=rt2*wk(i,j)+x(i,2*j-1)
# enddo
# enddo
#else if (mod(n,2) .eq. 1) then
# do j=1,(n-1)/2-1
# do i=1,m
# x(i,2*j)=-rt2*wk(i,n-j)
# x(i,2*j+1)=rt2*wk(i,j)+x(i,2*j-1)
# enddo
# enddo
# do i=1,m
# x(i,n-1)=-rt2*wk(i,(n+1)/2)
# enddo
#endif
# # Set the Nyquist frequency element to zero:
#do i=1,m
# x(i,n)=0.0d0
#enddo
#return
#end subroutine
##==================================================
##====================================================
## Internal radix routines only beyond this point...
## Abandon hope all ye who enter in#
##====================================================
## Physical to spectral (forward) routines:
##====================================================
#subroutine forrdx6(a,b,nv,lv,cosine,sine)
## Radix six physical to Hermitian FFT with 'decimation in time'.
#implicit none
#
# #Arguments declarations:
#integer:: nv,lv
#double precision:: a(0:nv-1,0:5,0:lv-1),b(0:nv-1,0:lv-1,0:5),cosine(0:lv-1,5),sine(0:lv-1,5)
# #Local declarations:
#double precision,parameter:: sinfpi3=0.8660254037844386467637231707529361834714026269051903140279034897259665d0
#double precision:: x1p,x2p,x3p,x4p,x5p
#double precision:: y1p,y2p,y3p,y4p,y5p
#double precision:: s1k,s2k,s3k,s4k,s5k
#double precision:: c1k,c2k,c3k,c4k,c5k
#double precision:: t1i,t1r,t2i,t2r,t3i,t3r
#double precision:: u0i,u0r,u1i,u1r,u2i,u2r
#double precision:: v0i,v0r,v1i,v1r,v2i,v2r
#double precision:: q1,q2,q3,q4,q5,q6
#integer:: i,k,kc,lvd2
##-----------------------------------------
# #Do k=0 first:
#do i=0,nv-1
# t1r=a(i,2,0)+a(i,4,0)
# t2r=a(i,0,0)-0.5d0*t1r
# t3r=sinfpi3*(a(i,4,0)-a(i,2,0))
# u0r=a(i,0,0)+t1r
# t1i=a(i,5,0)+a(i,1,0)
# t2i=a(i,3,0)-0.5d0*t1i
# t3i=sinfpi3*(a(i,5,0)-a(i,1,0))
# v0r=a(i,3,0)+t1i
# b(i,0,0)=u0r+v0r
# b(i,0,1)=t2r-t2i
# b(i,0,2)=t2r+t2i
# b(i,0,3)=u0r-v0r
# b(i,0,4)=t3i-t3r
# b(i,0,5)=t3r+t3i
#enddo
# #Next do remaining k:
#if (nv .le. (lv-1)/2) then
# do i=0,nv-1
# do k=1,(lv-1)/2
# kc=lv-k
# x1p=cosine(k,1)*a(i,1, k)-sine(k,1)*a(i,1,kc)
# y1p=cosine(k,1)*a(i,1,kc)+sine(k,1)*a(i,1, k)
# x2p=cosine(k,2)*a(i,2, k)-sine(k,2)*a(i,2,kc)
# y2p=cosine(k,2)*a(i,2,kc)+sine(k,2)*a(i,2, k)
# x3p=cosine(k,3)*a(i,3, k)-sine(k,3)*a(i,3,kc)
# y3p=cosine(k,3)*a(i,3,kc)+sine(k,3)*a(i,3, k)
# x4p=cosine(k,4)*a(i,4, k)-sine(k,4)*a(i,4,kc)
# y4p=cosine(k,4)*a(i,4,kc)+sine(k,4)*a(i,4, k)
# x5p=cosine(k,5)*a(i,5, k)-sine(k,5)*a(i,5,kc)
# y5p=cosine(k,5)*a(i,5,kc)+sine(k,5)*a(i,5, k)
# t1r=x2p+x4p
# t1i=y2p+y4p
# t2r=a(i,0,k)-0.5d0*t1r
# t2i=a(i,0,kc)-0.5d0*t1i
# t3r=sinfpi3*(x2p-x4p)
# t3i=sinfpi3*(y2p-y4p)
# u0r=a(i,0,k)+t1r
# u0i=a(i,0,kc)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=x5p+x1p
# t1i=y5p+y1p
# t2r=x3p-0.5d0*t1r
# t2i=y3p-0.5d0*t1i
# t3r=sinfpi3*(x5p-x1p)
# t3i=sinfpi3*(y5p-y1p)
# v0r=x3p+t1r
# v0i=y3p+t1i
# v1r=t2r+t3i
# v1i=t3r-t2i
# v2r=t2r-t3i
# v2i=t2i+t3r
# b(i, k,0)=u0r+v0r
# b(i,kc,0)=u2r-v2r
# b(i, k,1)=u1r-v1r
# b(i,kc,1)=u1r+v1r
# b(i, k,2)=u2r+v2r
# b(i,kc,2)=u0r-v0r
# b(i, k,3)=v0i-u0i
# b(i,kc,3)=u2i+v2i
# b(i, k,4)=v1i-u1i
# b(i,kc,4)=u1i+v1i
# b(i, k,5)=v2i-u2i
# b(i,kc,5)=u0i+v0i
# enddo
# enddo
#else
# do k=1,(lv-1)/2
# kc=lv-k
# c1k=cosine(k,1)
# s1k=sine(k,1)
# c2k=cosine(k,2)
# s2k=sine(k,2)
# c3k=cosine(k,3)
# s3k=sine(k,3)
# c4k=cosine(k,4)
# s4k=sine(k,4)
# c5k=cosine(k,5)
# s5k=sine(k,5)
# do i=0,nv-1
# x1p=c1k*a(i,1, k)-s1k*a(i,1,kc)
# y1p=c1k*a(i,1,kc)+s1k*a(i,1, k)
# x2p=c2k*a(i,2, k)-s2k*a(i,2,kc)
# y2p=c2k*a(i,2,kc)+s2k*a(i,2, k)
# x3p=c3k*a(i,3, k)-s3k*a(i,3,kc)
# y3p=c3k*a(i,3,kc)+s3k*a(i,3, k)
# x4p=c4k*a(i,4, k)-s4k*a(i,4,kc)
# y4p=c4k*a(i,4,kc)+s4k*a(i,4, k)
# x5p=c5k*a(i,5, k)-s5k*a(i,5,kc)
# y5p=c5k*a(i,5,kc)+s5k*a(i,5, k)
# t1r=x2p+x4p
# t1i=y2p+y4p
# t2r=a(i,0,k)-0.5d0*t1r
# t2i=a(i,0,kc)-0.5d0*t1i
# t3r=sinfpi3*(x2p-x4p)
# t3i=sinfpi3*(y2p-y4p)
# u0r=a(i,0,k)+t1r
# u0i=a(i,0,kc)+t1i
# u1r=t2r+t3i
# u1i=t2i-t3r
# u2r=t2r-t3i
# u2i=t2i+t3r
# t1r=x5p+x1p
# t1i=y5p+y1p
# t2r=x3p-0.5d0*t1r
# t2i=y3p-0.5d0*t1i
# t3r=sinfpi3*(x5p-x1p)
# t3i=sinfpi3*(y5p-y1p)
# v0r=x3p+t1r
# v0i=y3p+t1i
# v1r=t2r+t3i
# v1i=t3r-t2i
# v2r=t2r-t3i
# v2i=t2i+t3r
# b(i, k,0)=u0r+v0r
# b(i,kc,0)=u2r-v2r
# b(i, k,1)=u1r-v1r
# b(i,kc,1)=u1r+v1r
# b(i, k,2)=u2r+v2r
# b(i,kc,2)=u0r-v0r
# b(i, k,3)=v0i-u0i
# b(i,kc,3)=u2i+v2i
# b(i, k,4)=v1i-u1i
# b(i,kc,4)=u1i+v1i
# b(i, k,5)=v2i-u2i
# b(i,kc,5)=u0i+v0i
# enddo
# enddo
#endif
# #Catch the case k=lv/2 when lv even:
#if (mod(lv,2) .eq. 0) then
# lvd2=lv/2
# do i=0,nv-1
# q1=a(i,2,lvd2)-a(i,4,lvd2)
# q2=a(i,0,lvd2)+0.5d0*q1
# q3=sinfpi3*(a(i,2,lvd2)+a(i,4,lvd2))
# q4=a(i,1,lvd2)+a(i,5,lvd2)
# q5=-a(i,3,lvd2)-0.5d0*q4
# q6=sinfpi3*(a(i,1,lvd2)-a(i,5,lvd2))
# b(i,lvd2,0)=q2+q6
# b(i,lvd2,1)=a(i,0,lvd2)-q1
# b(i,lvd2,2)=q2-q6
# b(i,lvd2,3)=q5+q3
# b(i,lvd2,4)=a(i,3,lvd2)-q4
# b(i,lvd2,5)=q5-q3
# enddo
#endif
#
#return
#end subroutine
##================================================
def forrdx5(nv, lv, cosine, sine):
"""
Radix five physical to Hermitian FFT with 'decimation in time'.
Input: TO ADD
nv =
lv =
cosine =
sine =
Returns: TO ADD
a =
b =
"""
# define some parameters and variables
rtf516 = 0.5590169943749474241022934171828190588601545899028814310677243113526302
sinf2pi5= 0.9510565162951535721164393333793821434056986341257502224473056444301532
sinfpi5 = 0.5877852522924731291687059546390727685976524376431459910722724807572785
sinrat = 0.6180339887498948482045868343656381177203091798057628621354486227052605
a =
|
zeros((nv, 5, lv))
|
numpy.zeros
|
import copy
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from .losses import HingeLoss
from .batcher import batch_it
def train(model, train_data, valid_data, nb_epochs, batch_size, margin, cuda=False, early_stop=10):
if cuda:
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=0.01) ## TODO: more flexible
criterion = HingeLoss(margin)
train_losses, valid_losses = [], []
min_valid_loss = np.inf
early_stop_cnt = 0
#if cuda:
# model.cuda()
start = time.time()
for i_epoch in range(nb_epochs):
tlosses = []
## training
model.train()
for i_batch, batch in batch_it(train_data, batch_size, random=True):
if cuda:
batch = batch.cuda()
vbatch = Variable(batch)
y_right, y_left = model(vbatch[:,0]), model(vbatch[:,1])
optimizer.zero_grad()
loss = criterion(y_right, y_left)
loss.backward()
optimizer.step()
tlosses.append(loss.data.cpu().numpy()[0])
vlosses = []
## validation
model.eval()
for i_batch, batch in batch_it(valid_data, batch_size):
if cuda:
batch = batch.cuda()
vbatch = Variable(batch, volatile=True)
y_right, y_left = model(vbatch[:,0]), model(vbatch[:,1])
loss = criterion(y_right, y_left)
vlosses.append(loss.data.cpu().numpy()[0])
if np.mean(vlosses) < min_valid_loss:
early_stop_cnt = 0
effective_epochs = i_epoch
min_train_loss = np.mean(tlosses)
min_valid_loss =
|
np.mean(vlosses)
|
numpy.mean
|
import cv2
import numpy as np
from scipy import signal
from sklearn.linear_model import LinearRegression
import cubic
from sklearn.preprocessing import MinMaxScaler
import math
from scipy import ndimage
import scipy.cluster.hierarchy as hcluster
import random
import os, shutil
import time
DEFAULT_LINE_LIST = ['CD']
DEFAULT_LINE_INFO = [[(770, 170), 150, 50, 0]]
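# i.e. one curved dashed ('CD') line whose initial detection window has its
# bottom midpoint at (770, 170), is 150 px tall and 50 px wide, and starts at
# an angle of 0 rad (this restates the line_info format documented below).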
class LineDetector:
# the list of current lines information at the starting point
# C = Curb, S = Solid (white) line, D = Dashed (white) line
# SS = Straight Solid line, CD = Curved Dashed line, CS = Curved Solid line ..
# each line should be a detectable visual unit
# line info: [mid point(x, y) at bottom for initial detect window,
# detect window height,
# detect window width,
# initial angle (in radians)]
def __init__(self,
min_over_hough = 50,
min_length = 56/3,
max_length = 56*1.2,
image_dimension = (800, 400),
dash_interval_pxl = 90,
line_list= DEFAULT_LINE_LIST,
line_info= DEFAULT_LINE_INFO,
lane_width = 53,
initial_angle = 0,
max_turning_angle = [-100, 100],
current_image_name = 'um_000000',
current_image_path= "../../../../KITTI/data_road/transformed/",
vis_folder_prefix = 'visualization/',
step_window= False,
visualization = False
):
self.min_over_hough = min_over_hough
self.min_length = min_length
self.max_length= max_length
self.image_dimension= image_dimension
self.dash_interval_pxl= dash_interval_pxl # the interval + one line length, 0-300 is legal value for window_h=200
self.line_list= line_list
self.line_info= line_info
self.lane_width= lane_width # used for solid line mode
self.initial_angle= initial_angle
self.current_image_name= current_image_name
self.current_image_path= current_image_path
self.vis_folder_prefix = vis_folder_prefix
assert len(self.line_info) == len(self.line_list)
self.step_window= step_window
self.visualization = visualization
self.max_turning_angle = max_turning_angle
@staticmethod
def rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
point = (3, 4)
origin = (2, 2)
rotate(origin, point, math.radians(10))
(2.6375113976783475, 4.143263683691346)
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
@staticmethod
def prepare_visualization_folder(vis_folder):
# clear visualization folder
for filename in os.listdir(vis_folder):
file_path = os.path.join(vis_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
def detect_and_save(self):
start_time = time.perf_counter()
vis_folder = self.vis_folder_prefix+str(self.current_image_name)
self.prepare_visualization_folder(vis_folder=vis_folder)
img = cv2.imread(self.current_image_path+str(self.current_image_name)+".png")
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
y, u, v = cv2.split(img_yuv)
normal_k_v_7 = np.array([[-10, 1, 0, 1, +10],
[0,-10, 1, +10, 0],
[0, -10, 1, +10, 0],
[0, -10, 1, +10, 0],
[-10, 1, 0, 1, +10]])
grad = signal.convolve2d(y, normal_k_v_7, boundary='symm', mode='same')
scaler = MinMaxScaler()
scaler.fit(np.array(grad).reshape(-1,1))
grad_n = scaler.transform(grad)*255
origin = (self.image_dimension[0]/2, self.image_dimension[1]/2)
line_detection_results = []
for i, line_info_list in enumerate(self.line_info): # for each line
print("searching no.", i, " line :", self.line_list[i])
bottom_middle_end, window_h, window_w, initial_angle = line_info_list
if self.initial_angle == 0:
current_mid_end = bottom_middle_end
else: # rotate based on middle end of the image
d_grad = np.zeros((self.image_dimension[0]*2, self.image_dimension[1]))
d_grad[0:self.image_dimension[0],:] = grad_n
angle_in_degree = self.initial_angle *180 / np.pi
grad_n = ndimage.rotate(input=np.array(d_grad), angle=angle_in_degree, reshape=False)[0:self.image_dimension[0],:]
cv2.imwrite(os.path.join(vis_folder, "init_rotated.png"), grad_n)
bottom_middle_end = self.rotate(origin = (self.image_dimension[0], int(self.image_dimension[1]/2)),
point = bottom_middle_end,
angle = self.initial_angle)
bottom_middle_end = (int(bottom_middle_end[0]), int(bottom_middle_end[1]))
current_mid_end = bottom_middle_end
mid_x, mid_y = bottom_middle_end
window_coor = [] # this is for visualization windows
line_type = self.line_list[i]
line_vis_folder = os.path.join(vis_folder, line_type)
if self.visualization:
if not os.path.exists(line_vis_folder):
os.mkdir(line_vis_folder)
if line_type == 'SS':
window1 = grad_n[:mid_x, mid_y-int(window_w/2): mid_y+int(window_w/2)]
window_h = mid_x
else:
window1 = grad_n[mid_x-window_h: mid_x, mid_y-int(window_w/2): mid_y+int(window_w/2)]
line_ended = False
# line_detected = False
iteration = 0
angle_to_clockwise_current = 0
angle_to_clockwise_total = 0
final_mask_for_the_line = np.zeros(grad_n.shape)
while not line_ended: # for each window
iteration += 1
if self.visualization:
cv2.imwrite(os.path.join(line_vis_folder, 'current_window'+str(iteration)+'.png'),window1)
cluster_and_spline = True
clusters_no = 0
max_line_angle = 0
index_for_mask = ([], [])
# re-normalize in the window
scaler_2 = MinMaxScaler()
scaler_2.fit(np.array(window1).reshape(-1,1))
window1_n = scaler_2.transform(window1)*255
if self.visualization:
cv2.imwrite(os.path.join(line_vis_folder, 'window_n.png'),window1_n)
# erosion will enhance the dark area
# print("test: ",np.mean(window1_n), min(220, 140/110*np.mean(window1_n)), np.max(window1_n)) # inspect clip
min_threshold = min(220, 140/110*np.mean(window1_n))
window_clip = np.clip(window1_n, min_threshold, 255) - min_threshold
window_clip = window_clip / (255 - min_threshold) * 255
cv2.imwrite(os.path.join(line_vis_folder, 'clip.png'),window_clip)
if abs(angle_to_clockwise_current)>0.1 or abs(self.initial_angle)>0.1:
print("sharp turns, skipping erosion")
erosion = window_clip
else:
kernel = np.ones((5,1),np.uint8)
erosion = cv2.erode(window_clip,kernel,iterations = 3)
if self.visualization:
cv2.imwrite(os.path.join(line_vis_folder, 'bin.png'),erosion)
# kernel = np.ones((9,9),np.uint8) # backup kernel
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
erosion = cv2.morphologyEx(erosion, cv2.MORPH_CLOSE, kernel)
# clustering lines
img_np = np.array(erosion)
img_idx_tp = np.where(img_np > self.min_over_hough)
if len(img_idx_tp[0]) < 5:
print("little or no pixel above minimal hough")
else:
img_idx_list = [img_idx_tp[0], img_idx_tp[1]]
img_idx_arr = np.transpose(np.array(img_idx_list))
# the minimal distance in pixel for clustering, 4 is the experiential value
d_thresh = 4
clusters = hcluster.fclusterdata(img_idx_arr, d_thresh, criterion="distance")
clusters_no = np.max(clusters)
print("total cluster: ",clusters_no)
h, w = img_np.shape
if clusters_no > 10:
print("too noisy, quite detecting lines")
cluster_and_spline = False
elif clusters_no == 0:
print("no cluster result, quite detecting lines")
cluster_and_spline = False
else:
# for visualization
copy = np.zeros((h, w, 3))
color_pallet = []
for i in range(0, clusters_no):
color_pallet.append([random.random()*255, random.random()*255, random.random()*255])
for i, pos in enumerate(img_idx_arr):
x, y = pos
group_id = clusters[i]
copy[x, y] = color_pallet[group_id-1]
if self.visualization:
cv2.imwrite(os.path.join(line_vis_folder, 'cluster_before_filter.png'),copy)
# end of visualization
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
copy_1 = np.zeros((h, w, 3))
copy_2 = np.zeros((h, w, 3))
max_x_span = 0
max_line = np.array([])
max_x_center = 0
if cluster_and_spline:
# get the lines
# this init is for final marking
x_list_before_r = []
y_list_before_r = []
for c_no in range(0, clusters_no):
# for each line (cluster)
zero_img = np.zeros(img_np.shape)
pos_list = []
for i, pos in enumerate(img_idx_arr):
if clusters[i] == c_no+1:
x, y = pos
pos_list.append(pos)
zero_img[x, y] = img_np[x, y]
x_list, y_list = np.where(zero_img > self.min_over_hough)
if len(x_list) > 0:
x_span = np.max(x_list) - np.min(x_list)
y_span = np.max(y_list) - np.min(y_list)
if self.max_length > x_span > self.min_length or y_span > self.min_length:
# filter small and weak lines
# print("valid:", c_no, x_span, y_span)
# visulization after filter
for pos in pos_list:
x, y = pos
copy_1[x, y] = color_pallet[c_no]
# 1. randomly select a few points (deprecated)
# all_points_num = len(x_list)
# random select makes line bend because choosing points varying horizontally
# if all_points_num > SPLINE_SEEDS_NUMBER:
# for i in range(0, SPLINE_SEEDS_NUMBER):
# seed = int(random.random()*(all_points_num-1))
# x_spline.append(x_list[seed])
# y_spline.append(y_list[seed])
# else:
# print("points are fewer than 50, continue")
# continue
# choose all points to spline
x_spline = x_list
y_spline = y_list
x_y_arr = np.array([x_spline, y_spline])
x_y_arr_1 =
|
np.transpose(x_y_arr)
|
numpy.transpose
|
"""
Raw radar RHIs processing.
@title: calibrate_rhi
@author: <NAME> <<EMAIL>>
@institution: Australian Bureau of Meteorology
@date: 07/10/2020
@version: 0.1
.. autosummary::
:toctree: generated/
mkdir
remove
extract_zip
get_radar_archive_file
get_metadata
get_calib_offset
get_zdr_offset
get_dbz_name
create_level1a
buffer
main
"""
import gc
import os
import sys
import glob
import gzip
import uuid
import pickle
import zipfile
import argparse
import datetime
import warnings
import traceback
import pyart
import cftime
import crayons
import numpy as np
import pandas as pd
import xarray as xr
import dask.bag as db
import cpol_processing
def mkdir(path: str):
"""
Create the DIRECTORY(ies), if they do not already exist
"""
try:
os.mkdir(path)
except FileExistsError:
pass
return None
def remove(flist):
"""
Remove file if it exists.
"""
flist = [f for f in flist if f is not None]
for f in flist:
try:
os.remove(f)
except FileNotFoundError:
pass
return None
def extract_zip(inzip: str, path: str):
"""
Extract content of a zipfile inside a given directory.
Parameters:
===========
inzip: str
Input zip file.
path: str
Output path.
Returns:
========
namelist: List
List of files extracted from the zip.
"""
with zipfile.ZipFile(inzip) as zid:
zid.extractall(path=path)
namelist = [os.path.join(path, f) for f in zid.namelist()]
return namelist
def get_radar_archive_file(date) -> str:
"""
Return the archive containing the radar file for a given date.
Parameters:
===========
date: datetime
Date.
Returns:
========
file: str
Radar archive if it exists at the given date.
"""
datestr = date.strftime("%Y%m%d")
file = os.path.join(INPATH, f"{date.year}", f"{datestr}.zip")
if not os.path.exists(file):
return None
return file
def get_metadata(radar):
# Lat/lon informations
today = datetime.datetime.utcnow()
radar_start_date = cftime.num2pydate(radar.time["data"][0], radar.time["units"])
radar_end_date = cftime.num2pydate(radar.time["data"][-1], radar.time["units"])
latitude = radar.gate_latitude["data"]
longitude = radar.gate_longitude["data"]
maxlon = longitude.max()
minlon = longitude.min()
maxlat = latitude.max()
minlat = latitude.min()
origin_altitude = "50"
origin_latitude = "-12.2491"
origin_longitude = "131.0444"
unique_id = str(uuid.uuid4())
metadata = {
"Conventions": "CF-1.6, ACDD-1.3",
"acknowledgement": "This work has been supported by the U.S. Department of Energy Atmospheric Systems Research Program through the grant DE-SC0014063. Data may be freely distributed.",
"country": "Australia",
"creator_email": "<EMAIL>",
"creator_name": "<NAME>",
"creator_url": "github.com/vlouf",
"date_created": today.isoformat(),
"geospatial_bounds": f"POLYGON(({minlon:0.6} {minlat:0.6},{minlon:0.6} {maxlat:0.6},{maxlon:0.6} {maxlat:0.6},{maxlon:0.6} {minlat:0.6},{minlon:0.6} {minlat:0.6}))",
"geospatial_lat_max": f"{maxlat:0.6}",
"geospatial_lat_min": f"{minlat:0.6}",
"geospatial_lat_units": "degrees_north",
"geospatial_lon_max": f"{maxlon:0.6}",
"geospatial_lon_min": f"{minlon:0.6}",
"geospatial_lon_units": "degrees_east",
"history": "created by <NAME> on gadi.nci.org.au at " + today.isoformat() + " using Py-ART",
"id": unique_id,
"institution": "Bureau of Meteorology",
"instrument": "radar",
"instrument_name": "CPOL",
"instrument_type": "radar",
"keywords": "radar, tropics, Doppler, dual-polarization",
"license": "CC BY-NC-SA 4.0",
"naming_authority": "au.org.nci",
"origin_altitude": origin_altitude,
"origin_latitude": origin_latitude,
"origin_longitude": origin_longitude,
"platform_is_mobile": "false",
"processing_level": "b1",
"project": "CPOL",
"publisher_name": "NCI",
"publisher_url": "nci.gov.au",
"product_version": f"v{today.year}.{today.month:02}",
"references": "doi:10.1175/JTECH-D-18-0007.1",
"site_name": "Gunn Pt",
"source": "radar",
"state": "NT",
"standard_name_vocabulary": "CF Standard Name Table v71",
"summary": "RHI scan from CPOL dual-polarization Doppler radar (Darwin, Australia)",
"time_coverage_start": radar_start_date.isoformat(),
"time_coverage_end": radar_end_date.isoformat(),
"time_coverage_duration": "P10M",
"time_coverage_resolution": "PT10M",
"title": "radar RHI volume from CPOL",
"uuid": unique_id,
"version": radar.metadata["version"],
}
return metadata
def get_calib_offset(mydate) -> float:
"""
Get calibration offset for given date.
Parameter:
==========
mydate: datetime
Date of treatment.
Returns:
========
calib_offset: float
Calibration offset value. Z_calib = Z_cpol + calib_offset.
"""
calib_offset = None
if IS_CALIB_PERIOD:
for datest, dateed, rval in zip(CALIB_DATE_START, CALIB_DATE_END, CALIB_VALUE):
if (mydate >= datest) & (mydate <= dateed):
calib_offset = rval
# If no calibration offset has been found, then look for the closest one.
if calib_offset is None:
daydelta = np.array([(cd - mydate).days for cd in CALIB_DATE_START])
pos =
|
np.argmax(daydelta[daydelta < 0])
|
numpy.argmax
|
"""Numba-compiled functions.
Provides an arsenal of Numba-compiled functions that are used by accessors
and in many other parts of the backtesting pipeline, such as technical indicators.
These only accept NumPy arrays and other Numba-compatible types.
The module can be accessed directly via `vbt.nb`.
```python-repl
>>> import numpy as np
>>> import vectorbt as vbt
>>> # vectorbt.generic.nb.rolling_mean_1d_nb
>>> vbt.nb.rolling_mean_1d_nb(np.array([1, 2, 3, 4]), 2)
array([nan, 1.5, 2.5, 3.5])
```
!!! note
vectorbt treats matrices as first-class citizens and expects input arrays to be
2-dim, unless function has suffix `_1d` or is meant to be input to another function.
Data is processed along index (axis 0).
Rolling functions with `minp=None` have `min_periods` set to the window size.
All functions passed as argument should be Numba-compiled."""
import numpy as np
from numba import njit, generated_jit
from numba.np.numpy_support import as_dtype
from numba.typed import Dict
from numba.core.types import Omitted
from vectorbt import _typing as tp
from vectorbt.generic.enums import DrawdownStatus, drawdown_dt
@njit(cache=True)
def shuffle_1d_nb(a: tp.Array1d, seed: tp.Optional[int] = None) -> tp.Array1d:
"""Shuffle each column in `a`.
Specify `seed` to make output deterministic."""
if seed is not None:
np.random.seed(seed)
return np.random.permutation(a)
@njit(cache=True)
def shuffle_nb(a: tp.Array2d, seed: tp.Optional[int] = None) -> tp.Array2d:
"""2-dim version of `shuffle_1d_nb`."""
if seed is not None:
np.random.seed(seed)
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = np.random.permutation(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def set_by_mask_1d_nb(a: tp.Array1d, mask: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Set each element to a value by boolean mask."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_1d_nb(a, mask, value):
out = a.astype(dtype)
out[mask] = value
return out
if not nb_enabled:
return _set_by_mask_1d_nb(a, mask, value)
return _set_by_mask_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_nb(a: tp.Array2d, mask: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `set_by_mask_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(value)
else:
a_dtype = a.dtype
value_dtype = np.array(value).dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_nb(a, mask, value):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = value
return out
if not nb_enabled:
return _set_by_mask_nb(a, mask, value)
return _set_by_mask_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_1d_nb(a: tp.Array1d, mask: tp.Array1d, values: tp.Array1d) -> tp.Array1d:
"""Set each element in one array to the corresponding element in another by boolean mask.
`values` should be of the same shape as in `a`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_1d_nb(a, mask, values):
out = a.astype(dtype)
out[mask] = values[mask]
return out
if not nb_enabled:
return _set_by_mask_mult_1d_nb(a, mask, values)
return _set_by_mask_mult_1d_nb
@generated_jit(nopython=True, cache=True)
def set_by_mask_mult_nb(a: tp.Array2d, mask: tp.Array2d, values: tp.Array2d) -> tp.Array2d:
"""2-dim version of `set_by_mask_mult_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
value_dtype = as_dtype(values.dtype)
else:
a_dtype = a.dtype
value_dtype = values.dtype
dtype = np.promote_types(a_dtype, value_dtype)
def _set_by_mask_mult_nb(a, mask, values):
out = a.astype(dtype)
for col in range(a.shape[1]):
out[mask[:, col], col] = values[mask[:, col], col]
return out
if not nb_enabled:
return _set_by_mask_mult_nb(a, mask, values)
return _set_by_mask_mult_nb
@njit(cache=True)
def fillna_1d_nb(a: tp.Array1d, value: tp.Scalar) -> tp.Array1d:
"""Replace NaNs with value.
Numba equivalent to `pd.Series(a).fillna(value)`."""
return set_by_mask_1d_nb(a, np.isnan(a), value)
@njit(cache=True)
def fillna_nb(a: tp.Array2d, value: tp.Scalar) -> tp.Array2d:
"""2-dim version of `fillna_1d_nb`."""
return set_by_mask_nb(a, np.isnan(a), value)
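# A quick illustrative check of the pandas equivalence stated above; the
# sample values are made up.
if __name__ == "__main__":
    print(fillna_1d_nb(np.array([1., np.nan, 3.]), 0.))  # [1. 0. 3.]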
@generated_jit(nopython=True, cache=True)
def bshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift backward by `n` positions.
Numba equivalent to `pd.Series(a).shift(-n)`.
!!! warning
Shift backward means looking ahead."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[-n:] = fill_value
out[:-n] = a[n:]
return out
if not nb_enabled:
return _bshift_1d_nb(a, n, fill_value)
return _bshift_1d_nb
@generated_jit(nopython=True, cache=True)
def bshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `bshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _bshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = bshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _bshift_nb(a, n, fill_value)
return _bshift_nb
@generated_jit(nopython=True, cache=True)
def fshift_1d_nb(a: tp.Array1d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array1d:
"""Shift forward by `n` positions.
Numba equivalent to `pd.Series(a).shift(n)`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_1d_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
out[:n] = fill_value
out[n:] = a[:-n]
return out
if not nb_enabled:
return _fshift_1d_nb(a, n, fill_value)
return _fshift_1d_nb
@generated_jit(nopython=True, cache=True)
def fshift_nb(a: tp.Array2d, n: int = 1, fill_value: tp.Scalar = np.nan) -> tp.Array2d:
"""2-dim version of `fshift_1d_nb`."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
if isinstance(fill_value, Omitted):
fill_value_dtype = np.asarray(fill_value.value).dtype
else:
fill_value_dtype = as_dtype(fill_value)
else:
a_dtype = a.dtype
fill_value_dtype = np.array(fill_value).dtype
dtype = np.promote_types(a_dtype, fill_value_dtype)
def _fshift_nb(a, n, fill_value):
out = np.empty_like(a, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = fshift_1d_nb(a[:, col], n=n, fill_value=fill_value)
return out
if not nb_enabled:
return _fshift_nb(a, n, fill_value)
return _fshift_nb
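# A small sketch contrasting the forward and backward shifts defined above
# (per the warning: shifting backward means looking ahead); the sample values
# are made up.
if __name__ == "__main__":
    _a = np.array([1., 2., 3., 4.])
    print(fshift_1d_nb(_a, 1))  # [nan  1.  2.  3.]  ~ pd.Series(_a).shift(1)
    print(bshift_1d_nb(_a, 1))  # [ 2.  3.  4. nan]  ~ pd.Series(_a).shift(-1)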
@njit(cache=True)
def diff_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the 1-th discrete difference.
Numba equivalent to `pd.Series(a).diff()`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] - a[:-n]
return out
@njit(cache=True)
def diff_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `diff_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = diff_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def pct_change_1d_nb(a: tp.Array1d, n: int = 1) -> tp.Array1d:
"""Return the percentage change.
Numba equivalent to `pd.Series(a).pct_change()`."""
out = np.empty_like(a, dtype=np.float_)
out[:n] = np.nan
out[n:] = a[n:] / a[:-n] - 1
return out
@njit(cache=True)
def pct_change_nb(a: tp.Array2d, n: int = 1) -> tp.Array2d:
"""2-dim version of `pct_change_1d_nb`."""
out = np.empty_like(a, dtype=np.float_)
for col in range(a.shape[1]):
out[:, col] = pct_change_1d_nb(a[:, col], n=n)
return out
@njit(cache=True)
def ffill_1d_nb(a: tp.Array1d) -> tp.Array1d:
"""Fill NaNs by propagating last valid observation forward.
Numba equivalent to `pd.Series(a).fillna(method='ffill')`."""
out = np.empty_like(a, dtype=a.dtype)
lastval = a[0]
for i in range(a.shape[0]):
if np.isnan(a[i]):
out[i] = lastval
else:
lastval = out[i] = a[i]
return out
@njit(cache=True)
def ffill_nb(a: tp.Array2d) -> tp.Array2d:
"""2-dim version of `ffill_1d_nb`."""
out = np.empty_like(a, dtype=a.dtype)
for col in range(a.shape[1]):
out[:, col] = ffill_1d_nb(a[:, col])
return out
@generated_jit(nopython=True, cache=True)
def nanprod_nb(a):
"""Numba-equivalent of `np.nanprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nanprod_nb(a):
out = np.empty(a.shape[1], dtype=dtype)
for col in range(a.shape[1]):
out[col] = np.nanprod(a[:, col])
return out
if not nb_enabled:
return _nanprod_nb(a)
return _nanprod_nb
@generated_jit(nopython=True, cache=True)
def nancumsum_nb(a):
"""Numba-equivalent of `np.nancumsum` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumsum_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumsum(a[:, col])
return out
if not nb_enabled:
return _nancumsum_nb(a)
return _nancumsum_nb
@generated_jit(nopython=True, cache=True)
def nancumprod_nb(a):
"""Numba-equivalent of `np.nancumprod` along axis 0."""
nb_enabled = not isinstance(a, np.ndarray)
if nb_enabled:
a_dtype = as_dtype(a.dtype)
else:
a_dtype = a.dtype
dtype = np.promote_types(a_dtype, int)
def _nancumprod_nb(a):
out = np.empty(a.shape, dtype=dtype)
for col in range(a.shape[1]):
out[:, col] = np.nancumprod(a[:, col])
return out
if not nb_enabled:
return _nancumprod_nb(a)
return _nancumprod_nb
@njit(cache=True)
def nancnt_nb(a: tp.Array2d) -> tp.Array1d:
"""Compute count while ignoring NaNs."""
out = np.empty(a.shape[1], dtype=np.int_)
for col in range(a.shape[1]):
out[col] = np.sum(~
|
np.isnan(a[:, col])
|
numpy.isnan
|
"""
Model observing: this module is built to simulate actual observing. The
object model is known and, given sightline parameters, the observed data are
returned. In particular, these functions give the values of terms derived
from the object model that is also provided.
"""
import copy
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy as ap
import astropy.units as ap_u
import astropy.coordinates as ap_coord
import Robustness as Robust
import Backend as _Backend
import data_systematization as d_systize
class Sightline():
"""
This is a sightline. It contains the information for a given sightline
through space. The sightline is always given by the RA and DEC values.
The notation for the accepted values of RA and DEC is found in the
Astropy module's :py:class:`~.astropy.coordinates.SkyCoord` class.
Attributes
----------
self.coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object.
This is the sky coordinates of the sightline.
Methods
-------
sightline_parameters() : function (returns | ndarray,ndarray)
This method returns back both the sightline's center and slopes for
an actual geometrical representation of the line. Converting from
the equatorial coordinate system to the cartesian coordinate system.
"""
def __init__(self, right_ascension, declination,
SkyCoord_object=None):
"""Initialization of a sightline.
This creates the sightline's main parameters; the defining element
of the sightline is its location throughout space. This
is a specific wrapper around :py:class:`~.astropy.coordinates.SkyCoord`.
Arguments
---------
right_ascension : string
The right ascension value for the sightline. This term must be
formatted in the Astropy :py:class:`~.astropy.coordinates.SkyCoord` format: ``00h00m00.00s``.
The seconds value is decimal and may extend to any precision.
declination : string
The declination value for the sightline. This term must be
formatted in the Astropy :py:class:`~.astropy.coordinates.SkyCoord` format: ``±00d00m00.00s``.
The seconds value is decimal and may extend to any precision.
SkyCoord_object : :py:class:`~.astropy.coordinates.SkyCoord` object; optional
It may be easier to also just pass an Astropy
:py:class:`~.astropy.coordinates.SkyCoord` object in
general. The other strings are ignored if it is successful.
"""
# Type check.
if (isinstance(SkyCoord_object, ap_coord.SkyCoord)):
sky_coordinates = SkyCoord_object
else:
# Type check for RA and dec before conversion
right_ascension = Robust.valid.validate_string(right_ascension)
declination = Robust.valid.validate_string(declination)
# Convert the strings to sky cords.
sky_coordinates = ap_coord.SkyCoord(right_ascension,
declination,
frame='icrs')
# Automatically calculate the wrap angle along with the radian version
# of the angles.
ra_radians = float(sky_coordinates.ra.hour * (np.pi / 12))
dec_radians = float(sky_coordinates.dec.radian)
ra_wrap_angle = _Backend.astrcoord.auto_ra_wrap_angle(ra_radians)
# Define the member arguments.
self.coordinates = sky_coordinates
self._ra_wrap_angle = ra_wrap_angle * ap_u.rad
def sightline_parameters(self):
""" This function returns the sightline linear parameters.
The sightline is by definition always parallel to the x-axis
of the object to be observed. The plane of the sky is the yz-plane
of the object. This function returns first the central defining
point, then the deltas for the equation.
Returns
-------
sightline_center : ndarray
This returns a cartesian point based on the approximation
that, if the x-axis and the r-axis of the cartesian
and spherical coordinate systems are the same, then so too are the
yz-plane and the theta-phi plane.
sightline_slopes : ndarray
This returns the slopes of the cartesian point values given
by the center. Because of the approximation from above, it is
always [1,0,0].
Notes
-----
The coordinates of the sightline in relation to the object are as
follows:
- The x-axis of the object is equal to the r-axis of the telescope. Both pointing away from the telescope, deeper into space.
- The y-axis of the object is equal to the RA-axis/phi-axis of the
telescope, westward (as y increases, RA decreases)
- The z-axis of the object is equal to the DEC-axis of the telescope. It is also equal to the negative of the theta-axis
when it is centered on theta = pi/2. Points north-south of the
telescope.
"""
# Work in radians.
ra_radians, dec_radians = self._radianize_coordinates()
sightline_center = np.array([0, ra_radians, dec_radians])
sightline_slopes = np.array([1, 0, 0])
return sightline_center, sightline_slopes
def _radianize_coordinates(self):
"""This method returns the RA and DEC in radians.
This method converts the RA and DEC coordinate measurements into
radians for better accounting.
Returns
-------
ra_radians : float
The RA coordinate in radians.
dec_radians : float
The DEC coordinate in radians.
"""
# Change the wrapping location if necessary. Astropy requires a unit.
self.coordinates.ra.wrap_angle = self._ra_wrap_angle
ra_radians = float(self.coordinates.ra.hour * (np.pi / 12))
dec_radians = float(self.coordinates.dec.radian)
return ra_radians, dec_radians
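# A minimal usage sketch of the sightline conventions documented above. The
# coordinates are illustrative, and running this requires the Robustness and
# Backend helpers imported at the top of the module.
if __name__ == "__main__":
    example_line = Sightline('05h35m17.30s', '-05d23m28.00s')
    center, slopes = example_line.sightline_parameters()
    print(center)  # [0, RA in radians, DEC in radians]
    print(slopes)  # [1, 0, 0] -> parallel to the object's x-axis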
class ProtostarModel():
"""
This is an object that represents a model of an object in space. It
contains all the required functions and parameters associated with
one of the objects that would be observed for polarimetry data.
Attributes
----------
self.coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object
This is the coordinates of the object that this class defines.
self.cloud_model : function
This is an implicit function (or a numerical approximation thereof) of
the shape of the protostar cloud.
self.magnetic_field : function
This is an implicit function (or a numerical approximation thereof) of
the shape of the magnetic field.
self.density_model : function
This is an implicit function (or a numerical approximation thereof) of
the shape of the density model of the cloud.
self.polarization_model : function
This is an implicit function (or a numerical approximation thereof) of
the polarization model of the cloud.
"""
def __init__(self, coordinates, cloud_model, magnetic_field_model,
density_model=None, polarization_model=None, zeros_guess_count=100):
"""Object form of a model object to be observed.
This is the object representation of an object in the sky. The
required terms are present.
Arguments
---------
coordinates : Astropy :py:class:`~.astropy.coordinates.SkyCoord` object
These are the coordinates of the observation object. It is up
to the user to put as complete information as possible.
cloud_model : function or string,
An implicit equation of the cloud. The origin of this equation
must also be the coordinate specified by self.coordinates. Must
be cartesian in the form ``f(x,y,z) = 0``, for the function or
string is ``f(x,y,z)``. The x-axis is always aligned with a
telescope as it is the same as a telescope's r-axis.
magnetic_field_model : function or :py:class:`~.InterpolationTable`
A function that, given a single point in cartesian space, will
return the value of the magnitude of the magnetic field's three
orthogonal vectors in xyz-space. If an interpolation table is
given, a numerical approximation function will be used instead.
density_model : function or string, or :py:class:`~.InterpolationTable`; optional
A function that, given a point in cartesian space, will return
a value pertaining to the density of the gas/dust within at that
point. Defaults to uniform. If an interpolation table is
given, a numerical approximation function will be used instead.
polarization_model: function, string, float or :py:class:`~.InterpolationTable`; optional
This is the percent of polarization of the light. Either given as
a function (or string representing a function) ``f(x,y,z)``, or
as a constant float value. Default is uniform value of 1. If an
interpolation table is given, a numerical approximation function
will be used instead.
Parameters
----------
zeros_guess_count : int; optional
This value stipulates how many spread out test points there should
be when finding sightline intersection points. A higher number
should be used for complex shapes. Defaults at 100.
"""
# Initialization of boolean checks.
# Check if the user input a interpolated data table instead of a
# function. The integration method must change if so.
input_interpolated_tables = False
# Type check
if (not isinstance(coordinates, ap_coord.SkyCoord)):
raise TypeError('The input for coordinates must be an Astropy '
'SkyCoord object.'
' --Kyubey')
if (callable(cloud_model)):
cloud_model = \
Robust.valid.validate_function_call(cloud_model,
n_parameters=3)
elif (isinstance(cloud_model, str)):
cloud_model = \
Robust.inparse.user_equation_parse(cloud_model,
('x', 'y', 'z'))
else:
raise TypeError('The input for the cloud equation must either '
'be a callable function or a string that can '
'be converted into an implicit callable function.'
' --Kyubey')
# Test magnetic field model.
if (callable(magnetic_field_model)):
magnetic_field_model = \
Robust.valid.validate_function_call(magnetic_field_model,
n_parameters=3)
elif (isinstance(magnetic_field_model, d_systize.InterpolationTable)):
# The user has inputted an interpolation table, record such.
input_interpolated_tables = True
if (magnetic_field_model.classification == 'vector'):
magnetic_field_model = \
magnetic_field_model.numerical_function()
else:
raise TypeError('The magnetic field lookup table must be a '
'vector based table. It is currently a '
'< {tb} > based table.'
' --Kyubey'
.format(tb=magnetic_field_model.classification))
# Test density model.
if (callable(density_model)):
density_model = \
Robust.valid.validate_function_call(density_model,
n_parameters=3)
elif (isinstance(density_model, str)):
density_model = \
Robust.inparse.user_equation_parse(density_model,
('x', 'y', 'z'))
elif (isinstance(density_model, d_systize.InterpolationTable)):
# The user has inputted an interpolation table, record such.
input_interpolated_tables = True
if (density_model.classification == 'scalar'):
density_model = density_model.numerical_function()
else:
raise TypeError('The density model lookup table must be a '
'scalar based table. It is currently a '
'< {tb} > based table.'
' --Kyubey'
.format(tb=density_model.classification))
elif (density_model is None):
# The user likely did not input a density model, the default
# is uniform distribution.
def uniform_density_function(x, y, z): return np.ones_like(x)
density_model = uniform_density_function
else:
raise TypeError('The input for the density equation must either '
'be a callable function or a string that can '
'be converted into an implicit callable function.'
' --Kyubey')
# Test polarization model factor
if (callable(polarization_model)):
polarization_model = \
Robust.valid.validate_function_call(polarization_model,
n_parameters=3)
elif (isinstance(polarization_model, str)):
polarization_model = \
Robust.inparse.user_equation_parse(polarization_model,
('x', 'y', 'z'))
elif (isinstance(polarization_model, (float, int))):
percent_polarized = float(copy.deepcopy(polarization_model))
# The user desires a constant value for the percent polarization.
def constant_function(x, y, z):
return np.full_like(x, percent_polarized)
polarization_model = constant_function
elif (isinstance(polarization_model, d_systize.InterpolationTable)):
# The user has inputted an interpolation table, record such.
input_interpolated_tables = True
if (polarization_model.classification == 'scalar'):
polarization_model = polarization_model.numerical_function()
else:
raise TypeError('The polarization model lookup table must be '
'a scalar based table. It is currently a '
'< {tb} > based table.'
' --Kyubey'
.format(tb=polarization_model.classification))
elif (polarization_model is None):
# The user likely did not input a polarization model; the default
# is a uniform total polarization.
def uniform_polarization_function(x, y, z): return np.ones_like(x)
polarization_model = uniform_polarization_function
else:
raise TypeError('The input for the polarization model must either '
'be a callable function, a string that can '
'be converted into an implicit callable function,'
'or a constant float/int value.'
' --Kyubey')
zeros_guess_count = Robust.valid.validate_int_value(zeros_guess_count,
greater_than=0)
# Automatically calculate the wrap angle along with the radian version
# of the angles.
ra_radians = float(coordinates.ra.hour * (np.pi / 12))
dec_radians = float(coordinates.dec.radian)
ra_wrap_angle = \
_Backend.astrcoord.auto_ra_wrap_angle(ra_radians) * ap_u.rad
# All model equations must be offset by the coordinates. This
# transformation assumes the flat approximation of the astronomical
# sky.
coordinates.ra.wrap_angle = ra_wrap_angle
# Translate the cloud model function.
def translate_cloud_model(x, y, z):
return cloud_model(x, y - ra_radians, z - dec_radians)
# Translate the magnetic field function.
def translate_magnetic_field(x, y, z):
return magnetic_field_model(x, y - ra_radians, z - dec_radians)
# Translate the density model function.
def translate_density_model(x, y, z):
return density_model(x, y - ra_radians, z - dec_radians)
# Translate the polarization model function.
def translate_polarization_model(x, y, z):
return polarization_model(x, y - ra_radians, z - dec_radians)
self.coordinates = coordinates
self.cloud_model = translate_cloud_model
self.magnetic_field = translate_magnetic_field
self.density_model = translate_density_model
self.polarization_model = translate_polarization_model
self._ra_wrap_angle = ra_wrap_angle
self._interpolated_tables = input_interpolated_tables
self._zeros_guess_count = zeros_guess_count
def _radianize_coordinates(self):
"""This method returns the RA and DEC in radians.
This method converts the RA and DEC coordinate measurements into
radians for better accounting.
Returns
--------
ra_radians : float
The RA coordinate in radians.
dec_radians : float
The DEC coordinate in radians.
"""
# Change the wrapping location if necessary. Astropy requires a unit.
self.coordinates.ra.wrap_angle = self._ra_wrap_angle
ra_radians = float(self.coordinates.ra.hour * (np.pi / 12))
dec_radians = float(self.coordinates.dec.radian)
return ra_radians, dec_radians
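# As a quick illustration of the hour-to-radian conversion used above (a
# worked example, not part of the original code): an RA of 6 hours maps to
# 6 * (np.pi / 12) = np.pi / 2 radians, i.e. 90 degrees, since 24 hours of
# right ascension span the full 2*pi circle.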
class ObservingRun():
"""Execute a mock observing run of an object.
This class models the observation of an object. Taking a
central sightline and the field of view, it gives back a set of
plots similar to those that an observer would see after data reduction.
The class itself does the computation in its methods, returning
a heatmap/contour plot from the observing run depending on the method.
Attributes
----------
self.target : :py:class:`ProtostarModel` object
The model target for simulated observing. Conceptually, the object
that the telescope observes.
self.sightline : :py:class:`Sightline` object
The primary sightline that is used for the model observing,
conceptually where the telescope is aimed.
self.offset : float
Half of the field of view of the observation, i.e. the maximum
angular offset from the primary sightline that is covered.
Methods
-------
Stokes_parameter_contours() : function {returns | ndarray,ndarray}
Compute the value of the Stokes parameters at random sightlines around
the primary sightline and plot them. Returns the values that were used
to plot.
"""
def __init__(self, observe_target, sightline, field_of_view):
"""Doing an observing run.
Create an observing run object, compiling the primary sightline and
the field of view.
Arguments
---------
observe_target : :py:class:`ProtostarModel` object
This is the object to be observed.
sightline : Sightline object
This is the primary sightline, in essence, where the telescope
is pointing in this simulation.
field_of_view : float
The width of the sky segment that is being observed. Must be in
radians. Applies to both RA and DEC evenly for a square image.
Seen range is `` (RA,DEC) ± field_of_view/2 ``.
"""
# Basic type checking
if (not isinstance(observe_target, ProtostarModel)):
raise TypeError('The observed target must be a ProtostarModel '
'class object.'
' --Kyubey')
if (not isinstance(sightline, Sightline)):
raise TypeError('The sightline must be a Sightline class object.'
' --Kyubey')
field_of_view = Robust.valid.validate_float_value(field_of_view,
greater_than=0)
# Check if both objects have the same RA wrapping angle. If not, then
# it is highly likely that the mapping will be incorrect.
if (observe_target._ra_wrap_angle != sightline._ra_wrap_angle):
Robust.kyubey_warning(Robust.AstronomyWarning,
('The RA wrapping angles for the two objects '
'are different. This may result in '
'improper mapping during computations.'))
# Check if the object is actually within the field of view.
obs_target_ra_radians, obs_target_dec_radians = \
observe_target._radianize_coordinates()
sightline_ra_radians, sightline_dec_radians = \
sightline._radianize_coordinates()
if (((sightline_ra_radians - field_of_view/2)
<= obs_target_ra_radians <=
(sightline_ra_radians + field_of_view/2)) and
((sightline_dec_radians - field_of_view/2)
<= obs_target_dec_radians <=
(sightline_dec_radians + field_of_view/2))):
# If at this stage, it should be fine.
pass
else:
raise Robust.AstronomyError('Object is not within the sightline '
'and field of view. Please revise. '
' --Kyubey')
# Assign and create.
self.target = observe_target
self.sightline = sightline
self.offset = field_of_view/2
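# A minimal usage sketch (hypothetical objects; `protostar` and
# `primary_sightline` stand in for a pre-built ProtostarModel and Sightline
# that share the same RA wrap angle):
#
#     run = ObservingRun(protostar, primary_sightline, field_of_view=0.01)
#
# The field of view is in radians and applies evenly to both RA and DEC.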
def Stokes_parameter_contours(self,
plot_parameters=True, n_axial_samples=25):
"""This function produces a contour plot of the stoke values.
This function generates a large number of random sightlines to
traceout contour information of the of the fields. From
there, is creates and returns a contour plot.
The values of the intensity, I, the two polarization values, Q,U, and
the polarization intensity, hypt(Q,U) is plotted.
Parameters
----------
plot_parameters : bool; optional
A boolean value to specify if the user wanted the parameters to be
plotted.
n_axial_samples : int; optional
The number of points along one RA or DEC axis to be sampled. The
resulting sample is a mesh n**2 between the bounds. Default is 25.
Returns
-------
ra_dec_array : tuple(ndarray)
This is a tuple of the values of the RA and DEC of the random
sightlines (arranged in parallel arrays).
stokes_parameters : tuple(ndarray)
This is a tuple of ndarrays of the Stokes parameters calculated by
the random sightlines.
"""
# Type check
n_axial_samples = Robust.valid.validate_int_value(n_axial_samples,
greater_than=0)
# Make a plotting background.
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5,
figsize=(15, 2), dpi=100,
sharex=True, sharey=True)
# Extract Stokes parameter data.
stokes_parameters, ra_dec_array, _ = \
self._Stoke_parameters(n_axial_samples)
# Decompose the stokes parameters into I,Q,U,V along with the angle
# of polarization.
I, Q, U, V = stokes_parameters
polar_I = np.hypot(Q, U)
angle = _Backend.efp.angle_from_Stokes_parameters(Q, U)
# Double check if the user actually wanted them plotted.
if (plot_parameters):
# Arrange the values into plottable values. The x-axis is RA, and
# the y-axis is DEC.
x_axis_plot = ra_dec_array[0]
y_axis_plot = ra_dec_array[1]
# Color maps: each plot gets its own kind of color map
# depending on what it shows.
intensity_maps = mpl.cm.get_cmap('inferno')
seismic_map = mpl.cm.get_cmap('seismic')
Q_polarization_map = \
_Backend.pltcust.zeroedColorMap(seismic_map, Q.min(), Q.max())
U_polarization_map = \
_Backend.pltcust.zeroedColorMap(seismic_map, U.min(), U.max())
PuOr_map = mpl.cm.get_cmap('PuOr')
angle_map = \
_Backend.pltcust.zeroedColorMap(PuOr_map,
angle.min(), angle.max())
# Extrapolate and plot a contour based on irregularly spaced data.
ax1_o = ax1.tricontourf(x_axis_plot, y_axis_plot, I, 50,
cmap=intensity_maps)
ax2_o = ax2.tricontourf(x_axis_plot, y_axis_plot, polar_I, 50,
cmap=intensity_maps)
ax3_o = ax3.tricontourf(x_axis_plot, y_axis_plot, Q, 50,
cmap=Q_polarization_map)
ax4_o = ax4.tricontourf(x_axis_plot, y_axis_plot, U, 50,
cmap=U_polarization_map)
ax5_o = ax5.tricontourf(x_axis_plot, y_axis_plot, angle, 50,
cmap=angle_map)
# Assign titles.
ax1.set_title('Total Intensity')
ax2.set_title('Polar Intensity')
ax3.set_title('Q Values')
ax4.set_title('U Values')
ax5.set_title('Angle')
# Assign color bar legends
fig1.colorbar(ax1_o, ax=ax1)
fig1.colorbar(ax2_o, ax=ax2)
fig1.colorbar(ax3_o, ax=ax3)
fig1.colorbar(ax4_o, ax=ax4)
fig1.colorbar(ax5_o, ax=ax5)
plt.show()
# Just in case they want to play with the data.
return ra_dec_array, stokes_parameters
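# A minimal usage sketch (assuming the hypothetical `run` instance from
# above):
#
#     ra_dec, stokes = run.Stokes_parameter_contours(
#         plot_parameters=False, n_axial_samples=25)
#     I, Q, U, V = stokes
#
# With plot_parameters=False the contour plots are not drawn; only the
# sampled RA/DEC arrays and the Stokes parameters are returned.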
def _compute_integrated_intensity(self, sightline):
"""Computes the total strength of the light/E-field.
Given a sightline independent of the primary one, this function
computes the integrated value of the magnitude of the E-field. It
is assumed that the magnitude of the E-field is directly related to
energy given by the Poynting vector.
Parameters
----------
sightline : :py:class:`Sightline` object
The sightline along which the intensity will be calculated,
using the density function.
Returns
-------
integrated_intensity : float
The total integrated intensity.
polarized_integrated_intensity : float
The total integrated intensity from polarization contribution,
given by the polarization model function.
error : float
The error of the integrated intensity.
"""
# Basic type checking.
if (not isinstance(sightline, Sightline)):
raise TypeError('The sightline must be a sightline object.'
' --Kyubey')
# Extract information about the target. The coefficient is rather
# arbitrary.
box_width = 10 * self.offset
# Extract sightline information
sightline_center, sightline_slopes = sightline.sightline_parameters()
# If the Protostar model contains an interpolation table instead of
# a normal function, assume the usage of Simpson's integration.
if (self.target._interpolated_tables):
integral_method = 'simpsons'
else:
integral_method = 'scipy'
# Integration functions with a polarization dependence, as the amount
# of polarization influences the measured intensity. The polarization
# model must enter as sqrt(f(x)) because the user expects
# I_p = I_t * p, while the most efficient implementation (modifying
# the E-fields) produces the relationship I_p = I_t * p**2.
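# Worked example (illustrative numbers only): for p = 0.25, weighting the
# E-field by sqrt(p) yields an intensity factor of (sqrt(0.25))**2 = 0.25,
# i.e. I_p = I_t * p as the user expects, whereas weighting by p directly
# would give I_p = I_t * 0.0625.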
def total_intensity(x, y, z):
total = self.target.density_model(x, y, z)
return total
def polarization_intensity(x, y, z):
total = (self.target.density_model(x, y, z)
* np.sqrt(np.abs(self.target.polarization_model(x, y, z))))
return total
# Integrate over the density function.
integrated_intensity, int_error = _Backend.cli.cloud_line_integral(
total_intensity, self.target.cloud_model,
sightline_center, box_width,
view_line_deltas=sightline_slopes,
n_guesses=self.target._zeros_guess_count,
integral_method=integral_method)
# Also find out the total polarized intensity.
polarized_integrated_intensity, pol_error = \
_Backend.cli.cloud_line_integral(
polarization_intensity, self.target.cloud_model,
sightline_center, box_width,
view_line_deltas=sightline_slopes,
n_guesses=self.target._zeros_guess_count,
integral_method=integral_method)
# Error propagates in quadrature
error = np.hypot(int_error, pol_error)
# Return
return integrated_intensity, polarized_integrated_intensity, error
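# A minimal usage sketch (hypothetical `run` and `offset_sightline`
# objects):
#
#     total_I, polar_I, err = run._compute_integrated_intensity(
#         offset_sightline)
#
# total_I is the density-weighted intensity along the sightline, polar_I
# its polarized contribution, and err the quadrature-combined error.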
def _compute_integrated_magnetic_field(self, sightline):
"""Computes total magnetic field vectors over a sightline.
Given a sightline independent of the primary one, compute the
integrated values of the magnetic field vectors. The absolute values
are of limited importance because they come from an improper
summation; it is the angles that matter most. Nonetheless, magnitude
is preserved.
Parameters
----------
sightline : :py:class:`Sightline` object
The sightline along which the magnetic fields will be
calculated.
Returns
-------
Bfield_x_integrated : float
The total value of all x-axial magnetic field vectors added
together through the sightline and object cloud.
Bfield_y_integrated : float
The total value of all y-axial magnetic field vectors added
together through the sightline and object cloud.
Bfield_z_integrated : float
The total value of all z-axial magnetic field vectors added
together through the sightline and object cloud.
errors : ndarray
A collection of error values, parallel to the float value
collection above.
"""
# Basic type checking.
if (not isinstance(sightline, Sightline)):
raise TypeError('The sightline must be a sightline object.'
' --Kyubey')
# Extract information about the target. The coefficient is rather
# arbitrary.
box_width = 10 * self.offset
# If the Protostar model contains an interpolation table instead of
# a normal function, assume the usage of Simpson's integration.
if (self.target._interpolated_tables):
integral_method = 'simpsons'
else:
integral_method = 'scipy'
# Define custom functions such that integrating over a vector function
# is instead an integration over the three independent dimensions.
def target_cloud_Bfield_x(x, y, z):
return self.target.magnetic_field(x, y, z)[0]
def target_cloud_Bfield_y(x, y, z):
return self.target.magnetic_field(x, y, z)[1]
def target_cloud_Bfield_z(x, y, z):
return self.target.magnetic_field(x, y, z)[2]
# Extract sightline information
sightline_center, sightline_slopes = sightline.sightline_parameters()
# Begin computation.
Bfield_x_integrated, error_x = _Backend.cli.cloud_line_integral(
target_cloud_Bfield_x, self.target.cloud_model,
sightline_center, box_width,
view_line_deltas=sightline_slopes,
n_guesses=self.target._zeros_guess_count,
integral_method=integral_method)
Bfield_y_integrated, error_y = _Backend.cli.cloud_line_integral(
target_cloud_Bfield_y, self.target.cloud_model,
sightline_center, box_width,
view_line_deltas=sightline_slopes,
n_guesses=self.target._zeros_guess_count,
integral_method=integral_method)
Bfield_z_integrated, error_z = _Backend.cli.cloud_line_integral(
target_cloud_Bfield_z, self.target.cloud_model,
sightline_center, box_width,
view_line_deltas=sightline_slopes,
n_guesses=self.target._zeros_guess_count,
integral_method=integral_method)
error = np.array([error_x, error_y, error_z], dtype=float)
return (Bfield_x_integrated,
Bfield_y_integrated,
Bfield_z_integrated,
error)
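# Note: unlike the intensity case above, the integration errors here are
# returned as a per-axis (x, y, z) array rather than combined in
# quadrature.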
def _Stoke_parameters(self, n_axial_samples):
"""Return the stoke parameters for a large range of random sightlines.
This function computes an entire slew of Stokes parameters by
generating random sightlines within the field of view of the primary
sightline. This function is the precursor for all of the contour plots.
Parameters
----------
n_axial_samples : int
The number of points along one RA or DEC axis to be sampled. The
resulting sample is a mesh n**2 between the bounds.
Returns
-------
stokes_parameters : ndarray
This is the array of all four Stokes parameters over all of the
random sightlines.
ra_dec_array : ndarray
This is the array of all of the random sightline's RA and DEC
values.
sightline_list : ndarray
This is an array containing all of the sightlines' SkyCoord
objects, in case they are needed.
"""
# Type checking.
n_axial_samples = Robust.valid.validate_int_value(n_axial_samples,
greater_than=0)
# Work in radians for the sightline's center.
target_ra, target_dec = self.sightline._radianize_coordinates()
# Create a large list of sightlines.
ra_range = np.linspace(target_ra - self.offset,
target_ra + self.offset,
n_axial_samples)
dec_range = np.linspace(target_dec - self.offset,
target_dec + self.offset,
n_axial_samples)
# Establish a mesh grid, then flatten to 1D arrays of points.
ra_mesh, dec_mesh = np.meshgrid(ra_range, dec_range)
ra_array =
|
np.ravel(ra_mesh)
|
numpy.ravel
|
import numpy as np
import math
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import Matern
def DistCalculation(pt1,pt2):
dist_sq = (pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2
dist = math.sqrt(dist_sq)
return dist
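# Note: an equivalent one-liner using the standard library would be
# math.hypot(pt1[0] - pt2[0], pt1[1] - pt2[1]).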
# This file is used to test whether a GP can represent a certain occupancy grid map
x = np.arange(0.5,10.5,1.0)
y =
|
np.arange(0.5,10.5,1.0)
|
numpy.arange
|
################################################################################
# Copyright (c) 2009-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for the projection module."""
from __future__ import print_function, division, absolute_import
import threading
import unittest
import numpy as np
import katpoint
from katpoint.projection import (OutOfRangeError, out_of_range_context, treat_out_of_range_values,
set_out_of_range_treatment, get_out_of_range_treatment)
try:
from .aips_projection import newpos, dircos
found_aips = True
except ImportError:
found_aips = False
def skip(reason=''):
"""Use nose to skip a test."""
try:
import nose
raise nose.SkipTest(reason)
except ImportError:
pass
def assert_angles_almost_equal(x, y, decimal):
def primary_angle(x):
return x - np.round(x / (2.0 * np.pi)) * 2.0 * np.pi
x = np.asarray(x)
y = np.asarray(y)
np.testing.assert_array_equal(0 * x, 0 * y,
'Array shapes and/or NaN patterns differ')
d = primary_angle(np.nan_to_num(x - y))
np.testing.assert_almost_equal(d, np.zeros(np.shape(x)), decimal=decimal)
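# For example, angles that differ by a full turn compare as equal:
# assert_angles_almost_equal(2.0 * np.pi - 1e-12, 0.0, decimal=10) passes
# because the comparison is made on the primary-angle difference.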
class TestOutOfRangeTreatment(unittest.TestCase):
"""Test treatment of out-of-range values."""
def setUp(self):
self._old_treatment = get_out_of_range_treatment()
def test_treatment_setup(self):
set_out_of_range_treatment('raise')
self.assertEqual(get_out_of_range_treatment(), 'raise')
set_out_of_range_treatment('nan')
self.assertEqual(get_out_of_range_treatment(), 'nan')
set_out_of_range_treatment('clip')
self.assertEqual(get_out_of_range_treatment(), 'clip')
with self.assertRaises(ValueError):
set_out_of_range_treatment('bad treatment')
with out_of_range_context('raise'):
self.assertEqual(get_out_of_range_treatment(), 'raise')
self.assertEqual(get_out_of_range_treatment(), 'clip')
def test_out_of_range_handling_array(self):
x = [1, 2, 3, 4]
y = treat_out_of_range_values(x, 'Should not happen', lower=0, upper=5)
np.testing.assert_array_equal(y, x)
with out_of_range_context('raise'):
with self.assertRaises(OutOfRangeError):
y = treat_out_of_range_values(x, 'Out of range', lower=2.1)
with out_of_range_context('nan'):
y = treat_out_of_range_values(x, 'Out of range', lower=2.1)
np.testing.assert_array_equal(y, [np.nan, np.nan, 3.0, 4.0])
with out_of_range_context('clip'):
y = treat_out_of_range_values(x, 'Out of range', upper=1.1)
np.testing.assert_array_equal(y, [1.0, 1.1, 1.1, 1.1])
def test_out_of_range_handling_scalar(self):
x = 2
y = treat_out_of_range_values(x, 'Should not happen', lower=0, upper=5)
np.testing.assert_array_equal(y, x)
with out_of_range_context('raise'):
with self.assertRaises(OutOfRangeError):
y = treat_out_of_range_values(x, 'Out of range', lower=2.1)
with out_of_range_context('nan'):
y = treat_out_of_range_values(x, 'Out of range', lower=2.1)
np.testing.assert_array_equal(y, np.nan)
with out_of_range_context('clip'):
y = treat_out_of_range_values(x, 'Out of range', upper=1.1)
np.testing.assert_array_equal(y, 1.1)
def test_scalar_vs_0d(self):
with out_of_range_context('clip'):
x = 2.0
y = treat_out_of_range_values(x, 'Out of range', upper=1.1)
assert np.isscalar(y)
x = np.array(2.0)
y = treat_out_of_range_values(x, 'Out of range', upper=1.1)
assert not np.isscalar(y)
def test_clipping_of_minor_outliers(self):
x = 1.0 + np.finfo(float).eps
with out_of_range_context('raise'):
y = treat_out_of_range_values(x, 'Should not trigger false alarm', upper=1.0)
assert y == 1.0
with out_of_range_context('nan'):
y = treat_out_of_range_values(x, 'Should not trigger false alarm', upper=1.0)
assert y == 1.0
with out_of_range_context('clip'):
y = treat_out_of_range_values(x, 'Should not trigger false alarm', upper=1.0)
assert y == 1.0
def test_threading(self):
def my_thread():
try:
result.append(treat_out_of_range_values(2.0, 'Should raise', upper=1.0))
except Exception as exc:
result.append(exc)
result = []
thread = threading.Thread(target=my_thread)
with out_of_range_context('nan'):
# Make sure the thread code runs inside our out_of_range_context
thread.start()
thread.join()
assert isinstance(result[0], OutOfRangeError)
def tearDown(self):
set_out_of_range_treatment(self._old_treatment)
class TestProjectionSIN(unittest.TestCase):
"""Test orthographic projection."""
def setUp(self):
rs = np.random.RandomState(42)
self.plane_to_sphere = katpoint.plane_to_sphere['SIN']
self.sphere_to_plane = katpoint.sphere_to_plane['SIN']
N = 100
max_theta = np.pi / 2.0
self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
# Keep away from poles (leave them as corner cases)
self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
# (x, y) points within unit circle
theta = max_theta * rs.rand(N)
phi = 2 * np.pi * rs.rand(N)
self.x = np.sin(theta) * np.cos(phi)
self.y = np.sin(theta) * np.sin(phi)
def test_random_closure(self):
"""SIN projection: do random projections and check closure."""
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
np.testing.assert_almost_equal(self.x, xx, decimal=10)
np.testing.assert_almost_equal(self.y, yy, decimal=10)
assert_angles_almost_equal(az, aa, decimal=10)
assert_angles_almost_equal(el, ee, decimal=10)
def test_aips_compatibility(self):
"""SIN projection: compare with original AIPS routine."""
if not found_aips:
skip("AIPS projection module not found")
return
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
for n in range(len(az)):
az_aips[n], el_aips[n], ierr = newpos(
2, self.az0[n], self.el0[n], self.x[n], self.y[n])
x_aips[n], y_aips[n], ierr = dircos(
2, self.az0[n], self.el0[n], az[n], el[n])
self.assertEqual(ierr, 0)
assert_angles_almost_equal(az, az_aips, decimal=9)
assert_angles_almost_equal(el, el_aips, decimal=9)
np.testing.assert_almost_equal(xx, x_aips, decimal=9)
np.testing.assert_almost_equal(yy, y_aips, decimal=9)
def test_corner_cases_sphere_to_plane(self):
"""SIN projection: test special corner cases (sphere->plane)."""
# Origin
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
# Points 90 degrees from reference point on sphere
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
# Reference point at pole on sphere
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 1e-8))
np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
def test_corner_cases_plane_to_sphere(self):
"""SIN projection: test special corner cases (plane->sphere)."""
# Origin
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
# Points on unit circle in plane
ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
# Reference point at pole on sphere
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 1.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -1.0, 0.0))
assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 1.0))
assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
def test_out_of_range_cases_sphere_to_plane(self):
"""SIN projection: test out-of-range cases (sphere->plane)."""
# Points outside allowed domain on sphere
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
with out_of_range_context('nan'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
with out_of_range_context('clip'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
def test_out_of_range_cases_plane_to_sphere(self):
"""SIN projection: test out-of-range cases (plane->sphere)."""
# Points outside allowed domain in plane
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, 0.0, 2.0, 0.0)
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, 0.0, 0.0, 2.0)
with out_of_range_context('nan'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
with out_of_range_context('clip'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
class TestProjectionTAN(unittest.TestCase):
"""Test gnomonic projection."""
def setUp(self):
rs = np.random.RandomState(42)
self.plane_to_sphere = katpoint.plane_to_sphere['TAN']
self.sphere_to_plane = katpoint.sphere_to_plane['TAN']
N = 100
# Stay away from edge of hemisphere
max_theta = np.pi / 2.0 - 0.01
self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
# Keep away from poles (leave them as corner cases)
self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
theta = max_theta * rs.rand(N)
phi = 2 * np.pi * rs.rand(N)
# Perform inverse TAN mapping to spread out points on plane
self.x = np.tan(theta) * np.cos(phi)
self.y = np.tan(theta) * np.sin(phi)
def test_random_closure(self):
"""TAN projection: do random projections and check closure."""
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
np.testing.assert_almost_equal(self.x, xx, decimal=8)
np.testing.assert_almost_equal(self.y, yy, decimal=8)
assert_angles_almost_equal(az, aa, decimal=8)
assert_angles_almost_equal(el, ee, decimal=8)
def test_aips_compatibility(self):
"""TAN projection: compare with original AIPS routine."""
if not found_aips:
skip("AIPS projection module not found")
return
# AIPS TAN only deprojects (x, y) coordinates within unit circle
r = self.x * self.x + self.y * self.y
az0, el0 = self.az0[r <= 1.0], self.el0[r <= 1.0]
x, y = self.x[r <= 1.0], self.y[r <= 1.0]
az, el = self.plane_to_sphere(az0, el0, x, y)
xx, yy = self.sphere_to_plane(az0, el0, az, el)
az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
for n in range(len(az)):
az_aips[n], el_aips[n], ierr = newpos(
3, az0[n], el0[n], x[n], y[n])
x_aips[n], y_aips[n], ierr = dircos(
3, az0[n], el0[n], az[n], el[n])
self.assertEqual(ierr, 0)
assert_angles_almost_equal(az, az_aips, decimal=10)
assert_angles_almost_equal(el, el_aips, decimal=10)
np.testing.assert_almost_equal(xx, x_aips, decimal=10)
np.testing.assert_almost_equal(yy, y_aips, decimal=10)
def test_corner_cases_sphere_to_plane(self):
"""TAN projection: test special corner cases (sphere->plane)."""
# Origin
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
# Points 45 degrees from reference point on sphere
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 4.0, 0.0))
np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 4.0, 0.0))
np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 4.0))
np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 4.0))
np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
# Reference point at pole on sphere
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, np.pi / 4.0))
np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, np.pi / 4.0))
np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, np.pi / 4.0))
np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, np.pi / 4.0))
np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
def test_corner_cases_plane_to_sphere(self):
"""TAN projection: test special corner cases (plane->sphere)."""
# Origin
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
# Points on unit circle in plane
ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 4.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
assert_angles_almost_equal(ae, [-np.pi / 4.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 4.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
# Reference point at pole on sphere
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 1.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 2.0, -np.pi / 4.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -1.0, 0.0))
assert_angles_almost_equal(ae, [-np.pi / 2.0, -np.pi / 4.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 1.0))
assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
assert_angles_almost_equal(ae, [np.pi, -np.pi / 4.0], decimal=12)
def test_out_of_range_cases_sphere_to_plane(self):
"""TAN projection: test out-of-range cases (sphere->plane)."""
# Points outside allowed domain on sphere
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
with out_of_range_context('nan'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
with out_of_range_context('clip'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -1e6], decimal=4)
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi, 0.0))
np.testing.assert_almost_equal(xy, [-1e6, 0.0], decimal=4)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_almost_equal(xy, [0.0, 1e6], decimal=4)
def test_out_of_range_cases_plane_to_sphere(self):
"""TAN projection: test out-of-range cases (plane->sphere)."""
# Points outside allowed domain in plane
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
with out_of_range_context('nan'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
with out_of_range_context('clip'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
class TestProjectionARC(unittest.TestCase):
"""Test zenithal equidistant projection."""
def setUp(self):
rs = np.random.RandomState(42)
self.plane_to_sphere = katpoint.plane_to_sphere['ARC']
self.sphere_to_plane = katpoint.sphere_to_plane['ARC']
N = 100
# Stay away from edge of circle
max_theta = np.pi - 0.01
self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
# Keep away from poles (leave them as corner cases)
self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
# (x, y) points within circle of radius pi
theta = max_theta * rs.rand(N)
phi = 2 * np.pi * rs.rand(N)
self.x = theta * np.cos(phi)
self.y = theta * np.sin(phi)
def test_random_closure(self):
"""ARC projection: do random projections and check closure."""
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
np.testing.assert_almost_equal(self.x, xx, decimal=8)
np.testing.assert_almost_equal(self.y, yy, decimal=8)
assert_angles_almost_equal(az, aa, decimal=8)
assert_angles_almost_equal(el, ee, decimal=8)
def test_aips_compatibility(self):
"""ARC projection: compare with original AIPS routine."""
if not found_aips:
skip("AIPS projection module not found")
return
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
for n in range(len(az)):
az_aips[n], el_aips[n], ierr = newpos(
4, self.az0[n], self.el0[n], self.x[n], self.y[n])
x_aips[n], y_aips[n], ierr = dircos(
4, self.az0[n], self.el0[n], az[n], el[n])
self.assertEqual(ierr, 0)
assert_angles_almost_equal(az, az_aips, decimal=8)
assert_angles_almost_equal(el, el_aips, decimal=8)
np.testing.assert_almost_equal(xx, x_aips, decimal=8)
np.testing.assert_almost_equal(yy, y_aips, decimal=8)
def test_corner_cases_sphere_to_plane(self):
"""ARC projection: test special corner cases (sphere->plane)."""
# Origin
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
# Points 90 degrees from reference point on sphere
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
# Reference point at pole on sphere
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
# Point diametrically opposite the reference point on sphere
xy = np.array(self.sphere_to_plane(np.pi, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(np.abs(xy), [np.pi, 0.0], decimal=12)
def test_corner_cases_plane_to_sphere(self):
"""ARC projection: test special corner cases (plane->sphere)."""
# Origin
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
# Points on unit circle in plane
ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
assert_angles_almost_equal(ae, [1.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
assert_angles_almost_equal(ae, [-1.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
assert_angles_almost_equal(ae, [0.0, 1.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
assert_angles_almost_equal(ae, [0.0, -1.0], decimal=12)
# Points on circle with radius pi in plane
ae = np.array(self.plane_to_sphere(0.0, 0.0, np.pi, 0.0))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, -np.pi, 0.0))
assert_angles_almost_equal(ae, [-np.pi, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, np.pi))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -np.pi))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
# Reference point at pole on sphere
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, np.pi / 2.0, 0.0))
assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -np.pi / 2.0, 0.0))
assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, np.pi / 2.0))
assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -np.pi / 2.0))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
def test_out_of_range_cases_sphere_to_plane(self):
"""ARC projection: test out-of-range cases (sphere->plane)."""
# Points outside allowed domain on sphere
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, np.pi, 0.0, 0.0)
self.assertRaises(OutOfRangeError,
self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
with out_of_range_context('nan'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_array_equal(xy, [np.nan, np.nan])
with out_of_range_context('clip'):
xy = np.array(self.sphere_to_plane(0.0, np.pi, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi))
np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
def test_out_of_range_cases_plane_to_sphere(self):
"""ARC projection: test out-of-range cases (plane->sphere)."""
# Points outside allowed domain in plane
with out_of_range_context('raise'):
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, np.pi, 0.0, 0.0)
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, 0.0, 4.0, 0.0)
self.assertRaises(OutOfRangeError,
self.plane_to_sphere, 0.0, 0.0, 0.0, 4.0)
with out_of_range_context('nan'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
ae = np.array(self.plane_to_sphere(0.0, 0.0, 4.0, 0.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 4.0))
np.testing.assert_array_equal(ae, [np.nan, np.nan])
with out_of_range_context('clip'):
ae = np.array(self.plane_to_sphere(0.0, np.pi, 0.0, 0.0))
assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 4.0, 0.0))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 4.0))
assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
class TestProjectionSTG(unittest.TestCase):
"""Test stereographic projection."""
def setUp(self):
rs = np.random.RandomState(42)
self.plane_to_sphere = katpoint.plane_to_sphere['STG']
self.sphere_to_plane = katpoint.sphere_to_plane['STG']
N = 100
# Stay well away from point of projection
max_theta = 0.8 * np.pi
self.az0 = np.pi * (2.0 * rs.rand(N) - 1.0)
# Keep away from poles (leave them as corner cases)
self.el0 = 0.999 * np.pi * (rs.rand(N) - 0.5)
# Perform inverse STG mapping to spread out points on plane
theta = max_theta * rs.rand(N)
r = 2.0 * np.sin(theta) / (1.0 + np.cos(theta))
phi = 2 * np.pi * rs.rand(N)
self.x = r * np.cos(phi)
self.y = r * np.sin(phi)
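# Note: 2*sin(theta)/(1 + cos(theta)) equals 2*tan(theta/2), the
# stereographic radius used by this projection (compare the corner case
# below where theta = pi/2 maps to a plane radius of 2.0).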
def test_random_closure(self):
"""STG projection: do random projections and check closure."""
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
np.testing.assert_almost_equal(self.x, xx, decimal=9)
np.testing.assert_almost_equal(self.y, yy, decimal=9)
assert_angles_almost_equal(az, aa, decimal=9)
assert_angles_almost_equal(el, ee, decimal=9)
def test_aips_compatibility(self):
"""STG projection: compare with original AIPS routine."""
if not found_aips:
skip("AIPS projection module not found")
return
az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
for n in range(len(az)):
az_aips[n], el_aips[n], ierr = newpos(
6, self.az0[n], self.el0[n], self.x[n], self.y[n])
x_aips[n], y_aips[n], ierr = dircos(
6, self.az0[n], self.el0[n], az[n], el[n])
self.assertEqual(ierr, 0)
# AIPS NEWPOS STG has poor accuracy on azimuth angle (large closure errors by itself)
# assert_angles_almost_equal(az, az_aips, decimal=9)
assert_angles_almost_equal(el, el_aips, decimal=9)
np.testing.assert_almost_equal(xx, x_aips, decimal=9)
np.testing.assert_almost_equal(yy, y_aips, decimal=9)
def test_corner_cases_sphere_to_plane(self):
"""STG projection: test special corner cases (sphere->plane)."""
# Origin
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
# Points 90 degrees from reference point on sphere
xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
np.testing.assert_almost_equal(xy, [-2.0, 0.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
# Reference point at pole on sphere
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
|
np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
|
numpy.testing.assert_almost_equal
|
#!/usr/bin/env python
# coding: utf-8
import pickle
import numpy as np
import pandas as pds
from pyro.ops.stats import quantile
from scipy.stats import norm
import data_loader
import pyro_model.helper
# ## loading data
countries = [
'United Kingdom',
'Italy',
'Germany',
'Spain',
'US',
'France',
'Belgium',
'Korea, South',
'Brazil',
'Iran',
'Netherlands',
'Canada',
'Turkey',
'Romania',
'Portugal',
'Sweden',
'Switzerland',
'Ireland',
'Hungary',
'Denmark',
'Austria',
'Mexico',
'India',
'Ecuador',
'Russia',
'Peru',
'Indonesia',
'Poland',
'Philippines',
'Japan',
'Pakistan'
]
prefix = 'trained_models/'
# prefix = ''
pad = 24
data_dict = data_loader.get_data_pyro(countries, smart_start=False, pad=pad)
data_dict = pyro_model.helper.smooth_daily(data_dict)
days = 14
train_len = data_dict['cum_death'].shape[0] - days
test_dates = data_dict['date_list'][train_len:]
len(data_dict['date_list'][train_len:])
# ## loading results
seed_list = []
predictive_list = []
samples_list = []
for seed in range(15):
model_id = 'day-{}-rng-{}'.format(days, seed)
try:
with open(prefix + 'Loop{}/{}-predictive.pkl'.format(days, model_id), 'rb') as f:
predictive = pickle.load(f)
except Exception:
continue
predictive_list.append(predictive)
with open(prefix + 'Loop{}/{}-samples.pkl'.format(days, model_id), 'rb') as f:
samples = pickle.load(f)
samples_list.append(samples)
seed_list.append(seed)
# validation accuracy
val_window = 14
seir_error_list = []
for i in range(len(predictive_list)):
seir_train = quantile(predictive_list[i]['prediction'].squeeze(), 0.5, dim=0)[-val_window + 1:, :].numpy()
seir_train = np.diff(seir_train, axis=0)
seir_label = data_dict['daily_death'][train_len - val_window:train_len, :].numpy()
seir_error = np.abs(np.sum(seir_train, axis=0) - np.sum(seir_label, axis=0))
seir_error_list.append(seir_error)
seir_error = np.stack(seir_error_list, axis=0)
best_model = np.argmin(seir_error, axis=0)
best_seed = [seed_list[x] for x in best_model]
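# best_model[j] indexes, within predictive_list/samples_list, the run whose
# summed predicted daily deaths over the validation window come closest to
# the observed total for country j; best_seed maps those indices back to
# the original RNG seeds.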
test_len = 14
best_error_list = []
pred_low_list = []
pred_high_list = []
covered_list = []
length_list = []
crps_list = []
test_len = test_len - 1
for j, i in zip(range(len(countries)), best_model):
c = countries[j]
# get daily death label
seir_label = data_dict['daily_death'][train_len:, j].numpy()
samples = samples_list[i]
sample_daily = np.diff(samples, axis=1)
model_pred = np.mean(sample_daily, axis=0)[:, j]
err = np.mean(np.abs(model_pred - seir_label)[:test_len + 1])
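# model_pred is the posterior-mean daily-death trajectory for country j
# (obtained by differencing the cumulative samples), and err is its mean
# absolute error against the observed daily deaths over the test window.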
best_error_list.append(err)
# percentiles
sample_daily[sample_daily < 0] = 0
model_pred_low = np.quantile(sample_daily, 0.025, axis=0)[:, j]
model_pred_high = np.quantile(sample_daily, 0.975, axis=0)[:, j]
covered = np.mean((seir_label >= model_pred_low)[:test_len + 1] & (seir_label <= model_pred_high)[:test_len + 1])
length = np.mean((model_pred_high - model_pred_low)[:test_len + 1])
# crps
q = [0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3,
0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75,
0.8, 0.85, 0.9, 0.95, 0.975, 0.99]
model_quantile = np.quantile(sample_daily, q, axis=0)[:, :, j]
crps_list0 = list()
for k in range(model_quantile.shape[1]):
pred = model_quantile[:, k]
proba = q.copy()
less_ind = pred < seir_label[k]
proba_label = np.ones_like(proba)
proba_label[less_ind] = 0
crps_list0.append(np.mean((proba_label - proba) ** 2))
crps_list.append(np.mean(
|
np.array(crps_list0)
|
numpy.array
|
import os
import pandas as pd
import numpy as np
import uproot
import h5py
from twaml.data import dataset
from twaml.data import scale_weight_sum
from twaml.data import from_root, from_pytables, from_h5
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds = from_root(
["tests/data/test_file.root"], name="myds", branches=branches, TeXlabel=r"$t\bar{t}$"
)
def test_name():
assert ds.name == "myds"
assert ds.TeXlabel == "$t\\bar{t}$"
def test_no_name():
dst = from_root(["tests/data/test_file.root"], branches=branches)
assert dst.name == "test_file.root"
def test_content():
ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
raws = [t.array("pT_lep1") for t in ts]
raw = np.concatenate([raws])
bins = np.linspace(0, 800, 21)
n1, bins1 = np.histogram(raw, bins=bins)
n2, bins2 = np.histogram(ds.df.pT_lep1.to_numpy(), bins=bins)
np.testing.assert_array_equal(n1, n2)
def test_nothing():
dst = from_root(["tests/data/test_file.root"], branches=branches)
assert dst.files[0].exists()
def test_with_executor():
lds = from_root(["tests/data/test_file.root"], branches=branches, nthreads=4)
np.testing.assert_array_almost_equal(lds.weights, ds.weights, 8)
def test_weight():
ts = [uproot.open(f)[ds.tree_name] for f in ds.files]
raws = [t.array("weight_nominal") for t in ts]
raw = np.concatenate(raws)
raw = raw * 150.0
ds.weights = ds.weights * 150.0
np.testing.assert_array_almost_equal(raw, ds.weights, 6)
def test_add():
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
ds2.weights = ds2.weights * 22
combined = ds + ds2
comb_w = np.concatenate([ds.weights, ds2.weights])
comb_df = pd.concat([ds.df, ds2.df])
np.testing.assert_array_almost_equal(comb_w, combined.weights, 5)
np.testing.assert_array_almost_equal(comb_df.get_values(), combined.df.get_values(), 5)
assert ds.name == combined.name
assert ds.tree_name == combined.tree_name
assert ds.label == combined.label
def test_selection():
ds2 = from_root(
["tests/data/test_file.root"],
name="ds2",
selection="(reg2j2b==True) & (OS == True) & (pT_lep1 > 50)",
)
upt = uproot.open("tests/data/test_file.root")["WtLoop_nominal"]
reg2j2b = upt.array("reg2j2b")
OS = upt.array("OS")
pT_lep1 = upt.array("pT_lep1")
sel = np.logical_and(np.logical_and(reg2j2b, OS), pT_lep1 > 50)
w = upt.array("weight_nominal")[sel]
assert np.allclose(w, ds2.weights)
# np.testing.assert_array_almost_equal(w, ds2.weights)
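# Note: with numpy boolean arrays the same selection could be written as
# sel = reg2j2b & OS & (pT_lep1 > 50), mirroring the selection string
# passed to from_root above.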
def test_append():
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds1 = from_root(["tests/data/test_file.root"], name="myds", branches=branches)
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
ds2.weights = ds2.weights * 5
# raw
comb_w = np.concatenate([ds1.weights, ds2.weights])
comb_df = pd.concat([ds1.df, ds2.df])
# appended
ds1.append(ds2)
# now test
np.testing.assert_array_almost_equal(comb_w, ds1.weights, 5)
np.testing.assert_array_almost_equal(comb_df.get_values(), ds1.df.get_values(), 5)
def test_auxweights():
branches = ["pT_lep1", "pT_lep2", "eta_lep1", "eta_lep2"]
ds1 = from_root(
["tests/data/test_file.root"],
name="myds",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
ds2 = from_root(
["tests/data/test_file.root"],
name="ds2",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
ds1.append(ds2)
dsa = from_root(
["tests/data/test_file.root"],
name="myds",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
dsb = from_root(
["tests/data/test_file.root"],
name="ds2",
branches=branches,
auxweights=["phi_lep1", "phi_lep2"],
)
dsc = dsa + dsb
np.testing.assert_array_almost_equal(
ds1.auxweights["phi_lep1"], dsc.auxweights["phi_lep1"], 5
)
dsc.change_weights("phi_lep2")
assert dsc.weight_name == "phi_lep2"
pl2 = uproot.open("tests/data/test_file.root")["WtLoop_nominal"].array("phi_lep2")
nw2 = uproot.open("tests/data/test_file.root")["WtLoop_nominal"].array("weight_nominal")
ds2.change_weights("phi_lep2")
np.testing.assert_array_almost_equal(ds2.weights, pl2, 5)
assert "phi_lep2" not in ds2.auxweights
assert "weight_nominal" in ds2.auxweights
ds2.to_pytables("outfile1.h5")
ds2pt = from_pytables("outfile1.h5", "ds2", weight_name="phi_lep2")
print(ds2pt.auxweights)
np.testing.assert_array_almost_equal(ds2pt.auxweights["weight_nominal"].to_numpy(), nw2)
os.remove("outfile1.h5")
assert True
def test_label():
ds2 = from_root(["tests/data/test_file.root"], name="ds2", branches=branches)
assert ds2.label is None
assert ds2.label_asarray() is None
ds2.label = 6
la = ds2.label_asarray()
la_raw = np.ones_like(ds2.weights, dtype=np.int64) * 6
|
np.testing.assert_array_equal(la, la_raw)
|
numpy.testing.assert_array_equal
|
import os
import numpy as np
from optparse import OptionParser
import glob
import pandas as pd
"""
def batch(dbDir, dbName, scriptref, nproc, season, diffflux, outDir):
cwd = os.getcwd()
dirScript = cwd + "/scripts"
if not os.path.isdir(dirScript):
os.makedirs(dirScript)
dirLog = cwd + "/logs"
if not os.path.isdir(dirLog):
os.makedirs(dirLog)
id = '{}_{}_{}'.format(dbName, season, diffflux)
name_id = 'metric_{}'.format(id)
log = dirLog + '/'+name_id+'.log'
qsub = 'qsub -P P_lsst -l sps=1,ct=10:00:00,h_vmem=16G -j y -o {} -pe multicores {} <<EOF'.format(
log, nproc)
# qsub = "qsub -P P_lsst -l sps=1,ct=05:00:00,h_vmem=16G -j y -o "+ log + " <<EOF"
scriptName = dirScript+'/'+name_id+'.sh'
script = open(scriptName, "w")
script.write(qsub + "\n")
# script.write("#!/usr/local/bin/bash\n")
script.write("#!/bin/env bash\n")
script.write(" cd " + cwd + "\n")
script.write(" echo 'sourcing setups' \n")
script.write(" source setup_release.sh CCIN2P3\n")
script.write("echo 'sourcing done' \n")
cmd = 'python {} --dbDir {} --dbName {} --nproc {} --season {} --diffflux {} --outDir {}'.format(
scriptref, dbDir, dbName, nproc, season, diffflux, outDir)
script.write(cmd+" \n")
script.write("EOF" + "\n")
script.close()
os.system("sh "+scriptName)
def batch_family(dbDir, familyName, arrayDb, scriptref, nproc, diffflux, outDir, x1, color, zmin, zmax):
cwd = os.getcwd()
dirScript = cwd + "/scripts"
if not os.path.isdir(dirScript):
os.makedirs(dirScript)
dirLog = cwd + "/logs"
if not os.path.isdir(dirLog):
os.makedirs(dirLog)
id = '{}_{}'.format(familyName, diffflux)
name_id = 'simulation_{}'.format(id)
log = dirLog + '/'+name_id+'.log'
qsub = 'qsub -P P_lsst -l sps=1,ct=05:00:00,h_vmem=16G -j y -o {} -pe multicores {} <<EOF'.format(
log, nproc)
# qsub = "qsub -P P_lsst -l sps=1,ct=05:00:00,h_vmem=16G -j y -o "+ log + " <<EOF"
scriptName = dirScript+'/'+name_id+'.sh'
script = open(scriptName, "w")
script.write(qsub + "\n")
# script.write("#!/usr/local/bin/bash\n")
script.write("#!/bin/env bash\n")
script.write(" cd " + cwd + "\n")
script.write(" echo 'sourcing setups' \n")
script.write(" source setup_release.sh CCIN2P3\n")
script.write("echo 'sourcing done' \n")
for dbName in arrayDb['dbName']:
for season in range(1, 11):
cmd = 'python {} --dbDir {} --dbName {} --nproc {} --season {} --diffflux {} --outDir {}'.format(
scriptref, dbDir, dbName, 1, season, diffflux, '{}/{}'.format(outDir, familyName))
cmd += ' --x1 {} --color {} --zmin {} --zmax {}'.format(
x1, color, zmin, zmax)
script.write(cmd+" \n")
script.write("EOF" + "\n")
script.close()
os.system("sh "+scriptName)
"""
"""
dbDir ='/sps/lsst/cadence/LSST_SN_CADENCE/cadence_db/2018-06-WPC'
dbNames=['kraken_2026','kraken_2042','kraken_2035','kraken_2044']
dbNames = ['kraken_2026','kraken_2042','kraken_2035','kraken_2044',
'colossus_2667','pontus_2489','pontus_2002','mothra_2049','nexus_2097']
for dbName in dbNames:
batch(dbDir,dbName,'run_metric',8)
dbDir = '/sps/lsst/cadence/LSST_SN_CADENCE/cadence_db'
dbNames = ['alt_sched', 'alt_sched_rolling',
'rolling_10yrs', 'rolling_mix_10yrs']
dbNames += ['kraken_2026', 'kraken_2042', 'kraken_2035', 'kraken_2044',
'colossus_2667', 'pontus_2489', 'pontus_2002', 'mothra_2049', 'nexus_2097']
dbNames += ['baseline_1exp_nopairs_10yrs', 'baseline_1exp_pairsame_10yrs', 'baseline_1exp_pairsmix_10yrs', 'baseline_2exp_pairsame_10yrs',
'baseline_2exp_pairsmix_10yrs', 'ddf_0.23deg_1exp_pairsmix_10yrs', 'ddf_0.70deg_1exp_pairsmix_10yrs',
'ddf_pn_0.23deg_1exp_pairsmix_10yrs', 'ddf_pn_0.70deg_1exp_pairsmix_10yrs', 'exptime_1exp_pairsmix_10yrs', 'baseline10yrs',
'big_sky10yrs', 'big_sky_nouiy10yrs', 'gp_heavy10yrs', 'newA10yrs', 'newB10yrs', 'roll_mod2_mixed_10yrs',
'roll_mod3_mixed_10yrs', 'roll_mod6_mixed_10yrs', 'simple_roll_mod10_mixed_10yrs', 'simple_roll_mod2_mixed_10yrs',
'simple_roll_mod3_mixed_10yrs', 'simple_roll_mod5_mixed_10yrs', 'twilight_1s10yrs',
'altsched_1exp_pairsmix_10yrs', 'rotator_1exp_pairsmix_10yrs', 'hyak_baseline_1exp_nopairs_10yrs',
'hyak_baseline_1exp_pairsame_10yrs']
# dbNames = ['alt_sched','alt_sched_rolling','rolling_10yrs','rolling_mix_10yrs','kraken_2026','kraken_2042']
dbNames = ['alt_sched', 'alt_sched_rolling', 'kraken_2026']
diffflux = 0
outDir = '/sps/lsst/users/gris/Output_Simu_pipeline_{}'.format(diffflux)
for dbName in dbNames:
for season in range(1,11):
batch(dbDir,dbName,'run_scripts/run_simulation_fromnpy.py',8,season,diffflux,outDir)
toprocess = np.loadtxt('for_batch/OpsimDB.txt',
dtype={'names': ('family', 'dbName'), 'formats': ('U11', 'U36')})
print(toprocess)
x1 = -2.0
color = 0.2
zmin = 0.0
zmax = 0.95
for family in np.unique(toprocess['family']):
idx = toprocess['family'] == family
sel = toprocess[idx]
batch_family(dbDir, family, sel, 'run_scripts/run_simulation_fromnpy.py',
8, diffflux, outDir, x1, color, zmin, zmax)
"""
def go_for_batch(toproc, splitSky,
dbDir, dbExtens, outDir,
nodither, nside, fieldType,
pixelmap_dir, npixels,
x1Type, x1min, x1max, x1step,
colorType, colormin, colormax, colorstep,
zType, zmin, zmax, zstep,
daymaxType, daymaxstep, simulator):
"""
Function to prepare and start batches
Parameters
----------------
toproc: numpy array
data (dbName, ...) to process
splitSky: bool
to split the batches in sky patches
dbDir: str
dir where observing strategy files are located
dbExtens: str
extension of obs. strategy files (npy or db)
outDir: str
output directory for the produced data
nodither: bool
to remove the dithering (useful for dedicated DD studies)
nside: int
healpix nside parameter
fieldType: str
type of field to process (DD, WFD, Fakes)
pixelmap_dir: str
directory where pixel maps (ie matched pixel positions and observations) are located
npixels: int
number of pixels to process
x1Type: str
x1 type for simulation (unique, uniform, random)
x1min: float
x1 min for simulation
x1max: float
x1 max for simulation
x1step: float
x1 step for simulation (type: uniform)
colorType: str
color type for simulation (unique, uniform, random)
colormin: float
color min for simulation
colormax: float
color max for simulation
colorstep: float
color step for simulation (type: uniform)
zType: str
z type for simulation (unique, uniform, random)
zmin: float
z min for simulation
zmax: float
z max for simulation
zstep: float
z step for simulation (type: uniform)
daymaxType: str
daymax type for simulation (unique, uniform, random)
daymaxstep: float
daymax step for simulation (type: uniform)
simulator: str
name of the simulator to be used
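Example
----------------
A hypothetical call, with purely illustrative values (not defaults taken from this code):
go_for_batch(toproc=row, splitSky=True,
dbDir='/path/to/obs_strategies', dbExtens='db', outDir='/path/to/output',
nodither=False, nside=64, fieldType='WFD',
pixelmap_dir='', npixels=-1,
x1Type='unique', x1min=-2.0, x1max=2.0, x1step=0.1,
colorType='unique', colormin=0.2, colormax=0.3, colorstep=0.05,
zType='uniform', zmin=0.01, zmax=0.9, zstep=0.05,
daymaxType='unique', daymaxstep=1., simulator='sn_cosmo')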
"""
# get the observing strategy name
#dbName = toproc['dbName'].decode()
dbName = toproc['dbName']
if pixelmap_dir == '':
# first case: no pixelmap - run on all the pixels - possibility to split the sky
n_per_slice = 1
RAs = [0., 360.]
if splitSky:
RAs =
|
np.linspace(0., 360., 11)
|
numpy.linspace
|
import os
import matplotlib.pyplot as plt
import numpy as np
n = 4
X =
|
np.arange(n)
|
numpy.arange
|
#! -*- coding:utf-8 -*-
'''
@Author: ZM
@Date and Time: 2021/5/2 9:45
@File: snippets.py
'''
import math
import numpy as np
from nms import nms
MODEL_INPUT_SHAPE = (416, 480)  # suited to the VOC dataset
NUM_LAYERS = 3
DOWNSAMPLING_SCALES = [32, 16, 8]
NUM_CLUSTER = 9
ANCHOR_MASK = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
MAX_NUM_BOXES = 8
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def yolo_head(feats, anchors, model_input_shape):
dtype = feats.dtype
num_anchors = len(anchors)
anchors = np.reshape(anchors, [1, 1, 1, num_anchors, 2])
grid_shape = np.shape(feats)[1:3]
grid_x = np.tile(np.reshape(np.arange(grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid_y = np.tile(np.reshape(np.arange(grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid =
|
np.concatenate([grid_x, grid_y], axis=-1)
|
numpy.concatenate
|
# Copyright (c) 2019, <NAME>
from .base import EvaluatorBase
import numpy as np
from ...experiment_design import initial_design
from ...acquisitions.EST import compute_beta_EST
from dppy.finite_dpps import FiniteDPP
from ...core.task.space import Design_space
from ...optimization.acquisition_optimizer import ContextManager
class DPP(EvaluatorBase):
"""
Class for the batch method described in 'Efficient and Scalable Batch Bayesian Optimization Using K-Means' (Groves et al., 2018).
:param acquisition: acquisition function to be used to compute the batch.
:param batch_size: the number of elements in the batch.
"""
def __init__(self, acquisition, batch_size, base_points=None, N_points=10000, design_name="random", suppress_emb=False, randomize=False, verbose=False):
super(DPP, self).__init__(acquisition, batch_size)
self.acquisition = acquisition
self.batch_size = batch_size
self.space = acquisition.space
self.design_name = design_name
self.suppress_emb = suppress_emb
self.randomize = randomize
self.verbose = verbose
if base_points is None:
self.N_points = N_points
self.base_points = None
else:
assert base_points.shape[1] == acquisition.model.input_dim
self.base_points = base_points
self.N_points = base_points.shape[0]
def sample_base_points(self, N_points=None, context_manager=None,
randomize=None, random_bound_axes=False,
max_rejection_low2high=10):
if N_points is None:
N_points = self.N_points
if randomize is None:
randomize = self.randomize
kern = self.acquisition.model.model.kern
if self.base_points is None:
"""
if not self.suppress_emb and hasattr(kern, "emb_min") and hasattr(kern, "emb_max"):
# if kernel is LinEmbKern, base_points are placed in reduced subspace
base_points = kern.sample_X_uniform_on_emb(N_points, randomize_low2high=randomize,
random_bound_axes=random_bound_axes,
max_rejection_low2high=max_rejection_low2high,
max_rejection_Z=1000)
"""
#elif
if not self.suppress_emb and context_manager and context_manager.A_reduce is not None:
# if context is specified, base_points are placed in reduced subspace
base_points_emb = initial_design(self.design_name, context_manager.space_reduced, N_points)
base_points = context_manager._expand_vector(base_points_emb)
else:
# base_points are placed in original space
base_points = initial_design(self.design_name, self.space, N_points)
else:
base_points = self.base_points
return base_points
def compute_batch(self, duplicate_manager=None, context_manager=None, batch_context_manager=None):
"""
Computes the elements of the batch.
"""
assert not batch_context_manager or len(batch_context_manager) == self.batch_size
if batch_context_manager:
self.acquisition.optimizer.context_manager = batch_context_manager[0]
raise NotImplementedError("batch_context is not supported")
model = self.acquisition.model
N_points = self.N_points
base_points = np.empty((0, model.input_dim))
beta = None
# get points in relevance region as many as batch size
while True:
# sample base points
base_points = np.concatenate([
base_points,
self.sample_base_points(N_points, context_manager)
])
# first point is greedy
X_batch = np.empty((self.batch_size, model.input_dim))
acq_on_points = self.acquisition.acquisition_function(base_points)
X_batch[0] = base_points[np.argmin(acq_on_points)] #self.acquisition.optimize()[0]
if self.verbose: print("first point:", X_batch[0])
if self.batch_size == 1:
return X_batch
# to get posterior covariance after first point selection
model_post = model.model.copy()
X_ = np.vstack([model_post.X, X_batch[0]])
Y_ = np.vstack([model_post.Y, [0]]) #0 is arbitrary
model_post.set_XY(X_, Y_)
# using beta from EST
if beta is None:
beta = compute_beta_EST(model=model,
space=self.acquisition.space,
points=base_points)
if self.verbose: print("beta:", beta)
m, s = model.predict(base_points)
ucb = (m + beta*s).flatten()
lcb2 = (m - 2*beta*s).flatten()
in_relevance_region = lcb2 <
|
np.min(ucb)
|
numpy.min
|
from numpy import full, ones, zeros, identity, hstack, vstack, array, pi as PI
from numpy.ma.testutils import assert_array_equal, assert_array_almost_equal
import pandas as pd
import sstspack.GaussianModelDesign as md
model_columns = ["Z", "d", "H", "T", "c", "R", "Q"]
short_model_rows = 1
long_model_rows = 100
abc_model_index = ["a", "b", "c"]
def test_get_local_level_model_design():
""""""
sigma2_eta = 2
sigma2_epsilon = 1.1
H = full((1, 1), sigma2_epsilon)
Q = full((1, 1), sigma2_eta)
data_df = md.get_local_level_model_design(
short_model_rows, sigma2_eta, sigma2_epsilon
)
assert all(x in data_df.columns for x in model_columns)
assert len(data_df) == short_model_rows
assert_array_equal(data_df.loc[0, "Z"], ones((1, 1)))
assert_array_equal(data_df.loc[0, "d"], zeros((1, 1)))
assert_array_equal(data_df.loc[0, "H"], H)
assert_array_equal(data_df.loc[0, "T"], ones((1, 1)))
assert_array_equal(data_df.loc[0, "c"], zeros((1, 1)))
assert_array_equal(data_df.loc[0, "R"], ones((1, 1)))
assert_array_equal(data_df.loc[0, "Q"], Q)
data_df = md.get_local_level_model_design(short_model_rows, Q, H)
assert_array_equal(data_df.loc[0, "H"], H)
assert_array_equal(data_df.loc[0, "Q"], Q)
H = sigma2_epsilon * identity(2)
data_df = md.get_local_level_model_design(short_model_rows, Q, H)
assert_array_equal(data_df.loc[0, "Z"], ones((2, 1)))
assert_array_equal(data_df.loc[0, "d"], zeros((2, 1)))
assert_array_equal(data_df.loc[0, "H"], H)
H = full((1, 1), sigma2_epsilon)
Q = full((1, 1), sigma2_eta)
data_df = md.get_local_level_model_design(abc_model_index, Q, H)
assert_array_equal(data_df.index, abc_model_index)
for idx in data_df.index:
assert_array_equal(data_df.loc[idx, "Z"], ones((1, 1)))
assert_array_equal(data_df.loc[idx, "d"], zeros((1, 1)))
assert_array_equal(data_df.loc[idx, "H"], H)
assert_array_equal(data_df.loc[idx, "T"], ones((1, 1)))
assert_array_equal(data_df.loc[idx, "c"], zeros((1, 1)))
assert_array_equal(data_df.loc[idx, "R"], ones((1, 1)))
assert_array_equal(data_df.loc[idx, "Q"], Q)
def test_get_local_linear_trend_model_design():
H = ones((1, 1))
Q = ones((2, 2))
data_df = md.get_local_linear_trend_model_design(short_model_rows, Q, H)
assert len(data_df) == short_model_rows
Z = zeros((1, 2))
Z[0, 0] = 1
assert_array_equal(data_df.loc[0, "Z"], Z)
assert_array_equal(data_df.loc[0, "d"], zeros((1, 1)))
assert_array_equal(data_df.loc[0, "H"], H)
T = ones((2, 2))
T[1, 0] = 0
assert_array_equal(data_df.loc[0, "T"], T)
assert_array_equal(data_df.loc[0, "c"], zeros((2, 1)))
assert_array_equal(data_df.loc[0, "R"], identity(2))
assert_array_equal(data_df.loc[0, "Q"], Q)
H = identity(2)
data_df = md.get_local_linear_trend_model_design(short_model_rows, Q, H)
assert_array_equal(data_df.loc[0, "Z"], hstack([ones((2, 1)), zeros((2, 1))]))
assert_array_equal(data_df.loc[0, "d"], zeros((2, 1)))
assert_array_equal(data_df.loc[0, "H"], H)
def test_get_time_domain_seasonal_model_design():
s = 3
H = 5
sigma2_omega = 2
data_df = md.get_time_domain_seasonal_model_design(
short_model_rows, s, sigma2_omega, H
)
Z = zeros((1, s))
Z[0, 0] = 1
assert_array_equal(data_df.loc[0, "Z"], Z)
assert_array_equal(data_df.loc[0, "d"], zeros((1, 1)))
assert_array_equal(data_df.loc[0, "H"], full((1, 1), H))
T = zeros((3, 3))
T[0, 1] = T[0, 0] = -1
T[1, 0] = T[2, 1] = 1
R = zeros((s, 1))
R[0, 0] = 1
assert_array_equal(data_df.loc[0, "T"], T)
assert_array_equal(data_df.loc[0, "c"], zeros((3, 1)))
assert_array_equal(data_df.loc[0, "R"], R)
assert_array_equal(data_df.loc[0, "Q"], full((1, 1), sigma2_omega))
H = identity(2)
data_df = md.get_time_domain_seasonal_model_design(
short_model_rows, s, sigma2_omega, H
)
assert_array_equal(data_df.loc[0, "Z"], vstack([Z, Z]))
assert_array_equal(data_df.loc[0, "d"], zeros((2, 1)))
assert_array_equal(data_df.loc[0, "H"], H)
def test_get_static_model_df():
a, b, c = (2, 3, 4)
data_df = md.get_static_model_df(long_model_rows, a=a, b=b, c=c)
assert len(data_df) == long_model_rows
assert all(data_df.a == a)
assert all(data_df.b == b)
assert all(data_df.c == c)
def test_combine_model_design():
H1 = 3
Q1 = 2
model1 = md.get_local_level_model_design(short_model_rows, Q1, H1)
sigma2_omega = 4
s = 3
H = 5
model2 = md.get_time_domain_seasonal_model_design(
short_model_rows, s, sigma2_omega, H
)
combined_model = md.combine_model_design([model1, model2])
Z = zeros((1, 4))
Z[0, 0] = Z[0, 1] = 1
assert_array_equal(combined_model.loc[0, "Z"], Z)
assert_array_equal(combined_model.loc[0, "d"], zeros((1, 1)))
assert_array_equal(combined_model.loc[0, "H"], full((1, 1), 8))
T = zeros((4, 4))
T[0, 0] = T[2, 1] = T[3, 2] = 1
T[1, 1] = T[1, 2] = -1
assert_array_equal(combined_model.loc[0, "T"], T)
assert_array_equal(combined_model.loc[0, "c"], zeros((4, 1)))
R = zeros((4, 2))
R[0, 0] = R[1, 1] = 1
assert_array_equal(combined_model.loc[0, "R"], R)
Q = zeros((2, 2))
Q[0, 0] = 2
Q[1, 1] = 4
assert_array_equal(combined_model.loc[0, "Q"], Q)
model1 = md.get_local_level_model_design(3, Q=1, H=1)
model2 = md.get_intervention_model_design(3, 1)
full_model = md.combine_model_design([model1, model2])
assert_array_equal(full_model.Z[0], array([[1, 0]]))
assert_array_equal(full_model.Z[1], array([[1, 1]]))
assert_array_equal(full_model.Z[2], array([[1, 1]]))
assert_array_equal(full_model.R[0], array([[1], [0]]))
assert_array_equal(full_model.R[1], array([[1], [0]]))
assert_array_equal(full_model.R[2], array([[1], [0]]))
assert_array_equal(full_model.Q[0], ones((1, 1)))
assert_array_equal(full_model.Q[1], ones((1, 1)))
assert_array_equal(full_model.Q[2], ones((1, 1)))
H1 =
|
identity(2)
|
numpy.identity
|
# Lint as: python3
"""A torque based stance controller framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from motion_imitation.robots.a1 import foot_position_in_hip_frame
from typing import Any, Sequence, Tuple
import time
import numpy as np
# import time
from casadi import *
import scipy.linalg as la
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import leg_controller
from mpc_controller import qp_torque_optimizer
_FORCE_DIMENSION = 3
KP = np.array((0., 0., 100., 100., 100., 0.))
KD = np.array((40., 30., 10., 10., 10., 30.))
MAX_DDQ = np.array((10., 10., 10., 20., 20., 20.))
MIN_DDQ = -MAX_DDQ
MPC_BODY_MASS = 108 / 9.8
MPC_BODY_INERTIA = np.array((0.017, 0, 0, 0, 0.057, 0, 0, 0, 0.064)) * 4.
class TorqueStanceLegController(leg_controller.LegController):
"""A torque based stance leg controller framework.
Takes in high level parameters like walking speed and turning speed, and
generates the necessary torques for the stance legs.
"""
def __init__(
self,
robot: Any,
gait_generator: Any,
state_estimator: Any,
num_legs: int = 4,
friction_coeffs: Sequence[float] = (0.45, 0.45, 0.45, 0.45),
):
"""Initializes the class.
Tracks the desired position/velocity of the robot by computing proper joint
torques using MPC module.
Args:
robot: A robot instance.
gait_generator: Used to query the locomotion phase and leg states.
state_estimator: Estimate the robot states (e.g. CoM velocity).
desired_speed: desired CoM speed in x-y plane.
desired_twisting_speed: desired CoM rotating speed in z direction.
desired_body_height: The standing height of the robot.
body_mass: The total mass of the robot.
body_inertia: The inertia matrix in the body principle frame. We assume
the body principle coordinate frame has x-forward and z-up.
num_legs: The number of legs used for force planning.
friction_coeffs: The friction coeffs on the contact surfaces.
"""
self._robot = robot
self._gait_generator = gait_generator
self._state_estimator = state_estimator
self.desired_q = np.zeros((12,1))
self._num_legs = num_legs
self._friction_coeffs = np.array(friction_coeffs)
self.K_dain = np.array((12,12))
self.force_des = np.array((4,3))
def reset(self, current_time):
del current_time
def update(self, current_time):
del current_time
def _estimate_robot_height(self, contacts):
if np.sum(contacts) == 0:
# All foot in air, no way to estimate
return self._desired_body_height
else:
base_orientation = self._robot.GetBaseOrientation()
rot_mat = self._robot.pybullet_client.getMatrixFromQuaternion(
base_orientation)
rot_mat = np.array(rot_mat).reshape((3, 3))
foot_positions = self._robot.GetFootPositionsInBaseFrame()
foot_positions_world_frame = (rot_mat.dot(foot_positions.T)).T
# pylint: disable=unsubscriptable-object
useful_heights = contacts * (-foot_positions_world_frame[:, 2])
return np.sum(useful_heights) / np.sum(contacts)
def get_action(self):
"""Computes the torque for stance legs."""
# Actual q and dq
contacts = np.array(
[(leg_state in (gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.EARLY_CONTACT))
for leg_state in self._gait_generator.desired_leg_state],
dtype=np.int32)
#robot_com_position = np.array(self._robot.GetBasePosition())#np.array(
# (0., 0., self._estimate_robot_height(contacts)))
p = self._robot.GetFootPositionsInBaseFrame()
robot_com_position = np.array(
(-np.mean(p[:,0]), -np.mean(p[:,1]), self._estimate_robot_height(contacts)))
robot_com_velocity = self._state_estimator.com_velocity_body_frame
robot_com_roll_pitch_yaw = np.array(self._robot.GetBaseRollPitchYaw())
robot_com_roll_pitch_yaw[2] = 0 # To prevent yaw drifting
robot_com_roll_pitch_yaw_rate = self._robot.GetBaseRollPitchYawRate()
robot_q = np.hstack((robot_com_position, robot_com_roll_pitch_yaw))
robot_dq =
|
np.hstack((robot_com_velocity, robot_com_roll_pitch_yaw_rate))
|
numpy.hstack
|
# Originally written by <NAME>
# Extended and currently maintained by <NAME>
from __future__ import absolute_import, print_function
import pandas as pd
import numpy as np
import numpy.linalg as la
import math
import copy
def neuroCombat(dat,
covars,
batch_col,
categorical_cols=None,
continuous_cols=None,
eb=True,
parametric=True,
mean_only=False,
ref_batch=None):
"""
Run ComBat to remove scanner effects in multi-site imaging data
Arguments
---------
dat : a pandas data frame or numpy array
- neuroimaging data to correct with shape = (features, samples) e.g. cortical thickness measurements, image voxels, etc
covars : a pandas data frame w/ shape = (samples, covariates)
- contains the batch/scanner covariate as well as additional covariates (optional) that should be preserved during harmonization.
batch_col : string
- indicates batch (scanner) column name in covars (e.g. "scanner")
categorical_cols : list of strings
- specifies column names in covars data frame of categorical variables to be preserved during harmonization (e.g. ["sex", "disease"])
continuous_cols : list of strings
- indicates column names in covars data frame of continuous variables to be preserved during harmonization (e.g. ["age"])
eb : should Empirical Bayes be performed?
- True by default
parametric : should parametric adjustments be performed?
- True by default
mean_only : should only the mean be adjusted (no scaling)?
- False by default
ref_batch : batch (site or scanner) to be used as reference for batch adjustment.
- None by default
Returns
-------
A dictionary of length 3:
- data: A numpy array with the same shape as `dat` which has now been ComBat-harmonized
- estimates: A dictionary of the ComBat estimates used for harmonization
- info: A dictionary of the inputs needed for ComBat harmonization
"""
##############################
### CLEANING UP INPUT DATA ###
##############################
if not isinstance(covars, pd.DataFrame):
raise ValueError('covars must be pandas dataframe -> try: covars = pandas.DataFrame(covars)')
if not isinstance(categorical_cols, (list,tuple)):
if categorical_cols is None:
categorical_cols = []
else:
categorical_cols = [categorical_cols]
if not isinstance(continuous_cols, (list,tuple)):
if continuous_cols is None:
continuous_cols = []
else:
continuous_cols = [continuous_cols]
covar_labels = np.array(covars.columns)
covars = np.array(covars, dtype='object')
if isinstance(dat, pd.DataFrame):
dat = np.array(dat, dtype='float32')
##############################
# get column indices for relevant variables
batch_col = np.where(covar_labels==batch_col)[0][0]
cat_cols = [np.where(covar_labels==c_var)[0][0] for c_var in categorical_cols]
num_cols = [np.where(covar_labels==n_var)[0][0] for n_var in continuous_cols]
# convert batch col to integer
if ref_batch is None:
ref_level=None
else:
ref_indices = np.argwhere(covars[:, batch_col] == ref_batch).squeeze()
if ref_indices.shape[0]==0:
ref_level=None
ref_batch=None
print('[neuroCombat] batch.ref not found. Setting to None.')
covars[:,batch_col] = np.unique(covars[:,batch_col],return_inverse=True)[-1]
else:
covars[:,batch_col] = np.unique(covars[:,batch_col],return_inverse=True)[-1]
ref_level = covars[np.int(ref_indices[0]),batch_col]
# create dictionary that stores batch info
(batch_levels, sample_per_batch) = np.unique(covars[:,batch_col],return_counts=True)
# create design matrix
print('[neuroCombat] Creating design matrix')
design = make_design_matrix(covars, batch_col, cat_cols, num_cols, ref_level)
info_dict = {
'batch_levels': batch_levels,
'ref_level': ref_level,
'n_batch': len(batch_levels),
'n_sample': int(covars.shape[0]),
'sample_per_batch': sample_per_batch.astype('int'),
'batch_info': [list(np.where(covars[:,batch_col]==idx)[0]) for idx in batch_levels],
'design': design
}
# standardize data across features
print('[neuroCombat] Standardizing data across features')
s_data, s_mean, v_pool, mod_mean = standardize_across_features(dat, design, info_dict)
# fit L/S models and find priors
print('[neuroCombat] Fitting L/S model and finding priors')
LS_dict = fit_LS_model_and_find_priors(s_data, design, info_dict, mean_only)
# find parametric adjustments
if eb:
if parametric:
print('[neuroCombat] Finding parametric adjustments')
gamma_star, delta_star = find_parametric_adjustments(s_data, LS_dict, info_dict, mean_only)
else:
print('[neuroCombat] Finding non-parametric adjustments')
gamma_star, delta_star = find_non_parametric_adjustments(s_data, LS_dict, info_dict, mean_only)
else:
print('[neuroCombat] Finding L/S adjustments without Empirical Bayes')
gamma_star, delta_star = find_non_eb_adjustments(s_data, LS_dict, info_dict)
# adjust data
print('[neuroCombat] Final adjustment of data')
bayes_data = adjust_data_final(s_data, design, gamma_star, delta_star,
s_mean, mod_mean, v_pool, info_dict,dat)
bayes_data = np.array(bayes_data)
estimates = {'batches': info_dict['batch_levels'], 'var.pooled': v_pool, 'stand.mean': s_mean, 'mod.mean': mod_mean, 'gamma.star': gamma_star, 'delta.star': delta_star}
estimates = {**LS_dict, **estimates, }
return {
'data': bayes_data,
'estimates': estimates,
'info': info_dict
}
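# Hypothetical usage sketch (not part of the original module): a minimal example of
# calling the neuroCombat API defined above on synthetic data. The column names
# ("scanner", "sex", "age"), shapes and values are illustrative assumptions only.
def _neurocombat_usage_example():
    rng = np.random.RandomState(0)
    dat = rng.normal(size=(10, 6))  # (features, samples): e.g. 10 measures for 6 subjects
    covars = pd.DataFrame({"scanner": ["A", "A", "A", "B", "B", "B"],
                           "sex": ["M", "F", "M", "F", "M", "F"],
                           "age": [20, 31, 44, 25, 33, 47]})
    out = neuroCombat(dat=dat, covars=covars, batch_col="scanner",
                      categorical_cols=["sex"], continuous_cols=["age"])
    return out["data"]  # harmonized array, same shape as dat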
def make_design_matrix(Y, batch_col, cat_cols, num_cols, ref_level):
"""
Return Matrix containing the following parts:
- one-hot matrix of batch variable (full)
- one-hot matrix for each categorical_cols (removing the first column)
- column for each continuous_cols
"""
def to_categorical(y, nb_classes=None):
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
hstack_list = []
### batch one-hot ###
# convert batch column to integer in case it's string
batch = np.unique(Y[:,batch_col],return_inverse=True)[-1]
batch_onehot = to_categorical(batch, len(np.unique(batch)))
if ref_level is not None:
batch_onehot[:,ref_level] = np.ones(batch_onehot.shape[0])
hstack_list.append(batch_onehot)
### categorical one-hots ###
for cat_col in cat_cols:
cat = np.unique(np.array(Y[:,cat_col]),return_inverse=True)[1]
cat_onehot = to_categorical(cat, len(np.unique(cat)))[:,1:]
hstack_list.append(cat_onehot)
### numerical vectors ###
for num_col in num_cols:
num = np.array(Y[:,num_col],dtype='float32')
num = num.reshape(num.shape[0],1)
hstack_list.append(num)
design = np.hstack(hstack_list)
return design
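# Worked sketch (hypothetical input): for three samples with batch = ['A', 'A', 'B'],
# one categorical column sex = ['M', 'F', 'F'] and one continuous column age = [20, 30, 40],
# the stacked design matrix returned above is
#   [[1., 0., 1., 20.],
#    [1., 0., 0., 30.],
#    [0., 1., 0., 40.]]
# i.e. the full batch one-hot, the sex one-hot with its first level dropped, and the raw ages.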
def standardize_across_features(X, design, info_dict):
n_batch = info_dict['n_batch']
n_sample = info_dict['n_sample']
sample_per_batch = info_dict['sample_per_batch']
batch_info = info_dict['batch_info']
ref_level = info_dict['ref_level']
def get_beta_with_nan(yy, mod):
wh = np.isfinite(yy)
mod = mod[wh,:]
yy = yy[wh]
B = np.dot(np.dot(la.inv(np.dot(mod.T, mod)), mod.T), yy.T)
return B
betas = []
for i in range(X.shape[0]):
betas.append(get_beta_with_nan(X[i,:], design))
B_hat = np.vstack(betas).T
#B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), X.T)
if ref_level is not None:
grand_mean = np.transpose(B_hat[ref_level,:])
else:
grand_mean = np.dot((sample_per_batch/ float(n_sample)).T, B_hat[:n_batch,:])
stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, n_sample)))
#var_pooled = np.dot(((X - np.dot(design, B_hat).T)**2), np.ones((n_sample, 1)) / float(n_sample))
if ref_level is not None:
X_ref = X[:,batch_info[ref_level]]
design_ref = design[batch_info[ref_level],:]
n_sample_ref = sample_per_batch[ref_level]
var_pooled = np.dot(((X_ref - np.dot(design_ref, B_hat).T)**2), np.ones((n_sample_ref, 1)) / float(n_sample_ref))
else:
var_pooled = np.dot(((X - np.dot(design, B_hat).T)**2), np.ones((n_sample, 1)) / float(n_sample))
var_pooled[var_pooled==0] = np.median(var_pooled!=0)
mod_mean = 0
if design is not None:
tmp = copy.deepcopy(design)
tmp[:,range(0,n_batch)] = 0
mod_mean = np.transpose(np.dot(tmp, B_hat))
######### Continue here.
#tmp = np.array(design.copy())
#tmp[:,:n_batch] = 0
#stand_mean += np.dot(tmp, B_hat).T
s_data = ((X- stand_mean - mod_mean) / np.dot(np.sqrt(var_pooled), np.ones((1, n_sample))))
return s_data, stand_mean, var_pooled, mod_mean
def aprior(delta_hat):
m = np.mean(delta_hat)
s2 = np.var(delta_hat,ddof=1)
return (2 * s2 +m**2) / float(s2)
def bprior(delta_hat):
m = delta_hat.mean()
s2 = np.var(delta_hat,ddof=1)
return (m*s2+m**3)/s2
def postmean(g_hat, g_bar, n, d_star, t2):
return (t2*n*g_hat+d_star * g_bar) / (t2*n+d_star)
def postvar(sum2, n, a, b):
return (0.5 * sum2 + b) / (n / 2.0 + a - 1.0)
def convert_zeroes(x):
x[x==0] = 1
return x
def fit_LS_model_and_find_priors(s_data, design, info_dict, mean_only):
n_batch = info_dict['n_batch']
batch_info = info_dict['batch_info']
batch_design = design[:,:n_batch]
gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T)
delta_hat = []
for i, batch_idxs in enumerate(batch_info):
if mean_only:
delta_hat.append(np.repeat(1, s_data.shape[0]))
else:
delta_hat.append(np.var(s_data[:,batch_idxs],axis=1,ddof=1))
delta_hat = list(map(convert_zeroes,delta_hat))
gamma_bar = np.mean(gamma_hat, axis=1)
t2 = np.var(gamma_hat,axis=1, ddof=1)
if mean_only:
a_prior = None
b_prior = None
else:
a_prior = list(map(aprior, delta_hat))
b_prior = list(map(bprior, delta_hat))
LS_dict = {}
LS_dict['gamma_hat'] = gamma_hat
LS_dict['delta_hat'] = delta_hat
LS_dict['gamma_bar'] = gamma_bar
LS_dict['t2'] = t2
LS_dict['a_prior'] = a_prior
LS_dict['b_prior'] = b_prior
return LS_dict
# Helper function for parametric adjustments:
def it_sol(sdat, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001):
n = (1 - np.isnan(sdat)).sum(axis=1)
g_old = g_hat.copy()
d_old = d_hat.copy()
change = 1
count = 0
while change > conv:
g_new = postmean(g_hat, g_bar, n, d_old, t2)
sum2 = ((sdat - np.dot(g_new.reshape((g_new.shape[0], 1)), np.ones((1, sdat.shape[1])))) ** 2).sum(axis=1)
d_new = postvar(sum2, n, a, b)
change = max((abs(g_new - g_old) / g_old).max(), (abs(d_new - d_old) / d_old).max())
g_old = g_new #.copy()
d_old = d_new #.copy()
count = count + 1
adjust = (g_new, d_new)
return adjust
# Helper function for non-parametric adjustments:
def int_eprior(sdat, g_hat, d_hat):
r = sdat.shape[0]
gamma_star, delta_star = [], []
for i in range(0,r,1):
g = np.delete(g_hat,i)
d = np.delete(d_hat,i)
x = sdat[i,:]
n = x.shape[0]
j = np.repeat(1,n)
A = np.repeat(x, g.shape[0])
A = A.reshape(n,g.shape[0])
A = np.transpose(A)
B = np.repeat(g, n)
B = B.reshape(g.shape[0],n)
resid2 = np.square(A-B)
sum2 = resid2.dot(j)
LH = 1/(2*math.pi*d)**(n/2)*np.exp(-sum2/(2*d))
LH = np.nan_to_num(LH)
gamma_star.append(sum(g*LH)/sum(LH))
delta_star.append(sum(d*LH)/sum(LH))
adjust = (gamma_star, delta_star)
return adjust
def find_parametric_adjustments(s_data, LS, info_dict, mean_only):
batch_info = info_dict['batch_info']
ref_level = info_dict['ref_level']
gamma_star, delta_star = [], []
for i, batch_idxs in enumerate(batch_info):
if mean_only:
gamma_star.append(postmean(LS['gamma_hat'][i], LS['gamma_bar'][i], 1, 1, LS['t2'][i]))
delta_star.append(np.repeat(1, s_data.shape[0]))
else:
temp = it_sol(s_data[:,batch_idxs], LS['gamma_hat'][i],
LS['delta_hat'][i], LS['gamma_bar'][i], LS['t2'][i],
LS['a_prior'][i], LS['b_prior'][i])
gamma_star.append(temp[0])
delta_star.append(temp[1])
gamma_star =
|
np.array(gamma_star)
|
numpy.array
|
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def setup_long_fig(num_subfigs, y_label, x_label, font_size, width=6.4, height=4.8):
fig = plt.figure(figsize=[num_subfigs * width, height])
ax = fig.add_subplot(111)
ax.set_ylabel(y_label, fontsize=font_size)
ax.set_xlabel(x_label, fontsize=font_size)
remove_axis_lines_ticks(ax)
return fig
def remove_axis_lines_ticks(ax):
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
def get_suc_rew_figs(data_dirs, x_label, font_size):
# avg_suc_fig = plt.figure(0, figsize=plt.figaspect(1 / len(data_dirs)))
avg_suc_fig = plt.figure(0, figsize=[len(data_dirs) * 6.4, 4.8])
avg_suc_fig_ax = avg_suc_fig.add_subplot(111)
avg_suc_fig_ax.set_ylabel('Success Rate', fontsize=font_size)
avg_suc_fig_ax.set_xlabel(x_label, fontsize=font_size)
remove_axis_lines_ticks(avg_suc_fig_ax)
# plt.suptitle('Average Success')
avg_rew_fig = plt.figure(1, figsize=plt.figaspect(1 / len(data_dirs)))
avg_rew_fig_ax = avg_rew_fig.add_subplot(111)
avg_rew_fig_ax.set_ylabel('Average Reward (Scaled)', fontsize=font_size)
avg_rew_fig_ax.set_xlabel(x_label, fontsize=font_size)
remove_axis_lines_ticks(avg_rew_fig_ax)
# plt.suptitle('Average Reward (Scaled)')
avg_rew_all_fig = plt.figure(2, figsize=plt.figaspect(1 / len(data_dirs)))
avg_rew_all_fig_ax = avg_rew_all_fig.add_subplot(111)
avg_rew_all_fig_ax.set_ylabel('Average Reward (Scaled)', fontsize=font_size)
avg_rew_all_fig_ax.set_xlabel(x_label, fontsize=font_size)
remove_axis_lines_ticks(avg_rew_all_fig_ax)
# plt.suptitle('Average Reward (Scaled)')
return avg_suc_fig, avg_rew_fig, avg_rew_all_fig
def setup_pretty_plotting():
# pretty plotting stuff
font_params = {
"font.family": "serif",
"font.serif": "Times",
"text.usetex": True,
"pgf.rcfonts": False
}
plt.rcParams.update(font_params)
def final_ax_formatting(ax, i_dir, font_size):
# formatting
if i_dir == 0:
ax.legend(fontsize=font_size - 6)
ax.tick_params(axis='both', which='minor')
ax.xaxis.set_tick_params(labelsize=font_size - 8)
ax.yaxis.set_tick_params(labelsize=font_size - 8)
# if i_dir > 0:
# ax.set_yticklabels([])
def get_max_values_of_conds(stats_dict):
# for ROC
all_max_consec_non_inc_q_neg_ds = []
all_max_consec_neg_ds = []
for c, ckpt in enumerate(stats_dict['total_numsteps']):
d_q_shape = stats_dict['d_values'][c].shape # num seeds x num eps x ep length
d_values = stats_dict['d_values'][c].reshape([d_q_shape[0] * d_q_shape[1], d_q_shape[2]])
q_values = stats_dict['q_values'][c].reshape([d_q_shape[0] * d_q_shape[1], d_q_shape[2]])
max_consec_non_inc_q_neg_ds = []
max_consec_neg_ds = []
for ep in range(d_q_shape[0] * d_q_shape[1]):
max_consec_non_inc_q_neg_d = 0
max_consec_neg_d = 0
consec_non_inc_q_neg_d = 0
consec_neg_d = 0
last_q = -1e10
for t in range(d_q_shape[2]):
if d_values[ep, t] < 0:
# if d_values[ep, t] < .05:
consec_neg_d += 1
max_consec_neg_d = max(consec_neg_d, max_consec_neg_d)
if q_values[ep, t] - last_q < 0:
consec_non_inc_q_neg_d += 1
max_consec_non_inc_q_neg_d = max(consec_non_inc_q_neg_d, max_consec_non_inc_q_neg_d)
else:
consec_non_inc_q_neg_d = 0
else:
consec_neg_d = 0
consec_non_inc_q_neg_d = 0
last_q = q_values[ep, t]
max_consec_non_inc_q_neg_ds.append(max_consec_non_inc_q_neg_d)
max_consec_neg_ds.append(max_consec_neg_d)
all_max_consec_non_inc_q_neg_ds.append(max_consec_non_inc_q_neg_ds)
all_max_consec_neg_ds.append(max_consec_neg_ds)
return
|
np.array(all_max_consec_non_inc_q_neg_ds)
|
numpy.array
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the python library parsing Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from delf.python.detect_to_retrieve import dataset
class DatasetTest(tf.test.TestCase):
def testParseEasyMediumHardGroundTruth(self):
# Define input.
ground_truth = [{
'easy': np.array([10, 56, 100]),
'hard':
|
np.array([0])
|
numpy.array
|
from numba import njit, jit
import numpy as np
import matplotlib.pyplot as plt
from cnn_bounds_full_core_with_LP import pool, conv, conv_bound, conv_full, conv_bound_full, pool_linear_bounds
from solve import *
# from tensorflow.contrib.keras.api.keras.models import Sequential
# from tensorflow.contrib.keras.api.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D, Lambda
# from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, InputLayer, BatchNormalization, Reshape
# from tensorflow.contrib.keras.api.keras.models import load_model
# from tensorflow.contrib.keras.api.keras import backend as K
# import tensorflow.contrib.keras.api.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D, Lambda
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, InputLayer, BatchNormalization, Reshape
from tensorflow.keras.models import load_model
import tensorflow.keras as keras
from train_myself_model import ResidualStart, ResidualStart2
import tensorflow as tf
from utils import generate_data_myself
import time
import datetime
from activations import sigmoid_linear_bounds, sigmoid
from pgd_attack import *
linear_bounds = None
import random
def loss(correct, predicted):
return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
logits=predicted)
class Model:
def __init__(self, model, inp_shape = (28,28,1)):
temp_weights = [layer.get_weights() for layer in model.layers]
self.shapes = []
self.sizes = []
self.weights = []
self.biases = []
self.pads = []
self.strides = []
self.types = []
self.model = model
cur_shape = inp_shape
print('cur_shape:', cur_shape)
self.shapes.append(cur_shape)
i = 0
while i < len(model.layers):
layer = model.layers[i]
i += 1
print(cur_shape)
weights = layer.get_weights()
if type(layer) == Conv2D:
print('conv')
if len(weights) == 1:
W = weights[0].astype(np.float32)
b = np.zeros(W.shape[-1], dtype=np.float32)
else:
W, b = weights
W = W.astype(np.float32)
b = b.astype(np.float32)
padding = layer.get_config()['padding']
stride = layer.get_config()['strides']
pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride[0]))
desired_w = int(np.ceil(cur_shape[0]/stride[1]))
total_padding_h = stride[0]*(desired_h-1)+W.shape[0]-cur_shape[0]
total_padding_w = stride[1]*(desired_w-1)+W.shape[1]-cur_shape[1]
pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-W.shape[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-W.shape[1])/stride[1])+1, W.shape[-1])
W = np.ascontiguousarray(W.transpose((3,0,1,2)).astype(np.float32))
b = np.ascontiguousarray(b.astype(np.float32))
self.types.append('conv')
self.sizes.append(None)
self.strides.append(stride)
self.pads.append(pad)
self.shapes.append(cur_shape)
self.weights.append(W)
self.biases.append(b)
elif type(layer) == GlobalAveragePooling2D:
print('global avg pool')
b = np.zeros(cur_shape[-1], dtype=np.float32)
W = np.zeros((cur_shape[0],cur_shape[1],cur_shape[2],cur_shape[2]), dtype=np.float32)
for f in range(W.shape[2]):
W[:,:,f,f] = 1/(cur_shape[0]*cur_shape[1])
pad = (0,0,0,0)
stride = ((1,1))
cur_shape = (1,1,cur_shape[2])
W = np.ascontiguousarray(W.transpose((3,0,1,2)).astype(np.float32))
b = np.ascontiguousarray(b.astype(np.float32))
self.types.append('conv')
self.sizes.append(None)
self.strides.append(stride)
self.pads.append(pad)
self.shapes.append(cur_shape)
self.weights.append(W)
self.biases.append(b)
elif type(layer) == AveragePooling2D:
print('avg pool')
b = np.zeros(cur_shape[-1], dtype=np.float32)
padding = layer.get_config()['padding']
pool_size = layer.get_config()['pool_size']
stride = layer.get_config()['strides']
W = np.zeros((pool_size[0],pool_size[1],cur_shape[2],cur_shape[2]), dtype=np.float32)
for f in range(W.shape[2]):
W[:,:,f,f] = 1/(pool_size[0]*pool_size[1])
pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride[0]))
desired_w = int(np.ceil(cur_shape[0]/stride[1]))
total_padding_h = stride[0]*(desired_h-1)+pool_size[0]-cur_shape[0]
total_padding_w = stride[1]*(desired_w-1)+pool_size[1]-cur_shape[1]
pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-pool_size[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-pool_size[1])/stride[1])+1, cur_shape[2])
W = np.ascontiguousarray(W.transpose((3,0,1,2)).astype(np.float32))
b = np.ascontiguousarray(b.astype(np.float32))
self.types.append('conv')
self.sizes.append(None)
self.strides.append(stride)
self.pads.append(pad)
self.shapes.append(cur_shape)
self.weights.append(W)
self.biases.append(b)
elif type(layer) == Activation or type(layer) == Lambda:
print('activation')
self.types.append('relu')
self.sizes.append(None)
self.strides.append(None)
self.pads.append(None)
self.shapes.append(cur_shape)
self.weights.append(None)
self.biases.append(None)
elif type(layer) == InputLayer:
print('input')
elif type(layer) == BatchNormalization:
print('batch normalization')
gamma, beta, mean, std = weights
std = np.sqrt(std+0.001) #Avoids zero division
a = gamma/std
b = -gamma*mean/std+beta
self.weights[-1] = a*self.weights[-1]
self.biases[-1] = a*self.biases[-1]+b
elif type(layer) == Dense:
print('FC')
W, b = weights
b = b.astype(np.float32)
W = W.reshape(list(cur_shape)+[W.shape[-1]]).astype(np.float32)
cur_shape = (1,1,W.shape[-1])
W = np.ascontiguousarray(W.transpose((3,0,1,2)).astype(np.float32))
b = np.ascontiguousarray(b.astype(np.float32))
self.types.append('conv')
self.sizes.append(None)
self.strides.append((1,1))
self.pads.append((0,0,0,0))
self.shapes.append(cur_shape)
self.weights.append(W)
self.biases.append(b)
elif type(layer) == Dropout:
print('dropout')
elif type(layer) == MaxPooling2D:
print('pool')
pool_size = layer.get_config()['pool_size']
stride = layer.get_config()['strides']
padding = layer.get_config()['padding']
pad = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride[0]))
desired_w = int(np.ceil(cur_shape[0]/stride[1]))
total_padding_h = stride[0]*(desired_h-1)+pool_size[0]-cur_shape[0]
total_padding_w = stride[1]*(desired_w-1)+pool_size[1]-cur_shape[1]
pad = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
cur_shape = (int((cur_shape[0]+pad[0]+pad[1]-pool_size[0])/stride[0])+1, int((cur_shape[1]+pad[2]+pad[3]-pool_size[1])/stride[1])+1, cur_shape[2])
self.types.append('pool')
self.sizes.append(pool_size)
self.strides.append(stride)
self.pads.append(pad)
self.shapes.append(cur_shape)
self.weights.append(None)
self.biases.append(None)
elif type(layer) == Flatten:
print('flatten')
elif type(layer) == Reshape:
print('reshape')
elif type(layer) == ResidualStart2:
print('basic block 2')
conv1 = model.layers[i]
bn1 = model.layers[i+1]
conv2 = model.layers[i+3]
conv3 = model.layers[i+4]
bn2 = model.layers[i+5]
bn3 = model.layers[i+6]
i = i+8
W1, bias1 = conv1.get_weights()
W2, bias2 = conv2.get_weights()
W3, bias3 = conv3.get_weights()
gamma1, beta1, mean1, std1 = bn1.get_weights()
std1 = np.sqrt(std1+0.001) #Avoids zero division
a1 = gamma1/std1
b1 = gamma1*mean1/std1+beta1
W1 = a1*W1
bias1 = a1*bias1+b1
gamma2, beta2, mean2, std2 = bn2.get_weights()
std2 = np.sqrt(std2+0.001) #Avoids zero division
a2 = gamma2/std2
b2 = gamma2*mean2/std2+beta2
W2 = a2*W2
bias2 = a2*bias2+b2
gamma3, beta3, mean3, std3 = bn3.get_weights()
std3 = np.sqrt(std3+0.001) #Avoids zero division
a3 = gamma3/std3
b3 = gamma3*mean3/std3+beta3
W3 = a3*W3
bias3 = a3*bias3+b3
padding1 = conv1.get_config()['padding']
stride1 = conv1.get_config()['strides']
pad1 = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding1 == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride1[0]))
desired_w = int(np.ceil(cur_shape[0]/stride1[1]))
total_padding_h = stride1[0]*(desired_h-1)+W1.shape[0]-cur_shape[0]
total_padding_w = stride1[1]*(desired_w-1)+W1.shape[1]-cur_shape[1]
pad1 = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
cur_shape = (int((cur_shape[0]+pad1[0]+pad1[1]-W1.shape[0])/stride1[0])+1, int((cur_shape[1]+pad1[2]+pad1[3]-W1.shape[1])/stride1[1])+1, W1.shape[3])
padding2 = conv2.get_config()['padding']
stride2 = conv2.get_config()['strides']
pad2 = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding2 == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride2[0]))
desired_w = int(np.ceil(cur_shape[0]/stride2[1]))
total_padding_h = stride2[0]*(desired_h-1)+W2.shape[0]-cur_shape[0]
total_padding_w = stride2[1]*(desired_w-1)+W2.shape[1]-cur_shape[1]
pad2 = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
padding3 = conv3.get_config()['padding']
stride3 = conv3.get_config()['strides']
pad3 = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding3 == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride3[0]))
desired_w = int(np.ceil(cur_shape[0]/stride3[1]))
total_padding_h = stride3[0]*(desired_h-1)+W3.shape[0]-cur_shape[0]
total_padding_w = stride3[1]*(desired_w-1)+W3.shape[1]-cur_shape[1]
pad3 = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
W1 = np.ascontiguousarray(W1.transpose((3,0,1,2)).astype(np.float32))
bias1 = np.ascontiguousarray(bias1.astype(np.float32))
W2 = np.ascontiguousarray(W2.transpose((3,0,1,2)).astype(np.float32))
bias2 = np.ascontiguousarray(bias2.astype(np.float32))
W3 = np.ascontiguousarray(W3.transpose((3,0,1,2)).astype(np.float32))
bias3 = np.ascontiguousarray(bias3.astype(np.float32))
self.types.append('basic_block_2')
self.sizes.append(None)
self.strides.append((stride1, stride2, stride3))
self.pads.append((pad1, pad2, pad3))
self.shapes.append(cur_shape)
self.weights.append((W1, W2, W3))
self.biases.append((bias1, bias2, bias3))
elif type(layer) == ResidualStart:
print('basic block')
conv1 = model.layers[i]
bn1 = model.layers[i+1]
conv2 = model.layers[i+3]
bn2 = model.layers[i+4]
i = i+6
W1, bias1 = conv1.get_weights()
W2, bias2 = conv2.get_weights()
gamma1, beta1, mean1, std1 = bn1.get_weights()
std1 = np.sqrt(std1+0.001) #Avoids zero division
a1 = gamma1/std1
b1 = gamma1*mean1/std1+beta1
W1 = a1*W1
bias1 = a1*bias1+b1
gamma2, beta2, mean2, std2 = bn2.get_weights()
std2 = np.sqrt(std2+0.001) #Avoids zero division
a2 = gamma2/std2
b2 = gamma2*mean2/std2+beta2
W2 = a2*W2
bias2 = a2*bias2+b2
padding1 = conv1.get_config()['padding']
stride1 = conv1.get_config()['strides']
pad1 = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding1 == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride1[0]))
desired_w = int(np.ceil(cur_shape[0]/stride1[1]))
total_padding_h = stride1[0]*(desired_h-1)+W1.shape[0]-cur_shape[0]
total_padding_w = stride1[1]*(desired_w-1)+W1.shape[1]-cur_shape[1]
pad1 = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
cur_shape = (int((cur_shape[0]+pad1[0]+pad1[1]-W1.shape[0])/stride1[0])+1, int((cur_shape[1]+pad1[2]+pad1[3]-W1.shape[1])/stride1[1])+1, W1.shape[3])
padding2 = conv2.get_config()['padding']
stride2 = conv2.get_config()['strides']
pad2 = (0,0,0,0) #p_hl, p_hr, p_wl, p_wr
if padding2 == 'same':
desired_h = int(np.ceil(cur_shape[0]/stride2[0]))
desired_w = int(np.ceil(cur_shape[0]/stride2[1]))
total_padding_h = stride2[0]*(desired_h-1)+W2.shape[0]-cur_shape[0]
total_padding_w = stride2[1]*(desired_w-1)+W2.shape[1]-cur_shape[1]
pad2 = (int(np.floor(total_padding_h/2)),int(np.ceil(total_padding_h/2)),int(np.floor(total_padding_w/2)),int(np.ceil(total_padding_w/2)))
W1 = np.ascontiguousarray(W1.transpose((3,0,1,2)).astype(np.float32))
bias1 = np.ascontiguousarray(bias1.astype(np.float32))
W2 = np.ascontiguousarray(W2.transpose((3,0,1,2)).astype(np.float32))
bias2 = np.ascontiguousarray(bias2.astype(np.float32))
self.types.append('basic_block')
self.sizes.append(None)
self.strides.append((stride1, stride2))
self.pads.append((pad1, pad2))
self.shapes.append(cur_shape)
self.weights.append((W1, W2))
self.biases.append((bias1, bias2))
else:
print(str(type(layer)))
raise ValueError('Invalid Layer Type')
print(cur_shape)
def predict(self, data):
return self.model(data)
@njit
def UL_conv_bound(A, B, pad, stride, shape, W, b, inner_pad, inner_stride, inner_shape):
A_new = np.zeros((A.shape[0], A.shape[1], A.shape[2], inner_stride[0]*(A.shape[3]-1)+W.shape[1], inner_stride[1]*(A.shape[4]-1)+W.shape[2], W.shape[3]), dtype=np.float32)
B_new = B.copy()
assert A.shape[5] == W.shape[0]
for x in range(A_new.shape[0]):
p_start = np.maximum(0, pad[0]-stride[0]*x)
p_end = np.minimum(A.shape[3], shape[0]+pad[0]-stride[0]*x)
t_start = np.maximum(0, -stride[0]*inner_stride[0]*x+inner_stride[0]*pad[0]+inner_pad[0])
t_end = np.minimum(A_new.shape[3], inner_shape[0]-stride[0]*inner_stride[0]*x+inner_stride[0]*pad[0]+inner_pad[0])
for y in range(A_new.shape[1]):
q_start = np.maximum(0, pad[2]-stride[1]*y)
q_end = np.minimum(A.shape[4], shape[1]+pad[2]-stride[1]*y)
u_start = np.maximum(0, -stride[1]*inner_stride[1]*y+inner_stride[1]*pad[2]+inner_pad[2])
u_end = np.minimum(A_new.shape[4], inner_shape[1]-stride[1]*inner_stride[1]*y+inner_stride[1]*pad[2]+inner_pad[2])
for t in range(t_start, t_end):
for u in range(u_start, u_end):
for p in range(p_start, p_end):
for q in range(q_start, q_end):
if 0<=t-inner_stride[0]*p<W.shape[1] and 0<=u-inner_stride[1]*q<W.shape[2]:
A_new[x,y,:,t,u,:] += np.dot(A[x,y,:,p,q,:],W[:,t-inner_stride[0]*p,u-inner_stride[1]*q,:])
for p in range(p_start, p_end):
for q in range(q_start, q_end):
B_new[x,y,:] += np.dot(A[x,y,:,p,q,:],b)
return A_new, B_new
basic_block_2_cache = {}
def UL_basic_block_2_bound(A, B, pad, stride, W1, W2, W3, b1, b2, b3, pad1, pad2, pad3, stride1, stride2, stride3, upper=True):
LB, UB = basic_block_2_cache[np.sum(W1)]
A1, B1 = UL_conv_bound(A, B, np.asarray(pad), np.asarray(stride), np.asarray(UB.shape), W2, b2, np.asarray(pad2), np.asarray(stride2), np.asarray(UB.shape))
inter_pad = (stride2[0]*pad[0]+pad2[0], stride2[0]*pad[1]+pad2[1], stride2[1]*pad[2]+pad2[2], stride2[1]*pad[3]+pad2[3])
inter_stride = (stride2[0]*stride[0], stride2[1]*stride[1])
alpha_u, alpha_l, beta_u, beta_l = linear_bounds(LB, UB)
if upper:
A1, B1 = UL_relu_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), alpha_u, alpha_l, beta_u, beta_l)
else:
A1, B1 = UL_relu_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), alpha_l, alpha_u, beta_l, beta_u)
A1, B1 = UL_conv_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), np.asarray(UB.shape), W1, b1, np.asarray(pad1), np.asarray(stride1), np.asarray(UB.shape))
A2, B2 = UL_conv_bound(A, B, np.asarray(pad), np.asarray(stride), np.asarray(UB.shape), W3, b3, np.asarray(pad3), np.asarray(stride3), np.asarray(UB.shape))
height_diff = A1.shape[3]-A2.shape[3]
width_diff = A1.shape[4]-A2.shape[4]
assert height_diff % 2 == 0
assert width_diff % 2 == 0
d_h = height_diff//2
d_w = width_diff//2
A1[:,:,:,d_h:A1.shape[3]-d_h,d_w:A1.shape[4]-d_w,:] += A2
return A1, B1+B2-B
basic_block_cache = {}
def UL_basic_block_bound(A, B, pad, stride, W1, W2, b1, b2, pad1, pad2, stride1, stride2, upper=True):
LB, UB = basic_block_cache[np.sum(W1)]
A1, B1 = UL_conv_bound(A, B, np.asarray(pad), np.asarray(stride), np.asarray(UB.shape), W2, b2, np.asarray(pad2), np.asarray(stride2), np.asarray(UB.shape))
inter_pad = (stride2[0]*pad[0]+pad2[0], stride2[0]*pad[1]+pad2[1], stride2[1]*pad[2]+pad2[2], stride2[1]*pad[3]+pad2[3])
inter_stride = (stride2[0]*stride[0], stride2[1]*stride[1])
alpha_u, alpha_l, beta_u, beta_l = linear_bounds(LB, UB)
if upper:
A1, B1 = UL_relu_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), alpha_u, alpha_l, beta_u, beta_l)
else:
A1, B1 = UL_relu_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), alpha_l, alpha_u, beta_l, beta_u)
A1, B1 = UL_conv_bound(A1, B1, np.asarray(inter_pad), np.asarray(inter_stride), np.asarray(UB.shape), W1, b1, np.asarray(pad1), np.asarray(stride1), np.asarray(UB.shape))
height_diff = A1.shape[3]-A.shape[3]
width_diff = A1.shape[4]-A.shape[4]
assert height_diff % 2 == 0
assert width_diff % 2 == 0
d_h = height_diff//2
d_w = width_diff//2
A1[:,:,:,d_h:A1.shape[3]-d_h,d_w:A1.shape[4]-d_w,:] += A
return A1, B1
@njit
def UL_relu_bound(A, B, pad, stride, alpha_u, alpha_l, beta_u, beta_l):
A_new = np.zeros_like(A)
A_plus = np.maximum(A, 0)
A_minus = np.minimum(A, 0)
B_new = B.copy()
for x in range(A_new.shape[0]):
p_start = np.maximum(0, pad[0]-stride[0]*x)
p_end = np.minimum(A.shape[3], alpha_u.shape[0]+pad[0]-stride[0]*x)
for y in range(A_new.shape[1]):
q_start = np.maximum(0, pad[2]-stride[1]*y)
q_end = np.minimum(A.shape[4], alpha_u.shape[1]+pad[2]-stride[1]*y)
for z in range(A_new.shape[2]):
for p in range(p_start, p_end):
for q in range(q_start, q_end):
for r in range(A.shape[5]):
A_new[x,y,z,p,q,r] += A_plus[x,y,z,p,q,r]*alpha_u[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]
A_new[x,y,z,p,q,r] += A_minus[x,y,z,p,q,r]*alpha_l[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]
B_new[x,y,z] += A_plus[x,y,z,p,q,r]*beta_u[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]
B_new[x,y,z] += A_minus[x,y,z,p,q,r]*beta_l[p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],r]
return A_new, B_new
@njit
def UL_pool_bound(A, B, pad, stride, pool_size, inner_pad, inner_stride, inner_shape, alpha_u, alpha_l, beta_u, beta_l):
A_new = np.zeros((A.shape[0], A.shape[1], A.shape[2], inner_stride[0]*(A.shape[3]-1)+pool_size[0], inner_stride[1]*(A.shape[4]-1)+pool_size[1], A.shape[5]), dtype=np.float32)
B_new = B.copy()
A_plus = np.maximum(A, 0)
A_minus = np.minimum(A, 0)
for x in range(A_new.shape[0]):
for y in range(A_new.shape[1]):
for t in range(A_new.shape[3]):
for u in range(A_new.shape[4]):
inner_index_x = t+stride[0]*inner_stride[0]*x-inner_stride[0]*pad[0]-inner_pad[0]
inner_index_y = u+stride[1]*inner_stride[1]*y-inner_stride[1]*pad[2]-inner_pad[2]
if 0<=inner_index_x<inner_shape[0] and 0<=inner_index_y<inner_shape[1]:
for p in range(A.shape[3]):
for q in range(A.shape[4]):
if 0<=t-inner_stride[0]*p<alpha_u.shape[0] and 0<=u-inner_stride[1]*q<alpha_u.shape[1] and 0<=p+stride[0]*x-pad[0]<alpha_u.shape[2] and 0<=q+stride[1]*y-pad[2]<alpha_u.shape[3]:
A_new[x,y,:,t,u,:] += A_plus[x,y,:,p,q,:]*alpha_u[t-inner_stride[0]*p,u-inner_stride[1]*q,p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],:]
A_new[x,y,:,t,u,:] += A_minus[x,y,:,p,q,:]*alpha_l[t-inner_stride[0]*p,u-inner_stride[1]*q,p+stride[0]*x-pad[0],q+stride[1]*y-pad[2],:]
B_new += conv_full(A_plus,beta_u,pad,stride) + conv_full(A_minus,beta_l,pad,stride)
return A_new, B_new
def compute_bounds(weights, biases, out_shape, nlayer, x0, eps, p_n, pads, strides, sizes, types, LBs, UBs, activation):
if types[nlayer-1] == 'relu':
if activation == 'relu':
return np.maximum(LBs[nlayer-1], 0), np.maximum(UBs[nlayer-1], 0), None, None, None, None, None, None
elif activation == 'sigmoid':
return sigmoid(LBs[nlayer-1]), sigmoid(UBs[nlayer-1]), None, None, None, None, None, None
elif types[nlayer-1] == 'conv':
A_u = weights[nlayer-1].reshape((1, 1, weights[nlayer-1].shape[0], weights[nlayer-1].shape[1], weights[nlayer-1].shape[2], weights[nlayer-1].shape[3]))*np.ones((out_shape[0], out_shape[1], weights[nlayer-1].shape[0], weights[nlayer-1].shape[1], weights[nlayer-1].shape[2], weights[nlayer-1].shape[3]), dtype=np.float32)
B_u = biases[nlayer-1]*np.ones((out_shape[0], out_shape[1], out_shape[2]), dtype=np.float32)
A_l = A_u.copy()
B_l = B_u.copy()
pad = pads[nlayer-1]
stride = strides[nlayer-1]
elif types[nlayer-1] == 'pool':
A_u = np.eye(out_shape[2]).astype(np.float32).reshape((1,1,out_shape[2],1,1,out_shape[2]))*np.ones((out_shape[0], out_shape[1], out_shape[2], 1,1,out_shape[2]), dtype=np.float32)
B_u =
|
np.zeros(out_shape, dtype=np.float32)
|
numpy.zeros
|
"""
<NAME> 2018
Contributions from <NAME>, <NAME>
"""
import tensorflow as tf
import numpy as np
import stimulus
import time
import parameters as p
#from parameters import *
import os, sys
import pickle
import AdamOpt
import importlib
# Ignore "use compiled version of TensorFlow" errors
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
class Model:
def __init__(self, input_data, target_data, mask, generator_var):
# Load the input activity, the target data, and the training mask for this batch of trials
self.input_data = tf.unstack(input_data, axis=1)
self.target_data = tf.unstack(target_data, axis=1)
self.mask = tf.unstack(mask, axis=0)
self.generator_var = generator_var
# Load meta network state
self.W_in = tf.constant(p.par['W_in'], dtype=tf.float32)
self.W_ei = tf.constant(p.par['EI_matrix'], dtype=tf.float32)
self.hidden_init = tf.constant(p.par['h_init'], dtype=tf.float32)
self.w_rnn_mask = tf.constant(p.par['w_rnn_mask'], dtype=tf.float32)
self.w_out_mask = tf.constant(p.par['w_out_mask'], dtype=tf.float32)
# Load the initial synaptic depression and facilitation to be used at the start of each trial
self.synapse_x_init = tf.constant(p.par['syn_x_init'])
self.synapse_u_init = tf.constant(p.par['syn_u_init'])
# Declare all necessary variables for each network
self.declare_variables()
# Build the TensorFlow graph
self.run_model()
# Train the model
self.optimize()
def declare_variables(self):
C = 0.01
for n in range(len(p.par['generator_dims']) - 1):
with tf.variable_scope('generator'+str(n)):
w = tf.random_uniform([p.par['generator_dims'][n+1],p.par['generator_dims'][n]], \
-C, C, dtype=tf.float32)
tf.get_variable('W', initializer=w, trainable=True)
#b = tf.zeros([p.par['generator_dims'][n+1], 1], dtype=tf.float32)
#tf.get_variable('b', initializer=b, trainable=True)
with tf.variable_scope('generator_output'):
w = tf.random_uniform([p.par['wrnn_generator_dims'][1],p.par['wrnn_generator_dims'][0]], \
-C, C, dtype=tf.float32)
tf.get_variable('W_rnn', initializer=w, trainable=True)
w = tf.random_uniform([p.par['wout_generator_dims'][1],p.par['wout_generator_dims'][0]], \
-C, C, dtype=tf.float32)
tf.get_variable('W_out', initializer=w, trainable=True)
w = tf.random_uniform([p.par['brnn_generator_dims'][1],p.par['brnn_generator_dims'][0]], \
-C, C, dtype=tf.float32)
tf.get_variable('b_rnn', initializer=w, trainable=True)
w = tf.random_uniform([p.par['bout_generator_dims'][1],p.par['bout_generator_dims'][0]], \
-C, C, dtype=tf.float32)
tf.get_variable('b_out', initializer=w, trainable=True)
def run_model(self):
self.networks_hidden = []
self.networks_output = []
self.networks_syn_x = []
self.networks_syn_u = []
z = tf.reshape(self.generator_var,(p.par['generator_dims'][0], 1))
z = tf.nn.dropout(z, 0.999999)
for m in range(len(p.par['generator_dims']) - 1):
with tf.variable_scope('generator'+str(m), reuse=True):
W = tf.get_variable('W')
#b = tf.get_variable('b')
z = tf.nn.relu(tf.matmul(W, z))
z = tf.nn.dropout(z, 0.999999)
with tf.variable_scope('generator_output', reuse=True):
W_rnn_gen = tf.get_variable('W_rnn')
W_out_gen = tf.get_variable('W_out')
b_rnn_gen = tf.get_variable('b_rnn')
b_out_gen = tf.get_variable('b_out')
W_rnn = tf.nn.relu(tf.matmul(W_rnn_gen, z))
W_out = tf.nn.relu(tf.matmul(W_out_gen, z))
b_rnn = tf.matmul(b_rnn_gen, z)
b_out = tf.matmul(b_out_gen, z)
W_rnn = tf.reshape(W_rnn,(p.par['n_hidden'], p.par['n_hidden']))
W_out = tf.reshape(W_out,(p.par['n_output'], p.par['n_hidden']))
b_rnn = tf.reshape(b_rnn,(p.par['n_hidden'], 1))
b_out = tf.reshape(b_out,(p.par['n_output'], 1))
#b_out = tf.constant(np.zeros((3,1)), dtype = tf.float32)
if p.par['EI']:
W_rnn *= self.w_rnn_mask
W_rnn = tf.matmul(tf.nn.relu(W_rnn), self.W_ei)
W_out *= self.w_out_mask
#hidden_state_hist = []
syn_x_hist = []
syn_u_hist = []
#output_rec = []
h = self.hidden_init
syn_x = self.synapse_x_init
syn_u = self.synapse_u_init
for t, x in enumerate(self.input_data):
# Calculate effect of STP
if p.par['synapse_config'] == 'std_stf':
# implement both synaptic short term facilitation and depression
syn_x += p.par['alpha_std']*(1-syn_x) - p.par['dt_sec']*syn_u*syn_x*h
syn_u += p.par['alpha_stf']*(p.par['U']-syn_u) + p.par['dt_sec']*p.par['U']*(1-syn_u)*h
syn_x = tf.minimum(np.float32(1), tf.nn.relu(syn_x))
syn_u = tf.minimum(np.float32(1), tf.nn.relu(syn_u))
h_post = syn_u*syn_x*h
elif p.par['synapse_config'] == 'std':
# implement synaptic short term depression, but no facilitation
# we assume that syn_u remains constant at 1
syn_x += p.par['alpha_std']*(1-syn_x) - p.par['dt_sec']*syn_x*h
syn_x = tf.minimum(np.float32(1), tf.nn.relu(syn_x))
syn_u = tf.minimum(np.float32(1), tf.nn.relu(syn_u))
h_post = syn_x*h
elif p.par['synapse_config'] == 'stf':
# implement synaptic short term facilitation, but no depression
# we assume that syn_x remains constant at 1
syn_u += p.par['alpha_stf']*(p.par['U']-syn_u) + p.par['dt_sec']*p.par['U']*(1-syn_u)*h
syn_u = tf.minimum(
|
np.float32(1)
|
numpy.float32
|
import traceback
from collections import OrderedDict
from enum import Enum
from functools import reduce
from math import pi
from typing import Any, Callable, Dict, Iterator, List, MutableMapping, NamedTuple, Optional, Set, Tuple, Union
import numpy as np
import SimpleITK
from scipy.spatial.distance import cdist
from sympy import symbols
from .. import autofit as af
from ..algorithm_describe_base import AlgorithmProperty, Register
from ..channel_class import Channel
from ..class_generator import enum_register
from ..mask_partition_utils import BorderRim, MaskDistanceSplit
from ..universal_const import UNIT_SCALE, Units
from ..utils import class_to_dict
from .measurement_base import AreaType, Leaf, MeasurementEntry, MeasurementMethodBase, Node, PerComponent
# TODO change image to channel in signature of measurement calculate_property
class ProhibitedDivision(Exception):
pass
class SettingsValue(NamedTuple):
function: Callable
help_message: str
arguments: Optional[dict]
is_component: bool
default_area: Optional[AreaType] = None
class ComponentsInfo(NamedTuple):
segmentation_components: np.ndarray
mask_components: np.ndarray
components_translation: Dict[int, List[int]]
def empty_fun(_a0=None, _a1=None):
"""This function is be used as dummy reporting function."""
pass
MeasurementValueType = Union[float, List[float], str]
MeasurementResultType = Tuple[MeasurementValueType, str]
MeasurementResultInputType = Tuple[MeasurementValueType, str, Tuple[PerComponent, AreaType]]
FILE_NAME_STR = "File name"
class MeasurementResult(MutableMapping[str, MeasurementResultType]):
"""
Class for storing measurement info.
"""
def __init__(self, components_info: ComponentsInfo):
self.components_info = components_info
self._data_dict = OrderedDict()
self._units_dict: Dict[str, str] = dict()
self._type_dict: Dict[str, Tuple[PerComponent, AreaType]] = dict()
self._units_dict["Mask component"] = ""
self._units_dict["Segmentation component"] = ""
def __str__(self):
text = ""
for key, val in self._data_dict.items():
text += f"{key}: {val}; type {self._type_dict[key]}, units {self._units_dict[key]}\n"
return text
def __setitem__(self, k: str, v: MeasurementResultInputType) -> None:
self._data_dict[k] = v[0]
self._units_dict[k] = v[1]
self._type_dict[k] = v[2]
if k == FILE_NAME_STR:
self._data_dict.move_to_end(FILE_NAME_STR, False)
def __delitem__(self, v: str) -> None:
del self._data_dict[v]
del self._units_dict[v]
del self._type_dict[v]
def __getitem__(self, k: str) -> MeasurementResultType:
return self._data_dict[k], self._units_dict[k]
def __len__(self) -> int:
return len(self._data_dict)
def __iter__(self) -> Iterator[str]:
return iter(self._data_dict)
def set_filename(self, path_fo_file: str):
"""
Set the name of the file to be presented in the first position.
"""
self._data_dict[FILE_NAME_STR] = path_fo_file
self._type_dict[FILE_NAME_STR] = PerComponent.No, AreaType.ROI
self._units_dict[FILE_NAME_STR] = ""
self._data_dict.move_to_end(FILE_NAME_STR, False)
def get_component_info(self) -> Tuple[bool, bool]:
"""
Get information about which types of components are in storage.
:return: has_mask_components, has_segmentation_components
"""
has_mask_components = any([x == PerComponent.Yes and y != AreaType.ROI for x, y in self._type_dict.values()])
has_segmentation_components = any(
[x == PerComponent.Yes and y == AreaType.ROI for x, y in self._type_dict.values()]
)
return has_mask_components, has_segmentation_components
def get_labels(self) -> List[str]:
"""Get labels for measurement. Base are keys of this storage.
If has mask components, or has segmentation_components then add this labels"""
has_mask_components, has_segmentation_components = self.get_component_info()
labels = list(self._data_dict.keys())
index = 1 if FILE_NAME_STR in self._data_dict else 0
if has_mask_components:
labels.insert(index, "Mask component")
if has_segmentation_components:
labels.insert(index, "Segmentation component")
return labels
def get_units(self) -> List[str]:
return [self._units_dict[x] for x in self.get_labels()]
def get_global_names(self):
"""Get names for only parameters which are not 'PerComponent.Yes'"""
labels = list(self._data_dict.keys())
return [x for x in labels if self._type_dict[x][0] != PerComponent.Yes]
def get_global_parameters(self):
"""Get only parameters which are not 'PerComponent.Yes'"""
if FILE_NAME_STR in self._data_dict:
name = self._data_dict[FILE_NAME_STR]
res = [name]
iterator = iter(self._data_dict.keys())
next(iterator)
else:
res = []
iterator = iter(self._data_dict.keys())
for el in iterator:
per_comp = self._type_dict[el][0]
val = self._data_dict[el]
if per_comp != PerComponent.Yes:
res.append(val)
return res
def get_separated(self) -> List[List[MeasurementValueType]]:
"""Get measurements separated for each component"""
has_mask_components, has_segmentation_components = self.get_component_info()
if not (has_mask_components or has_segmentation_components):
return [list(self._data_dict.values())]
if has_mask_components and has_segmentation_components:
translation = self.components_info.components_translation
component_info = [(x, y) for x in translation.keys() for y in translation[x]]
elif has_mask_components:
component_info = [(0, x) for x in self.components_info.mask_components]
else:
component_info = [(x, 0) for x in self.components_info.segmentation_components]
counts = len(component_info)
mask_to_pos = {val: i for i, val in enumerate(self.components_info.mask_components)}
segmentation_to_pos = {val: i for i, val in enumerate(self.components_info.segmentation_components)}
if FILE_NAME_STR in self._data_dict:
name = self._data_dict[FILE_NAME_STR]
res = [[name] for _ in range(counts)]
iterator = iter(self._data_dict.keys())
next(iterator)
else:
res = [[] for _ in range(counts)]
iterator = iter(self._data_dict.keys())
if has_segmentation_components:
for i, num in enumerate(component_info):
res[i].append(num[0])
if has_mask_components:
for i, num in enumerate(component_info):
res[i].append(num[1])
for el in iterator:
per_comp, area_type = self._type_dict[el]
val = self._data_dict[el]
if per_comp != PerComponent.Yes:
for i in range(counts):
res[i].append(val)
else:
if area_type == AreaType.ROI:
for i, (seg, _mask) in enumerate(component_info):
res[i].append(val[segmentation_to_pos[seg]])
else:
for i, (_seg, mask) in enumerate(component_info):
res[i].append(val[mask_to_pos[mask]])
return res
class MeasurementProfile:
PARAMETERS = ["name", "chosen_fields", "reversed_brightness", "use_gauss_image", "name_prefix"]
def __init__(self, name, chosen_fields: List[MeasurementEntry], name_prefix=""):
self.name = name
self.chosen_fields: List[MeasurementEntry] = chosen_fields
self._need_mask = False
for cf_val in chosen_fields:
self._need_mask = self._need_mask or self.need_mask(cf_val.calculation_tree)
self.name_prefix = name_prefix
def to_dict(self):
return {"name": self.name, "chosen_fields": self.chosen_fields, "name_prefix": self.name_prefix}
def need_mask(self, tree):
if isinstance(tree, Leaf):
return tree.area == AreaType.Mask or tree.area == AreaType.Mask_without_ROI
else:
return self.need_mask(tree.left) or self.need_mask(tree.right)
def _need_mask_without_segmentation(self, tree):
if isinstance(tree, Leaf):
return tree.area == AreaType.Mask_without_ROI
else:
return self._need_mask_without_segmentation(tree.left) or self._need_mask_without_segmentation(tree.right)
def _get_par_component_and_area_type(self, tree: Union[Node, Leaf]) -> Tuple[PerComponent, AreaType]:
if isinstance(tree, Leaf):
method = MEASUREMENT_DICT[tree.name]
area_type = method.area_type(tree.area)
if tree.per_component == PerComponent.Mean:
return PerComponent.No, area_type
return tree.per_component, area_type
else:
left_par, left_area = self._get_par_component_and_area_type(tree.left)
right_par, right_area = self._get_par_component_and_area_type(tree.right)
if PerComponent.Yes == left_par or PerComponent.Yes == right_par:
res_par = PerComponent.Yes
else:
res_par = PerComponent.No
area_set = {left_area, right_area}
if len(area_set) == 1:
res_area = area_set.pop()
elif AreaType.ROI in area_set:
res_area = AreaType.ROI
else:
res_area = AreaType.Mask_without_ROI
return res_par, res_area
def get_channels_num(self) -> Set[Channel]:
resp = set()
for el in self.chosen_fields:
resp.update(el.get_channel_num(MEASUREMENT_DICT))
return resp
def __str__(self):
text = "Set name: {}\n".format(self.name)
if self.name_prefix != "":
text += "Name prefix: {}\n".format(self.name_prefix)
text += "Measurements list:\n"
for el in self.chosen_fields:
text += "{}\n".format(el.name)
return text
def get_component_info(self, unit: Units):
"""
:return: list[((str, str), bool)]
"""
res = []
# Fixme remove binding to 3 dimensions
for el in self.chosen_fields:
res.append(
(
(self.name_prefix + el.name, el.get_unit(unit, 3)),
self._is_component_measurement(el.calculation_tree),
)
)
return res
def get_parameters(self):
return class_to_dict(self, *self.PARAMETERS)
def is_any_mask_measurement(self):
for el in self.chosen_fields:
if self.need_mask(el.calculation_tree):
return True
return False
def _is_component_measurement(self, node):
if isinstance(node, Leaf):
return node.per_component == PerComponent.Yes
else:
return self._is_component_measurement(node.left) or self._is_component_measurement(node.right)
def calculate_tree(
self, node: Union[Node, Leaf], segmentation_mask_map: ComponentsInfo, help_dict: dict, kwargs: dict
) -> Tuple[Union[float, np.ndarray], symbols, AreaType]:
"""
Main function for calculating a tree of measurements. It is executed recursively.
:param node: measurement to calculate
:param segmentation_mask_map: map from segmentation components to mask components. Needed for division
:param help_dict: dict to cache calculation results. It reduces recalculation of the same measurements.
:param kwargs: additional info needed by measurements
:return: measurement value
"""
if isinstance(node, Leaf):
method: MeasurementMethodBase = MEASUREMENT_DICT[node.name]
kw = dict(kwargs)
kw.update(node.dict)
hash_str = hash_fun_call_name(method, node.dict, node.area, node.per_component, node.channel)
area_type = method.area_type(node.area)
if hash_str in help_dict:
val = help_dict[hash_str]
else:
if node.channel is not None:
kw["channel"] = kw[f"chanel_{node.channel}"]
kw["channel_num"] = node.channel
else:
kw["channel_num"] = -1
kw["help_dict"] = help_dict
kw["_area"] = node.area
kw["_per_component"] = node.per_component
kw["_cache"] = True
if area_type == AreaType.Mask:
kw["area_array"] = kw["mask"]
elif area_type == AreaType.Mask_without_ROI:
kw["area_array"] = kw["mask_without_segmentation"]
elif area_type == AreaType.ROI:
kw["area_array"] = kw["segmentation"]
else:
raise ValueError(f"Unknown area type {node.area}")
if node.per_component != PerComponent.No:
kw["_cache"] = False
val = []
area_array = kw["area_array"]
if area_type == AreaType.ROI:
components = segmentation_mask_map.segmentation_components
else:
components = segmentation_mask_map.mask_components
for i in components:
kw["area_array"] = area_array == i
val.append(method.calculate_property(**kw))
if node.per_component == PerComponent.Mean:
val = np.mean(val) if len(val) else 0
else:
val = np.array(val)
else:
val = method.calculate_property(**kw)
help_dict[hash_str] = val
unit: symbols = method.get_units(3) if kw["channel"].shape[0] > 1 else method.get_units(2)
if node.power != 1:
return pow(val, node.power), pow(unit, node.power), area_type
return val, unit, area_type
elif isinstance(node, Node):
left_res, left_unit, left_area = self.calculate_tree(node.left, segmentation_mask_map, help_dict, kwargs)
right_res, right_unit, right_area = self.calculate_tree(
node.right, segmentation_mask_map, help_dict, kwargs
)
if node.op == "/":
if isinstance(left_res, np.ndarray) and isinstance(right_res, np.ndarray) and left_area != right_area:
area_set = {left_area, right_area}
if area_set == {AreaType.ROI, AreaType.Mask_without_ROI}:
raise ProhibitedDivision("This division is prohibited")
if area_set == {AreaType.ROI, AreaType.Mask}:
res = []
# TODO Test this part of code
for val, num in zip(left_res, segmentation_mask_map.segmentation_components):
div_vals = segmentation_mask_map.components_translation[num]
if len(div_vals) != 1:
raise ProhibitedDivision("Cannot calculate when object do not belongs to one mask area")
if left_area == AreaType.ROI:
res.append(val / right_res[div_vals[0] - 1])
else:
res.append(right_res[div_vals[0] - 1] / val)
return np.array(res), left_unit / right_unit, AreaType.ROI
left_area = AreaType.Mask_without_ROI
return left_res / right_res, left_unit / right_unit, left_area
raise ValueError("Wrong measurement: {}".format(node))
@staticmethod
def get_segmentation_to_mask_component(segmentation: np.ndarray, mask: Optional[np.ndarray]) -> ComponentsInfo:
"""
Calculate map from segmentation component num to mask component num
:param segmentation: numpy array with segmentation labeled as positive integers
:param mask: numpy array with mask labeled as positive integer
:return: map
"""
components = np.unique(segmentation)
if components[0] == 0 or components[0] is None:
components = components[1:]
mask_components = np.unique(mask)
if mask_components[0] == 0 or mask_components[0] is None:
mask_components = mask_components[1:]
res = OrderedDict()
if mask is None:
res = {i: [] for i in components}
elif np.max(mask) == 1:
res = {i: [1] for i in components}
else:
for num in components:
res[num] = list(np.unique(mask[segmentation == num]))
return ComponentsInfo(components, mask_components, res)
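# Illustrative sketch added by the editor (not part of the original module); names and values are
# assumptions. With a segmentation containing components {1, 2} and a binary mask, the mapping is:
#   info = MeasurementProfile.get_segmentation_to_mask_component(
#       segmentation=np.array([[0, 1, 1], [0, 2, 2]]),
#       mask=np.array([[0, 1, 1], [0, 1, 1]]),
#   )
#   # info.segmentation_components -> array([1, 2])
#   # info.mask_components         -> array([1])
#   # info.components_translation  -> {1: [1], 2: [1]}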
def get_component_and_area_info(self) -> List[Tuple[PerComponent, AreaType]]:
"""For each measurement check if is per component and in which types """
res = []
for el in self.chosen_fields:
tree = el.calculation_tree
res.append(self._get_par_component_and_area_type(tree))
return res
def calculate(
self,
channel: np.ndarray,
segmentation: np.ndarray,
mask: Optional[np.ndarray],
voxel_size,
result_units: Units,
range_changed: Callable[[int, int], Any] = None,
step_changed: Callable[[int], Any] = None,
time: int = 0,
time_pos: int = 0,
**kwargs,
) -> MeasurementResult:
"""
Calculate measurements on a given set of parameters
:param channel: main channel on which measurements should be calculated
:param segmentation: array with segmentation labeled as positive integers
:param mask:
:param voxel_size:
:param result_units:
:param range_changed: callback function to set information about the steps range
:param step_changed: callback function to set information about steps done
:param time: which data point should be measured
:param time_pos: axis of time
:param kwargs: additional data required by measurements, e.g. additional channels
:return: measurements
"""
def get_time(array: np.ndarray):
if array is not None and array.ndim == 4:
return array.take(time, axis=time_pos)
return array
if range_changed is None:
range_changed = empty_fun
if step_changed is None:
step_changed = empty_fun
if self._need_mask and mask is None:
raise ValueError("measurement need mask")
channel = channel.astype(np.float)
help_dict = dict()
segmentation_mask_map = self.get_segmentation_to_mask_component(segmentation, mask)
result = MeasurementResult(segmentation_mask_map)
result_scalar = UNIT_SCALE[result_units.value]
kw = {
"channel": get_time(channel),
"segmentation": get_time(segmentation),
"mask": get_time(mask),
"voxel_size": voxel_size,
"result_scalar": result_scalar,
}
for el in kwargs.keys():
if not el.startswith("channel_"):
raise ValueError(f"unknown parameter {el} of calculate function")
for num in self.get_channels_num():
if f"channel_{num}" not in kwargs:
raise ValueError(f"channel_{num} need to be passed as argument of calculate function")
kw.update(kwargs)
for el in self.chosen_fields:
if self._need_mask_without_segmentation(el.calculation_tree):
mm = mask.copy()
mm[kw["segmentation"] > 0] = 0
kw["mask_without_segmentation"] = mm
break
range_changed(0, len(self.chosen_fields))
for i, el in enumerate(self.chosen_fields):
step_changed(i)
tree, user_name = el.calculation_tree, el.name
component_and_area = self._get_par_component_and_area_type(tree)
try:
val, unit, _area = self.calculate_tree(tree, segmentation_mask_map, help_dict, kw)
if isinstance(val, np.ndarray):
val = list(val)
result[self.name_prefix + user_name] = val, str(unit).format(str(result_units)), component_and_area
except ZeroDivisionError:
result[self.name_prefix + user_name] = "Div by zero", "", component_and_area
except TypeError:
traceback.print_exc()
result[self.name_prefix + user_name] = "None div", "", component_and_area
except AttributeError:
result[self.name_prefix + user_name] = "No attribute", "", component_and_area
except ProhibitedDivision as e:
result[self.name_prefix + user_name] = e.args[0], "", component_and_area
return result
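# Hypothetical usage sketch (added for illustration; names other than those in the signature above
# are assumptions). Extra channels are passed as keyword arguments named "channel_<num>":
#   profile = MeasurementProfile("example", chosen_fields=[...])
#   result = profile.calculate(
#       channel=main_channel_array,
#       segmentation=labeled_roi_array,
#       mask=mask_array_or_None,
#       voxel_size=(210, 70, 70),
#       result_units=Units.nm,
#       channel_1=extra_channel_array,
#   )
#   print(result.get_labels(), result.get_separated())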
def calculate_main_axis(area_array: np.ndarray, channel: np.ndarray, voxel_size):
# TODO check if it produces good values
if len(channel.shape) == 4:
if channel.shape[0] != 1:
raise ValueError("This measurements do not support time data")
channel = channel[0]
cut_img = np.copy(channel)
cut_img[area_array == 0] = 0
if np.all(cut_img == 0):
return (0,) * len(voxel_size)
orientation_matrix, _ = af.find_density_orientation(cut_img, voxel_size, 1)
center_of_mass = af.density_mass_center(cut_img, voxel_size)
positions = np.array(np.nonzero(cut_img), dtype=np.float64)
for i, v in enumerate(reversed(voxel_size), start=1):
positions[-i] *= v
positions[-i] -= center_of_mass[i - 1]
centered = np.dot(orientation_matrix.T, positions)
size = np.max(centered, axis=1) - np.min(centered, axis=1)
return size
def get_main_axis_length(
index: int, area_array: np.ndarray, channel: np.ndarray, voxel_size, result_scalar, _cache=False, **kwargs
):
_cache = _cache and "_area" in kwargs and "_per_component" in kwargs
if _cache:
help_dict: Dict = kwargs["help_dict"]
_area: AreaType = kwargs["_area"]
_per_component: PerComponent = kwargs["_per_component"]
hash_name = hash_fun_call_name(calculate_main_axis, {}, _area, _per_component, kwargs["channel_num"])
if hash_name not in help_dict:
help_dict[hash_name] = calculate_main_axis(area_array, channel, [x * result_scalar for x in voxel_size])
return help_dict[hash_name][index]
else:
return calculate_main_axis(area_array, channel, [x * result_scalar for x in voxel_size])[index]
def hash_fun_call_name(
fun: Union[Callable, MeasurementMethodBase],
arguments: Dict,
area: AreaType,
per_component: PerComponent,
channel: Channel,
) -> str:
"""
Calculate a string used to properly cache measurement results.
:param fun: method for which the hash string should be calculated
:param arguments: its additional arguments
:param area: type of area
:param per_component: whether the measurement is per component
:param channel: channel number on which calculation is performed
:return: unique string for such set of arguments
"""
if hasattr(fun, "__module__"):
fun_name = f"{fun.__module__}.{fun.__name__}"
else:
fun_name = fun.__name__
return "{}: {} # {} & {} * {}".format(fun_name, arguments, area, per_component, channel)
class Volume(MeasurementMethodBase):
text_info = "Volume", "Calculate volume of current segmentation"
@classmethod
def calculate_property(cls, area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
return np.count_nonzero(area_array) * pixel_volume(voxel_size, result_scalar)
@classmethod
def get_units(cls, ndim):
return symbols("{}") ** ndim
class Voxels(MeasurementMethodBase):
text_info = "Voxels", "Calculate number of voxels of current segmentation"
@classmethod
def calculate_property(cls, area_array, **_): # pylint: disable=W0221
return np.count_nonzero(area_array)
@classmethod
def get_units(cls, ndim):
return symbols("1")
# From <NAME>., & <NAME>. (2002). Computing the diameter of a point set,
# 12(6), 489–509. https://doi.org/10.1142/S0218195902001006
def double_normal(point_index: int, point_positions: np.ndarray, points_array: np.ndarray):
"""
:param point_index: index of the starting point
:param point_positions: points array of size (points_num, number of dimensions)
:param points_array: bool array with information about which points are in the set
:return: 2-tuple of point indices forming the double normal, and the squared distance between them
"""
delta = 0
dn = 0, 0
while True:
new_delta = delta
points_array[point_index] = 0
dist_array = np.sum(np.array((point_positions - point_positions[point_index]) ** 2), 1)
dist_array[points_array == 0] = 0
point2_index = np.argmax(dist_array)
if dist_array[point2_index] > new_delta:
delta = dist_array[point2_index]
dn = point_index, point2_index
point_index = point2_index
if new_delta == delta:
return dn, delta
def iterative_double_normal(points_positions: np.ndarray):
"""
:param points_positions: points array of size (points_num, number of dimensions)
:return: square of the diameter, and a 2-tuple of point indices indicating which points were chosen
"""
delta = 0
dn = 0, 0
point_index = 0
points_array = np.ones(points_positions.shape[0], dtype=np.bool)
while True:
dn_r, delta_r = double_normal(point_index, points_positions, points_array)
if delta_r > delta:
delta = delta_r
dn = dn_r
mid_point = (points_positions[dn[0]] + points_positions[dn[1]]) / 2
dist_array = np.sum(np.array((points_positions - mid_point) ** 2), 1)
dist_array[~points_array] = 0
if np.any(dist_array >= delta / 4):
point_index = np.argmax(dist_array)
else:
break
else:
break
return delta, dn
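# Minimal sketch (illustrative, not from the original module): iterative_double_normal returns the
# squared diameter of a point cloud together with the pair of point indices that realise it;
# Diameter.calculate_property below wraps it with border extraction and voxel scaling.
#   pts = np.random.rand(1000, 3)        # (points_num, ndim) positions
#   diam_sq, (i, j) = iterative_double_normal(pts)
#   diameter = np.sqrt(diam_sq)          # distance between pts[i] and pts[j]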
class Diameter(MeasurementMethodBase):
text_info = "Diameter", "Diameter of area"
@staticmethod
def calculate_property(area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
pos = np.transpose(np.nonzero(get_border(area_array))).astype(np.float)
if pos.size == 0:
return 0
for i, val in enumerate([x * result_scalar for x in reversed(voxel_size)], start=1):
pos[:, -i] *= val
diam_sq = iterative_double_normal(pos)[0]
return np.sqrt(diam_sq)
@classmethod
def get_units(cls, ndim):
return symbols("{}")
class DiameterOld(MeasurementMethodBase):
text_info = "Diameter old", "Diameter of area (Very slow)"
@staticmethod
def calculate_property(area_array, voxel_size, result_scalar, **_): # pylint: disable=W0221
return calc_diam(get_border(area_array), [x * result_scalar for x in voxel_size])
@classmethod
def get_units(cls, ndim):
return symbols("{}")
class PixelBrightnessSum(MeasurementMethodBase):
text_info = "Pixel brightness sum", "Sum of pixel brightness for current segmentation"
@staticmethod
def calculate_property(area_array: np.ndarray, channel: np.ndarray, **_): # pylint: disable=W0221
"""
:param area_array: mask for the area
:param channel: data, same shape as area_array
:return: sum of pixel brightness in the given area
"""
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if np.any(area_array):
return np.sum(channel[area_array > 0])
return 0
@classmethod
def get_units(cls, ndim):
return symbols("Pixel_brightness")
@classmethod
def need_channel(cls):
return True
class ComponentsNumber(MeasurementMethodBase):
text_info = "Components number", "Calculate number of connected components on segmentation"
@staticmethod
def calculate_property(area_array, **_): # pylint: disable=W0221
return np.unique(area_array).size - 1
@classmethod
def get_starting_leaf(cls):
return Leaf(cls.text_info[0], per_component=PerComponent.No)
@classmethod
def get_units(cls, ndim):
return symbols("count")
class MaximumPixelBrightness(MeasurementMethodBase):
text_info = "Maximum pixel brightness", "Calculate maximum pixel brightness for current area"
@staticmethod
def calculate_property(area_array, channel, **_):
if area_array.shape != channel.shape:
if area_array.size == channel.size:
channel = channel.reshape(area_array.shape)
else:
raise ValueError("channel and mask do not fit each other")
if
|
np.any(area_array)
|
numpy.any
|
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from numbers import Number
import numpy as np
from pymor.core.interfaces import BasicInterface, abstractmethod, abstractproperty, abstractclassmethod
def _numpy_version_older(version_tuple):
np_tuple = tuple(int(p) for p in
|
np.__version__.split('.')
|
numpy.__version__.split
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA=True):
batch_size = prediction.size(0)
stride = inp_dim // prediction.size(2)
grid_size = inp_dim // stride
# TODO: grid_size ==prediction.size(2)?
bbox_attrs = 5 + num_classes
num_anchors = len(anchors)
prediction = prediction.view(
batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
prediction = prediction.transpose(1, 2).contiguous()
prediction = prediction.view(
batch_size, grid_size*grid_size*num_anchors, bbox_attrs)
anchors = [(a[0]/stride, a[1]/stride) for a in anchors]
prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])
grid =
|
np.arange(grid_size)
|
numpy.arange
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 11:16, 26/04/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from numpy import dot, array, sum, matmul, where, sqrt, sign, min, cos, pi, exp, round
from opfunu.cec.utils import BasicFunction
class Model(BasicFunction):
def __init__(self, problem_size=None, cec_type="cec2013", f_shift="shift_data", f_matrix="M_D", bound=(-100, 100),
dimensions=(2, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)):
BasicFunction.__init__(self, cec_type)
self.problem_size = problem_size
self.dimensions = dimensions
self.check_dimensions(self.problem_size)
self.bound = bound
self.f_shift = f_shift + ".txt"
self.f_matrix = f_matrix + str(self.problem_size) + ".txt"
self.shift = self.load_matrix_data__(self.f_shift)[:, :problem_size]
self.matrix = self.load_matrix_data__(self.f_matrix)
def F1(self, solution=None, name="Sphere Function", shift=None, f_bias=-1400):
if shift is None:
shift = self.shift[0]
return sum((solution - shift)**2) + f_bias
def F2(self, solution=None, name="Rotated High Conditioned Elliptic Function", shift=None, matrix=None, f_bias=-1300):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = dot(matrix, solution - shift)
t2 = self.osz_func__(t1)
return self.elliptic__(t2) + f_bias
def F3(self, solution=None, name="Rotated Bent Cigar Function", shift=None, matrix=None, f_bias=-1200):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
t1 = dot(matrix[:self.problem_size, :], solution - shift)
t2 = self.asy_func__(t1, beta=0.5)
t3 = dot(matrix[self.problem_size:2 * self.problem_size, :], t2)
return self.bent_cigar__(t3) + f_bias
def F4(self, solution=None, name="Rotated Discus Function", shift=None, matrix=None, f_bias=-1100):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = dot(matrix, solution - shift)
t2 = self.osz_func__(t1)
return self.discus__(t2) + f_bias
def F5(self, solution=None, name="Different Powers Function", shift=None, f_bias=-1000):
if shift is None:
shift = self.shift[0]
return self.different_powers__(solution - shift) + f_bias
def F6(self, solution=None, name="Rotated Rosenbrock’s Function", shift=None, matrix=None, f_bias=-900):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:self.problem_size, :]
t1 = 2.048 * (solution - shift) / 100
t2 = dot(matrix, t1) + 1
return self.rosenbrock__(t2) + f_bias
def F7(self, solution=None, name="Rotated Schaffers F7 Function", shift=None, matrix=None, f_bias=-800):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2*self.problem_size, :]
t2 = dot(matrix[:self.problem_size, :], solution - shift)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.schaffers_f7__(t6) + f_bias
def F8(self, solution=None, name="Rotated Ackley’s Function", shift=None, matrix=None, f_bias=-700):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t2 = dot(matrix[:self.problem_size, :], solution - shift)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.ackley__(t6) + f_bias
def F9(self, solution=None, name="Rotated Weierstrass Function", shift=None, matrix=None, f_bias=-600):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 0.5 * (solution - shift) / 100
t2 = dot(matrix[:self.problem_size, :], t1)
t3 = self.asy_func__(t2, 0.5)
t4 = self.create_diagonal_matrix__(self.problem_size, alpha=10)
t5 = matmul(t4, matrix[self.problem_size: 2 * self.problem_size, :])
t6 = dot(t5, t3)
return self.weierstrass__(t6) + f_bias
def F10(self, solution=None, name="Rotated Griewank’s Function", shift=None, matrix=None, f_bias=-500):
if shift is None and matrix is None:
shift = self.shift[0]
matrix = self.matrix[:2 * self.problem_size, :]
t1 = 600 * (solution - shift) / 100
t2 = self.create_diagonal_matrix__(self.problem_size, alpha=100)
t3 = matmul(t2, matrix[:self.problem_size, :])
t4 =
|
dot(t3, t1)
|
numpy.dot
|
import os
from numpy.testing import *
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
import skimage.filter as F
from skimage import data_dir, img_as_float
class TestSobel():
def test_00_00_zeros(self):
"""Sobel on an array of all zeros"""
result = F.sobel(np.zeros((10, 10)), np.ones((10, 10), bool))
assert (
|
np.all(result == 0)
|
numpy.all
|
import random
import math
import numpy as np;
import matplotlib.pyplot as plt;
# Flame Shaped Dataset
def generator1():
rad = 2
num = 300
t = np.random.uniform(0.0, 2.0*np.pi, num)
r = rad * np.sqrt(np.random.uniform(0.0, 1.0, num))
x1 = r * np.cos(t)+ np.random.uniform(0.0,0.6,300)
y1 = r * np.sin(t)+ np.random.uniform(0.0,0.6,300)
size = 4;
dom = 0.125*3.14 + np.random.uniform(0.0,0.6,300)*1.25*3.14;
x2= size*np.sin(dom) +
|
np.random.uniform(0,2,300)
|
numpy.random.uniform
|
"""
This module contains pre-defined methods to compute gradients for common estimators in
combination with common loss functions. These gradients can be used to train model trees [1]_.
All these gradients are computed with respect to the model parameters.
In addition, the module also provides pre-defined methods to renormalize gradients of common estimators. Details
can also be found in [1]_.
References
----------
.. [1] <NAME>. and <NAME>.,
"A Gradient-Based Split Criterion for Highly Accurate and Transparent Model Trees",
Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), 2019
"""
# Copyright 2019 SCHUFA Holding AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
def get_default_gradient_function(model):
"""
Returns the default gradient computation method for well-known models and default loss functions
Parameters
----------
model
A predictive model
Returns
-------
gradient_function: callable
A function that computes gradients of the loss for the given type of model.
See Also
--------
gradient_logistic_regression_cross_entropy, gradient_linear_regression_square_loss
"""
if type(model) not in _DEFAULT_GRADIENTS:
raise ValueError(f"No default gradient defined for {type(model)}.")
return _DEFAULT_GRADIENTS[type(model)]
def get_default_renormalization_function(model):
"""
Returns the default renormalization function for gradients of well-known models and default loss functions
Parameters
----------
model
A predictive model
Returns
-------
gradient_function: callable
A function that computes gradients of the loss for the given type of model.
See Also
--------
renormalize_linear_model_gradients
Example function
get_default_gradient_function
Default gradient computation
"""
if type(model) not in _DEFAULT_RENORMALIZATION:
raise ValueError(f"No default renormalization defined for {type(model)}.")
return _DEFAULT_RENORMALIZATION[type(model)]
def gradient_logistic_regression_cross_entropy(model, X, y):
"""
Computes the gradients of a logistic regression model with cross-entropy loss
Parameters
----------
model : LogisticRegression
The model of which the gradient shall be computed.
The model should already be fitted to some data (typically to the data of the parent node)
X : array-like, shape = [n_samples, n_features]
Input Features of the points at which the gradient should be computed
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target variable. Corresponds to the samples in `X`
Returns
-------
g: array-like, shape = [n_samples, n_parameters]
Gradient of the cross entropy loss with respect to the model parameters at the samples given by `X` and `y`
Notes
-----
* The number of model parameters is equal to the number of features (if the intercept is not trainable) or
has one additional parameter (if the intercept is trainable)
* See [2]_ for the math behind it
References
----------
.. [2] https://peterroelants.github.io/posts/cross-entropy-logistic/
"""
if len(model.classes_) > 2:
# TODO: multi-class case is not supported, yet
raise ValueError(
f"This method currently only supports binary classification problems, but we got {len(model.classes_)} classes.")
# Compute Gradient (see also [1])
factor = model.predict_proba(X)[:, 1:2] - np.reshape(y, (-1, 1))
g = factor * X
if model.fit_intercept:
# Append artificial intercept gradient
n_intercept = np.prod(np.shape(model.intercept_))
n_samples = np.shape(X)[0]
g = np.concatenate([g, factor * np.ones((n_samples, n_intercept))], axis=1)
return g
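# Assumed usage sketch (illustration only): the returned gradient matrix has one row per sample and
# one column per model parameter (coefficients, plus one column for the intercept if it is trainable).
#   clf = LogisticRegression().fit(X_parent, y_parent)   # binary labels
#   g = gradient_logistic_regression_cross_entropy(clf, X, y)
#   # g.shape == (n_samples, n_features + 1) when fit_intercept=True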
def gradient_linear_regression_square_loss(model, X, y):
"""
Computes the gradients of a linear regression model with square loss
Parameters
----------
model : LinearRegression
The model of which the gradient shall be computed.
The model should already be fitted to some data (typically to the data of the parent node)
X : array-like, shape = [n_samples, n_features]
Input Features of the points at which the gradient should be computed
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target variable. Corresponds to the samples in `X`
Returns
-------
g: array-like, shape = [n_samples, n_parameters]
Gradient of the square loss with respect to the model parameters at the samples given by `X` and `y`
Notes
-----
* The number of model parameters is equal to the number of features (if the intercept is not trainable) or
has one additional parameter (if the intercept is trainable)
"""
# Prediction
y_ = model.predict(X)
# Residuals
r = y_ - y # TODO: handle shapes (n,) and (n,1) in y_ and y
if len(r.shape) == 1:
r = np.reshape(r, (-1, 1))
n_out = r.shape[1]
# Gradient by output
g = [r[:, o:o + 1] * X for o in range(n_out)]
# Concatenate along parameter axis (axis = 1)
g = np.concatenate(g, axis=1)
if model.fit_intercept:
# Append intercept gradient: The intercept gradient equals to the residuals
g = np.concatenate([g, r], axis=1)
return g
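# Assumed usage sketch (illustration only): for a multi-output LinearRegression the per-output
# gradients are concatenated along the parameter axis, so the column count is
# n_outputs * n_features (+ n_outputs intercept columns if fit_intercept=True).
#   reg = LinearRegression().fit(X_parent, Y_parent)     # Y_parent shape (n_samples, n_outputs)
#   g = gradient_linear_regression_square_loss(reg, X, Y)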
def renormalize_linear_model_gradients(model, gradients, a, c):
"""
Renormalizes gradients of a linear model.
This function applies to the linear case where a vector x is linearly normalized by `a * x + c`.
Parameters
----------
model: LinearRegression or LogisticRegression
The model that generated the gradients
gradients: array, shape=[n_samples, n_params]
A matrix of gradients where each row corresponds to one gradient
a: array, shape=[n_samples, n_features]
The normalization factor
c: array, shape=[n_samples, n_features]
The normalization offset
Returns
-------
gradients: array, shape=[n_samples, n_params]
Renormalized gradients
Warnings
--------
Note that this method modifies gradients inplace.
"""
# Shape of the coefficients
c_shape = np.shape(model.coef_)
# Number of input features
m = len(a)
if len(c_shape) == 2:
# 2-dim coefficients
# --> multiple outputs
d = c_shape[0] # Dimension of the output
# Multiple elements of the gradient need to be normalized by the same factor
# --> Repeat a
a = np.repeat(a, d, axis=1)
# Compute number of parameter to modify
n = np.shape(a)[1]
# Modify the gradients according to eq. (14) of [1]_
# here, A is a diagonal matrix with diagonal elements `a`
# Note: in case of multi-dimensional outputs, c must be
c = [c * gradients[:, i:i + 1] for i in range(n,
|
np.shape(gradients)
|
numpy.shape
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import itertools
import numpy as np
import pytest
from brainstorm.handlers import NumpyHandler
from brainstorm.optional import has_pycuda
non_default_handlers = []
handler_ids = []
if has_pycuda:
from brainstorm.handlers import PyCudaHandler
non_default_handlers.append(PyCudaHandler())
handler_ids.append("PyCudaHandler")
# np.random.seed(1234)
ref_dtype = np.float32
ref = NumpyHandler(ref_dtype)
some_2d_shapes = ((1, 1), (4, 1), (1, 4), (5, 5), (3, 4), (4, 3))
some_nd_shapes = ((1, 1, 4), (1, 1, 3, 3), (3, 4, 2, 1))
np.set_printoptions(linewidth=150)
def operation_check(handler, op_name, ref_args, ignored_args=(), atol=1e-8):
args = get_args_from_ref_args(handler, ref_args)
getattr(ref, op_name)(*ref_args)
getattr(handler, op_name)(*args)
check_list = []
for i, (ref_arg, arg) in enumerate(zip(ref_args, args)):
if i in ignored_args:
# print(i, "was ignored")
continue
if type(ref_arg) is ref.array_type:
arg_ref = handler.get_numpy_copy(arg)
check = np.allclose(ref_arg, arg_ref, atol=atol)
check_list.append(check)
if not check:
print("-" * 40)
print("\nCheck failed for argument number %d:" % i)
print("Reference (expected) array {}:\n{}".format(
ref_arg.shape, ref_arg))
print("\nObtained array {}:\n{}".format(arg_ref.shape,
arg_ref))
d = ref_arg.ravel() - arg_ref.ravel()
print("Frobenius Norm of differences: ", np.sum(d*d))
else:
check = (ref_arg == arg)
check_list.append(check)
if not check:
print("-" * 40)
print("Check failed for argument number %d:" % i)
print("\nReference (expected) value:\n", ref_arg)
print("\nObtained value:\n", arg)
d = ref_arg.ravel() - arg_ref.ravel()
print("Frobenius Norm of differences: ", np.sum(d*d))
# print("Check was ", check)
if False in check_list:
return False
else:
return True
def get_args_from_ref_args(handler, ref_args):
args = []
for ref_arg in ref_args:
if type(ref_arg) is ref.array_type:
temp = handler.create_from_numpy(ref_arg)
args.append(temp)
else:
args.append(ref_arg)
return args
def get_random_arrays(shapes=some_2d_shapes, dtype=ref_dtype):
arrays = []
for shape in shapes:
arrays.append(np.random.randn(*shape).astype(dtype))
return arrays
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sum_t(handler):
list_a = get_random_arrays()
list_axis = [0, 1, None]
for a, axis in itertools.product(list_a, list_axis):
if axis == 0:
out = np.zeros((1, a.shape[1]), dtype=ref_dtype)
elif axis == 1:
out = np.zeros((a.shape[0]), dtype=ref_dtype)
else:
out = np.array([0.], dtype=ref_dtype).reshape(tuple())
ref_args = (a, axis, out)
assert operation_check(handler, 'sum_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros((a.shape[0], a.shape[0]), dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_add_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.random.randn(a.shape[0], a.shape[0]).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_add_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*a.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*b.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_broadcast_t(handler):
args_to_check = [
([1], 0, [3]),
([1], 0, [1]),
([1, 2], 0, [3, 2]),
([3, 1], 1, [3, 2]),
([1, 2, 5], 0, [3, 2, 5]),
([3, 1, 5], 1, [3, 2, 5]),
([3, 2, 1], 2, [3, 2, 5])
]
a_shapes, axes, out_shapes = list(zip(*args_to_check))
list_a = get_random_arrays(a_shapes)
list_out = get_random_arrays(out_shapes)
for ref_args in zip(list_a, axes, list_out):
assert operation_check(handler, 'broadcast_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_clip_t(handler):
list_a = get_random_arrays(some_nd_shapes)
list_clip_min = [-0.4, 0, 0.2]
list_clip_max = [-0.1, 0, 0.3]
for a, clip_min, clip_max in itertools.product(list_a, list_clip_min,
list_clip_max):
if clip_max >= clip_min:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, clip_min, clip_max, out)
assert operation_check(handler, 'clip_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_log_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
a += 10 # to remove negatives
out =
|
np.zeros_like(a, dtype=ref_dtype)
|
numpy.zeros_like
|
import autolens as al
import numpy as np
import grid_util
import pixelized_mass
import pixelized_source
import potential_correction_util as pcu
import scipy.linalg as linalg
from scipy.spatial import Delaunay
from potential_correction_util import LinearNDInterpolatorExt
from matplotlib import pyplot as plt
import copy
from plot import pixelized_source as ps_plot
class IterativePotentialCorrect(object):
def __init__(self, masked_imaging, shape_2d_dpsi=None, shape_2d_src=(50,50)):
"""
shape_2d_dpsi: the shape of the potential correction grid; if not set, this will be set to the lens image shape
shape_2d_src: the shape of the grid used for source reconstruction (defined on the image plane)
"""
self.masked_imaging = masked_imaging #include grid, mask, image, noise, psf etc
self.image_data = self.masked_imaging.image.native #native image resolution, not the oversampling one
self.image_noise = self.masked_imaging.noise_map.native
self.psf_kernel = self.masked_imaging.psf.native
image_mask = self.masked_imaging.mask
dpix_data = self.masked_imaging.pixel_scales[0]
if shape_2d_dpsi is None:
shape_2d_dpsi = self.image_data.shape
self.grid_obj = grid_util.SparseDpsiGrid(image_mask, dpix_data, shape_2d_dpsi=shape_2d_dpsi) #Note, mask_data has not been cleaned
self.shape_2d_src = shape_2d_src
def initialize_iteration(
self,
psi_2d_start=None,
niter=100,
lam_s_start=None,
lam_dpsi_start=1e9,
psi_anchor_points=None,
):
"""
psi_2d_start: the lens potential map of the initial mass model, typically given by a macro model such as an elliptical power-law model.
niter: the upper limit of the number of potential correction iterations
lam_s_start: the initial regularization strength of the pixelized source.
lam_dpsi_start: the initial regularization strength of the potential correction (dpsi)
psi_anchor_points: the anchor points of the lens potential. We require the lens potential values at those anchor points to
remain unchanged during potential correction, to avoid various degeneracy problems (see sec.2.3 in our document);
psi_anchor_points has the following form: [(y1,x1), (y2,x2), (y3,x3)]
"""
self._niter = niter
self._lam_s_start = lam_s_start
self._lam_dpsi_start = lam_dpsi_start
self._psi_anchor_points = psi_anchor_points
self._psi_2d_start = psi_2d_start
self._psi_2d_start[self.masked_imaging.mask] = 0.0 #set the lens potential of masked pixels to 0
# self.cum_dpsi_2d_tmp = np.zeros_like(self._psi_2d_start, dtype='float') #the cumulative 2d poential correction in native image resolution.
# self.cum_dpsi_2d = np.zeros_like(self._psi_2d_start, dtype='float') #the cumulative 2d poential correction in native image resolution.
#do iteration-0, the macro model
self.count_iter = 0 #count the iteration number
#initialized some necessary info for potential correction
#1--------regularization of source and lens potential
self.lam_s_this_iter = self._lam_s_start #source regularization strength of current iteration
self.lam_dpsi_this_iter = self._lam_dpsi_start #potential correction regularization strength of current iteration
#2--------the lensing potential of currect iteration
self.pix_mass_this_iter = self.pixelized_mass_from(self._psi_2d_start) #init pixelized mass object
#3---------pix src obj is mainly used for evaluating the lens mapping matrix given a lens mass model
self.pix_src_obj = pixelized_source.PixelizedSource(
self.masked_imaging,
pixelization_shape_2d=self.shape_2d_src,
)
#to begin the potential correction algorithm, we need an initial guess of the source light
#do the source inversion for the initial mass model
self.pix_src_obj.source_inversion(
self.pix_mass_this_iter,
lam_s=self.lam_s_this_iter,
)
#Note: self.s_points_this_iter are given in autolens [(y1,x1),(y2,x2),...] order
self._s_values_start = self.pix_src_obj.src_recontruct[:] #the intensity values of current best-fit pixelized source model
self._s_points_start = np.copy(self.pix_src_obj.relocated_pixelization_grid) #the location of pixelized source grids (on source-plane).
self.s_values_this_iter = np.copy(self._s_values_start)
self.s_points_this_iter = np.copy(self._s_points_start)
#Init other auxiliary info
self._psi_anchor_values = self.pix_mass_this_iter.eval_psi_at(self._psi_anchor_points)
self.pix_src_obj.inverse_covariance_matrix()
self.inv_cov_matrix = np.copy(self.pix_src_obj.inv_cov_mat) #inverse covariance matrix
self._ns = len(self.s_values_this_iter) #number source grids
self._np = len(self.grid_obj.xgrid_dpsi_1d) #number dpsi grids
self._d_1d = self.image_data[~self.grid_obj.mask_data] #1d unmasked image data
self._n_1d = self.image_noise[~self.grid_obj.mask_data] #1d unmasked noise
self.B_matrix = np.copy(self.pix_src_obj.psf_blur_matrix) #psf bluring matrix, see eq.7 in our document
self.Cf_matrix = np.copy(
self.grid_obj.map_matrix
) #see the $C_f$ matrix in our document (eq.7), which interpolate data defined on coarser dpsi grid to native image grid
self.Dpsi_matrix = pcu.dpsi_gradient_operator_from(
self.grid_obj.Hx_dpsi,
self.grid_obj.Hy_dpsi
) #the potential correction gradient operator, see the eq.8 in our document
self.dpsi_grid_points = np.vstack([self.grid_obj.ygrid_dpsi_1d, self.grid_obj.xgrid_dpsi_1d]).T #points of sparse potential correction grid
#calculate the merit of initial macro model. see eq.16 in our document
# merit_0 = np.sum(self.pix_src_obj.norm_residual_map**2) + \
# np.matmul(
# self.pix_src_obj.src_recontruct.T,
# np.matmul(
# self.pix_src_obj.regularization_matrix,
# self.pix_src_obj.src_recontruct
# )
# )
self.merit_0 = np.inf #float(merit_0)
self.merit_this_iter = self.merit_0
#visualize iteration-0
self.visualize_iteration(iter_num=self.count_iter)
#assign info of this iteration to the previous one
self.update_iterations()
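# Hypothetical driver loop (illustration only; variable names are assumptions): after
# initialize_iteration sets up iteration 0, run_this_iteration (defined below) is called
# repeatedly until it reports convergence or the iteration budget is used up.
#   ipc = IterativePotentialCorrect(masked_imaging, shape_2d_src=(50, 50))
#   ipc.initialize_iteration(psi_2d_start=psi_macro_2d, niter=100, lam_s_start=1.0,
#                            lam_dpsi_start=1e9, psi_anchor_points=anchor_points_yx)
#   for _ in range(ipc._niter):
#       if ipc.run_this_iteration():
#           break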
def pixelized_mass_from(self, psi_2d):
pix_mass_obj = pixelized_mass.PixelizedMass(
xgrid=self.grid_obj.xgrid_data,
ygrid=self.grid_obj.ygrid_data,
psi_map=psi_2d,
mask=self.grid_obj.mask_data,
Hx=self.grid_obj.Hx_data,
Hy=self.grid_obj.Hy_data,
Hxx=self.grid_obj.Hxx_data,
Hyy=self.grid_obj.Hyy_data,
)
return pix_mass_obj
def update_lam_s(self):
"""
update the regularization strength of source with iterations
"""
self.lam_s_this_iter = self.lam_s_prev_iter
def update_lam_dpsi(self):
"""
update the regularization strength of potential correction with iterations
"""
self.lam_dpsi_this_iter = self.lam_dpsi_prev_iter * 1.0 #* 0.1
pass
def update_iterations(self):
self.count_iter += 1
#this iteration becomes previous iteration
self.lam_s_prev_iter = self.lam_s_this_iter
self.lam_dpsi_prev_iter = self.lam_dpsi_this_iter
self.pix_mass_prev_iter = copy.copy(self.pix_mass_this_iter)
self.s_values_prev_iter = np.copy(self.s_values_this_iter)
self.s_points_prev_iter = np.copy(self.s_points_this_iter)
self.merit_prev_iter = self.merit_this_iter
#erase information of this iteration
self.lam_s_this_iter = None
self.lam_dpsi_this_iter = None
self.pix_mass_this_iter = None
self.s_values_this_iter = None
self.s_points_this_iter = None
self.merit_this_iter = None
def return_L_matrix(self):
#need to update lens mapping beforehand
return np.copy(self.pix_src_obj.mapping_matrix)
def return_Ds_matrix(self, pix_mass_obj, source_points, source_values):
self.alpha_dpsi_yx = pix_mass_obj.eval_alpha_yx_at(self.dpsi_grid_points) #use previously found pix_mass_object for ray tracing
self.alpha_dpsi_yx = np.asarray(self.alpha_dpsi_yx).T
self.src_plane_dpsi_yx = self.dpsi_grid_points - self.alpha_dpsi_yx #the location of dpsi grid on the source-plane
source_gradient = pcu.source_gradient_from(
source_points, #previously found best-fit src pixelization grids
source_values, #previously found best-fit src reconstruction
self.src_plane_dpsi_yx,
cross_size=1e-3,
)
return pcu.source_gradient_matrix_from(source_gradient)
def return_RTR_matrix(self):
#need to update lens mapping beforehand
#see eq.21 in our document, the regularization matrix for both source and lens potential corrections.
RTR_matrix = np.zeros((self._ns+self._np, self._ns+self._np), dtype='float')
self.pix_src_obj.build_reg_matrix(lam_s=self.lam_s_this_iter) #this statement depend on the lens mass model (via the `mapper`)
RTR_matrix[0:self._ns, 0:self._ns] = np.copy(self.pix_src_obj.regularization_matrix)
HTH_dpsi = np.matmul(self.grid_obj.Hx_dpsi_4th.T, self.grid_obj.Hx_dpsi_4th) + \
np.matmul(self.grid_obj.Hy_dpsi_4th.T, self.grid_obj.Hy_dpsi_4th)
RTR_matrix[self._ns:, self._ns:] = self.lam_dpsi_this_iter**2 * HTH_dpsi
return RTR_matrix
def Mc_RTR_matrices_from(self, pix_mass_obj, source_points, source_values):
self.pix_src_obj.build_lens_mapping(pix_mass_obj) #update the lens mapping matrix with pixelized mass object
self.L_matrix = self.return_L_matrix()
self.Ds_matrix = self.return_Ds_matrix(pix_mass_obj, source_points, source_values)
self.intensity_deficit_matrix = -1.0*np.matmul(
self.Cf_matrix,
np.matmul(
self.Ds_matrix,
self.Dpsi_matrix,
)
)
self.Lc_matrix = np.hstack([self.L_matrix, self.intensity_deficit_matrix]) #see eq.14 in our document
self.Mc_matrix = np.matmul(self.B_matrix, self.Lc_matrix)
self.RTR_matrix = self.return_RTR_matrix()
def return_data_vector(self):
#need to update Mc_matrix beforehand
#see the right hand side of eq.20 in our document
data_vector = np.matmul(
np.matmul(self.Mc_matrix.T, self.inv_cov_matrix),
self._d_1d,
)
return data_vector
def run_this_iteration(self):
#update regularization parameters for this iteration
self.update_lam_s()
self.update_lam_dpsi()
self.Mc_RTR_matrices_from(self.pix_mass_prev_iter, self.s_points_prev_iter, self.s_values_prev_iter)
self.data_vector = self.return_data_vector()
#solve the next source and potential corrections
self.curve_term = np.matmul(
np.matmul(self.Mc_matrix.T, self.inv_cov_matrix),
self.Mc_matrix,
)
self.curve_reg_term = self.curve_term + self.RTR_matrix
# print('~~~~~~~~~~~~~~~~iteration-{}, r-condition number {:.5e}'.format(self.count_iter, 1/np.linalg.cond(self.curve_reg_term)))
self.r_vector = linalg.solve(self.curve_reg_term, self.data_vector)
#extract source
self.s_values_this_iter = self.r_vector[0:self._ns]
self.s_points_this_iter = np.copy(self.pix_src_obj.relocated_pixelization_grid)
#extract potential correction
dpsi_2d = np.zeros_like(self._psi_2d_start, dtype='float')
dpsi_2d[~self.grid_obj.mask_data] = np.matmul(
self.Cf_matrix,
self.r_vector[self._ns:]
)
#update lens potential with potential correction at this iteration
psi_2d_this_iter = self.pix_mass_prev_iter.psi_map + dpsi_2d #the new 2d lens potential map
#rescale the current lens potential, to avoid various degeneracy problems. (see sec.2.3 in our document);
psi_2d_this_iter = self.rescale_lens_potential(psi_2d_this_iter)
#get pixelized mass object of this iteration
self.pix_mass_this_iter = self.pixelized_mass_from(psi_2d_this_iter)
#do visualization
self.visualize_iteration(iter_num=self.count_iter)
#check convergence
#TODO, better to be s_{i} and psi_{i+1}?
self.merit_this_iter = self.return_this_iter_merit()
if self.has_converged():
return True
# if not converge, keep updating
self.update_iterations()
return False
def rescale_lens_potential(self, psi_2d_in):
if not hasattr(self, 'tri_psi_interp'):
self.tri_psi_interp = Delaunay(
list(zip(self.grid_obj.xgrid_data_1d, self.grid_obj.ygrid_data_1d))
)
psi_interpolator = LinearNDInterpolatorExt(self.tri_psi_interp, psi_2d_in[~self.grid_obj.mask_data])
psi_anchor_values_new = psi_interpolator(self._psi_anchor_points[:,1], self._psi_anchor_points[:,0])
psi_2d_out = pcu.rescale_psi_map(
self._psi_anchor_values,
self._psi_anchor_points,
psi_anchor_values_new,
psi_2d_in,
self.grid_obj.xgrid_data,
self.grid_obj.ygrid_data,
)
psi_2d_out[self.grid_obj.mask_data] = 0.0 #always set lens potential values at masked region to 0.0
return psi_2d_out
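# Annotation (not from the original source): pinning the potential at the anchor points
# removes the additive constant-plus-gradient freedom in psi that the lensed image alone
# cannot constrain, so successive potential corrections do not drift along those
# unconstrained directions.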
def has_converged(self):
relative_change = (self.merit_prev_iter - self.merit_this_iter)/self.merit_this_iter
print('next VS current merit:', self.merit_prev_iter, self.merit_this_iter, relative_change)
# if relative_change < 1e-5:
# return True
# else:
# return False
return False
def return_this_iter_merit(self):
self.mapped_reconstructed_image = np.matmul(self.Mc_matrix, self.r_vector)
self.residual_map = self.mapped_reconstructed_image - self._d_1d
self.norm_residual_map = self.residual_map / self._n_1d
self.chi_squared = np.sum(self.norm_residual_map**2)
self.reg_terms = np.matmul(
self.r_vector.T,
np.matmul(
self.RTR_matrix,
self.r_vector
)
) #include the contribution from both source and potential corrections
return self.chi_squared + self.reg_terms
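# Annotation (restating the method above in symbols, an assumption about intent):
#   merit = sum_k ((M_c r - d)_k / n_k)^2 + r^T (R^T R) r
# i.e. a noise-normalized chi-squared plus the combined source / potential-correction
# regularization penalty evaluated at the current solution vector r.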
def run_iter_solve(self):
for ii in range(1, self._niter):
condition = self.run_this_iteration()
if condition:
print('111', 'code converged')
break
else:
print('111',ii, self.count_iter)
def visualize_iteration(self, basedir='./result', iter_num=0):
plt.figure(figsize=(15, 10))
percent = [0,100]
cbpar = {}
cbpar['fraction'] = 0.046
cbpar['pad'] = 0.04
myargs = {'origin':'upper'}
cmap = copy.copy(plt.get_cmap('jet'))
cmap.set_bad(color='white')
myargs['cmap'] = cmap
myargs['extent'] = copy.copy(self.grid_obj.image_bound)
markersize = 10
rgrid = np.sqrt(self.grid_obj.xgrid_data**2 + self.grid_obj.ygrid_data**2)
limit = np.max(rgrid[~self.grid_obj.mask_data])
#--------data image
plt.subplot(231)
vmin = np.percentile(self.image_data,percent[0])
vmax = np.percentile(self.image_data,percent[1])
masked_image_data = np.ma.masked_array(self.image_data, mask=self.grid_obj.mask_data)
plt.imshow(masked_image_data,vmax=vmax,**myargs)
plt.plot(self._psi_anchor_points[:,1], self._psi_anchor_points[:,0], 'k+', ms=markersize)
cb=plt.colorbar(**cbpar)
cb.ax.minorticks_on()
cb.ax.tick_params(labelsize='small')
plt.xlim(-1.0*limit, limit)
plt.ylim(-1.0*limit, limit)
plt.title(f'Data, Niter={iter_num}')
plt.xlabel('Arcsec')
plt.ylabel('Arcsec')
#model reconstruction given current mass model
mapped_reconstructed_image_2d = np.zeros_like(self.image_data)
self.pix_src_obj.source_inversion(self.pix_mass_this_iter, lam_s=self.lam_s_this_iter)
mapped_reconstructed_image_2d[~self.grid_obj.mask_data] = np.copy(self.pix_src_obj.mapped_reconstructed_image)
plt.subplot(232)
vmin = np.percentile(mapped_reconstructed_image_2d,percent[0])
vmax = np.percentile(mapped_reconstructed_image_2d,percent[1])
|
import unittest
import sys
import bottlechest as bn
import numpy as np
class TestCountNans(unittest.TestCase):
def test2d(self):
a = np.ones((4, 5))
for x, y in ((0, 0), (0, 1), (0, 2), (3, 1)):
a[x, y] = float("nan")
self.assertEqual(bn.countnans(a), 4)
self.assertEqual(bn.slow.countnans(a), 4)
np.testing.assert_array_equal(bn.countnans(a, axis=0), [1, 2, 1, 0, 0])
np.testing.assert_array_equal(bn.slow.countnans(a, axis=0), [1, 2, 1, 0, 0])
np.testing.assert_array_equal(bn.countnans(a, axis=1), [3, 0, 0, 1])
np.testing.assert_array_equal(bn.slow.countnans(a, axis=1), [3, 0, 0, 1])
def test2d_w(self):
a = np.ones((4, 5))
w = np.random.random((4, 5))
|
# Implements Shared Sparsity Confounder Selection suggested by:
# https://arxiv.org/abs/2011.01979
import warnings
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from causallib.preprocessing.confounder_selection import _BaseConfounderSelection
__all__ = ["SharedSparsityConfounderSelection"]
class MCPSelector:
# TODO: transpose `theta` and rename it `coef_` to align with sklearn models
def __init__(self, lmda="auto", alpha=1, step=0.1, max_iter=1000, tol=1e-3):
"""Constructor for MCPSelector. This class computes shared
sparsity matrix using proximal gradient descent applied with
MCP regularizer.
Args:
lmda (str|float): Parameter (>= 0) to control shape of MCP regularizer.
The bigger the value the stronger the regularization.
"auto" will auto-select good regularization value.
alpha (float): Associated lambda parameter (>= 0) to control shape of MCP regularizer.
The smaller the value the stronger the regularization.
step (float): Step size for proximal gradient, equivalent of learning rate.
max_iter (int): Maximum number of iterations of MCP proximal gradient.
tol (float): Stopping criterion for MCP. If the normalized value of
proximal gradient is less than tol then the algorithm is assumed
to have converged.
"""
super().__init__()
self.alpha = alpha
self.lmda = lmda
self.step = step
self.max_iter = max_iter
self.tol = tol
self.epsilon_safe_division = 1e-6
def _initialize_internal_params(self, X, a, y):
treatments = list(a.unique())
n_treatments = len(treatments)
n_confounders = X.shape[1]
if self.lmda == "auto":
avg_num_samples = len(y) / n_treatments
lmda = 0.2 * np.sqrt(n_treatments * np.log(n_confounders) / avg_num_samples)
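# Annotation (not part of the original causallib source): this "auto" value follows the
# usual sqrt(K * log(d) / n) scaling for a group-penalized selector - K treatment groups,
# d candidate confounders, and roughly n samples per treatment group - so the penalty
# grows with dimensionality and shrinks with sample size.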
|
import os
import shutil
import unittest
import copy
import onnx
import numpy as np
from onnx import helper, TensorProto, numpy_helper, onnx_pb
from onnxruntime.quantization.quant_utils import QuantizationMode
from neural_compressor.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer
from neural_compressor.adaptor.ox_utils.qdq_quantizer import QDQQuantizer
from neural_compressor.adaptor.ox_utils.util import QuantizedInitializer, QuantizedValue
import onnxruntime as ort
def build_model():
initializers = []
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 15, 15])
output = helper.make_tensor_value_info('reshape_output', TensorProto.FLOAT, [88, 11])
conv1_weight_initializer = numpy_helper.from_array(
np.random.randint(-1, 2, [3, 3, 3, 3]).astype(np.float32), name='conv1_weight')
conv1_node = helper.make_node('Conv', ['input', 'conv1_weight'], ['conv1_output'], name='conv1')
conv2_weight_initializer = numpy_helper.from_array(
np.random.randint(-1, 2, [5, 3, 3, 3]).astype(np.float32), name='conv2_weight')
conv2_node = helper.make_node('Conv', ['input', 'conv2_weight'], ['conv2_output'], name='conv2')
# 1, 8, 13, 13
concat_node = helper.make_node('Concat', ['conv1_output', 'conv2_output'], [
'concat_output'], name='Concat', axis=1)
# 1, 8, 11, 11
avg_args = {'kernel_shape': [3, 3]}
avgpool_node = helper.make_node('AveragePool', ['concat_output'], ['avg_output'], name='AveragePool', **avg_args)
reshape_node = onnx.helper.make_node('Reshape', ['avg_output', 'shape'], ['reshape_output'], name='Reshape')
initializers = [conv1_weight_initializer, conv2_weight_initializer]
initializers.append(onnx.numpy_helper.from_array(np.array([88, 11], dtype=np.int64), name='shape'))
graph = helper.make_graph([conv1_node, conv2_node, concat_node, avgpool_node, reshape_node],
'test', [input], [output], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
return model
class TestAdaptorONNXRT(unittest.TestCase):
qlinear_backend = QuantizationMode.QLinearOps
qdq_backend = 'qdqops'
integer_backend = QuantizationMode.IntegerOps
q_config = {"weight":{'dtype': 3,
'algorithm': 'minmax',
'scheme':'sym',
'granularity': 'per_tensor'},
'activation':{'dtype': 2,
'algorithm': 'minmax',
'scheme':'asym',
'granularity':'per_tensor'}
}
@classmethod
def setUpClass(cls):
os.makedirs('./onnxrt_test')
@classmethod
def tearDownClass(cls):
shutil.rmtree("./onnxrt_test", ignore_errors=True)
def qlinear_test(self, model, q_config, quantize_params, quantizable_op_types):
quantizer = ONNXQuantizer(copy.deepcopy(model),
q_config,
self.qlinear_backend,
True,
quantize_params,
quantizable_op_types)
quantizer.quantize_model()
assert quantizer.model.model
def qdq_test(self, model, q_config, quantize_params, quantizable_op_types):
quantizer = QDQQuantizer(copy.deepcopy(model),
q_config,
self.qdq_backend,
True,
quantize_params,
quantizable_op_types)
quantizer.quantize_model()
assert quantizer.model.model
def dynamic_test(self, model, q_config, quantize_params, quantizable_op_types):
quantizer = ONNXQuantizer(copy.deepcopy(model),
q_config,
self.integer_backend,
False,
quantize_params,
quantizable_op_types)
quantizer.quantize_model()
assert quantizer.model.model
def test_resize(self):
input_tensor = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 2, 26, 42])
conv_weight_arr = np.random.randint(-1, 2, [3, 2, 3, 3]).astype(np.float32)
conv_weight_initializer = onnx.numpy_helper.from_array(conv_weight_arr, name='conv1_weight')
conv_node = onnx.helper.make_node('Conv', ['input', 'conv1_weight'], ['conv_output'], name='conv_node')
initializers = [conv_weight_initializer]
output_tensor = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 48, 80])
resize_inputs = ['conv_output'] # resize_roi_name, resize_scales_name, resize_sizes_name]
resize_attrs = {'coordinate_transformation_mode': 'asymmetric', 'mode': 'nearest', 'nearest_mode': 'floor'}
resize_node = helper.make_node('Resize', resize_inputs, ['output'], name='resize_node', **resize_attrs)
resize_roi = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
resize_roi_name = 'resize_roi'
resize_roi_initializer = helper.make_tensor(resize_roi_name, TensorProto.FLOAT, [len(resize_roi)], resize_roi)
initializers.extend([resize_roi_initializer])
resize_node.input.extend([resize_roi_name])
resize_scales = [1.0, 1.0, 2.0, 2.0]
resize_scales_name = 'resize_scales'
resize_scales_initializer = helper.make_tensor(resize_scales_name, TensorProto.FLOAT, [
len(resize_scales)], resize_scales)
initializers.extend([resize_scales_initializer])
resize_node.input.extend([resize_scales_name])
graph = helper.make_graph([conv_node, resize_node], 'TestOpQuantizerResize_test_model',
[input_tensor], [output_tensor], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
model.ir_version = 7 # use stable onnx ir version
q_config = {'Conv': self.q_config,
'Resize': self.q_config}
quantize_params = {'input': [np.float32(10.), np.uint8(0)],
'conv1_weight': [np.float32(10.), np.uint8(0)],
'conv_output': [np.float32(10.), np.uint8(0)],
'output': [np.float32(10.), np.uint8(0)],
}
self.qlinear_test(model, q_config, quantize_params, ['Resize', 'Conv'])
self.qdq_test(model, q_config, quantize_params, ['Resize', 'Conv'])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 10)])
model.ir_version = 7 # use stable onnx ir version
self.qlinear_test(model, q_config, quantize_params, ['Resize', 'Conv'])
self.qdq_test(model, q_config, quantize_params, ['Resize', 'Conv'])
def test_embed(self):
input_ids_shape = [1, 4]
input_ids_tensor = helper.make_tensor_value_info('input_ids', TensorProto.INT32, input_ids_shape)
segment_ids_shape = [1, 4]
segment_ids_tensor = helper.make_tensor_value_info('segment_ids', TensorProto.INT32, segment_ids_shape)
# EmbedLayerNormalization Node Constants and Weights:
word_embed_shape = [32, 4]
word_embed_weights = np.random.random_sample(word_embed_shape).astype(dtype='float32')
word_embed_initializer = onnx.numpy_helper.from_array(word_embed_weights, name='word_embed')
pos_embed_shape = [16, 4]
pos_embed_weights = np.random.random_sample(pos_embed_shape).astype(dtype='float32')
pos_embed_initializer = onnx.numpy_helper.from_array(pos_embed_weights, name='pos_embed')
seg_embed_shape = [2, 4]
seg_embed_weights = np.random.random_sample(seg_embed_shape).astype(dtype='float32')
seg_embed_initializer = onnx.numpy_helper.from_array(seg_embed_weights, name='seg_embed')
gamma_shape = [4]
gamma = np.random.random_sample(gamma_shape).astype(dtype='float32')
gamma_initializer = onnx.numpy_helper.from_array(gamma, name='gamma')
beta_shape = [4]
beta = np.random.random_sample(beta_shape).astype(dtype='float32')
beta_initializer = onnx.numpy_helper.from_array(beta, name='beta')
# EmbedLayerNormalization Outputs:
layernorm_out_shape = [1, 4, 4]
layernorm_out_tensor = helper.make_tensor_value_info('layernorm_out', TensorProto.FLOAT, layernorm_out_shape)
mask_index_out_shape = [1]
mask_index_out_tensor = helper.make_tensor_value_info('mask_index_out', TensorProto.INT32, mask_index_out_shape)
# EmbedLayerNormalization Node:
embed_layer_norm_inputs = [
'input_ids', 'segment_ids', 'word_embed', 'pos_embed', 'seg_embed', 'gamma', 'beta'
]
embed_layer_norm_outputs = ['layernorm_out', 'mask_index_out']
embed_layer_norm_node = helper.make_node('EmbedLayerNormalization',
embed_layer_norm_inputs,
embed_layer_norm_outputs,
domain='com.microsoft',
name='Embed')
# Construct the Graph and Model:
nodes = [embed_layer_norm_node]
graph_name = 'embed_layernorm_graph'
inputs = [input_ids_tensor, segment_ids_tensor]
outputs = [layernorm_out_tensor, mask_index_out_tensor]
initializers = [
word_embed_initializer, pos_embed_initializer, seg_embed_initializer, gamma_initializer, beta_initializer
]
graph = helper.make_graph(nodes, graph_name, inputs, outputs, initializer=initializers)
model = helper.make_model(graph,
opset_imports=[helper.make_opsetid("com.microsoft", 14), helper.make_opsetid("ai.onnx", 14)])
model.ir_version = 7 # use stable onnx ir version
q_config = {'Embed': self.q_config}
quantize_params = {'word_embed': [np.float32(10.), np.uint8(0)],
'pos_embed': [np.float32(10.), np.uint8(0)],
'seg_embed': [np.float32(10.), np.uint8(0)],
'gamma': [np.float32(10.), np.uint8(0)],
'beta': [np.float32(10.), np.uint8(0)],
'layernorm_out': [np.float32(10.), np.uint8(0)],
'mask_index_out': [np.float32(10.), np.uint8(0)],
'input_ids': [np.float32(10.), np.uint8(0)],
}
self.qlinear_test(model, q_config, quantize_params, ['EmbedLayerNormalization'])
self.qdq_test(model, q_config, quantize_params, ['EmbedLayerNormalization'])
def test_concat_reshape_pooling(self):
model = build_model()
q_config = {'Reshape':self.q_config, 'conv1':self.q_config, 'conv2':self.q_config, \
'Concat':self.q_config, 'AveragePool':self.q_config}
quantize_params = {'input': [np.float32(10.), np.uint8(0)],
'conv1_weight': [np.float32(10.), np.uint8(0)],
'conv1_output': [np.float32(10.), np.uint8(0)],
'conv2_weight': [np.float32(10.), np.uint8(0)],
'conv2_output': [np.float32(10.), np.uint8(0)],
'concat_output': [np.float32(10.), np.uint8(0)],
'avg_output': [np.float32(10.), np.uint8(0)],
'shape': [np.float32(10.), np.uint8(0)],
'reshape_output': [np.float32(10.), np.uint8(0)]}
quantizable_op_types = ['Reshape', 'Conv', 'Concat', 'AveragePool']
self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
q_config = {'Reshape':self.q_config, 'conv1':'fp32', 'conv2':self.q_config, \
'Concat':self.q_config, 'AveragePool':self.q_config}
self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
q_config = {'Reshape':self.q_config, 'conv1':'fp32', 'conv2':'fp32', \
'Concat':self.q_config, 'AveragePool':self.q_config}
self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
q_config = {'Reshape':self.q_config, 'conv1':self.q_config, 'conv2':self.q_config, \
'Concat':self.q_config, 'AveragePool':'fp32'}
self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
quantize_params = {'input': [np.float32(10.), np.uint8(0)],
'conv1_weight': [np.float32(10.), np.uint8(0)],
'conv1_output': [np.float32(10.), np.uint8(0)],
'conv2_weight': [np.float32(10.), np.uint8(0)],
'conv2_output': [np.float32(10.), np.uint8(0)],
'concat_output': [np.float32(10.), np.uint8(0)],
'avg_output': [np.float32(10.), np.uint8(0)],
'shape': [np.float32(10.), np.uint8(0)],
'reshape_output': [np.float32(10.), np.uint8(0)]}
q_config = {'Reshape':self.q_config, 'conv1':self.q_config, 'conv2':self.q_config, \
'Concat':self.q_config, 'AveragePool':self.q_config}
self.qlinear_test(model, q_config, quantize_params, quantizable_op_types)
self.qdq_test(model, q_config, quantize_params, quantizable_op_types)
def test_conv(self):
for op in ['Conv', 'FusedConv']:
A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5, 1])
B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 3, 3, 1])
C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 5, 5, 1])
D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 1])
conv_node = onnx.helper.make_node(op, ['A', 'B', 'C'], ['D'],
name=op,
kernel_shape=[3, 3],
pads=[1, 1, 1, 1])
graph = helper.make_graph([conv_node], 'test_graph_1', [A, B, C], [D])
model = helper.make_model(graph)
q_config = {op: self.q_config}
quantize_params = {"A": [np.float32(10.), np.uint8(0)],
"B": [np.float32(10.),
|
np.uint8(0)
|
numpy.uint8
|
"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
import sys
import threading
import cv2
import imutils
import matplotlib.pyplot as plt
import numpy as np
import pyqtgraph.opengl as gl
import qimage2ndarray
from PyQt5 import uic, QtCore
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QMainWindow
from cv2 import aruco
from ubidots import ApiClient
class CustomTextItem(gl.GLGraphicsItem.GLGraphicsItem):
"""
This class creates a custom text element for a GLViewWidget object
"""
def __init__(self, gl_view_widget, x, y, z, text, color=QColor(0, 255, 0)):
gl.GLGraphicsItem.GLGraphicsItem.__init__(self)
self.gl_view_widget = gl_view_widget
self.x = x
self.y = y
self.z = z
self.text = text
self.color = color
def set_x(self, x):
self.x = x
self.update()
def set_y(self, y):
self.y = y
self.update()
def set_z(self, z):
self.z = z
self.update()
def set_text(self, text):
self.text = text
self.update()
def set_color(self, color):
self.color = color
self.update()
def update_object(self):
self.update()
def paint(self):
self.gl_view_widget.qglColor(self.color)
self.gl_view_widget.renderText(self.x, self.y, self.z, self.text)
class Window(QMainWindow):
"""
This is the main class
"""
def __init__(self):
super(Window, self).__init__()
# Load .ui file
uic.loadUi('EP_1_1.ui', self)
# Setup
self.camera_matrix = np.loadtxt('cameraMatrix.txt', delimiter=',')
self.camera_distortion = np.loadtxt('cameraDistortion.txt', delimiter=',')
self.parameters = aruco.DetectorParameters_create()
self.parameters.adaptiveThreshConstant = 10
# System variables
self.source_capture = None
self.loop_running = False
self.opengl_updater_running = False
self.aruco_dictionary = None
self.points_surface = gl.GLScatterPlotItem(pos=np.array([[0, 0, 0]]))
self.text_objects = []
self.text_objects_last = []
self.points_line = gl.GLLinePlotItem()
self.ubidots_points = []
self.ubidots_points_text = []
self.ubidots_points_line = gl.GLLinePlotItem()
self.ubidots_points_surface = gl.GLScatterPlotItem(pos=np.array([[0, 0, 0]]))
self.marker_size = 0
# Connect buttons
self.btn_start.clicked.connect(self.start)
self.btn_stop.clicked.connect(self.stop)
self.btn_retrieve_data.clicked.connect(self.retrieve_data)
# Update and show OpenGL view
self.openGLWidget.addItem(gl.GLAxisItem())
self.openGLWidget.addItem(gl.GLGridItem())
self.openGLWidget.addItem(self.points_surface)
self.openGLWidget.addItem(self.points_line)
for i in range(10):
self.ubidots_points_text.append(CustomTextItem(self.openGLWidget, 0, 0, 0, '', QColor(0, 0, 0, 0)))
self.openGLWidget.addItem(self.ubidots_points_text[i])
self.openGLWidget.addItem(self.ubidots_points_surface)
self.openGLWidget.addItem(self.ubidots_points_line)
# Add camera object
camera_lines_factor = 5
camera_line = gl.GLLinePlotItem(
pos=np.array([[0, 0, 0], [1 * camera_lines_factor, 1 * camera_lines_factor, -0.56 * camera_lines_factor]]),
color=[1, 0, 0, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(
pos=np.array([[0, 0, 0], [1 * camera_lines_factor, 1 * camera_lines_factor, 0.56 * camera_lines_factor]]),
color=[1, 0.5, 0, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(
pos=np.array([[0, 0, 0], [1 * camera_lines_factor, -1 * camera_lines_factor, -0.56 * camera_lines_factor]]),
color=[1, 1, 0, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(
pos=np.array([[0, 0, 0], [1 * camera_lines_factor, -1 * camera_lines_factor, 0.56 * camera_lines_factor]]),
color=[0, 1, 0, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(pos=np.array(
[[1 * camera_lines_factor, 1 * camera_lines_factor, -0.56 * camera_lines_factor],
[1 * camera_lines_factor, -1 * camera_lines_factor, -0.56 * camera_lines_factor]]),
color=[0, 0, 1, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(pos=np.array(
[[1 * camera_lines_factor, 1 * camera_lines_factor, 0.56 * camera_lines_factor],
[1 * camera_lines_factor, -1 * camera_lines_factor, 0.56 * camera_lines_factor]]),
color=[0, 0, 1, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(pos=np.array(
[[1 * camera_lines_factor, 1 * camera_lines_factor, -0.56 * camera_lines_factor],
[1 * camera_lines_factor, 1 * camera_lines_factor, 0.56 * camera_lines_factor]]),
color=[0.5, 0, 1, 1])
self.openGLWidget.addItem(camera_line)
camera_line = gl.GLLinePlotItem(pos=np.array(
[[1 * camera_lines_factor, -1 * camera_lines_factor, -0.56 * camera_lines_factor],
[1 * camera_lines_factor, -1 * camera_lines_factor, 0.56 * camera_lines_factor]]),
color=[0.5, 0, 1, 1])
self.openGLWidget.addItem(camera_line)
# Timer for removing / appending new text objects to the openGLWidget
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_text)
self.timer.start(100)
# Show GUI
self.show()
def update_text(self):
if not self.text_objects_last == self.text_objects:
# Remove and then add all items if they changed
if self.text_objects_last is not None:
for text_object in self.text_objects_last:
self.openGLWidget.removeItem(text_object)
if self.text_objects is not None:
for text_object in self.text_objects:
self.openGLWidget.addItem(text_object)
self.text_objects_last = self.text_objects.copy()
# noinspection PyBroadException
def retrieve_data(self):
try:
ubidots_api = ApiClient(token=self.api_key.text())
variables = ubidots_api.get_variables()
variables_str = str(variables).replace('[', '').replace(']', '').replace(' ', '').split(',')
self.ubidots_points = []
for point_index in range(10):
    variable = variables[variables_str.index('point_' + str(point_index))]
    position = str(variable.get_values(1)[0]['context']['position']).replace('\"', '')
    self.ubidots_points.append(list(map(float, position.split(','))))
except:
print('Error reading Ubidots data!')
def start(self):
# Source capture
if self.radio_source_video.isChecked():
# Source from video file
self.source_capture = cv2.VideoCapture(self.line_source_video.text())
else:
# Source from camera
if self.check_source_camera_dshow.isChecked():
self.source_capture = cv2.VideoCapture(self.spin_source_camera_id.value(), cv2.CAP_DSHOW)
else:
self.source_capture = cv2.VideoCapture(self.spin_source_camera_id.value())
self.source_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
self.source_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
self.source_capture.set(cv2.CAP_PROP_AUTO_WB, 0)
self.source_capture.set(cv2.CAP_PROP_AUTOFOCUS, 0)
self.source_capture.set(cv2.CAP_PROP_FOCUS, 0)
# Define dictionary
self.aruco_dictionary = cv2.aruco.Dictionary_get(self.spin_dictionary.value())
# Define marker size
self.marker_size = self.spin_marker_size.value()
# Start main cycle as thread
self.loop_running = True
thread = threading.Thread(target=self.opencv_loop)
thread.start()
def stop(self):
# Stop main cycle
self.loop_running = False
# Release captures
if self.source_capture is not None:
self.source_capture.release()
# Destroy OpenCV windows
cv2.destroyAllWindows()
def opencv_loop(self):
while self.loop_running:
# Read both frames
source_ret, source_frame = self.source_capture.read()
# Check for both frames
if source_ret:
destination_frame = source_frame.copy()
gray_frame = cv2.cvtColor(source_frame, cv2.COLOR_BGR2GRAY)
# Detect ARUCO markers
markers_corners, marker_ids, rejected_candidates = \
cv2.aruco.detectMarkers(gray_frame, self.aruco_dictionary,
parameters=self.parameters,
cameraMatrix=self.camera_matrix,
distCoeff=self.camera_distortion)
# Sort markers
if marker_ids is not None:
ids_array = np.array([item[0] for item in marker_ids])
ids_permut = ids_array.argsort()
marker_ids = marker_ids[ids_permut]
markers_corners_sorted = markers_corners.copy()
for i in range(len(ids_permut)):
markers_corners_sorted[i] = markers_corners[ids_permut[i]]
markers_corners = markers_corners_sorted
# Draw detected markers
aruco.drawDetectedMarkers(destination_frame, markers_corners, marker_ids)
estimates_points = []
if marker_ids is not None:
# Estimate markers position
for marker_corners in markers_corners:
ret = aruco.estimatePoseSingleMarkers(marker_corners, self.marker_size, self.camera_matrix,
self.camera_distortion)
rvec, tvec = ret[0][0, 0, :], ret[1][0, 0, :]
marker_x = tvec[0]
marker_y = tvec[1]
marker_z = tvec[2]
estimates_points.append([marker_z, -marker_x, -marker_y])
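# Annotation (assumption about intent, not an original comment): tvec comes back in the
# OpenCV camera frame (x right, y down, z forward); re-ordering it to [z, -x, -y] places
# the marker in the GLViewWidget scene with the camera looking along +x and "up" along +z,
# matching the camera wireframe drawn in __init__.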
# Create or remove text objects
while len(self.text_objects) < len(marker_ids):
self.text_objects.append(CustomTextItem(self.openGLWidget, 0, 0, 0, '', QColor(255, 0, 0)))
while len(self.text_objects) > len(marker_ids):
del self.text_objects[-1]
if len(estimates_points) > 1:
# Calculate color map
data_list = np.array(np.array(range(len(estimates_points))))
cmap = plt.get_cmap('hsv')
min_data = np.min(data_list)
|
import os, sys
import dill as pickle
import tracemalloc
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import configuration as config
import warnings
warnings.filterwarnings("ignore")
theory_base = '/home/cblakemore/opt_lev_analysis/gravity_sim/results'
sim = '7_6um-gbead_1um-unit-cells_master/'
# sim = '15um-gbead_1um-unit-cells_close_morelamb/'
theory_data_dir = os.path.join(theory_base, sim)
tracemalloc.start()
gfuncs_class = gu.GravFuncs(theory_data_dir)
current, peak = tracemalloc.get_traced_memory()
print(f"Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB")
tracemalloc.start()
lambdas = gfuncs_class.lambdas
rbead = gfuncs_class.rbead * 1e6
sep = 3.0 # um
noise = 7.0e-20 # N/rt(Hz)
int_time = 3e6 # s
save_base = '/home/cblakemore/opt_lev_analysis/scripts/sense_plot/projections/'
proj_name = 'attractorv2_rbead{:0.1f}um_sep{:0.1f}um_noise{:1.0e}NrtHz_int{:1.0e}s'\
.format(rbead, sep, noise, int_time)
proj_name = proj_name.replace('.', '_')
save_filename = os.path.join(save_base, proj_name+'.txt')
print(save_filename)
############################################################################
############################################################################
############################################################################
posvec = np.linspace(-50.0, 50.0, 100)
ones = np.ones_like(posvec)
pts = np.stack(((sep+rbead)*ones, posvec, 0.0*ones), axis=-1)
alphas = []
for yukind, yuklambda in enumerate(lambdas):
yukforce = gfuncs_class.yukfuncs[0][yukind](pts*1.0e-6)
diff = np.max(yukforce) - np.min(yukforce)
alpha = noise * (1.0 / np.sqrt(int_time)) / diff
alphas.append(alpha)
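# Annotation (my reading of the loop above): for each Yukawa range lambda, the projected
# sensitivity is the force noise integrated down over the measurement time divided by the
# peak-to-peak Yukawa signal along the scan, i.e.
#   alpha ~ noise / sqrt(int_time) / (max(F_yuk) - min(F_yuk))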
plt.loglog(lambdas, alphas)
plt.show()
outarr = np.array([lambdas, alphas]).T
np.savetxt(save_filename, outarr, delimiter=',')
|
# -*- coding: utf-8 -*-
"""
pysteps.extrapolation.semilagrangian
====================================
Implementation of the semi-Lagrangian method described in :cite:`GZ2002`.
.. autosummary::
:toctree: ../generated/
extrapolate
"""
import time
import warnings
import numpy as np
import scipy.ndimage.interpolation as ip
def extrapolate(
precip,
velocity,
timesteps,
outval=np.nan,
xy_coords=None,
allow_nonfinite_values=False,
vel_timestep=1,
**kwargs,
):
"""Apply semi-Lagrangian backward extrapolation to a two-dimensional
precipitation field.
Parameters
----------
precip: array-like or None
Array of shape (m,n) containing the input precipitation field. All
values are required to be finite by default. If set to None, only the
displacement field is returned without interpolating the inputs. This
requires that return_displacement is set to True.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the m*n
advection field. All values are required to be finite by default.
timesteps: int or list of floats
If timesteps is integer, it specifies the number of time steps to
extrapolate. If a list is given, each element is the desired
extrapolation time step from the current time. The elements of the list
are required to be in ascending order.
outval: float, optional
Optional argument for specifying the value for pixels advected from
outside the domain. If outval is set to 'min', the value is taken as
the minimum value of precip.
Default: np.nan
xy_coords: ndarray, optional
Array with the coordinates of the grid dimension (2, m, n ).
* xy_coords[0]: x coordinates
* xy_coords[1]: y coordinates
By default, the *xy_coords* are computed for each extrapolation.
allow_nonfinite_values: bool, optional
If True, allow non-finite values in the precipitation and advection
fields. This option is useful if the input fields contain a radar mask
(i.e. pixels with no observations are set to nan).
Other Parameters
----------------
displacement_prev: array-like
Optional initial displacement vector field of shape (2,m,n) for the
extrapolation.
Default: None
n_iter: int
Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
the integration is done using the midpoint rule. Otherwise, the advection
vectors are taken from the starting point of each interval.
Default: 1
return_displacement: bool
If True, return the displacement between the initial input field and
the one obtained by integrating along the advection field.
Default: False
vel_timestep: float
The time step of the velocity field. It is assumed to have the same
unit as the timesteps argument. Applicable if timesteps is a list.
Default: 1.
interp_order: int
The order of interpolation to use. Default: 1 (linear). Setting this
to 0 (nearest neighbor) gives the best computational performance but
may produce visible artefacts. Setting this to 3 (cubic) gives the best
ability to reproduce small-scale variability but may significantly
increase the computation time.
Returns
-------
out: array or tuple
If return_displacement=False, return a time series extrapolated fields
of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
extrapolated fields and the integrated trajectory (displacement) along
the advection field.
References
----------
:cite:`GZ2002`
"""
if precip is not None and precip.ndim != 2:
raise ValueError("precip must be a two-dimensional array")
if velocity.ndim != 3:
raise ValueError("velocity must be a three-dimensional array")
if not allow_nonfinite_values:
if precip is not None and np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if precip is not None and np.all(~np.isfinite(precip)):
raise ValueError("precip contains only non-finite values")
if np.all(~np.isfinite(velocity)):
raise ValueError("velocity contains only non-finite values")
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# defaults
verbose = kwargs.get("verbose", False)
displacement_prev = kwargs.get("displacement_prev", None)
n_iter = kwargs.get("n_iter", 1)
return_displacement = kwargs.get("return_displacement", False)
interp_order = kwargs.get("interp_order", 1)
map_coordinates_mode = kwargs.get("map_coordinates_mode", "constant")
if precip is None and not return_displacement:
raise ValueError("precip is None but return_displacement is False")
if "D_prev" in kwargs.keys():
warnings.warn(
"deprecated argument D_prev is ignored, use displacement_prev instead",
)
# if interp_order > 1, apply separate masking to preserve nan and
# non-precipitation values
if precip is not None and interp_order > 1:
minval = np.nanmin(precip)
mask_min = (precip > minval).astype(float)
if allow_nonfinite_values:
mask_finite = np.isfinite(precip)
|
import h5py
import pandas as pd
import json
import cv2
import os, glob
from pylab import *
import numpy as np
import operator
from functools import reduce
from configparser import ConfigParser, MissingSectionHeaderError, NoOptionError
import errno
import simba.rw_dfs
def importSLEAPbottomUP(inifile, dataFolder, currIDList):
def func(name, obj):
attr = list(obj.attrs.items())
if name == 'metadata':
jsonList = (attr[1][1])
jsonList = jsonList.decode('utf-8')
final_dictionary = json.loads(jsonList)
final_dictionary = dict(final_dictionary)
return final_dictionary
configFile = str(inifile)
config = ConfigParser()
try:
config.read(configFile)
except MissingSectionHeaderError:
print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
projectPath = config.get('General settings', 'project_path')
animalIDs = config.get('Multi animal IDs', 'id_list')
currIDList = animalIDs.split(",")
filesFound = glob.glob(dataFolder + '/*.slp')
videoFolder = os.path.join(projectPath, 'videos')
outputDfFolder = os.path.join(projectPath, 'csv', 'input_csv')
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
animalsNo = len(currIDList)
bpNamesCSVPath = os.path.join(projectPath, 'logs', 'measures', 'pose_configs', 'bp_names', 'project_bp_names.csv')
poseEstimationSetting = config.get('create ensemble settings', 'pose_estimation_body_parts')
print('Converting .slp into csv dataframes...')
csvPaths = []
for filename in filesFound:
print('Processing ' + str(os.path.basename(filename)) + '...')
f = h5py.File(filename, 'r')
bpNames, orderVarList, OrderedBpList, MultiIndexCol, dfHeader, csvFilesFound, colorList, xy_heads, bp_cord_names, bpNameList, projBpNameList = [], [], [], [], [], [], [], [], [], [], []
final_dictionary = f.visititems(func)
try:
videoName = os.path.basename(final_dictionary['provenance']['video.path']).replace('.mp4', '')
except KeyError:
videoName = filename.replace('.slp', '')
print('Warning: The video name could not be found in the .SLP meta-data table')
print('SimBA therefore gives the imported CSV the same name as the SLP file.')
print('To be sure that SimBA\'s .slp import function works, make sure the .slp file and the associated video file have the same file name - e.g., "Video1.mp4" and "Video1.slp" - before importing the videos and SLP files to SimBA.')
savePath = os.path.join(outputDfFolder, videoName + '.csv')
for bpName in final_dictionary['nodes']: bpNames.append((bpName['name']))
skeletonOrder = final_dictionary['skeletons'][0]['nodes']
for orderVar in skeletonOrder: orderVarList.append((orderVar['id']))
for indexNo in orderVarList: OrderedBpList.append(bpNames[indexNo])
with h5py.File(filename, 'r') as f:
frames = f['frames'][:]
instances = f['instances'][:]
predicted_points = f['pred_points'][:]
predicted_points = np.reshape(predicted_points, (predicted_points.size, 1))
### CREATE COLUMN IN DATAFRAME
for animal in range(len(currIDList)):
for bp in OrderedBpList:
colName1, colName2, colName3 = str('Animal_' + str(animal+1) + '_' + bp + '_x'), ('Animal_' + str(animal+1) + '_' + bp + '_y'), ('Animal_' + str(animal+1) + '_' + bp + '_p')
xy_heads.extend((colName1, colName2))
bp_cord_names.append('_' + bp + '_x')
bp_cord_names.append('_' + bp + '_y')
bpNameList.extend((colName1, colName2, colName3))
dfHeader.extend((colName1, colName2, colName3))
if poseEstimationSetting == 'user_defined':
config.set("General settings", "animal_no", str(animalsNo))
with open(inifile, "w+") as f:
config.write(f)
f.close()
bpNameListGrouped = [xy_heads[x:x + len(OrderedBpList) * 2] for x in range(0, len(xy_heads) - 2, len(OrderedBpList) * 2)]
print(len(dfHeader))
dataDf = pd.DataFrame(columns=dfHeader)
### COUNT ANIMALS IN EACH FRAME
animalsinEachFrame = []
framesList = [l.tolist() for l in frames]
for row in framesList:
noAnimals = row[4] - row[3]
animalsinEachFrame.append(noAnimals)
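# Annotation (my reading of the .slp HDF5 layout, not an original comment): each row of
# `frames` stores the start and end instance indices for that frame in columns 3 and 4,
# so the per-frame animal count is just their difference.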
noFrames = int(len(frames))
frameCounter, instanceCounter, startCurrFrame = 0, 0, 0
for frame in range(noFrames):
animalsinCurrFrame = animalsinEachFrame[frame]
endCurrFrame = startCurrFrame + (len(OrderedBpList) * animalsinCurrFrame)
currStartAnimal, currEndAnimal = 0, len(OrderedBpList)
currFrameNp = predicted_points[startCurrFrame:endCurrFrame]
currRow = []
for animal in range(animalsinCurrFrame):
currAnimalNp = currFrameNp[currStartAnimal:currEndAnimal]
currTrackID = int(instances[instanceCounter][4])
for bp in currAnimalNp:
currX, currY, currP = bp[0][0], bp[0][1], bp[0][4]
currRow.extend((currX,currY,currP))
currRow.append(currTrackID)
currNpRow = np.array_split(currRow, animalsinCurrFrame)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from onnx.defs import ONNX_DOMAIN, AI_ONNX_PREVIEW_TRAINING_DOMAIN
from ..base import Base
from . import expect
def apply_adagrad(r, t, x, g, h, norm_coefficient, epsilon, decay_factor): # type: ignore
# Compute adjusted learning-rate.
r_ = r / (1 + t * decay_factor)
# Add gradient of regularization term.
g_regularized = norm_coefficient * x + g
# Update squared accumulated gradient.
h_new = h + g_regularized * g_regularized
# Compute ADAGRAD's gradient scaling factors
h_sqrt = np.sqrt(h_new) + epsilon
# Apply ADAGRAD update rule.
x_new = x - r_ * g_regularized / h_sqrt
return (x_new, h_new)
class Adagrad(Base):
@staticmethod
def export_adagrad(): # type: () -> None
# Define operator attributes.
norm_coefficient = 0.001
epsilon = 1e-5
decay_factor = 0.1
# Create operator.
node = onnx.helper.make_node('Adagrad',
inputs=['R', 'T', 'X', 'G', 'H'],
outputs=['X_new', 'H_new'],
norm_coefficient=norm_coefficient,
epsilon=epsilon,
decay_factor=decay_factor,
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN
)
# Define operator inputs.
r = np.array(0.1, dtype=np.float32) # scalar
t = np.array(0, dtype=np.int64) # scalar
x = np.array([1.0], dtype=np.float32)
g = np.array([-1.0], dtype=np.float32)
h = np.array([2.0], dtype=np.float32)
# Compute expected outputs of Adagrad.
x_new, h_new = apply_adagrad(r, t, x, g, h,
norm_coefficient, epsilon, decay_factor)
# Check results.
expect(node, inputs=[r, t, x, g, h],
outputs=[x_new, h_new], name='test_adagrad',
opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])
@staticmethod
def export_adagrad_multiple(): # type: () -> None
# Define operator attributes.
norm_coefficient = 0.001
epsilon = 1e-5
decay_factor = 0.1
node = onnx.helper.make_node('Adagrad',
inputs=['R', 'T', 'X1', 'X2',
'G1', 'G2', 'H1', 'H2'],
outputs=['X1_new', 'X2_new',
'H1_new', 'H2_new'],
norm_coefficient=norm_coefficient,
epsilon=epsilon,
decay_factor=decay_factor,
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN
)
# Define operator inputs.
r = np.array(0.1, dtype=np.float32) # scalar
t = np.array(0, dtype=np.int64) # scalar
x1 = np.array([1.0], dtype=np.float32)
g1 = np.array([-1.0], dtype=np.float32)
h1 = np.array([2.0], dtype=np.float32)
x2 = np.array([1.0, 2.0], dtype=np.float32)
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from sklearn.metrics import confusion_matrix
# # Problem 1 (K-means)
# In[2]:
pi = [0.2,0.5,0.3]
num_obs = 500
# In[3]:
mean = np.array([[0,0],[3,0],[0,3]])
cov = np.array([[1,0],[0,1]])
data= []
label = []
for _ in range(num_obs):
gaus_index = np.random.choice(3,p=pi)
label.append(gaus_index)
x,y = (np.random.multivariate_normal(mean[gaus_index], cov, 1).T)
data.append([x[0],y[0]])
data = np.array(data)
# In[5]:
scatter = plt.scatter(data[:,0],data[:,1],c=label)
plt.scatter(mean[:,0],mean[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Original Distribution of points")
plt.show()
# In[6]:
def K_Means(data,K,num_iter=20,plot=False,show_values=False):
num_iter = num_iter
num_obs = len(data)
c = np.zeros(num_obs)
mu =np.array(random.sample(list(data),K))
if(show_values):
print("Initialized cluster centers are:")
print(mu)
if(plot):
plt.scatter(data[:,0],data[:,1],c=c)
plt.scatter(mu[:,0],mu[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.suptitle("Distribution of points (colored by clusters)")
plt.title("(Initially assigning to one cluster)")
plt.show()
objective = []
for _ in range(num_iter):
for i in range(num_obs):
temp = [np.linalg.norm(data[i]-val)**2 for val in mu]
c[i] = (np.argmin(temp))
objective.append(compute_KMeans_Objective(data,c,mu))
for i in range(len(mu)):
temp = [data[index] for index in range(num_obs) if c[index] == i]
mu[i] = (np.mean(temp,axis=0))
objective.append(compute_KMeans_Objective(data,c,mu))
if(plot):
plt.scatter(data[:,0],data[:,1],c=c)
plt.scatter(mu[:,0],mu[:,1],c="red")
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Distribution of points (colored by clusters)")
plt.show()
if(show_values):
print("The learned cluster centers are:")
print(mu)
return [c,mu,objective]
# In[7]:
def compute_KMeans_Objective(d,labels,centers):
loss = 0
for i in range(len(d)):
for j in range(len(centers)):
if(labels[i]==j):
loss += np.linalg.norm(d[i]-centers[j])**2
return loss
# In[8]:
Ks = [2,3,4,5]
Cs = []
MUs = []
OBJs = []
for k in Ks:
plot= k == 3 or k==5
c,mu,obj = K_Means(data,k,num_iter=20,plot=plot)
Cs.append(c)
MUs.append(mu)
OBJs.append(obj)
# In[9]:
for i in range(len(OBJs)):
obj = OBJs[i]
obj1 = [obj[i] for i in range(len(obj)) if i%2==0]
obj2 = [obj[i] for i in range(len(obj)) if i%2!=0]
plt.plot([x * .5 for x in range(1,41)],obj, color ="green")
plt.plot([x * .5 for x in range(1,41,2)],obj1,"o",color="blue",mfc='none')
plt.plot([x * .5 for x in range(2,41,2)],obj2,"o",color="red",mfc='none')
plt.xticks(range(0,21))
plt.xlabel("Number of Iterations")
plt.ylabel("Objective Function")
plt.title("Value of the Objective Function for K-Means for K = " + str(Ks[i]))
plt.show()
# # Problem 2 (Bayes classifier revisited)
# In[3]:
X_train = pd.read_csv("Prob2_Xtrain.csv",header=None).values
X_test = pd.read_csv("Prob2_Xtest.csv",header=None).values
y_train = pd.read_csv("Prob2_ytrain.csv",header=None).values
y_test = pd.read_csv("Prob2_ytest.csv",header=None).values
# In[4]:
y_train = np.array([y_train[i][0] for i in range(len(y_train))])
y_test = np.array([y_test[i][0] for i in range(len(y_test))])
# In[5]:
X_train_0 = X_train[y_train == 0]
X_train_1 = X_train[y_train == 1]
# In[6]:
data = [X_train_0,X_train_1]
# In[7]:
def Naive_Bayes(data, pi, mu , sigma, class_priors,num_classes=2):
y_pred = np.zeros(len(data))
K = len(pi[0])
for i in range(len(data)):
prob = np.zeros(num_classes)
class_index = range(num_classes)
for index in class_index:
class_cond_prob = 0
for k in range(K):
N = multivariate_normal.pdf(data[i],mean=mu[index][k],cov=sigma[index][k])
class_cond_prob+=((pi[index][k])*N)
prob[index] = class_priors[index] * class_cond_prob
label = np.argmax(prob)
y_pred[i] = label
return y_pred
# In[8]:
def EM_GMM(data,k = 3,num_iter = 30,num_run = 10,compute_objective=True):
num_obs = len(data)
Objectives = []
best_phi = np.zeros((num_obs,k))
best_pi = np.full((k,1),1/k)
best_mu = np.random.multivariate_normal(np.mean(data,axis=0), np.cov(data.T), k)
best_Sigma = [np.cov(data.T)] * k
best_objective=-1
for run in range(num_run):
phi = np.zeros((num_obs,k))
pi = np.full((k,1),1/k)
mu = np.random.multivariate_normal(np.mean(data,axis=0), np.cov(data.T), k)
Sigma = np.full((k,data[0].shape[0],data[0].shape[0]),np.cov(data.T))
print("starting run: " + str(run))
objective = []
for _ in range(num_iter):
for i in range(num_obs):
for j in range(k):
phi[i][j] = (pi[j] * multivariate_normal.pdf(data[i],mean=mu[j],cov=Sigma[j],allow_singular=True))
denominator = sum(phi[i])
phi[i] = (phi[i]/denominator)
nk = np.sum(phi,axis=0)
pi = (nk/num_obs)
numerator_mu = np.zeros((k,data[0].shape[0]))
numerator_Sigma = np.zeros((k,data[0].shape[0],data[0].shape[0]))
for i in range(k):
for j in range(num_obs):
numerator_mu[i] += (phi[j][i] * data[j])
mu[i] = numerator_mu[i] / nk[i]
for j in range(num_obs):
temp = (data[j] - mu[i]).reshape(data[j].shape[0],1)
numerator_Sigma[i] += (phi[j][i] * np.matmul(temp,temp.T))
Sigma[i] = numerator_Sigma[i] / nk[i]
if compute_objective:
L = 0
log_pi = np.where(pi > np.exp(-20), np.log(pi), -20)
for i in range(num_obs):
for j in range(k):
M = multivariate_normal.pdf(data[i],mean=mu[j],cov=Sigma[j],allow_singular=True)
if(M<np.exp(-20)):
log_M = -20
else:
log_M = np.log(M)
N = log_pi[j]
L+=(phi[i][j]*(N + log_M))
objective.append(L)
if compute_objective:
print("Objective value for " + str(run) + " run is: " + str(objective[-1]))
Objectives.append(objective)
if(objective[-1]>=best_objective):
best_pi=pi
best_mu=mu
best_Sigma=Sigma
best_phi=phi
best_objective=objective[-1]
print("best objective for this run is: " + str(best_objective))
return [Objectives,best_mu,best_pi,best_Sigma,best_phi]
# In[9]:
num_class = 2
class_priors = np.zeros(num_class)
for i in range(num_class):
class_priors[i] = len(data[i])
class_priors /= (np.sum(class_priors))
# In[9]:
print("Starting EM for class 0")
EM0 = EM_GMM(data[0],k = 3,num_iter = 30,num_run = 10,compute_objective=True)
# In[10]:
print("Starting EM for class 1")
EM1 = EM_GMM(data[1],k = 3,num_iter = 30,num_run = 10,compute_objective=True)
EM = [EM0,EM1]
# In[12]:
for num in range(num_class):
plt.figure(figsize=(7,7))
for i in range(len(EM[num][0])):
plt.plot(range(5,31),EM[num][0][i][4:],label=str(i+1))
plt.xlabel("Number of iterations")
plt.ylabel("Log Joint Likelihood ")
plt.suptitle("For Class: " + str(num))
plt.title("Log marginal objective function for a 3-Gaussian mixture model over 10 different runs and for iterations 5 to 30 ")
plt.legend()
plt.show()
# In[13]:
MU = np.array([EM[0][1],EM[1][1]])
PI = np.array([EM[0][2],EM[1][2]])
SIGMA = np.array([EM[0][3],EM[1][3]])
predictions = Naive_Bayes(data = X_test,
pi = PI,
mu = MU,
sigma = SIGMA,
class_priors = class_priors,
num_classes = num_class)
conf_mat = confusion_matrix(y_true = y_test, y_pred = predictions)
print("The results for 3- Gaussian Mixture Model")
print(pd.DataFrame(conf_mat))
accuracy = round((conf_mat[0][0] + conf_mat[1][1])/np.sum(conf_mat),2)
print("Accuracy: " + str(accuracy))
# In[10]:
K = [1,2,4]
for k in K:
print(k)
print("Starting EM for class 0")
EM0 = EM_GMM(data[0],k = k,num_iter = 30,num_run = 10)
print("Starting EM for class 1")
EM1 = EM_GMM(data[1],k = k,num_iter = 30,num_run = 10)
EM1 = [EM0,EM1]
MU = np.array([EM1[0][1],EM1[1][1]])
PI = np.array([EM1[0][2],EM1[1][2]])
SIGMA = np.array([EM1[0][3],EM1[1][3]])
predictions = Naive_Bayes(data = X_test,
pi = PI,
mu = MU,
sigma = SIGMA,
class_priors = class_priors,
num_classes = num_class)
conf_mat = confusion_matrix(y_true = y_test, y_pred = predictions)
print("The results for " +str(k)+"- Gaussian Mixture Model")
print(pd.DataFrame(conf_mat))
accuracy = round((conf_mat[0][0] + conf_mat[1][1])/np.sum(conf_mat),2)
print("Accuracy: " + str(accuracy))
# # Problem 3 (Matrix factorization)
# In[4]:
def RMSE(y_predicted,y_test):
return np.sqrt(np.sum((y_predicted - y_test)**2)/len(y_test))
# In[5]:
ratings_train = pd.read_csv("Prob3_ratings.csv",header=None,names=["user_id","movie_id","ratings"])
ratings_test = pd.read_csv("Prob3_ratings_test.csv",header=None,names=["user_id","movie_id","ratings"])
# In[6]:
list_of_movies = []
f = open("Prob3_movies.txt","r")
for line in f:
list_of_movies.append(line.strip())
# In[8]:
sigma2 = 0.25
d = 10
lambda_val = 1
num_iter = 100
num_runs = 10
# In[9]:
SigmaUi = {}
SigmaVj = {}
user_mapping = {}
movie_mapping = {}
user_index = 0
movie_index = 0
for i in list(sorted(ratings_train["user_id"].unique())):
user_mapping[i] = user_index
dictui={user_index:[]}
SigmaUi.update(dictui)
user_index+=1
for i in list(sorted(ratings_train["movie_id"].unique())):
movie_mapping[i] = movie_index
dictui={movie_index:[]}
SigmaVj.update(dictui)
movie_index+=1
# In[10]:
num_users = len(user_mapping)
num_items = len(movie_mapping)
# In[11]:
M = ratings_train.pivot(index="user_id",columns="movie_id",values="ratings")
M.index = M.index.map(user_mapping)
M.columns = M.columns.map(movie_mapping)
M_array = np.array(M)
# In[12]:
Sigma = [tuple(pair) for pair in np.argwhere(M.notnull().values).tolist()]
# In[13]:
for i,j in Sigma:
SigmaUi[i].append(j)
SigmaVj[j].append(i)
# In[14]:
ratings_test["user_id"] = ratings_test["user_id"].map(user_mapping)
ratings_test["movie_id"] = ratings_test["movie_id"].map(movie_mapping)
new_test = ratings_test.dropna()
test_users_list = [int(val) for val in list(new_test["user_id"])]
test_items_list = [int(val) for val in list(new_test["movie_id"])]
y_test = new_test["ratings"].values
# In[432]:
best_log_likelihood = 100000
likelihoods = []
RMSES=[]
best_U = np.zeros([num_users,d])
best_V = np.zeros([num_items,d])
for num in range(num_runs):
U = np.random.multivariate_normal([0]*d, lambda_val**-1 * np.identity(d), num_users)
V = np.random.multivariate_normal([0]*d, lambda_val**-1 * np.identity(d), num_items)
log_likelihood = []
for _ in range(num_iter):
u_norm = 0
v_norm = 0
temp = 0
for i in range(num_users):
first = (lambda_val * sigma2 * np.identity(d))
vj = V[SigmaUi[i]]
second = np.matmul(vj.T, vj)
first_inv = np.linalg.inv(first + second)
Mij = M_array[i,SigmaUi[i]]
second_term = np.matmul(vj.T,Mij)
update = np.matmul(first_inv,second_term)
U[i]= update
u_norm+=np.linalg.norm(U[i])**2
for i in range(num_items):
first = (lambda_val * sigma2 * np.identity(d))
ui = U[SigmaVj[i]]
second = np.matmul(ui.T, ui)
first_inv = np.linalg.inv(first + second)
Mij = M_array[SigmaVj[i],i]
second_term = np.matmul(ui.T,Mij)
update = np.matmul(first_inv,second_term)
V[i]= update
v_norm+=np.linalg.norm(V[i])**2
temp+=np.linalg.norm(Mij - np.matmul(ui,V[i].T))**2
likelihood = -1*((temp*0.5 / sigma2)+ (-lambda_val * u_norm * 0.5) + (-lambda_val * v_norm * 0.5))
log_likelihood.append(likelihood)
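# Annotation (restating the updates above, standard PMF coordinate ascent): each user
# factor is the ridge-regression style solution
#   u_i = (lambda*sigma^2*I + sum_j v_j v_j^T)^(-1) * (sum_j M_ij * v_j)
# taken over the movies j rated by user i, and each movie factor v_j is updated
# symmetrically over the users that rated it.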
likelihoods.append(log_likelihood)
if (best_log_likelihood == 100000) or (log_likelihood[-1] >= best_log_likelihood):
    best_log_likelihood = log_likelihood[-1]
    best_U = U
    best_V = V
print("The best log joint likelihood value till " + str(num+1)+ " run is: " + str(best_log_likelihood))
u = U[test_users_list]
v = V[test_items_list]
z = np.multiply(u,v)
|
"""
Monte Carlo Tree Search for asymmetric trees
CREDITS : <NAME>, Delft University of Technology
"""
import copy
import typing as ty
import collections
import numpy as np
import torch
from ..metas import CombinerAgent
from ..environment.state import CircuitStateDQN
from ..environment.env import step, evaluate
MemoryItem = collections.namedtuple('MemoryItem', ['state', 'reward', 'action', 'next_state', 'done'])
class MCTSAgent(CombinerAgent):
class MCTSState:
"""
State object representing the solution (boolean vector of swaps) as a MCTS node
"""
HYPERPARAM_NOISE_ALPHA = 0.2
HYPERPARAM_PRIOR_FRACTION = 0.25
def __init__(self, state, model, solution=None, r_previous=0, parent_state=None, parent_action=None):
"""
Initialize a new state
"""
self.state: CircuitStateDQN = state
self.model = model
self.parent_state, self.parent_action = parent_state, parent_action
self.r_previous = r_previous
self.num_actions = len(self.state.device.edges)
self.solution: np.ndarray = copy.copy(solution) if solution is not None else \
np.full(self.num_actions, False)
self.rollout_reward = self.rollout() if self.parent_action is not None else 0.0
self.action_mask = np.concatenate([state.device.swappable_edges(
self.solution, self.state.locked_edges, self.state.target_nodes == -1),
np.array([solution is not None or np.any(self.state.locked_edges)])])
self.n_value = torch.zeros(self.num_actions + 1)
self.q_value = torch.zeros(self.num_actions + 1)
self.child_states: ty.List[ty.Optional[MCTSAgent.MCTSState]] = [None for _ in range(self.num_actions + 1)]
model.eval()
with torch.no_grad():
_value, self.priors = self.model(self.state)
self.priors = self.priors.detach().numpy()
self.priors += np.bitwise_not(self.action_mask) * -1e8
self.priors = torch.flatten(torch.tensor(self.priors))
noise = np.random.dirichlet([self.HYPERPARAM_NOISE_ALPHA for _ in self.priors]) * self.action_mask
self.priors = self.HYPERPARAM_PRIOR_FRACTION * self.priors + (1 - self.HYPERPARAM_PRIOR_FRACTION) * noise
def update_q(self, reward, index):
"""
Updates the q-value for the state
:param reward: The obtained total reward from this state
:param index: the index of the action chosen for which the reward was provided
n_value is the number of times a node visited
q_value is the q function
n += 1, w += reward, q = w / n -> this is being implicitly computed using the weighted average
"""
self.q_value[index] = (self.q_value[index] * self.n_value[index] + reward) / (self.n_value[index] + 1)
self.n_value[index] += 1
def select(self, c=1000) -> int:
"""
Select one of the child actions based on UCT rule
"""
n_visits = torch.sum(self.n_value).item()
uct = self.q_value + (self.priors * c * np.sqrt(n_visits + 0.001) / (self.n_value + 0.001))
best_val = torch.max(uct)
best_move_indices: torch.Tensor = torch.where(torch.eq(best_val, uct))[0]
winner: int = np.random.choice(best_move_indices.numpy())
return winner
def choose(self) -> int:
"""
Select one of the child actions based on the best q-value which is allowed
"""
q_real = self.q_value + np.bitwise_not(self.action_mask) * -1e8
best_val = torch.max(q_real)
best_move_indices: torch.Tensor = torch.where(torch.eq(best_val, q_real))[0]
winner: int = np.random.choice(best_move_indices.numpy())
return winner
def rollout(self, num_rollouts=None): # TODO: Benchmark this on 100 rollouts
"""
performs R random rollout, the total reward in each rollout is computed.
returns: mean across the R random rollouts.
"""
if num_rollouts is None:
assert not np.any(np.bitwise_and(self.state.locked_edges, self.solution)), "Bad Action"
next_state, _, _, _ = step(self.solution, self.state)
with torch.no_grad():
self.model.eval()
self.rollout_reward, _priors = self.model(next_state)
return self.rollout_reward.item()
else:
total_reward = 0
for i in range(num_rollouts):
solution = np.copy(self.solution)
while True:
mask = np.concatenate([self.state.device.swappable_edges(solution, self.state.locked_edges),
|
np.array([True])
|
numpy.array
|
import numpy as np
def customTicks(data,
forceMin=np.nan, forceMax=np.nan,
forceMinTick=np.nan, forceMaxTick=np.nan,
desiredNumTicks=5,
displayFirst=True, displayLast=True,
multipleCandidates=[1,2,2.5,5]):
multipleCandidates = np.array(multipleCandidates)
if
|
np.isnan(forceMaxTick)
|
numpy.isnan
|
"""Probe segmentation by convolving with the Haar wavelet.
The basic HaarSeg algorithm:
* Apply the undecimated discrete wavelet transform (UDWT) on the data, using the
Haar wavelet.
* Select a set of detail subbands from the transform {LMIN, LMIN+1, ..., LMAX}.
* Find the local maxima of the selected detail subbands.
* Threshold the maxima of each subband separately, using an FDR thresholding
procedure.
* Unify selected maxima from all the subbands to create a list of significant
breakpoints in the data.
* Reconstruct the segmentation result from the list of significant breakpoints.
HaarSeg segmentation is based on detecting local maxima in the wavelet domain,
using Haar wavelet. The main algorithm parameter is breaksFdrQ, which controls
the sensitivity of the segmentation result. This function supports the optional
use of weights (also known as quality of measurments) and raw measurments. We
recommend using both extentions where possible, as it greatly improves the
segmentation result.
"""
import logging
import math
import numpy as np
import pandas as pd
from scipy import stats
def segment_haar(cnarr, fdr_q):
"""Do segmentation for CNVkit.
Calculate copy number segmentation by HaarSeg
(http://haarseg.r-forge.r-project.org/)
Parameters
----------
cnarr : CopyNumArray
Binned, normalized copy ratios.
fdr_q : float
False discovery rate q-value.
Returns
-------
CopyNumArray
The CBS data table as a CNVkit object.
"""
# Segment each chromosome individually
# ENH - skip large gaps (segment chrom. arms separately)
chrom_tables = [one_chrom(subprobes, fdr_q, chrom)
for chrom, subprobes in cnarr.by_arm()]
segarr = cnarr.as_dataframe(pd.concat(chrom_tables))
segarr.sort_columns()
return segarr
def one_chrom(cnarr, fdr_q, chrom):
logging.debug("Segmenting %s", chrom)
results = haarSeg(cnarr.smooth_log2(),
fdr_q,
W=(cnarr['weight'].values if 'weight' in cnarr
else None))
table = pd.DataFrame({
'chromosome': chrom,
'start': cnarr['start'].values.take(results['start']),
'end': cnarr['end'].values.take(results['end']),
'log2': results['mean'],
'gene': '-',
'probes': results['size'],
})
return table
def variants_in_segment(varr, segment, fdr_q):
if len(varr):
values = varr.mirrored_baf(above_half=True, tumor_boost=True)
results = haarSeg(values, fdr_q,
W=None) # ENH weight by sqrt(DP)
else:
values = pd.Series()
results = None
if results is not None and len(results['start']) > 1:
logging.info("Segmented on allele freqs in %s:%d-%d",
segment.chromosome, segment.start, segment.end)
# Ensure breakpoint locations make sense
# - Keep original segment start, end positions
# - Place breakpoints midway between SNVs, I guess?
# NB: 'results' are indices, i.e. enumerated bins
gap_rights = varr['start'].values.take(results['start'][1:])
gap_lefts = varr['end'].values.take(results['end'][:-1])
mid_breakpoints = (gap_lefts + gap_rights) // 2
starts =
|
np.concatenate([[segment.start], mid_breakpoints])
|
numpy.concatenate
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 18:06:12 2017
@author: duarteocarmo
"""
import numpy as np
from Bacteria_dataLoad import dataLoad
from Bacteria_dataPlot import dataPlot
from Bacteria_dataStatistics import dataStatistics
from Bacteria_displayMenu import displayMenu
print("Welcome to Bacteria Data Analysis! Here are your options:")
menuItems = np.array(["Load Data;", "Filter Data (Or clean filters);", "Display Statistics;", "Generate Plots;", "Quit;"])
filters = np.array(["Filter Growth Rates;", "Select a bacteria;","Filter Temperatures(a bit more);","Delete all filters;"])
bacteriatypes =
|
np.array(["Salmonella enterica;", "Bacillus Cereus;", "Listeria;", "Brochothrix thermosphacta;"])
|
numpy.array
|
import time
import numpy as np
from tqdm import tqdm
import pandas as pd
from theano import tensor as tt
import pymc3 as pm
from run_scripts.load_data import load_traintest_sparsereg
#Laplace prior PyMC3 model
def fit_mcmc_laplace(y,x,B,seed = 100,misspec = False):
with pm.Model() as model:
p = np.shape(x)[1]
#Laplace
b = pm.Gamma('b',alpha = 1,beta = 1)
beta = pm.Laplace('beta',mu = 0, b = b,shape = p)
intercept = pm.Flat('intercept')
if misspec == True:
sigma = pm.HalfNormal("sigma", sigma = 0.02) ## misspec prior
else:
sigma = pm.HalfNormal("sigma", sigma = 1)
obs = pm.Normal('obs',mu = pm.math.dot(x,beta)+ intercept,sigma = sigma,observed=y)
trace = pm.sample(B,random_seed = seed, chains = 4)
beta_post = trace['beta']
intercept_post = trace['intercept'].reshape(-1,1)
sigma_post = trace['sigma'].reshape(-1,1)
b_post = trace['b'].reshape(-1,1)
print(np.mean(sigma_post)) #check misspec.
return beta_post,intercept_post,b_post,sigma_post
#Repeat 50 mcmc runs for different train test splits
def run_sparsereg_mcmc(dataset,misspec = False):
#Repeat over 50 reps
rep = 50
train_frac = 0.7
B = 2000
#Initialize
x,y,x_test,y_test,y_plot,n,d = load_traintest_sparsereg(train_frac,dataset,100)
beta_post = np.zeros((rep,4*B, d))
intercept_post = np.zeros((rep,4*B, 1))
b_post = np.zeros((rep,4*B, 1))
sigma_post = np.zeros((rep,4*B,1))
times = np.zeros(rep)
for j in tqdm(range(rep)):
seed = 100+j
x,y,x_test,y_test,y_plot,n,d = load_traintest_sparsereg(train_frac,dataset,seed)
start = time.time()
beta_post[j],intercept_post[j],b_post[j],sigma_post[j] = fit_mcmc_laplace(y,x,B,seed,misspec)
end = time.time()
times[j] = (end - start)
#Save posterior samples
if misspec == False:
suffix = dataset
else:
suffix = dataset + "_misspec"
print("{}: {} ({})".format(suffix,
|
np.mean(times)
|
numpy.mean
|
from collections import OrderedDict
import numpy as np
np.set_printoptions(precision=8, suppress=True, linewidth=400, threshold=100)
# np.random.seed(0)
import tensorflow_datasets as tfds
import gym
import model_util as util
class DataEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, data_src):
super(DataEnv, self).__init__()
self.data_src = data_src
if data_src == 'shkspr':
ds = tfds.as_numpy(tfds.load('tiny_shakespeare', batch_size=-1)) # \n = done
ds = ds['train']['text'][0]
ds = np.frombuffer(ds, np.uint8)
# done = np.frombuffer(b'.\n', np.uint8)
# ds = ds[ds!=done[1]] # take out newlines
# split = np.asarray(np.nonzero(ds==done[0])[0])+1 # 6960
# ds = ds[:split[-1]]
ds = ds[:,None]
# self.observation_space = gym.spaces.Box(low=0, high=255, shape=(1,), dtype=np.uint8)
space = gym.spaces.Dict()
# space.spaces['timestamp'] = gym.spaces.Box(low=0.0, high=np.inf, shape=(1,), dtype=np.float64)
# space.spaces['data'] = gym.spaces.Discrete(256) # np.int64
space.spaces['data'] = gym.spaces.Box(low=0, high=255, shape=(1,), dtype=np.uint8)
# space.spaces['data'] = gym.spaces.Box(low=0, high=255, shape=(2,), dtype=np.uint8) # combine to latent
self.observation_space = space
space = gym.spaces.Dict()
space.spaces['data'] = gym.spaces.Discrete(256) # np.int64
# space.spaces['data'] = gym.spaces.Box(low=0, high=255, shape=(1,), dtype=np.uint8)
self.action_space = space
self.reward_range = (0.0,1.0)
# TODO split (reshape into batch) image into blocks or pixels to test for spatial autoregression
# if data_src == 'mnist':
# ds = tfds.as_numpy(tfds.load('mnist', batch_size=-1))
# # self.dsl = ds['train']['label'][:,None]
# ds = ds['train']['image']
# # train_obs, test_obs = tf.image.resize(train_obs, (16,16), method='nearest').numpy(), tf.image.resize(test_obs, (16,16), method='nearest').numpy()
# # self.action_space = gym.spaces.Discrete(10)
# # self.observation_space = gym.spaces.Box(low=0, high=255, shape=list(ds.shape)[1:], dtype=np.uint8)
# self.action_space = gym.spaces.Discrete(256)
# self.observation_space = gym.spaces.Box(low=0, high=255, shape=(1,), dtype=np.uint8)
# self.reward_range = (0.0,1.0)
# self.pxl_x, self.pxl_y, self.x_max, self.y_max = 0, 0, ds.shape[1], ds.shape[2]
# if data_src == 'mnist-mv':
# ds = tfds.as_numpy(tfds.load('moving_mnist', batch_size=-1))
# ds = ds['test']['image_sequence'].reshape((200000,64,64,1))
# ds = ds[:16]
self.ds, self.ds_idx, self.ds_max = ds, 0, 64
self.action_zero = util.gym_get_space_zero(self.action_space)
self.obs_zero = util.gym_get_space_zero(self.observation_space)
self.state = self.action_zero, self.obs_zero, np.float64(0.0), False, {}
self.item_accu = []
self.episode = 0
def step(self, action): return self._request(action)
def reset(self): return self._request(None)[0]
def render(self, mode='human', close=False):
action, obs, reward, done, info = self.state
# if action is None: print("{}\n".format(obs))
# else: print("{}\t\t--> {:.18f}{}\n{}\n".format(action, reward, (' DONE!' if done else ''), obs))
if action is None:
if self.data_src == 'shkspr':
text =
|
np.asarray(self.item_accu, dtype=np.uint8)
|
numpy.asarray
|
import os
import cv2
import math
import numpy as np
from configuration import parameters as parameter
from src.objloader_simple import OBJ
from src.filter_simple import FadingFilter
class AR_3D:
'''
This class manage the computation to add a 3D object in a camera
streaming.
input:
-`reference2D`: string, name of the reference
(only .jpg are supported at the moment).
-`model3D`: string, name of the 3D object to render (.obj file).
-`sample_time`: sample time of the video stream -> 1 / fps.
-`rectangle`: bool, display or not a bounding box where the reference
is estimated.
-`matches`: display the reference image on the side and show the matches.
'''
def __init__(self, reference2D: str, model3D: str, sample_time: float,
rectangle=False, matches=False):
# create ORB keypoint detector
self.orb = cv2.ORB_create()
# create BFMatcher object based on hamming distance
self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# load the reference surface that will be searched in the video stream
dir_name = os.getcwd()
ref_path = 'reference/' + reference2D + '.jpg'
self.model = cv2.imread(os.path.join(dir_name, ref_path), 0)
# Compute model keypoints and its descriptors
self.kp_model, self.des_model = self.orb.detectAndCompute(
self.model, None)
# Load 3D model from OBJ file
obj_path = 'models/' + model3D + '.obj'
self.obj = OBJ(os.path.join(dir_name, obj_path), swapyz=True)
# frame rendering option
self.rectangle = rectangle
self.matches = matches
# initialize filter class
self.filter = FadingFilter(0.5, sample_time)
def process_frame(self, frame):
'''
main function of the class, `process_frame` execute the entire pipeline
to compute the frame rendering, that is:
1. frame feature extraction and reference detection.
2. homography estimation.
3. homogeneus 3D transformation estimation.
4. 3D object rendering in the frame,
input:
- `frame`: frame to be analysed
output:
- `frame`: frame rendered (if it was succesful)
'''
# detect frame features
kp_frame, matches = self.feature_detection(frame)
# compute Homography if enough matches are found
if len(matches) > parameter.MIN_MATCHES:
homography = self.compute_homography(kp_frame, matches)
# if a valid homography matrix was found render cube on model plane
if homography is not None:
# filter homography
homography = self.filter.II_order_ff(homography)
# obtain 3D projection matrix from homography matrix
# and camera parameters
projection = self.projection_matrix(
parameter.CAMERA_CALIBRATION, homography)
if self.rectangle: # draw rectangle over the reference
frame = self.draw_rectangle(frame, homography)
if self.matches: # draw first 10 matches.
frame = cv2.drawMatches(self.model, self.kp_model,
frame, kp_frame,
matches[:parameter.MIN_MATCHES],
0, flags=2)
# project cube or model
frame = self.render(frame, projection)
else:
print("Not enough matches found - %d/%d" %
(len(matches), parameter.MIN_MATCHES))
# in any case return the frame
return frame
def feature_detection(self, frame):
''' detect frame keypoints and matches with reference '''
# find and draw the keypoints of the frame
kp_frame, des_frame = self.orb.detectAndCompute(frame, None)
# match frame descriptors with model descriptors
matches = self.bf.match(self.des_model, des_frame)
# sort them in the order of their distance
# the lower the distance, the better the match
matches = sorted(matches, key=lambda x: x.distance)
return kp_frame, matches
def compute_homography(self, kp_frame, matches):
''' estimate the homography transformation'''
# differenciate between source points and destination points
src_pts = np.float32(
[self.kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32(
[kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
# compute Homography
homography, mask = cv2.findHomography(
src_pts, dst_pts, cv2.RANSAC, 5.0)
return homography
def projection_matrix(self, camera_calibration, homography):
'''
From the camera calibration matrix and the estimated homography
compute the 3D projection matrix.
'''
# Compute rotation along the x and y axis as well as the translation
homography = homography * (-1)
rot_and_transl = np.linalg.inv(camera_calibration) @ homography
col_1 = rot_and_transl[:, 0]
col_2 = rot_and_transl[:, 1]
col_3 = rot_and_transl[:, 2]
# normalise vectors
l = math.sqrt(np.linalg.norm(col_1, 2) *
|
np.linalg.norm(col_2, 2)
|
numpy.linalg.norm
|
import copy
import numpy as np
"""
This module implements Solver that optimize model parameters using provided optimizer.
You should fill in code into indicated sections.
"""
class Solver(object):
"""
Implements solver.
"""
def __init__(self, model):
"""
Initializes solver with the model.
Args:
model: Model to optimize.
"""
self.model = model
def _reset(self, optimizer, optimizer_config = {}):
"""
Resets solver by reinitializing every layer in the model.
Resets optimizer configuration for every layer in the model.
"""
self.model.reset()
self.optimizer = optimizer
self.optimizer_configs = {}
for i in range(len(self.model.layers)):
if hasattr(self.model.layers[i], 'params'):
self.optimizer_configs[i] = {}
param_names = self.model.layers[i].params.keys()
for param_name in param_names:
self.optimizer_configs[i][param_name] = copy.deepcopy(optimizer_config)
def train_on_batch(self, x_batch, y_batch):
"""
Trains on batch.
Args:
x_batch: Input batch data.
y_batch: Input batch labels.
Returns:
loss: Loss of the model.
"""
########################################################################################
# Compute gradient of the loss on the batch with the respect to model parameters. #
# Compute gradient of the loss with respect to parameters of the model. #
########################################################################################
out = self.model.forward(x_batch)
loss, dout = self.model.loss(out, y_batch)
# print loss
# print "loc out shape is "
# print out.shape
# print 'loss_shape'
# print loss.shape
self.model.backward(dout)
########################################################################################
# END OF YOUR CODE #
########################################################################################
for i in range(len(self.model.layers)):
if hasattr(self.model.layers[i], 'params'):
param_names = self.model.layers[i].params.keys()
optimizer_config = self.optimizer_configs[i]
for param_name in param_names:
w = self.model.layers[i].params[param_name]
dw = self.model.layers[i].grads[param_name]
# print "dw shape is :"
# print dw.shape
optimizer_config = self.optimizer_configs[i][param_name]
next_w, next_optimizer_config = self.optimizer(w, dw, optimizer_config)
self.model.layers[i].params[param_name] = next_w
self.optimizer_configs[i][param_name] = next_optimizer_config
return out, loss
def test_on_batch(self, x_batch, y_batch):
"""
Tests on batch.
Args:
x_batch: Input batch data.
y_batch: Input batch labels.
Returns:
out: Ouptut of the network for the provided batch.
loss: Loss of the network for the provided batch.
"""
########################################################################################
# Compute output and loss for x_batch and y_batch. #
########################################################################################
out = self.model.forward(x_batch)
loss,_ = self.model.loss(out, y_batch)
########################################################################################
# END OF YOUR CODE #
########################################################################################
return out, loss
def fit(self, x_train, y_train, optimizer, optimizer_config = {}, x_val = None, y_val = None,
batch_size = 200, num_iterations = 1000, val_iteration = 100, verbose = False):
"""
Fits model on x_train, y_train data using specified optimizer. If x_val and y_val are
provided then also test on this data every val_iteration iterations.
Args:
x_train: Input train data.
y_train: Input train labels.
optimizer: Optimizer to use for optimizing model.
optimizer_config: Configuration of optimizer.
x_val: Input validation data.
y_val: Input validation labels.
batch_size: Batch size for training.
num_iterations: Maximum number of iterations to perform.
val_iteration: Perform validation every val_iteration iterations.
verbose: Output or not intermediate results during training.
Returns:
train_loss_history: Train loss history during training of the model.
train_acc_history: Train accuracy history during training of the model.
val_loss_history: Validation loss history during training of the model.
val_acc_history: Validation accuracy history during training of the model.
"""
self._reset(optimizer, optimizer_config)
train_acc_history = []
train_loss_history = []
val_loss_history = []
val_acc_history = []
for iteration in range(num_iterations):
########################################################################################
# Sample a random mini-batch with size of batch_size from train set. Put images to #
# x_train_batch and labels to y_train_batch. #
########################################################################################
sample = np.random.choice(x_train.shape[0], size= batch_size, replace=False)
x_train_batch = x_train[sample]
y_train_batch = y_train[sample]
########################################################################################
# END OF YOUR CODE #
########################################################################################
self.model.set_train_mode()
########################################################################################
# Train on batch (x_train_batch, y_train_batch) using train_on_batch method. Compute #
# train loss and accuracy on this batch. #
########################################################################################
out, train_loss = self.train_on_batch(x_train_batch, y_train_batch)
train_acc = self.accuracy(out, y_train_batch)
# print train_acc
########################################################################################
# END OF YOUR CODE #
########################################################################################
self.model.set_test_mode()
if iteration % val_iteration == 0 or iteration == num_iterations - 1:
train_loss_history.append(train_loss)
train_acc_history.append(train_acc)
if verbose:
print("Iteration {0:d}/{1:d}: Train Loss = {2:.3f}, Train Accuracy = {3:.3f}".format(
iteration, num_iterations, train_loss_history[-1], train_acc_history[-1]))
if not x_val is None:
########################################################################################
# Compute the loss and accuracy on the validation set. #
########################################################################################
out, val_loss = self.test_on_batch(x_val, y_val)
val_acc = self.accuracy(out, y_val)
# print val_loss
# print val_loss.shape
# print val_acc
########################################################################################
# END OF YOUR CODE #
######################################################################################
val_loss_history.append(val_loss)
val_acc_history.append(val_acc)
if verbose:
print("Iteration {0:d}/{1:d}. Validation Loss = {2:.3f}, Validation Accuracy = {3:.3f}".format(
iteration, num_iterations, val_loss_history[-1], val_acc_history[-1]))
if not x_val is None:
return train_loss_history, train_acc_history, val_loss_history, val_acc_history
else:
return train_loss_history, train_acc_history
def accuracy(self, out, y):
"""
Computes accuracy on output out and y.
Args:
out: Output of the network.
y: True labels.
Returns:
accuracy: Accuracy score.
"""
########################################################################################
# Compute the accuracy on output of the network. Store it in accuracy variable. #
########################################################################################
pred = self.predict(out)
accuracy = float(np.sum(pred == y)) / float(len(y))
########################################################################################
# END OF YOUR CODE #
########################################################################################
return accuracy
def predict(self, x):
"""
Computes predictions on x.
Args:
x: Input data.
Returns:
y_pred: Predictions on x.
"""
########################################################################################
# Compute the prediction on data x. Store it in y_pred variable. #
# #
########################################################################################
y_pred =
|
np.argmax(x, axis=1)
|
numpy.argmax
|