| prompt (string, lengths 19–879k) | completion (string, lengths 3–53.8k) | api (string, lengths 8–59) |
|---|---|---|
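Each row pairs a code prompt with the completion that continues it and the fully qualified `api` call being exercised; rows are rendered below as `prompt | completion | api`, with the pipes appearing inline at the point where the prompt is cut. A minimal sketch of iterating such a dump, assuming it has been exported to a Parquet file (the file name is hypothetical):

```python
import pandas as pd

# hypothetical export of the table described above
rows = pd.read_parquet("numpy_api_completions.parquet")
for prompt, completion, api in rows[["prompt", "completion", "api"]].itertuples(index=False):
    print(api, "->", completion[:60])
```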
##########################################################################
##########################################################################
##
## What are you doing looking at this file?
##
##########################################################################
##########################################################################
#
# Just kidding. There is some useful stuff in here that will help you complete
# some of the labs and your project. Feel free to adapt it.
#
# (Sorry about the awful commenting though. Do as I say, not as I do, etc...)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from ipywidgets import interact, fixed, interactive_output, HBox, Button, VBox, Output, IntSlider, Checkbox, FloatSlider, FloatLogSlider, Dropdown
TEXTSIZE = 16
from IPython.display import clear_output
import time
from scipy.optimize import curve_fit
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as colmap
from copy import copy
from scipy.stats import multivariate_normal
# commenting in here is pretty shocking tbh
# wairakei model
def wairakei_data():
# load some data
tq, q = | np.genfromtxt('wk_production_history.csv', delimiter=',', unpack=True) | numpy.genfromtxt |
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import numpy as np
import esutil
import time
import matplotlib.pyplot as plt
from .fgcmUtilities import objFlagDict
from .fgcmUtilities import obsFlagDict
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmStars(object):
"""
Class to describe the stars and observations of the stars. Note that
after initialization you must call loadStarsFromFits() or loadStars()
to load the star information. This allows an external caller to clear
out memory after it has been copied to the shared memory buffers.
parameters
----------
fgcmConfig: FgcmConfig
Config variables
----------------
minObsPerBand: int
Minimum number of observations per band to be "good"
sedFitBandFudgeFactors: float array
Fudge factors for computing fnuprime for the fit bands
sedExtraBandFudgeFactors: float array
Fudge factors for computing fnuprime for the extra bands
starColorCuts: list
List that contains lists of [bandIndex0, bandIndex1, minColor, maxColor]
sigma0Phot: float
Floor on photometric error to add to every observation
reserveFraction: float
Fraction of stars to hold in reserve
mapLongitudeRef: float
Reference longitude for plotting maps of stars
mapNSide: int
Healpix nside of map plotting.
superStarSubCCD: bool
Use sub-ccd info to make superstar flats?
obsFile: string, only if using fits mode
Star observation file
indexFile: string, only if using fits mode
Star index file
"""
def __init__(self,fgcmConfig):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.info('Initializing stars.')
self.obsFile = fgcmConfig.obsFile
self.indexFile = fgcmConfig.indexFile
self.bands = fgcmConfig.bands
self.nBands = len(fgcmConfig.bands)
self.nCCD = fgcmConfig.nCCD
self.minObsPerBand = fgcmConfig.minObsPerBand
self.fitBands = fgcmConfig.fitBands
self.nFitBands = len(fgcmConfig.fitBands)
self.extraBands = fgcmConfig.extraBands
self.sedFitBandFudgeFactors = fgcmConfig.sedFitBandFudgeFactors
self.sedExtraBandFudgeFactors = fgcmConfig.sedExtraBandFudgeFactors
self.starColorCuts = fgcmConfig.starColorCuts
self.sigma0Phot = fgcmConfig.sigma0Phot
self.ccdStartIndex = fgcmConfig.ccdStartIndex
self.plotPath = fgcmConfig.plotPath
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.expField = fgcmConfig.expField
self.ccdField = fgcmConfig.ccdField
self.reserveFraction = fgcmConfig.reserveFraction
self.modelMagErrors = fgcmConfig.modelMagErrors
self.inFlagStarFile = fgcmConfig.inFlagStarFile
self.mapLongitudeRef = fgcmConfig.mapLongitudeRef
self.mapNSide = fgcmConfig.mapNSide
self.lambdaStdBand = fgcmConfig.lambdaStdBand
self.bandRequiredFlag = fgcmConfig.bandRequiredFlag
self.bandRequiredIndex = np.where(self.bandRequiredFlag)[0]
self.bandExtraFlag = fgcmConfig.bandExtraFlag
self.bandExtraIndex = np.where(self.bandExtraFlag)[0]
self.lutFilterNames = fgcmConfig.lutFilterNames
self.filterToBand = fgcmConfig.filterToBand
self.superStarSubCCD = fgcmConfig.superStarSubCCD
#self.expArray = fgcmPars.expArray
#self._loadStars(fgcmPars)
self.magStdComputed = False
self.allMagStdComputed = False
self.sedSlopeComputed = False
#if (computeNobs):
# allExps = np.arange(fgcmConfig.expRange[0],fgcmConfig.expRange[1],dtype='i4')
# self.fgcmLog.info('Checking stars with full possible range of exp numbers')
#self.selectStarsMinObs(goodExps=allExps,doPlots=False)
# allExpsIndex = np.arange(fgcmPars.expArray.size)
# self.selectStarsMinObsExpIndex(allExpsIndex)
self.magConstant = 2.5/np.log(10)
self.hasXY = False
def loadStarsFromFits(self,fgcmPars,computeNobs=True):
"""
Load stars from fits files.
parameters
----------
fgcmPars: FgcmParameters
computeNobs: bool, default=True
Compute number of observations of each star/band
Config variables
----------------
indexFile: string
Star index file
obsFile: string
Star observation file
inFlagStarFile: string, optional
Flagged star file
"""
import fitsio
# read in the observation indices...
startTime = time.time()
self.fgcmLog.info('Reading in observation indices...')
index = fitsio.read(self.indexFile, ext='INDEX')
self.fgcmLog.info('Done reading in %d observation indices in %.1f seconds.' %
(index.size, time.time() - startTime))
# read in obsfile and cut
startTime = time.time()
self.fgcmLog.info('Reading in star observations...')
obs = fitsio.read(self.obsFile, ext=1)
# cut down to those that are indexed
obs = obs[index['OBSINDEX']]
self.fgcmLog.info('Done reading in %d observations in %.1f seconds.' %
(obs.size, time.time() - startTime))
# and positions...
startTime = time.time()
self.fgcmLog.info('Reading in star positions...')
pos = fitsio.read(self.indexFile, ext='POS')
self.fgcmLog.info('Done reading in %d unique star positions in %.1f seconds.' %
(pos.size, time.time() - startTime))
#obsBand = np.core.defchararray.strip(obs['BAND'][:])
obsFilterName = np.core.defchararray.strip(obs['FILTERNAME'][:])
if (self.inFlagStarFile is not None):
self.fgcmLog.info('Reading in list of previous flagged stars from %s' %
(self.inFlagStarFile))
inFlagStars = fitsio.read(self.inFlagStarFile, ext=1)
flagID = inFlagStars['OBJID']
flagFlag = inFlagStars['OBJFLAG']
else:
flagID = None
flagFlag = None
# FIXME: add support to x/y from fits files
if ('X' in obs.dtype.names and 'Y' in obs.dtype.names):
self.fgcmLog.info('Found X/Y in input observations')
obsX = obs['X']
obsY = obs['Y']
else:
obsX = None
obsY = None
# process
self.loadStars(fgcmPars,
obs[self.expField],
obs[self.ccdField],
obs['RA'],
obs['DEC'],
obs['MAG'],
obs['MAGERR'],
obsFilterName,
pos['FGCM_ID'],
pos['RA'],
pos['DEC'],
pos['OBSARRINDEX'],
pos['NOBS'],
obsX=obsX,
obsY=obsY,
flagID=flagID,
flagFlag=flagFlag,
computeNobs=computeNobs)
# and clear memory
index = None
obs = None
pos = None
def loadStars(self, fgcmPars,
obsExp, obsCCD, obsRA, obsDec, obsMag, obsMagErr, obsFilterName,
objID, objRA, objDec, objObsIndex, objNobs, obsX=None, obsY=None,
flagID=None, flagFlag=None, computeNobs=True):
"""
Load stars from arrays
parameters
----------
fgcmPars: fgcmParameters
obsExp: int array
Exposure number (or equivalent) for each observation
obsCCD: int array
CCD number (or equivalent) for each observation
obsRA: double array
RA for each observation (degrees)
obsDec: double array
Dec for each observation (degrees)
obsMag: float array
Raw ADU magnitude for each observation
obsMagErr: float array
Raw ADU magnitude error for each observation
obsFilterName: string array
Filter name for each observation
objID: int array
Unique ID number for each object
objRA: double array
RA for each object (degrees)
objDec: double array
Dec for each object (degrees)
objObsIndex: int array
For each object, where in the obs table to look
objNobs: int array
number of observations of this object (all bands)
obsX: float array, optional
x position for each observation
obsY: float array, optional
y position for each observation
flagID: int array, optional
ID of each object that is flagged from previous cycle
flagFlag: int array, optional
Flag value from previous cycle
computeNobs: bool, default=True
Compute number of good observations of each object?
"""
# FIXME: check that these are all the same length!
self.obsIndexHandle = snmm.createArray(obsRA.size, dtype='i4')
snmm.getArray(self.obsIndexHandle)[:] = np.arange(obsRA.size)
# need to stuff into shared memory objects.
# nStarObs: total number of observations of all stars
self.nStarObs = obsRA.size
# obsExp: exposure number of individual observation (pointed by obsIndex)
self.obsExpHandle = snmm.createArray(self.nStarObs,dtype='i4')
# obsExpIndex: exposure index
self.obsExpIndexHandle = snmm.createArray(self.nStarObs,dtype='i4')
# obsCCD: ccd number of individual observation
self.obsCCDHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsBandIndex: band index of individual observation
self.obsBandIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsLUTFilterIndex: filter index in LUT of individual observation
self.obsLUTFilterIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsFlag: individual bad observation
self.obsFlagHandle = snmm.createArray(self.nStarObs,dtype='i2')
# obsRA: RA of individual observation
self.obsRAHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsDec: Declination of individual observation
self.obsDecHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsSecZenith: secant(zenith) of individual observation
self.obsSecZenithHandle = snmm.createArray(self.nStarObs,dtype='f8')
# obsMagADU: log raw ADU counts of individual observation
## FIXME: need to know default zeropoint?
self.obsMagADUHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagADUErr: raw ADU counts error of individual observation
self.obsMagADUErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagADUModelErr: modeled ADU counts error of individual observation
self.obsMagADUModelErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsSuperStarApplied: SuperStar correction that was applied
self.obsSuperStarAppliedHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsMagStd: corrected (to standard passband) mag of individual observation
self.obsMagStdHandle = snmm.createArray(self.nStarObs,dtype='f4',syncAccess=True)
if (obsX is not None and obsY is not None):
self.hasXY = True
# obsX: x position on the CCD of the given observation
self.obsXHandle = snmm.createArray(self.nStarObs,dtype='f4')
# obsY: y position on the CCD of the given observation
self.obsYHandle = snmm.createArray(self.nStarObs,dtype='f4')
else:
# hasXY = False
if self.superStarSubCCD:
raise ValueError("Input stars do not have x/y but superStarSubCCD is set.")
snmm.getArray(self.obsExpHandle)[:] = obsExp
snmm.getArray(self.obsCCDHandle)[:] = obsCCD
snmm.getArray(self.obsRAHandle)[:] = obsRA
snmm.getArray(self.obsDecHandle)[:] = obsDec
snmm.getArray(self.obsMagADUHandle)[:] = obsMag
snmm.getArray(self.obsMagADUErrHandle)[:] = obsMagErr
snmm.getArray(self.obsMagStdHandle)[:] = obsMag # same as raw at first
snmm.getArray(self.obsSuperStarAppliedHandle)[:] = 0.0
if self.hasXY:
snmm.getArray(self.obsXHandle)[:] = obsX
snmm.getArray(self.obsYHandle)[:] = obsY
self.fgcmLog.info('Applying sigma0Phot = %.4f to mag errs' %
(self.sigma0Phot))
obsMagADUErr = snmm.getArray(self.obsMagADUErrHandle)
obsFlag = snmm.getArray(self.obsFlagHandle)
bad, = np.where(obsMagADUErr <= 0.0)
obsFlag[bad] |= obsFlagDict['BAD_ERROR']
if (bad.size > 0):
self.fgcmLog.info('Flagging %d observations with bad errors.' %
(bad.size))
obsMagADUErr[:] = np.sqrt(obsMagADUErr[:]**2. + self.sigma0Phot**2.)
# Initially, we set the model error to the observed error
obsMagADUModelErr = snmm.getArray(self.obsMagADUModelErrHandle)
obsMagADUModelErr[:] = obsMagADUErr[:]
startTime = time.time()
self.fgcmLog.info('Matching observations to exposure table.')
obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
obsExpIndex[:] = -1
a,b=esutil.numpy_util.match(fgcmPars.expArray,
snmm.getArray(self.obsExpHandle)[:])
obsExpIndex[b] = a
self.fgcmLog.info('Observations matched in %.1f seconds.' %
(time.time() - startTime))
bad, = np.where(obsExpIndex < 0)
obsFlag[bad] |= obsFlagDict['NO_EXPOSURE']
if (bad.size > 0):
self.fgcmLog.info('Flagging %d observations with no associated exposure.' %
(bad.size))
# match bands and filters to indices
startTime = time.time()
self.fgcmLog.info('Matching observations to bands.')
#for i in xrange(self.nBands):
# use, = np.where(obsBand == self.bands[i])
# if (use.size == 0):
# raise ValueError("No observations in band %s!" % (self.bands[i]))
# snmm.getArray(self.obsBandIndexHandle)[use] = i
# new version for multifilter support
# First, we have the filterNames
for filterIndex,filterName in enumerate(self.lutFilterNames):
#try:
# bandIndex, = np.where(self.filterToBand[filterName] == self.bands)
#except:
# self.fgcmLog.info('WARNING: observations with filter %s not in config' % (filterName))
# bandIndex = -1
try:
bandIndex = self.bands.index(self.filterToBand[filterName])
except:
self.fgcmLog.info('WARNING: observations with filter %s not in config' % (filterName))
bandIndex = -1
# obsFilterName is an array from fits/numpy. filterName needs to be encoded to match
use, = np.where(obsFilterName == filterName.encode('utf-8'))
if use.size == 0:
self.fgcmLog.info('WARNING: no observations in filter %s' % (filterName))
else:
snmm.getArray(self.obsLUTFilterIndexHandle)[use] = filterIndex
snmm.getArray(self.obsBandIndexHandle)[use] = bandIndex
self.fgcmLog.info('Observations matched in %.1f seconds.' %
(time.time() - startTime))
#obs=None
#startTime=time.time()
#self.fgcmLog.info('Reading in star positions...')
#pos=fitsio.read(self.indexFile,ext='POS')
#self.fgcmLog.info('Done reading in %d unique star positions in %.1f secondds.' %
# (pos.size,time.time()-startTime))
# nStars: total number of unique stars
#self.nStars = pos.size
self.nStars = objID.size
# objID: unique object ID
self.objIDHandle = snmm.createArray(self.nStars,dtype='i4')
# objRA: mean RA for object
self.objRAHandle = snmm.createArray(self.nStars,dtype='f8')
# objDec: mean Declination for object
self.objDecHandle = snmm.createArray(self.nStars,dtype='f8')
# objObsIndex: for each object, the first
self.objObsIndexHandle = snmm.createArray(self.nStars,dtype='i4')
# objNobs: number of observations of this object (all bands)
self.objNobsHandle = snmm.createArray(self.nStars,dtype='i4')
# objNGoodObsHandle: number of good observations, per band
self.objNGoodObsHandle = snmm.createArray((self.nStars,self.nBands),dtype='i4')
#snmm.getArray(self.objIDHandle)[:] = pos['FGCM_ID'][:]
#snmm.getArray(self.objRAHandle)[:] = pos['RA'][:]
#snmm.getArray(self.objDecHandle)[:] = pos['DEC'][:]
snmm.getArray(self.objIDHandle)[:] = objID
snmm.getArray(self.objRAHandle)[:] = objRA
snmm.getArray(self.objDecHandle)[:] = objDec
#try:
# new field name
# snmm.getArray(self.objObsIndexHandle)[:] = pos['OBSARRINDEX'][:]
#except:
# old field name
# snmm.getArray(self.objObsIndexHandle)[:] = pos['OBSINDEX'][:]
#snmm.getArray(self.objNobsHandle)[:] = pos['NOBS'][:]
snmm.getArray(self.objObsIndexHandle)[:] = objObsIndex
snmm.getArray(self.objNobsHandle)[:] = objNobs
# minObjID: minimum object ID
self.minObjID = np.min(snmm.getArray(self.objIDHandle))
# maxObjID: maximum object ID
self.maxObjID = np.max(snmm.getArray(self.objIDHandle))
# obsObjIDIndex: object ID Index of each observation
# (to get the objID of an observation, use objID[obsObjIDIndex])
startTime = time.time()
self.fgcmLog.info('Indexing star observations...')
self.obsObjIDIndexHandle = snmm.createArray(self.nStarObs,dtype='i4')
obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
objID = snmm.getArray(self.objIDHandle)
obsIndex = snmm.getArray(self.obsIndexHandle)
objObsIndex = snmm.getArray(self.objObsIndexHandle)
objNobs = snmm.getArray(self.objNobsHandle)
## FIXME: check if this extra obsIndex reference is necessary or not.
## probably extraneous.
for i in xrange(self.nStars):
obsObjIDIndex[obsIndex[objObsIndex[i]:objObsIndex[i]+objNobs[i]]] = i
self.fgcmLog.info('Done indexing in %.1f seconds.' %
(time.time() - startTime))
#pos=None
obsObjIDIndex = None
objID = None
obsIndex = None
objObsIndex = None
objNobs = None
# and create a objFlag which flags bad stars as they fall out...
self.objFlagHandle = snmm.createArray(self.nStars,dtype='i2')
# and read in the previous bad stars if available
#if (self.inBadStarFile is not None):
# self.fgcmLog.info('Reading in list of previous bad stars from %s' %
# (self.inBadStarFile))
# objID = snmm.getArray(self.objIDHandle)
# objFlag = snmm.getArray(self.objFlagHandle)
# inBadStars = fitsio.read(self.inBadStarFile,ext=1)
# a,b=esutil.numpy_util.match(inBadStars['OBJID'],
# objID)
# self.fgcmLog.info('Flagging %d stars as bad.' %
# (a.size))
# objFlag[b] = inBadStars['OBJFLAG'][a]
if (flagID is not None):
# the objFlag contains information on RESERVED stars
objID = snmm.getArray(self.objIDHandle)
objFlag = snmm.getArray(self.objFlagHandle)
a,b=esutil.numpy_util.match(flagID, objID)
test,=np.where((flagFlag[a] & objFlagDict['VARIABLE']) > 0)
self.fgcmLog.info('Flagging %d stars as variable from previous cycles.' %
(test.size))
test,=np.where((flagFlag[a] & objFlagDict['RESERVED']) > 0)
self.fgcmLog.info('Flagging %d stars as reserved from previous cycles.' %
(test.size))
objFlag[b] = flagFlag[a]
else:
# we want to reserve stars, if necessary
if self.reserveFraction > 0.0:
objFlag = snmm.getArray(self.objFlagHandle)
nReserve = int(self.reserveFraction * objFlag.size)
reserve = np.random.choice(objFlag.size,
size=nReserve,
replace=False)
self.fgcmLog.info('Reserving %d stars from the fit.' % (nReserve))
objFlag[reserve] |= objFlagDict['RESERVED']
# And we need to record the mean mag, error, SED slopes...
# objMagStdMean: mean standard magnitude of each object, per band
self.objMagStdMeanHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4',
syncAccess=True)
# objMagStdMeanErr: error on the mean standard mag of each object, per band
self.objMagStdMeanErrHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4')
# objSEDSlope: linearized approx. of SED slope of each object, per band
self.objSEDSlopeHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4',
syncAccess=True)
# objMagStdMeanNoChrom: mean std mag of each object, no chromatic correction, per band
self.objMagStdMeanNoChromHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4')
# note: if this takes too long it can be moved to the star computation,
# but it seems pretty damn fast (which may raise the question of
# why it needs to be precomputed...)
# compute secZenith for every observation
startTime=time.time()
self.fgcmLog.info('Computing secZenith for each star observation...')
objRARad = np.radians(snmm.getArray(self.objRAHandle))
objDecRad = np.radians(snmm.getArray(self.objDecHandle))
## FIXME: deal with this at some point...
hi,=np.where(objRARad > np.pi)
objRARad[hi] -= 2*np.pi
obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
obsIndex = snmm.getArray(self.obsIndexHandle)
obsHARad = (fgcmPars.expTelHA[obsExpIndex] +
fgcmPars.expTelRA[obsExpIndex] -
objRARad[obsObjIDIndex])
tempSecZenith = 1./(np.sin(objDecRad[obsObjIDIndex]) * fgcmPars.sinLatitude +
| np.cos(objDecRad[obsObjIDIndex]) | numpy.cos |
#!/usr/bin/env python3
import ipdb
#ipdb.set_trace()
import configparser
import os, sys
from matplotlib import pyplot as plt
import xarray as xr
thisDir = os.path.dirname(os.path.abspath(__file__))
parentDir = os.path.dirname(thisDir)
sys.path.insert(0,parentDir)
from metpy.calc import *
from metpy.units import units
import metpy
from skyfield import api
from skyfield.api import EarthSatellite, Topos, load
import numpy as np
from datetime import datetime, timedelta
fobs="obs_grid.nc"
f1="tar/gfs.t18z.pgrb2.0p25.f003"
f1="tar/gfs.t12z.pgrb2.0p25.f006"
f1="/work/noaa/dtc-hwrf/sbao/EMC_post/DOMAINPATH/postprd/GFSPRS.006.iveg2.grb2"
n_clouds=5
n_aerosol=1
def find_var(f,string):
for i in f:
if (string in i and i.startswith(string[0:3])):
found=i
return found
def effr_cld(cld):
min_qc=1.e-7
gfdl_rhor=1000.
gfdl_ccn=1.0e8
gfdl_rewmin=5.0
gfdl_rewmax=10.0
cld_positive=xr.where(cld>min_qc, cld, min_qc)
result=np.exp(1.0/3.0*np.log((3.*cld_positive)/(4.*np.pi*gfdl_rhor*gfdl_ccn)))*1.0e6
result=xr.where(result<gfdl_rewmin, gfdl_rewmin, result)
result=xr.where(result>gfdl_rewmax, gfdl_rewmax, result)
result=xr.where(cld>min_qc, result, 0)
return result*2
def effr_ice(ice,temp):
min_qi=1.e-8
gfdl_tice = 273.16
gfdl_beta = 1.22
gfdl_reimin = 10.0
gfdl_reimax = 150.0
result=xr.full_like(ice,0.0)
ice_positive=xr.where(ice>min_qi, ice, min_qi)
result=xr.where(temp[1:]-gfdl_tice>=-30.0, gfdl_beta / 9.387 * np.exp ((1 - 0.969) * np.log (1.0e3 * ice_positive)) * 1.0e3, result)
result=xr.where(temp[1:]-gfdl_tice<-30.0, gfdl_beta / 9.208 * np.exp ((1 - 0.945) * np.log (1.0e3 * ice_positive)) * 1.0e3, result)
result=xr.where(temp[1:]-gfdl_tice<-40.0, gfdl_beta / 9.337 * np.exp ((1 - 0.920) * | np.log (1.0e3 * ice_positive) | numpy.log |
"""
Code for processing operations for numpy arrays of tif stacks
"""
#Import packages
#Dependences
import numpy as np
from numpy.fft import fft2, ifft2, fftshift
from scipy.ndimage import median_filter, gaussian_filter, shift
import itertools
import gc
def doMedianFilter(imgstack, med_fsize=3):
'''
Median Filter (Takes 303.37 sec, 5 min 3 sec)
imgstack is (nframes, height, width) numpy array of images
med_fsize is the median filter size
Returns medstack, a (nframes, height, width) numpy array of median filtered images
'''
medstack = np.empty(imgstack.shape, dtype=np.uint16)
for idx, frame in enumerate(imgstack):
medstack[idx,...] = median_filter(frame, size=med_fsize)
return medstack
def doHomomorphicFilter(imgstack, sigmaVal=7):
'''
Homomorphic Filter (Takes 323.1 sec, 5 min 23 sec)
imgstack is (nframes, height, width) numpy array of images
sigmaVal is the gaussian_filter size for subtracing the low frequency component
Returns homomorphimgs, a (nframes, height, width) numpy array of homomorphic filtered images
'''
#Constants to scale from between 0 and 1
eps = 7./3 - 4./3 -1
maxval = imgstack.max()
ScaleFactor = 1./maxval
Baseline = imgstack.min()
# Subtract minimum baseline, and multiply by scale factor. Force minimum of eps before taking log.
logimgs = np.log1p(np.maximum((imgstack-Baseline)*ScaleFactor, eps))
# Get Low Frequency Component from Gaussian Filter
lpComponent = np.empty(logimgs.shape)
for idx, frame in enumerate(logimgs):
lpComponent[idx,...] = gaussian_filter(frame, sigma=sigmaVal)
# Remove Low Frequency Component and Shift Values
adjimgs = logimgs - lpComponent
del logimgs, lpComponent
gc.collect()
logmin = adjimgs.min()
adjimgs = adjimgs - logmin #Shift by minimum logged difference value, so lowest value is 0
#Undo the log and shift back to standard image space
homomorphimgs = (np.expm1(adjimgs)/ScaleFactor) + Baseline
return homomorphimgs
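# Illustrative use of the two filters above on a synthetic (nframes, height, width)
# uint16 stack; the array contents and sizes are made up, and the keyword values are
# just the documented defaults:
#
#   stack = np.random.randint(0, 2**12, size=(10, 64, 64), dtype=np.uint16)
#   med = doMedianFilter(stack, med_fsize=3)
#   flat = doHomomorphicFilter(med, sigmaVal=7)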
def registerImages(imgstack, Ref=None, method='CrossCorrelation'):
'''
Perform frame-by-frame Image Registration to a reference image using a default of Cross Correlation (465.43 sec. 7 min 45 sec)
imgstack is (nframes, height, width) numpy array of images
Ref is a (height, width) numpy array as a reference image to use for motion correction
If no Ref is given, then the mean across all frames is used
method is the method to use to register the images, with the default being cross-correlation between the Reference frame and each individual frame
Returns stackshift, a (nframes, height, width) numpy array of motion corrected and shifted images
Returns yshift is the number of pixels to shift each frame in the y-direction (height)
Returns xshift is the number of pixels to shift each frame in the x-direction (width)
'''
#Insert functions for different registration methods
def CrossCorrelation(imgstack, Ref):
#Precalculate Static Values
if Ref is None:
Ref = imgstack.mean(axis=0)
imshape = Ref.shape
nframes = imgstack.shape[0]
imcenter = np.array(imshape)/2
yshift = | np.empty((nframes,1)) | numpy.empty |
"""
Creates dataset of SEoEi
Author(s): <NAME> (<EMAIL>)
"""
import os
import numpy as np
from matplotlib import pyplot as plt
#plt.switch_backend('Qt5Agg')
import math
from scipy.interpolate import splev, splprep, interp1d
from scipy.integrate import cumtrapz
import sys
sys.path.append("../")
from utils import visualize
def get_sf_params(variables, alpha, beta):
'''
alpha : control nonlinearity
beta : control number of categories
'''
params = []
for v in variables:
# v = [s, t]
# Set [m, n1, n2, n3]
params.append([4+math.floor(v[0]+v[1])%beta, alpha*v[0], alpha*(v[0]+v[1]), alpha*(v[0]+v[1])])
return np.array(params)
def r(phi, m, n1, n2, n3):
# a = b = 1, m1 = m2 = m
return ( abs(math.cos(m * phi / 4)) ** n2 + abs(math.sin(m * phi / 4)) ** n3 ) ** (-1/n1)
def interpolate(Q, N, k, D=20, resolution=1000):
''' Interpolate N points whose concentration is based on curvature. '''
res, fp, ier, msg = splprep(Q.T, u=None, k=k, s=1e-6, per=0, full_output=1)
tck, u = res
uu = np.linspace(u.min(), u.max(), resolution)
x, y = splev(uu, tck, der=0)
dx, dy = splev(uu, tck, der=1)
ddx, ddy = splev(uu, tck, der=2)
cv = np.abs(ddx*dy - dx*ddy)/(dx*dx + dy*dy)**1.5 + D
cv_int = cumtrapz(cv, uu, initial=0)
fcv = interp1d(cv_int, uu)
cv_int_samples = np.linspace(0, cv_int.max(), N)
u_new = fcv(cv_int_samples)
x_new, y_new = splev(u_new, tck, der=0)
return x_new, y_new, fp, ier
def gen_superformula(m, n1, n2, n3, num_points=64):
phis = np.linspace(0, 2*np.pi, num_points*4)#, endpoint=False)
S = [(r(phi, m, n1, n2, n3) * math.cos(phi),
r(phi, m, n1, n2, n3) * math.sin(phi)) for phi in phis]
S = np.array(S)
# Scale the heights to 1.0
mn = np.min(S[:,1])
mx = np.max(S[:,1])
h = mx-mn
S /= h
x_new, y_new, fp, ier = interpolate(S, N=num_points, k=3)
S = np.vstack((x_new, y_new)).T
return S
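# Example of generating and plotting a single superformula outline (parameter values
# are arbitrary illustrations, not drawn from the dataset):
#
#   S = gen_superformula(m=5, n1=10.0, n2=10.0, n3=10.0, num_points=64)
#   plt.plot(S[:, 0], S[:, 1]); plt.axis('equal'); plt.show()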
def gen_ellipse(a, b, num_points=16):
phis = np.linspace(0, 2*np.pi, num_points)
E = [(a * math.cos(phi),
b * math.sin(phi)) for phi in phis]
return np.array(E)
def filt_se(superformula, ellipses):
N = ellipses.shape[0]
R_sf = np.linalg.norm(superformula, axis=-1)
Rs_sf = np.tile( | np.expand_dims(R_sf, axis=0) | numpy.expand_dims |
from __future__ import print_function, division
import os
import numpy as np
from ..discretization import StructuredGrid
from ..datbase import DataType, DataInterface
import flopy.utils.binaryfile as bf
from flopy.utils import HeadFile
import numpy.ma as ma
import struct
import sys
# Module for exporting vtk from flopy
np_to_vtk_type = {'int8': 'Int8',
'uint8': 'UInt8',
'int16': 'Int16',
'uint16': 'UInt16',
'int32': 'Int32',
'uint32': 'UInt32',
'int64': 'Int64',
'uint64': 'UInt64',
'float32': 'Float32',
'float64': 'Float64'}
np_to_struct = {'int8': 'b',
'uint8': 'B',
'int16': 'h',
'uint16': 'H',
'int32': 'i',
'uint32': 'I',
'int64': 'q',
'uint64': 'Q',
'float32': 'f',
'float64': 'd'}
class XmlWriterInterface:
"""
Helps writing vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
# class attributes
self.open_tag = False
self.current = []
self.indent_level = 0
self.indent_char = ' '
# open file and initialize
self.f = self._open_file(file_path)
self.write_string('<?xml version="1.0"?>')
# open VTKFile element
self.open_element('VTKFile').add_attributes(version='0.1')
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
raise NotImplementedError('must define _open_file in child class')
def write_string(self, string):
"""
Write a string to the file.
"""
raise NotImplementedError('must define write_string in child class')
def open_element(self, tag):
if self.open_tag:
self.write_string(">")
indent = self.indent_level * self.indent_char
self.indent_level += 1
tag_string = "\n" + indent + "<%s" % tag
self.write_string(tag_string)
self.open_tag = True
self.current.append(tag)
return self
def close_element(self, tag=None):
self.indent_level -= 1
if tag:
assert (self.current.pop() == tag)
if self.open_tag:
self.write_string(">")
self.open_tag = False
indent = self.indent_level * self.indent_char
tag_string = "\n" + indent + "</%s>" % tag
self.write_string(tag_string)
else:
self.write_string("/>")
self.open_tag = False
self.current.pop()
return self
def add_attributes(self, **kwargs):
assert self.open_tag
for key in kwargs:
st = ' %s="%s"' % (key, kwargs[key])
self.write_string(st)
return self
def write_line(self, text):
if self.open_tag:
self.write_string('>')
self.open_tag = False
self.write_string('\n')
indent = self.indent_level * self.indent_char
self.write_string(indent)
self.write_string(text)
return self
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to the file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
raise NotImplementedError('must define write_array in child class')
def final(self):
"""
Finalize the file. Must be called.
"""
self.close_element('VTKFile')
assert (not self.open_tag)
self.f.close()
class XmlWriterAscii(XmlWriterInterface):
"""
Helps writing ascii vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
super(XmlWriterAscii, self).__init__(file_path)
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
return open(file_path, "w")
def write_string(self, string):
"""
Write a string to the file.
"""
self.f.write(string)
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to the file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
# open DataArray element with relevant attributes
self.open_element('DataArray')
vtk_type = np_to_vtk_type[array.dtype.name]
self.add_attributes(type=vtk_type)
self.add_attributes(**kwargs)
self.add_attributes(format='ascii')
# write the data
nlay = array.shape[0]
for lay in range(nlay):
if actwcells is not None:
idx = (actwcells[lay] != 0)
array_lay_flat = array[lay][idx].flatten()
else:
array_lay_flat = array[lay].flatten()
# replace NaN values by -1e9 as there is a bug in Paraview when
# reading NaN in ASCII mode
# https://gitlab.kitware.com/paraview/paraview/issues/19042
# this may be removed in the future if they fix the bug
array_lay_flat[np.isnan(array_lay_flat)] = -1e9
s = ' '.join(['{}'.format(val) for val in array_lay_flat])
self.write_line(s)
# close DataArray element
self.close_element('DataArray')
return
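# Rough sketch of how a writer instance is driven (element and attribute names below
# are generic placeholders, not a complete, valid VTK file):
#
#   w = XmlWriterAscii('out.vtr')
#   w.open_element('RectilinearGrid').add_attributes(WholeExtent='0 1 0 1 0 1')
#   w.write_array(np.zeros((1, 2, 2)), Name='head')
#   w.close_element('RectilinearGrid')
#   w.final()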
class XmlWriterBinary(XmlWriterInterface):
"""
Helps writing binary vtk files.
Parameters
----------
file_path : str
output file path
"""
def __init__(self, file_path):
super(XmlWriterBinary, self).__init__(file_path)
if sys.byteorder == "little":
self.byte_order = '<'
self.add_attributes(byte_order='LittleEndian')
else:
self.byte_order = '>'
self.add_attributes(byte_order='BigEndian')
self.add_attributes(header_type="UInt64")
# class attributes
self.offset = 0
self.byte_count_size = 8
self.processed_arrays = []
def _open_file(self, file_path):
"""
Open the file for writing.
Return
------
File object.
"""
return open(file_path, "wb")
def write_string(self, string):
"""
Write a string to the file.
"""
self.f.write(str.encode(string))
def write_array(self, array, actwcells=None, **kwargs):
"""
Write an array to file.
Parameters
----------
array : ndarray
the data array being output
actwcells : array
array of the active cells
kwargs : dictionary
Attributes to be added to the DataArray element
"""
# open DataArray element with relevant attributes
self.open_element('DataArray')
vtk_type = np_to_vtk_type[array.dtype.name]
self.add_attributes(type=vtk_type)
self.add_attributes(**kwargs)
self.add_attributes(format='appended', offset=self.offset)
# store array for later writing (appended data section)
if actwcells is not None:
array = array[actwcells != 0]
a = np.ascontiguousarray(array.ravel())
array_size = array.size * array[0].dtype.itemsize
self.processed_arrays.append([a, array_size])
# calculate the offset of the start of the next piece of data
# offset is calculated from beginning of data section
self.offset += array_size + self.byte_count_size
# close DataArray element
self.close_element('DataArray')
return
def _write_size(self, block_size):
# size is a 64 bit unsigned integer
byte_order = self.byte_order + 'Q'
block_size = struct.pack(byte_order, block_size)
self.f.write(block_size)
def _append_array_binary(self, data):
# see vtk documentation and more details here:
# https://vtk.org/Wiki/VTK_XML_Formats#Appended_Data_Section
assert (data.flags['C_CONTIGUOUS'] or data.flags['F_CONTIGUOUS'])
assert data.ndim==1
data_format = self.byte_order + str(data.size) + \
np_to_struct[data.dtype.name]
binary_data = struct.pack(data_format, *data)
self.f.write(binary_data)
def final(self):
"""
Finalize the file. Must be called.
"""
# build data section
self.open_element('AppendedData')
self.add_attributes(encoding='raw')
self.write_line('_')
for a, block_size in self.processed_arrays:
self._write_size(block_size)
self._append_array_binary(a)
self.close_element('AppendedData')
# call super final
super(XmlWriterBinary, self).final()
class _Array(object):
# class to store array and tell if array is 2d
def __init__(self, array, array2d):
self.array = array
self.array2d = array2d
def _get_basic_modeltime(perlen_list):
modeltim = 0
totim = []
for tim in perlen_list:
totim.append(modeltim)
modeltim += tim
return totim
class Vtk(object):
"""
Class to build VTK object for exporting flopy vtk
Parameters
----------
model : MFModel
flopy model instance
verbose : bool
If True, stdout is verbose
nanval : float
no data value, default is -1e20
smooth : bool
if True, will create smooth layer elevations, default is False
point_scalars : bool
if True, will also output array values at cell vertices, default is
False; note this automatically sets smooth to True
vtk_grid_type : str
Specific vtk_grid_type or 'auto' (default). Possible specific values
are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
If 'auto', the grid type is automatically determined. Namely:
* A regular grid (in all three directions) will be saved as an
'ImageData'.
* A rectilinear (in all three directions), non-regular grid
will be saved as a 'RectilinearGrid'.
* Other grids will be saved as 'UnstructuredGrid'.
true2d : bool
If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
and the data will be exported as true 2d data, default is False.
binary : bool
if True the output file will be binary, default is False
Attributes
----------
arrays : dict
Stores data arrays added to VTK object
"""
def __init__(self, model, verbose=None, nanval=-1e+20, smooth=False,
point_scalars=False, vtk_grid_type='auto', true2d=False,
binary=False):
if point_scalars:
smooth = True
if verbose is None:
verbose = model.verbose
self.verbose = verbose
# set up variables
self.model = model
self.modelgrid = model.modelgrid
self.nlay = self.modelgrid.nlay
if hasattr(self.model, 'dis') and hasattr(self.model.dis, 'laycbd'):
self.nlay = self.nlay + np.sum(self.model.dis.laycbd.array > 0)
self.nrow = self.modelgrid.nrow
self.ncol = self.modelgrid.ncol
self.shape = (self.nlay, self.nrow, self.ncol)
self.shape2d = (self.shape[1], self.shape[2])
self.shape_verts = (self.shape[0]+1, self.shape[1]+1, self.shape[2]+1)
self.shape_verts2d = (self.shape_verts[1], self.shape_verts[2])
self.nanval = nanval
self.arrays = {}
self.vectors = {}
self.smooth = smooth
self.point_scalars = point_scalars
self.has_cell_data = False
self.has_point_data = False
# check if structured grid, vtk only supports structured grid
assert (isinstance(self.modelgrid, StructuredGrid))
# cbd
self.cbd_on = False
# get ibound
if self.modelgrid.idomain is None:
# ibound = None
ibound = np.ones(self.shape)
else:
ibound = self.modelgrid.idomain
# build cbd ibound
if ibound is not None and hasattr(self.model, 'dis') and \
hasattr(self.model.dis, 'laycbd'):
self.cbd = | np.where(self.model.dis.laycbd.array > 0) | numpy.where |
import os, re
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import math
import numpy as np
from threading import Thread
import time
import json
FILE_NAME = os.path.basename(__file__)
CURR_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(CURR_DIR)
import sys
sys.path.insert(-1, CURR_DIR)
sys.path.insert(-1, BASE_DIR)
import GeneticAlgorithm.test_functions as tf
import GeneticAlgorithm.genetic_data as gd
def generte_contours(eval=None):
delta = 0.025 # 0.025
x = np.arange(-5.11, 4.11, delta)
y = np.arange(-4.11, 5.11, delta)
X, Y = np.meshgrid(x, y)
Z = eval(X, Y)
print(Z)
plt.contour(X, Y, Z, [x * x / 8 for x in range(1, 50)], linewidths=0.5)
# plt.scatter(np.random.random(),np.linspace(-2,2,20))
plt.title('Minima ' + str(eval(0, 0)))
plt.show()
return
points = np.zeros(shape=(len(x) * len(y), 2))
vals = np.zeros(shape=(len(x) * len(y)))
print(points)
i = 0
for xx in x:
for yy in y:
print(xx, yy)
points[i] = [xx, yy]
vals[i] = eval(xx, yy)
i += 1
print(points, np.min(vals), np.max(vals))
def get_eval(e):
if e == 'rastringin_gen':
return tf.rastringin_gen
elif e == 'camel_hump_six':
return tf.camel_hump_six
def flat_array_to_nd_array(gdata, rows, cols, population, order='F'):
n_p = int(gdata[rows])
nv = gdata[cols]
np_vals = np.array(gdata[population])
return np_vals.reshape(n_p, nv, order=order)
def evaluate_parents(i_genr, gdata, population_fitness, np_vals_r, pop_size, n_vars):
eval = get_eval(gdata['eval_func_name'])
all_vals = [0. for i in range(pop_size)]
for i, parent in enumerate(np_vals_r):
all_vals[i] = eval(parent)
return all_vals
eval_function = evaluate_parents
read_data_function = gd.read_genetic_data
write_data_function = gd.write_genetic_data
def initialize_genetic_data(m, c, p, of, v, ps, ng, ran, ev, labels=None, ev_func=None):
"""
m: mutation rate
c: crossover point
p: initial population size
of: ratio of offspring to population
v: number of variables (genes per individual)
ps: probability of parent selection
ng: number of generations
ran: flat list of variable value ranges ([min, max] per variable)
ev: evaluation function name (string resolved by get_eval)
"""
if ev_func:
eval_function = ev_func
if not labels:
labels = [f'var{x+1}'for x in range(v)]
n_pool = int(p * (1 + of))
print('n_pool', n_pool)
gdata = {
'mutrate': m,
'crossover_point': c,
'n_pars': p,
'n_offsp': of * p,
'n_vars': v,
'n_pool': int(p * (1 + of)),
'parent_selection_prob': ps,
'n_gens': ng,
'parents': [0. for x in range(int(v * p))],
'offspring': [0. for x in range(int(v * of * p))],
'pool': [0. for x in range(int(v * n_pool))],
'fitness_off': [0. for x in range(int(of * p))],
'fitness_pool': [0. for x in range(n_pool)],
'fitness_pars': [0. for x in range(int(p))],
'id_offspring': [0. for x in range(int(of * p))],
'id_pool': [0. for x in range(n_pool)],
'id_parents': [0. for x in range(int(p))],
'ranges': [x for x in ran],
'labels': labels,
'eval_func_name': ev
}
write_data_function(gdata)
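# Illustrative call (numbers are arbitrary; 'ran' is the flat list of per-variable
# [min, max] bounds that generate_parents_np reshapes to (v, 2) further down):
#
#   initialize_genetic_data(m=0.1, c=1, p=40, of=0.5, v=2, ps=0.9, ng=100,
#                           ran=[-5.11, 4.11, -4.11, 5.11],
#                           ev='camel_hump_six')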
def generate_parents_old():
gdata = read_data_function()
xr = gdata['ranges']
p = gdata['n_pars']
nv = gdata['n_vars']
for i in range(nv):
rng = gdata['ranges'][i]
x = np.arange(rng[0], rng[1], (rng[1] - rng[0]) / p)
gdata['parents'][i] = list(x)
print(len(gdata['pool']))
write_data_function(gdata)
# y = np.arange(xr[2],xr[3],(xr[3]-xr[2])/p)
return
def sort_by_fitness(population, population_fitness, population_size):
gdata = read_data_function()
p = gdata[population_size]
nv = gdata['n_vars']
np_vals = np.array(gdata[population])
np_vals_r = np_vals.reshape(p, nv, order='F')
np_fits = np.array(gdata[population_fitness])
np_locs = np.array(gdata[f'id_{population}'])
# np_full = np.concatenate((np_vals_r,np.array([np_fits]).T),axis=1)
# print(np_full[np_full[:,-1].argsort()])
arr_inds = np_fits.argsort()
np_vals_sorted = np_vals_r[arr_inds] # [::-1]
np_fits_sorted = np_fits[arr_inds]
np_locs_sorted = np_locs[arr_inds]
gdata[population] = list(np_vals_sorted.T.flatten())
gdata[population_fitness] = list(np_fits_sorted)
gdata[f'id_{population}'] = list(np_locs_sorted)
# print(np_vals_sorted)
# print(np_vals_sorted.T.flatten())
write_data_function(gdata)
def evaluate_population_fitness(i_genr, population, population_fitness, pop_size, vals=None):
gdata = read_data_function()
p = int(gdata[pop_size])
nv = gdata['n_vars']
np_vals = np.array(gdata[population])
np_vals_r = np_vals.reshape(p, nv, order='F')
fitness_values = eval_function(i_genr, gdata, population_fitness, np_vals_r, p, nv)
for i, fv in enumerate(fitness_values):
gdata[population_fitness][i] = fv
write_data_function(gdata)
def generate_parents_np(i=0):
gdata = read_data_function()
# xr = gdata['ranges']
p = gdata['n_pars']
nv = gdata['n_vars']
print(gdata['ranges'])
npranges = np.array(gdata['ranges'])
ranges = npranges.reshape(int(nv), int(len(gdata['ranges']) / nv))
# ranges = npranges.reshape(int(len(gdata['ranges']) / nv), int(nv))
# px1 = np.arange(ranges[0][0], ranges[0][1], (ranges[0][1] - ranges[0][0]) / p)
px1 = np.random.uniform(ranges[0][0], ranges[0][1], p)
fin = list(px1)
# print('fin',fin)
for r in ranges[1:]:
px = np.arange(r[0], r[1], (r[1] - r[0]) / p)
px = np.random.uniform(r[0], r[1], p)
x = np.array(px)
# print('x',x)
fin.extend(list(x))
gdata['parents'] = fin[:]
write_data_function(gdata)
def crossover_parents():
gdata = read_data_function()
cp = gdata['crossover_point']
n_pars = gdata['n_pars']
nv = gdata['n_vars']
n_off = int(gdata['n_offsp'])
np_pars = np.array(gdata['parents'])
np_pars_r = np_pars.reshape(n_pars, nv, order='F')
par_indices = np.random.choice([a for a in range(n_pars)], int(gdata['n_offsp']), replace=False)
coff = 0
chosen = []
offspr = np.zeros(shape=(n_off, nv))
for i in range(n_pars):
i_par1 = np.random.randint(0, n_pars)
i_par2 = np.random.randint(0, n_pars)
while i_par1 in chosen or i_par2 in chosen or i_par2 == i_par1:
i_par1 = np.random.randint(0, n_pars)
i_par2 = np.random.randint(0, n_pars)
chosen.append(i_par1)
chosen.append(i_par2)
par1 = np_pars_r[i_par1]
par2 = np_pars_r[i_par2]
ch1 = np.zeros(nv)
ch2 = np.zeros(nv)
ch1[:cp] = par2[:cp]
ch2[:cp] = par1[:cp]
ch1[cp:] = par1[cp:]
ch2[cp:] = par2[cp:]
offspr[coff] = ch1
offspr[coff + 1] = ch2
coff += 2
if coff == n_off:
break
gdata['offspring'] = list(offspr.T.flatten())
write_data_function(gdata)
def mutate_population_readable(i_gen=0):
gdata = read_data_function()
gd.write_json(gdata, './before.json')
p = gdata['n_pars']
nv = gdata['n_vars']
n_mutate = int(gdata['mutrate'] * p)
np_pars = np.array(gdata['parents'])
np_pars_r = np_pars.reshape(p, nv, order='F')
np_ranges = np.array(gdata['ranges'])
np_ranges_r = np_ranges.reshape(int(nv), int(len(gdata['ranges']) / nv))
chosen = []
for i in range(n_mutate):
rai = np.random.randint(0, p * 0.8)
while rai in chosen:
rai = np.random.randint(0, p * 0.8)
chosen.append(rai)
# print(rai, rai%nv)
r_i = np.random.randint(0, nv) # int(rai / p)
mut = np.random.uniform(np_ranges_r[r_i][0], np_ranges_r[r_i][1], 1)
# print(rai, rai%nv, mut)
before = np_pars_r[rai]
# print(f'before {i_gen}: {before}')
np_pars_r[rai][r_i] = mut[0]
# print(f'after {i_gen}: {np_pars_r[rai]}')
# print(f'{i_gen} aft:{np_pars_r[rai]} mut:{mut[0]}, rai:{rai}, r_i:{r_i} [{np_ranges_r[r_i][0]}, {np_ranges_r[r_i][1]}]')
gdata['parents'] = list(np_pars_r.T.flatten())
write_data_function(gdata)
def combine(i=0):
gdata = read_data_function()
p = gdata['n_pars']
n_off = int(gdata['n_offsp'])
nv = gdata['n_vars']
np_pars_r = np.array(gdata['parents']).reshape(p, nv, order='F')
np_off_r = np.array(gdata['offspring']).reshape(n_off, nv, order='F')
np_pool = np.concatenate((np_pars_r, np_off_r), axis=0)
gdata['pool'] = list(np_pool.T.flatten())
gdata['fitness_pool'][:p] = gdata['fitness_pars'][:]
gdata['fitness_pool'][p:] = gdata['fitness_off'][:]
gdata['id_pool'][:p] = gdata['id_parents'][:]
gdata['id_pool'][p:] = gdata['id_offspring'][:]
write_data_function(gdata)
def select_new_population():
"""
Selects new parents from combined parents and offspring
:return:
"""
gdata = read_data_function()
p = gdata['n_pars']
npool = gdata['n_pool']
nv = gdata['n_vars']
n_off = int(gdata['n_offsp'])
np_pars_r = np.array(gdata['parents']).reshape(p, nv, order='F')
np_pool_r = np.array(gdata['pool']).reshape(npool, nv, order='F')
np_pool_fitness = | np.array(gdata['fitness_pool']) | numpy.array |
"""
Building
========
The building module contains functions related to building acoustics.
"""
from __future__ import division
import numpy as np
#from acoustics.utils import w
def rw_curve(tl):
"""
Calculate the curve of :math:`Rw` from a NumPy array `tl` with third
octave data between 100 Hz and 3.15 kHz.
:param tl: Transmission Loss
"""
ref_curve = np.array([0, 3, 6, 9, 12, 15, 18, 19, 20, 21, 22, 23, 23, 23,
23, 23])
residuals = 0
while residuals > -32:
ref_curve += 1
diff = tl - ref_curve
residuals = np.sum(np.clip(diff, | np.min(diff) | numpy.min |
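# Sketch of the expected input to rw_curve: 16 third-octave transmission-loss values
# (dB) covering the 100 Hz to 3.15 kHz bands, matching the 16-entry reference curve
# above (numbers are made up):
#
#   tl = np.array([20, 22, 25, 28, 30, 32, 34, 35, 36, 37, 38, 39, 40, 40, 41, 41])
#   Rw = rw_curve(tl)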
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 01 20:00:29 2020
@author: Ing. <NAME>
"""
import pandas as pd # Connect to Excel sheets
import numpy as np # Matrix creation
#from loggerPrint import Logger
def selectionBeamCol(D_nodos_totales, D_nodos_restringidos, D_criterio, ExcelPrincipal, DatosPrincipales, Perfil_Data):
# my_console = Logger('files/preselect.txt')
#ExcelPrincipal = pd.ExcelFile('DatosExcel.xlsx')
#DatosPrincipales = ExcelPrincipal.parse('Datos') #--> DatosPrincipalesPrincipales
print('Message: Variable cleanup has been completed successfully ✔✔✔✔✔✔✔✔✔✔')
print('')
print('Disclaimer')
print('This is a window executed by QThread event. This process could be fail when the program is overloaded, for bad use, when is not possible to find solutions (when there are few options in search, or simply due by error to input data), in wich case the program would be to close.')
print('We recomended to check tutorials GRISS UTPL, go to *Guide and Blog* buttom.')
# height = max(DatosPrincipales['y(j)'])
# lenght = max(DatosPrincipales['x(j)'])
print('')
print(170*'=')
print(170*'=')
# print('\n','Preselection of structural elements for beams and columns:')
# print(' ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯')
ingreso_predatos = []
for i in range(len(DatosPrincipales)):
ingreso_predatos.append([DatosPrincipales.values[i][0], DatosPrincipales.values[i][1], DatosPrincipales.values[i][2],
DatosPrincipales.values[i][3], DatosPrincipales.values[i][4], DatosPrincipales.values[i][5],
DatosPrincipales.values[i][6], DatosPrincipales.values[i][7], DatosPrincipales.values[i][8],
DatosPrincipales.values[i][9], DatosPrincipales.values[i][10], DatosPrincipales.values[i][11],
DatosPrincipales.values[i][12], DatosPrincipales.values[i][15], DatosPrincipales.values[i][16],
DatosPrincipales.values[i][17], DatosPrincipales.values[i][18], DatosPrincipales.values[i][19],
DatosPrincipales.values[i][20], DatosPrincipales.values[i][21], DatosPrincipales.values[i][22]])
# predatos = pd.DataFrame(ingreso_predatos)
nl = D_nodos_totales - D_nodos_restringidos
ngdlt_t = D_nodos_totales * 3
ngdlt_l = nl * 3
ngdlt_r = D_nodos_restringidos * 3
kGE = np.zeros((ngdlt_t,ngdlt_t))
QcE = np.zeros((ngdlt_t,1))
mt_lista = []
rse_lista = []
sel_lista = []
kle_lista = []
Sg_lista = []
ind_lista = []
for i in range(len(ingreso_predatos)):
kgi = np.zeros((ngdlt_t,ngdlt_t))
Qci = np.zeros((ngdlt_t,1))
indx = np.array([ingreso_predatos[i][3], ingreso_predatos[i][4], ingreso_predatos[i][5],
ingreso_predatos[i][6], ingreso_predatos[i][7], ingreso_predatos[i][8]])
ind_lista.append(indx-1) #--> Extract the indices for each element, inside loop i
Lpre = round(np.sqrt((ingreso_predatos[i][11]-ingreso_predatos[i][9])**2+(ingreso_predatos[i][12]-ingreso_predatos[i][10])**2),3)
lx_pre = (ingreso_predatos[i][11]-ingreso_predatos[i][9])/Lpre
ly_pre = (ingreso_predatos[i][12]-ingreso_predatos[i][10])/Lpre
w_pre = ingreso_predatos[i][13]
p_pre = 1.2*ingreso_predatos[i][14]
m1_pre = ingreso_predatos[i][15]
m2_pre = ingreso_predatos[i][16]
v1_pre = ingreso_predatos[i][17]
v2_pre = ingreso_predatos[i][18]
ax1_pre = ingreso_predatos[i][19]
ax2_pre = ingreso_predatos[i][20]
mt_pre = np.array([[lx_pre, ly_pre, 0, 0, 0, 0],
[-ly_pre, lx_pre, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, lx_pre, ly_pre, 0],
[0, 0, 0, -ly_pre, lx_pre, 0],
[0, 0, 0, 0, 0, 1]])
mt_lista.append(mt_pre)
mt_pre_traspuesta = mt_pre.transpose()
rse = np.array([0, w_pre*Lpre/2+p_pre/2, (w_pre*Lpre**2)/12+p_pre*Lpre/8, 0, w_pre*Lpre/2+p_pre/2, (-w_pre*Lpre**2)/12-p_pre*Lpre/8])
rse_lista.append(rse)
se = np.array([ax1_pre, w_pre*Lpre/2+p_pre/2+v1_pre, (w_pre*Lpre**2)/12+p_pre*Lpre/8+m1_pre, ax2_pre, w_pre*Lpre/2+p_pre/2+v2_pre, (-w_pre*Lpre**2)/12-p_pre*Lpre/8+m2_pre])
sel_lista.append(se)
Sg = np.dot(mt_pre_traspuesta,se)
Sg_lista.append(Sg)
Qci[ind_lista[i], 0] = Sg # Places the local-to-global load vector values for assembly
for row in range(ngdlt_t):
QcE[row]+=Qci[row] # Sums the assembled vector values of every element, row by row
kle = np.array([[ 1, 0, 0,-1, 0, 0],
[ 0, 1, 1, 0,-1, 1],
[ 0, 1, 1, 0,-1, 1],
[-1, 0, 0, 1, 0, 0],
[ 0,-1,-1, 0, 1,-1],
[ 0, 1, 1, 0,-1, 1]])
kle_lista.append(kle)
kge = np.dot( | np.dot(mt_pre_traspuesta,kle) | numpy.dot |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 15:51:00 2022
@author: alexany
"""
import time
import sys
sys.path.insert(0,"d:\\Users\\alexany\\CNN_AF\\23_02_2022")
import numpy as np
import bioformats as bf
#import javabridge
import os
import cv2
from scipy.signal import savgol_filter
import tifffile
import datetime
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import json
#import random
import master_subFunctions as mas
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, Flatten, BatchNormalization,Activation,Dense
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow as tf
from mobilenetv3_small import MobileNetV3
#---------------------------------------------------------------------------
class CNN_AF(object):
settings_filename = 'CNN_AF_settings.json'
image_filename = 'MMStack_Pos1.ome.tif'
stack_dir_prefix = 'stack'
data_folder = None
zStep = 0.5 # in um
n_frames = None
image_size = 96
proj_shape = None
n_stacks = None
inp_shape = None
pList = [] # list of stack file names
#---------------------------------------------------------------------------
def set_data_info(self,inp_dir_list):
self.pList = []
for f in inp_dir_list:
if os.path.isdir(f):
files = os.listdir(f)
for i in range(len(files)):
if os.path.isdir(f+files[i]):
if self.stack_dir_prefix in files[i]:
fullfilename = f + files[i] + os.path.sep + self.image_filename
if os.path.isfile(fullfilename):
self.pList.append(fullfilename)
try:
self.n_stacks = len(self.pList)
imgs = tifffile.imread(self.pList[0])
self.inp_shape = (imgs.shape[1],imgs.shape[2])
self.n_frames = imgs.shape[0]
#
# pT, pX, pY = getImageInfo(self.pList[0])
# self.inp_shape = (pX,pY)
# self.n_frames = pT
#
self.proj_shape = (max(self.inp_shape[0],self.inp_shape[1]),2)
#
# for var in vars(self):
# print(getattr(self,var))
except:
functionNameAsString = sys._getframe().f_code.co_name
print(functionNameAsString + ' error!')
return False
return True
#---------------------------------------------------------------------------
def save_settings(self,where_to_save_folder):
with open( where_to_save_folder + self.settings_filename, "w" ) as f:
json.dump( self.__dict__, f )
#---------------------------------------------------------------------------
def load_settings(self,fullfilename):
with open(fullfilename) as f:
self.__dict__ = json.load(f)
#---------------------------------------------------------------------------
def get_stack_range_indices(self,k):
beg_k = k*self.n_frames
end_k = (k+1)*self.n_frames
return beg_k, end_k
#---------------------------------------------------------------------------
class stack_data_generator(CNN_AF):
#---------------------------------------------------------------------------
def set_output_folder_in(self,dst_dir):
self.data_folder = createFolder(dst_dir,timestamp())
return self.data_folder
#---------------------------------------------------------------------------
def gen_XY(self,index):
X_image, X_proj, Y = 0,0,0
#
print(self.pList[index])
#
stack = self.load_stack(index)
X_image = self.gen_X_image(stack)
X_proj = self.gen_X_proj(stack)
Y = self.gen_Y(X_image,index)
return X_image, X_proj, Y
#---------------------------------------------------------------------------
def gen_XY_in_range(self,n1,n2):
#
self.save_settings(self.data_folder)
#
for i in range(n1,n2):
X_image, X_proj, Y = self.gen_XY(i)
np.save(self.data_folder + os.path.sep + 'X_image' + '_' + str(i), X_image)
np.save(self.data_folder + os.path.sep + 'X_proj' + '_' + str(i), X_proj)
np.save(self.data_folder + os.path.sep + 'Y' + '_' + str(i), Y)
#---------------------------------------------------------------------------
def load_stack(self,index):
return tifffile.imread(self.pList[index])
#---------------------------------------------------------------------------
def gen_X_image(self,stack):
X_image = np.zeros((int(self.n_frames),int(self.image_size), int(self.image_size)))
for i in range(0, self.n_frames):
u = stack[i,:,:]
#
# modified normalization - YA 25.11.2021
# u = normalize_by_percentile(u,.1,99.9)
# u = np.clip(u,0,1)
#
# original normalization
u = u/np.linalg.norm(u)
#
u = cv2.resize(u, dsize=(int(self.image_size), int(self.image_size)), interpolation=cv2.INTER_CUBIC)
X_image[i,:,:] = u
print(i,' @ ',self.n_frames)
return X_image
#---------------------------------------------------------------------------
def gen_X_proj(self,stack):
proj_len = max(self.inp_shape[0],self.inp_shape[1])
#
X_proj = np.zeros((int(self.n_frames),int(proj_len),int(2)))
#
for i in range(0, self.n_frames):
u = stack[i,:,:]
#
# modified normalization - YA 25.11.2021
u = normalize_by_percentile(u,.1,99.9)
u = np.clip(u,0,1)
#
# original normalization
# u = u/np.linalg.norm(u)
#
up1 = cv2.resize(u, dsize=(int(proj_len), int(proj_len)), interpolation=cv2.INTER_CUBIC)
X_proj[i,:,0] = np.mean(up1,axis = 0)
X_proj[i,:,1] = np.mean(up1,axis = 1)
#
print(i,' @ ',self.n_frames)
return X_proj
#---------------------------------------------------------------------------
def gen_Y(self,stack,index):
#
xSum1 = []
x = stack
numSlice = self.n_frames
#
cgx, xSum1 = center(x, xSum1, numSlice)
yS = 0-int(cgx)
yE = numSlice-int(cgx)
Y = np.arange(yS, yE, 1)
#print('cgx', cgx, 'ys', yS,'yE', yE,'y1',y1)
#print('y0',type(y),y)
#print('new_y', (list((np.asarray(y)-10)/25)))
#y = ((np.asarray(y)-10.0)/25.0).tolist()
#print('y1',type(y), y)
#plt.pause(0.05)
#plt.plot(Y,xSum1)
#plt.title(index)
half_range = (numSlice-1)/2.0
Y = ((np.asarray(Y)-0.0)/half_range).tolist()
return Y
#---------------------------------------------------------------------------
class stack_data_trainer(CNN_AF):
mode = None
MobileNet = None
#
batch_size = 128
epos = 500
#
results_folder = None
train_X, valid_X, train_Y, valid_Y = None, None, None, None
MobileNetV3_num_classes = int(1280)
MobileNetV3_width_multiplier = 1.0
MobileNetV3_l2_reg = 1e-5
#---------------------------------------------------------------------------
def __init__(self,mode=None,MobileNet=None):
assert(mode in ['proj','image'])
assert(MobileNet in ['V2','V3'])
stack_data_trainer.mode = mode
stack_data_trainer.MobileNet = MobileNet
#---------------------------------------------------------------------------
def set_data_folder(self,data_folder):
self.data_folder = data_folder
try:
self.load_settings(self.data_folder + super().settings_filename)
except:
print('error!')
#---------------------------------------------------------------------------
def getXY(self,validation_fraction,validation_only=False):
n_val = int(np.fix(validation_fraction*self.n_stacks))
self.valid_X, self.valid_Y = self.get_xy(0,n_val)
if validation_only: return
self.train_X, self.train_Y = self.get_xy(n_val,self.n_stacks)
#---------------------------------------------------------------------------
def get_xy(self,n1,n2):
n = n2 - n1
sT = int(self.n_frames*n)
prefix = None
if self.mode=='proj':
sX,sY = int(self.proj_shape[0]), int(self.proj_shape[1])
prefix = 'X_proj_'
if self.mode=='image':
sX,sY = int(self.image_size), int(self.image_size)
prefix = 'X_image_'
out_X = np.zeros((sT,sX,sY))
out_Y = np.zeros((sT))
for k in range(n1,n2):
fname = self.data_folder + prefix + str(k) + '.npy'
stack_k = np.load(fname)
beg_k,end_k = self.get_stack_range_indices(k-n1)
out_X[beg_k:end_k,:,:] = stack_k
#
fname = self.data_folder + 'Y_' + str(k) + '.npy'
out_Y[beg_k:end_k] = np.load(fname)
#
print(k-n1,' @ ',n)
return out_X, out_Y
#---------------------------------------------------------------------------
def train(self,validation_fraction):
self.results_folder = createFolder(self.data_folder,'results_' +
self.mode + '_' +
self.MobileNet + '_' + timestamp())
self.save_settings(self.results_folder)
self.getXY(validation_fraction)
#
if self.mode=='proj':
if self.MobileNet=='V2':
self.train_proj_V2()
if self.MobileNet=='V3':
self.train_proj_V3()
if self.mode=='image':
if self.MobileNet=='V2':
self.train_image_V2()
if self.MobileNet=='V3':
self.train_image_V3()
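    # Illustrative usage sketch (not part of the original class). The folder path is a
    # made-up placeholder; it assumes the folder holds the X_proj_*.npy / X_image_*.npy
    # and Y_*.npy stacks plus the settings file written by the generation code above.
    #
    #   trainer = stack_data_trainer(mode='proj', MobileNet='V2')
    #   trainer.set_data_folder('/path/to/generated/stacks/')
    #   trainer.train(validation_fraction=0.2)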
#---------------------------------------------------------------------------
def train_proj_V2(self):
train_X = transform_stack_to_stack_of_square_images(self.train_X)
valid_X = transform_stack_to_stack_of_square_images(self.valid_X)
s = train_X.shape
sV = valid_X.shape
x_train = np.zeros([s[0], s[1], s[2], 1])
x_val = np.zeros([sV[0], sV[1], sV[2], 1])
x_train[:,:,:,0] = train_X
x_val[:,:,:,0] = valid_X
x_train_tensor = tf.convert_to_tensor(x_train)
x_val_tensor = tf.convert_to_tensor(x_val)
#imports the MobileNetV2 model and discards the last 1000 neuron layer.
base_model=tf.keras.applications.MobileNetV2(weights=None,
include_top=False,
input_shape=(s[1],s[2],1))
x = base_model.output
x = GlobalAveragePooling2D()(x)
preds = Dense(1)(x)
model = tf.keras.Model(inputs=base_model.input,outputs=preds)
model.compile(optimizer = 'adam',
#loss = 'sparse_categorical_crossentropy',
loss = 'mse',
metrics = ['mse'])
#saveWeights(model, res_path)
save_model_summary(self.results_folder, model)
#best epoch callback
filepath = self.results_folder+"weights_best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_mse', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
stepsPerEpoch = int(s[0]/self.batch_size)
history = model.fit(x_train_tensor, # Features
self.train_Y, # Target vector
epochs=self.epos,#, # Number of epochs
validation_data=(x_val_tensor, self.valid_Y),
steps_per_epoch = stepsPerEpoch,
verbose=1, # Print description after each epoch
#batch_size=batchSize, # Number of observations per batch
callbacks=callbacks_list # callbacks best model
#callbacks = [callback]
)
#loss = history.history['loss']
acc = history.history['mse']
acc_val = history.history['val_mse']
# save model
pp = self.results_folder + "model"
model.save(pp)
pp = self.results_folder + "acc.npy"
        np.save(pp, acc)
"""
Helper functions for PyTorch.
Functions:
get_trainable_parameters
save_model_weights
Class:
Measurement
PSNR
SSIM
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from datetime import datetime
from math import exp
from tqdm import tqdm
import os
def get_gpu_status():
"""
ref: https://developer.download.nvidia.com/compute/DCGM/docs/nvidia-smi-367.38.pdf
https://discuss.pytorch.org/t/it-there-anyway-to-let-program-select-free-gpu-automatically/17560/7
:return:
"""
# os.system('nvidia-smi -q -d power >tmp')
# data = open('tmp', 'r').read()
# print(data)
    os.system(r'nvidia-smi -q -d power |grep -A14 GPU|grep Max\ Power\ Limit >tmp')
power_max = [float(x.split()[4]) for x in open('tmp', 'r').readlines()]
os.system('nvidia-smi -q -d power |grep -A14 GPU|grep Avg >tmp')
power_avg = [float(x.split()[2]) for x in open('tmp', 'r').readlines()]
os.system('nvidia-smi -q -d memory |grep -A4 GPU|grep Total >tmp')
mem_tot = [float(x.split()[2]) for x in open('tmp', 'r').readlines()]
os.system('nvidia-smi -q -d memory |grep -A4 GPU|grep Free >tmp')
mem_free = [float(x.split()[2]) for x in open('tmp', 'r').readlines()]
return (mem_tot, mem_free), (power_max, power_avg)
def auto_select_GPU(mode='memory_priority', threshold=0., unit='pct', dwell=5):
mode_options = ['memory_priority',
'power_priority',
'memory_threshold',
'power_threshold']
    assert mode in mode_options, (f'{datetime.now()} E auto_select_GPU(): unknown mode. '
                                  f'Select from: {mode_options}. Got {mode} instead.')
tq = tqdm(total=dwell, desc=f'GPU Selecting... {mode}:{threshold}:{unit}', unit='dwell', dynamic_ncols=True)
for i_dwell in range(dwell):
(mem_tot_new, mem_free_new), (power_max_new, power_avg_new) = get_gpu_status()
if i_dwell == 0:
mem_tot, mem_free, power_max, power_avg = mem_tot_new, mem_free_new, power_max_new, power_avg_new
else:
mem_free = [min([mem_free[i], mem_free_new[i]]) for i in range(len(mem_tot_new))]
power_avg = [max([power_avg[i], power_avg_new[i]]) for i in range(len(mem_tot_new))]
# sleep(1)
tq.update()
tq.close()
power_free = [i-j for (i, j) in zip(power_max, power_avg)]
if unit.lower() == 'pct':
pass
mem_free_pct = [i/j for (i, j) in zip(mem_free, mem_tot)]
power_free_pct = [i/j for (i, j) in zip(power_free, power_max)]
# print(mem_free_pct)
# print(power_free_pct)
if mode.lower() == 'memory_priority':
        i_GPU = np.argmax(mem_free_pct)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 15:11:40 2021
@author: fede
"""
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
import random
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from networkx.drawing.nx_agraph import graphviz_layout
import time
import multiprocessing
from joblib import Parallel, delayed
from tqdm import tqdm
import csv
#generates the covariance matrix (divided by h/2pi)
def _generateCovM(Zz):
U=np.imag(Zz)
V=np.real(Zz)
invU = np.linalg.inv(U)
CM=0.5*np.block([[invU, invU @ V],[V @ invU, U+V @ invU@V]])
return CM
def _generateZ(CM):
dim=int(len(CM)/2)
Ui=np.empty([dim,dim])
UiV=np.empty([dim,dim])
for i in range(dim):
for j in range(dim):
Ui[i,j]=CM[i,j]
UiV[i,j]=CM[i+dim,j]
U=np.linalg.inv(Ui)/2
V=U@UiV*2
return V+1j*U
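# Illustrative sketch (not part of the original module): _generateZ inverts
# _generateCovM, so a single-mode Z = V + iU (with U positive) should survive a
# round trip. The numbers below are arbitrary.
def _example_Z_CM_roundtrip():
    Zz = np.array([[0.3 + 1.2j]])
    CM = _generateCovM(Zz)
    return np.allclose(Zz, _generateZ(CM))  # expected True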
#builds the symplectic form matrix
def _buildSymM(dim):
return np.block([[np.zeros((dim,dim)),np.identity(dim)], [-np.identity(dim),np.zeros((dim,dim))]])
#Returns the purity of the state from its covariance matrix
def _Purity(CM):
det=np.linalg.det(CM)
Purity=1/((2**(len(CM)/2))*np.sqrt(det))
return Purity
#Checks if a matrix is positive defined
def _MisPositive(MM):
lambdas=np.linalg.eig(MM)[0]
#thr=1e14
thr=np.abs(sum(lambdas))/len(lambdas)*1e-5
realCheck=np.amax(np.abs(np.imag(lambdas)))>thr
if realCheck == True:
print("The matrix has complex eigenvalues: \n", lambdas)
PosCheck=np.amin(np.real(lambdas))<-thr
if PosCheck == True :
print("The matrix has negative eigenvalues!\n", lambdas)
return not (PosCheck or realCheck)
#Checks the gaussianity of a covariance matrix
def _checkGaussianity(CM):
return _MisPositive(CM+0.5j*_buildSymM(int(len(CM)/2)))
#Sum of the variance of the nullifiers
def _nullifiers(Zz):
return 0.5*np.trace(np.imag(Zz))
#Logarithmic negativity of entanglement, for two modes only
def negativity(Zz):
if len(Zz)==2:
CM=_generateCovM(Zz)
sym=_buildSymM(len(Zz))
elif len(Zz)==4:
CM=Zz
sym=_buildSymM(int(len(Zz)/2))
gamma=np.diag([1,1,-1,1])
CMpt=gamma@CM@gamma
lamb=np.min(np.abs(np.linalg.eig(1j*sym@(CMpt))[0]))
return np.max([0,-np.log(2*lamb)])
# return np.min(eig)
def niggativity(Zz):
if len(Zz)==2:
CM=_generateCovM(Zz)
elif len(Zz)==4:
CM=Zz
I1=CM[0,0]*CM[2,2]-CM[0,2]**2
I2=CM[1,1]*CM[3,3]-CM[1,3]**2
I3=CM[0,1]*CM[2,3]-CM[1,2]*CM[0,3]
I4=np.linalg.det(CM)
DELTA=I1+I2-2*I3
lamb=np.sqrt(0.5*(DELTA-np.sqrt(DELTA**2-4*I4)))
return np.max([0,-np.log(2*lamb)])
# return lamb
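# Illustrative sketch (not part of the original module): sanity check of the two
# log-negativity implementations on a two-mode squeezed vacuum covariance matrix,
# written in the (x1, x2, p1, p2) ordering assumed above with vacuum variance 1/2.
# For squeezing parameter r both should return approximately 2*r.
def _example_negativity_tmsv(r=0.5):
    c, s = np.cosh(2*r), np.sinh(2*r)
    CM = 0.5*np.array([[c, s, 0, 0],
                       [s, c, 0, 0],
                       [0, 0, c, -s],
                       [0, 0, -s, c]])
    return negativity(CM), niggativity(CM)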
def Fcoh(Zz):
if len(Zz)==2:
CM=_generateCovM(Zz)
sym=_buildSymM(len(Zz))
elif len(Zz)==4:
CM=Zz
sym=_buildSymM(int(len(Zz)/2))
gamma=np.diag([1,1,-1,1])
CMpt=gamma@CM@gamma
eig=np.min(np.abs(np.linalg.eig(2j*sym@(CMpt))[0]))
return 1/(1+eig)
def Fcoh3(Zz):
if len(Zz)==2:
CM=_generateCovM(Zz)
elif len(Zz)==4:
CM=Zz
dx2=(CM[0,0]+CM[1,1]-2*CM[0,1])
dp2=(CM[2,2]+CM[3,3]+2*CM[2,3])
return 1/np.sqrt((1+dx2)*(1+dp2))
def DeltaE(Zz):
CM=_generateCovM(Zz)
return 0.5*(np.trace(CM)-len(Zz))
def _Wigner(sigma):
Vrange=2
steps=100
qq=np.linspace(-Vrange,Vrange,steps)
pp=np.copy(qq)
det=np.linalg.det(sigma)
WW=np.zeros([steps,steps])
for i in range(steps):
for j in range(steps):
xx=np.array([qq[i],pp[j]])
esp=xx@(np.linalg.inv(sigma))@(np.transpose(xx))
WW[i,j]=(2*np.pi)**(-int(len(sigma)/2))*det**(-0.5)*np.exp(-0.5*esp)
return WW
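# Illustrative sketch (not part of the original module): Wigner function of the
# single-mode vacuum, whose covariance matrix is 0.5*identity in this convention.
def _example_vacuum_wigner():
    WW = _Wigner(0.5*np.identity(2))
    return WW.max()  # peaked near the origin at roughly 1/pi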
def _partial(Zz,node):
CM=_generateCovM(Zz)
size=int(len(CM)/2)
    redCM=np.zeros([2,2])
import unittest
import numpy as np
import pandas as pd
from biofes import biplot
from biofes import feature
import itertools
class test_feature(unittest.TestCase):
def test_selection(self):
n, p = np.random.randint(70,500), np.random.randint(30,50)
A = np.random.uniform(-300,300,size=(n,p))
target = list(np.random.randint(np.random.randint(2, 10), size = A.shape[0]))
        d = np.random.randint(p)
"""
File: bsd_patches.py
Author: Nrupatunga
Email: <EMAIL>
Github: https://github.com/nrupatunga
Description: BSDS500 patches
"""
import time
from pathlib import Path
import h5py
import numpy as np
from tqdm import tqdm
mode = 'train'
mat_root_dir = f'/media/nthere/datasets/DIV_superres/patches/train/'
out_root_dir = f'/home/nthere/2020/pytorch-deaf/data/train/'
read = True
if read:
root_dir = '/home/nthere/2020/pytorch-deaf/data/DIV_superres/hdf5/train/'
hdf5_files = Path(root_dir).rglob('*.hdf5')
images = []
means = []
stds = []
for i, f in tqdm(enumerate(hdf5_files)):
with h5py.File(f) as fout:
for j in range(10000):
image = fout['images_{}'.format(j)][()]
images.append(image)
if ((i + 1) % 10) == 0:
            images = np.asarray(images)
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
import time
import numba as nb
@nb.jit(nopython=True)
def allocate(idx, idx_end_id, start_id, cnts, seq):
for i in range(len(start_id)):
idx[idx_end_id[i] - cnts[i]:idx_end_id[i]] = seq[start_id[i]: start_id[i] + cnts[i]]
def multi_slice_indexing2(seq, start_id, cnts, *args):
if start_id.shape[0] == 0: return _empty()
    idx = np.ones(np.sum(cnts), dtype=int)
idx_end_id = np.cumsum(cnts)
allocate(idx, idx_end_id, start_id, cnts, seq)
return idx
DTYPE = np.uint32
class SparseVoxel(object):
"""
SparseVoxel object create from point set.
Parameters:
points: N x D size np.ndarray,
voxel_size D x 1 size np.ndarray,
min_bounds (optional): minimum bounds of points
Returns:
SparseVoxel object
Object Properties:
dim: points dimension
voxel_size: voxel size in point space
min_bounds: minimum bounds of points
indices: sparse voxel indices with size V x D
values: sparse voxel start index and point counts with size 2 x V
Functions:
SparseVoxel obj.toindex(indices(optional)):
get original index of points inside the sparse voxel
SparseVoxel obj.voxelize(points):
get voxel index of point with same parameters of SparseVoxel object
SparseVoxel obj.size():
get the size of SparseVoxel object
SparseVoxel obj.save(save_file):
save the SparseVoxel in .npz file
SparseVoxel.load(save_file):
return the loaded SparseVoxel obj
"""
def __init__(self, points, voxel_size, min_bounds=None):
if points is None and voxel_size is None:
self._sparse_voxel = _SparseVoxel()
else:
self._sparse_voxel = sparse_voxelize(points, voxel_size, min_bounds)
def __getattr__(self, item):
if item in self.__dict__.keys():
return object.__getattribute__(self, item)
else:
return getattr(self._sparse_voxel, item)
def __getitem__(self, item):
inds = self._sparse_voxel[item]
sv = SparseVoxel(None, None)
if len(inds) != 0:
sv._sparse_voxel = sliced_voxelize(self._sparse_voxel, inds)
return sv
def __len__(self):
return len(self._sparse_voxel)
@staticmethod
def load(load_file):
npz_file = np.load(load_file)
sv = SparseVoxel(None, None)
if len(npz_file['indices']) != 0:
sv._sparse_voxel = load_voxelize(**npz_file)
return sv
def save(self, save_file):
save_dict = {p:getattr(self._sparse_voxel, p) for p in
['indices', 'values', '_sorted_pts_index', 'min_bounds', 'voxel_size']}
np.savez(save_file, **save_dict)
def sparse_voxelize(points, voxel_size, min_bounds=None):
V = _SparseVoxel()
V.create_from_points(points, voxel_size, min_bounds)
return V
def sliced_voxelize(sparse_voxel, index):
V = _SparseVoxel()
V.create_from_indexed_voxel(index, sparse_voxel)
return V
def load_voxelize(**kwargs):
V = _SparseVoxel()
V.create_from_arrays(**kwargs)
return V
def _empty(shape=(1,)):
    return np.tile(np.array([], dtype=int), shape)
def multi_slice_indexing3(seq, start_id, cnts, pre_arrange=None):
    # pre_arrange makes it a bit faster (~10%)
process_num = start_id.shape[0]
if process_num == 0: return _empty()
#indexed_seq = np.ones((np.sum(cnts),), dtype=np.int)
    idx = np.ones(np.sum(cnts), dtype=int)
idx[np.cumsum(cnts)[:-1]] -= cnts[:-1]
idx = np.cumsum(idx) - 1 + np.repeat(start_id, cnts)
return seq[idx] if len(idx) != 0 else _empty()
def multi_slice_indexing(seq, start_id, cnts, pre_arrange=None):
    # pre_arrange makes it a bit faster (~10%)
if start_id.shape[0] == 0: return _empty()
slices = np.asarray([start_id, start_id + cnts]).transpose().tolist()
if pre_arrange is not None:
s = list(map(lambda x: pre_arrange[slice(*x)], slices))
else:
s = list(map(lambda x: np.arange(*x), slices))
ind = np.concatenate(s)
return seq[ind] if len(ind) != 0 else _empty()
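# Illustrative sketch (not part of the original module): multi_slice_indexing gathers
# several contiguous runs of `seq` in one call. With the made-up values below it
# returns seq[0:2] followed by seq[4:6], i.e. array([10, 11, 14, 15]).
def _example_multi_slice_indexing():
    seq = np.array([10, 11, 12, 13, 14, 15])
    return multi_slice_indexing(seq, np.array([0, 4]), np.array([2, 2]))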
def voxel_map_point_loc(points, min_bounds, voxel_size):
    # input: origin points: N x d
    #        points min bound: d x 1
    #        voxel size: d x 1 size of one voxel in the points space
    # output: voxel unique index M x d
    #         sorted index of original points M x 1
    #         start index of the sorted index for each voxel M x 1
    #         points num counts of each voxel
# EXAMPLE:
# index 0 -|
# index 1 |
# index 2 |= > (voxel index 0, start index 0, counts 5)
# index 3 |
# index 4 -|
#
# index 5 -|
# index 6 |= > (voxel index 1, start index 1, counts 3)
# index 7 -|
dim = points.shape[1] if len(points.shape) != 1 else 0
    voxel_index = ((points - min_bounds) / voxel_size).astype(int)
if dim == 0:
sort_index = np.argsort(points)
else:
sort_index = np.lexsort(np.rot90(voxel_index))
vind_un, start_inds, cnts = np.unique(voxel_index[sort_index],
axis=0, return_index=True, return_counts=True)
indices = np.arange(points.shape[0])[sort_index]
return vind_un, indices, start_inds, cnts
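# Illustrative sketch (not part of the original module): voxel_map_point_loc on a tiny
# 1-D point set (made-up values). With voxel_size=1 and min_bounds=0 the points
# [0.2, 0.7, 1.5] fall into voxels 0, 0 and 1, so the call returns voxel indices
# [0, 1], the sorted point ids [0, 1, 2], start indices [0, 2] and counts [2, 1].
def _example_voxel_map_point_loc():
    pts = np.array([0.2, 0.7, 1.5])
    return voxel_map_point_loc(pts, 0.0, 1.0)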
class _SparseSlicer1D(object):
def __init__(self, point_1d, length=None):
self.point_un, self.ordered_ids, start_idx, cnts \
= voxel_map_point_loc(point_1d, 0, 1)
self.start_idx_ = np.append(start_idx, 0)
self.cnts_ = np.append(cnts, 0)
# self.point_min = self.point_un[0]
self.length = length if length is not None else int(self.point_un[-1]) + 1
self.dense_to_un_idx = self.dense_to_sparse_map()
def __len__(self):
return self.length
def __getitem__(self, item):
un_idx = np.unique(self.dense_to_un_idx[item])
if un_idx.shape[0] == 0 or \
(un_idx.shape[0] == 1 and un_idx[0] == len(self.point_un)):
return _empty() # empty or no valid index
if isinstance(item, np.ndarray):
return self.un_idx_array_to_ids(un_idx)
elif isinstance(item, (int, np.integer)):
return self.un_idx_to_ids(un_idx)
elif isinstance(item, slice):
if un_idx[-1] == len(self.point_un): un_idx[-1] = un_idx[-2]
return self.un_idx_slice_to_ids(un_idx[0], un_idx[-1])
else:
raise TypeError('Index with type {} is invalid.'.format(type(item)))
def un_idx_to_ids(self, un_idx):
start = self.start_idx_[un_idx]
end = start + self.cnts_[un_idx]
return self.ordered_ids[int(start):int(end)]
def un_idx_slice_to_ids(self, idx_min, idx_max):
start = self.start_idx_[idx_min]
end = start + np.sum(self.cnts_[idx_min:idx_max + 1])
return self.ordered_ids[int(start):int(end)]
def un_idx_array_to_ids(self, un_idx_arr):
start_ind = self.start_idx_[un_idx_arr]
cnts = self.cnts_[un_idx_arr]
return multi_slice_indexing(self.ordered_ids, start_ind, cnts)
def dense_to_sparse_map(self):
start_mask = np.isin(np.arange(0, len(self)),
self.point_un)
        look_up = np.full((len(self),), len(self.point_un), dtype=int)
look_up[start_mask] = np.arange(len(self.point_un))
return look_up
class _SparseSlicerkD(object): #add check valid in each slicer1d
def __init__(self, points_kd, point_size=None):
self.dim = points_kd.shape[1]
self._size = point_size if point_size is not None \
else self.get_point_up_bounds(points_kd)
self.points = points_kd # * self._mul_shape#same dtype
self.slicers = self.get_slicers(points_kd)
def size(self):
return self._size
def __len__(self):
return np.prod(self._size)
def get_slicers(self, points):
return [_SparseSlicer1D(points[:, i], self._size[i])
for i in range(self.dim)]
def get_point_up_bounds(self, points):
return tuple((np.max(points, axis=0) + 1).tolist())
def __getitem__(self, items):
if not isinstance(items, tuple): items = tuple([items])
if len(items) > self.dim:
raise IndexError('Index with dim {} out of dim {}'.format(len(items), self.dim))
m = map(self.get_voxel_index_one_dim, items, range(len(items)), )
return self.voxel_index_intersection(*list(m))
def get_voxel_index_one_dim(self, item, dim):
return None if isinstance(item, slice) \
and item.start is None \
and item.stop is None else \
self.slicers[dim][item]
def voxel_index_intersection(self, *indices):
not_none_indices = [ind for ind in indices if ind is not None]
if len(not_none_indices) == 0: return np.arange(len(self))
ind = not_none_indices[0]
for ind_ in not_none_indices[1:]:
ind = np.intersect1d(ind, ind_, assume_unique=True)
return ind
class _DenseSlicerkD(object): # start from 0, largest voxel point just 10**4 -10**6
def __init__(self, points_kd, point_size=None, pre_seq=None):
self.dim = points_kd.shape[1]
self._size = point_size if point_size is not None \
else self.get_point_up_bounds(points_kd)
self._mul_shape = self.get_shape_multiple()
self.points = points_kd # * self._mul_shape#same dtype
self.map_array = self.gen_map_array(pre_seq)
def gen_map_array(self, pre_seq=None):
if pre_seq is None:
return [np.arange(s) for s in self._size]
else:
return [pre_seq[:s] for s in self._size]
def size(self):
return self._size
def parse_items(self, items):
parse = {int: [],
slice: [],
np.ndarray: [],
None: []}
for i, it in enumerate(items):
parse[self._type(it)].append(i)
        parse_without_none = [np.asarray(parse[k], dtype=int) for k in parse.keys()
if k is not None]
        return parse, np.concatenate(parse_without_none)
#!/usr/bin/env python
# download
# Downloads the example datasets for running the examples.
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Created: Wed May 18 11:54:45 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: download.py [1f73d2b] <EMAIL> $
"""
Downloads the example datasets for running the examples.
"""
##########################################################################
## Imports
##########################################################################
import os
import numpy as np
from .utils import load_numpy, load_corpus, download_data, DATASETS
from .utils import _lookup_path
##########################################################################
## Functions
##########################################################################
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
def download_all(data_path=FIXTURES, verify=True):
"""
    Downloads all the example datasets into data_path. If verify is True then
    compare the download signature with the hardcoded signature.
"""
for name, meta in DATASETS.items():
download_data(name, data_dir=data_path)
def load_concrete(data_path=FIXTURES):
"""
Downloads the 'concrete' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'concrete'
data = load_numpy(name, data_path=data_path)
return data
def load_energy(data_path=FIXTURES):
"""
Downloads the 'energy' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'energy'
data = load_numpy(name, data_path=data_path)
return data
def load_credit(data_path=FIXTURES):
"""
Downloads the 'credit' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'credit'
data = load_numpy(name, data_path=data_path)
return data
def load_occupancy(data_path=FIXTURES):
"""
Downloads the 'occupancy' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'occupancy'
data = load_numpy(name, data_path=data_path)
return data
def load_mushroom(data_path=FIXTURES):
"""
Downloads the 'mushroom' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'mushroom'
data = load_numpy(name, data_path=data_path)
return data
def load_hobbies(data_path=FIXTURES):
"""
Downloads the 'hobbies' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'hobbies'
data = load_corpus(name, data_path=data_path)
return data
def load_game(data_path=FIXTURES):
"""
Downloads the 'game' dataset, saving it to the output
path specified and returns the data.
"""
# name of the dataset
name = 'game'
path = _lookup_path(name, data_path=data_path)
dtype = np.array(['S1']*42+['|S4'])
    return np.genfromtxt(path, dtype=dtype, delimiter=',', names=True)
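# Illustrative usage sketch (not part of the original module): fetch all example
# datasets into the default fixtures directory, then load one of them. The choice of
# dataset is arbitrary.
#
#   >>> download_all()
#   >>> occupancy = load_occupancy()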
'''
Basic unittest that will instantiate a fixed DocTopicCount
(assumed the same for all documents, for simplicity),
and then examine how inference of rho/omega procedes given this
fixed set of counts.
Conclusions
-----------
rho/omega objective seems to be convex in rho/omega,
and very flat with respect to omega (so up to 6 significant figs,
same objective obtained by omega that differ by say 100 or 200)
'''
import argparse
import sys
import os
import numpy as np
from scipy.optimize import approx_fprime
import warnings
import unittest
import joblib
from matplotlib import pylab
from bnpy.util import digamma, as1D, argsort_bigtosmall_stable, is_sorted_bigtosmall
from bnpy.allocmodel.topics import OptimizerRhoOmegaBetter
from bnpy.util.StickBreakUtil import rho2beta
from bnpy.viz.PrintTopics import vec2str
from bnpy.allocmodel.topics.HDPTopicUtil import \
calcELBO_IgnoreTermsConstWRTrhoomegatheta
np.set_printoptions(precision=4, suppress=1, linewidth=140)
def reorder_rho(rho, bigtosmallIDs):
betaK = rho2beta(rho, returnSize='K')
newbetaK = betaK[bigtosmallIDs]
return OptimizerRhoOmegaBetter.beta2rho(newbetaK, rho.size), newbetaK
def calcAvgPiFromDocTopicCount(DocTopicCount):
estPi = DocTopicCount / DocTopicCount.sum(axis=1)[:,np.newaxis]
avgPi = np.sum(estPi, axis=0) / DocTopicCount.shape[0]
return avgPi
def mapToNewPos(curposIDs, bigtosmall):
''' Convert list of old ids to new positions after bigtosmall reordering.
Example
-------
>>> curposIDs = [0, 2, 4]
>>> N = [11, 9, 3, 1, 5]
>>> bigtosmall = argsort_bigtosmall_stable(N)
    >>> print(bigtosmall)
    [0 1 4 2 3]
    >>> newposIDs = mapToNewPos(curposIDs, bigtosmall)
    >>> print(newposIDs)
[0, 3, 2]
'''
newposIDs = np.zeros_like(curposIDs)
for posID in range(len(curposIDs)):
newposIDs[posID] = np.flatnonzero(bigtosmall == curposIDs[posID])[0]
return newposIDs.tolist()
def learn_rhoomega_fromFixedCounts(DocTopicCount=None,
nDoc=0,
canShuffleInit='byUsage',
canShuffle=None,
maxiter=5,
warmStart_rho=1,
alpha=None, gamma=None,
initrho=None, initomega=None, **kwargs):
assert nDoc == DocTopicCount.shape[0]
K = DocTopicCount.shape[1]
didShuffle = 0
if canShuffleInit:
if canShuffleInit.lower().count('byusage'):
print('INITIAL SORTING BY USAGE')
avgPi = calcAvgPiFromDocTopicCount(DocTopicCount)
bigtosmall = argsort_bigtosmall_stable(avgPi)
elif canShuffleInit.lower().count('bycount'):
print('INITIAL SORTING BY COUNT')
bigtosmall = argsort_bigtosmall_stable(DocTopicCount.sum(axis=0))
elif canShuffleInit.lower().count('random'):
print('INITIAL SORTING RANDOMLY')
PRNG = np.random.RandomState(0)
bigtosmall = np.arange(K)
PRNG.shuffle(bigtosmall)
else:
bigtosmall = np.arange(K)
# Now, sort.
if not np.allclose(bigtosmall, np.arange(K)):
DocTopicCount = DocTopicCount[:, bigtosmall]
didShuffle = 1
avgPi = calcAvgPiFromDocTopicCount(DocTopicCount)
sortedids = argsort_bigtosmall_stable(avgPi)
if canShuffleInit.lower().count('byusage'):
assert np.allclose(sortedids, np.arange(K))
# Find UIDs of comps to track
emptyUIDs = np.flatnonzero(DocTopicCount.sum(axis=0) < 0.0001)
if emptyUIDs.size >= 3:
firstEmptyUID = emptyUIDs.min()
lastEmptyUID = emptyUIDs.max()
        middleEmptyUID = emptyUIDs[len(emptyUIDs) // 2]
trackEmptyUIDs = [firstEmptyUID, middleEmptyUID, lastEmptyUID]
emptyLabels = ['first', 'middle', 'last']
elif emptyUIDs.size == 2:
trackEmptyUIDs = [emptyUIDs.min(), emptyUIDs.max()]
emptyLabels = ['first', 'last']
elif emptyUIDs.size == 1:
firstEmptyUID = emptyUIDs.min()
trackEmptyUIDs = [firstEmptyUID]
emptyLabels = ['first']
else:
trackEmptyUIDs = []
emptyLabels = []
trackActiveUIDs = list()
activeLabels = list()
# Track the top 5 active columns of DocTopicCount
for pos in range(0, np.minimum(5, K)):
if sortedids[pos] not in emptyUIDs:
trackActiveUIDs.append(sortedids[pos])
activeLabels.append('max+%d' % (pos))
# Find the minnonemptyID
for pos in range(K-1, 0, -1):
curid = sortedids[pos]
if curid not in emptyUIDs:
break
minnonemptyPos = pos
# Track the 5 smallest active columns of DocTopicCount
nBeyond5 = np.minimum(5, K - len(emptyUIDs) - 5)
for i in range(-1 * (nBeyond5-1), 1):
trackActiveUIDs.append(sortedids[minnonemptyPos + i])
activeLabels.append('min+%d' % (-1 * i))
assert np.all(avgPi[trackActiveUIDs] > 0)
assert np.allclose(avgPi[trackEmptyUIDs], 0.0)
assert is_sorted_bigtosmall(avgPi[trackActiveUIDs])
nDocToDisplay = np.minimum(nDoc, 10)
# Initialize rho
if initrho is None:
rho = OptimizerRhoOmegaBetter.make_initrho(K, nDoc, gamma)
else:
if didShuffle:
rho, _ = reorder_rho(initrho, bigtosmall)
else:
rho = initrho
# Initialize omega
if initomega is None:
omega = OptimizerRhoOmegaBetter.make_initomega(K, nDoc, gamma)
else:
omega = initomega
# ELBO value of initial state
Ltro = evalELBOandPrint(
rho=rho, omega=omega,
nDoc=nDoc,
DocTopicCount=DocTopicCount,
alpha=alpha, gamma=gamma,
msg='init',
)
Snapshots = dict()
Snapshots['DTCSum'] = list()
Snapshots['DTCUsage'] = list()
Snapshots['beta'] = list()
Snapshots['Lscore'] = list()
Snapshots['activeLabels'] = activeLabels
Snapshots['emptyLabels'] = emptyLabels
Snapshots['pos_trackActive'] = list()
Snapshots['pos_trackEmpty'] = list()
Snapshots['beta_trackActive'] = list()
Snapshots['beta_trackEmpty'] = list()
Snapshots['count_trackActive'] = list()
Snapshots['count_trackEmpty'] = list()
Snapshots['beta_trackRem'] = list()
LtroList = list()
LtroList.append(Ltro)
betaK = rho2beta(rho, returnSize="K")
iterid = 0
prevbetaK = np.zeros_like(betaK)
prevrho = rho.copy()
while np.sum(np.abs(betaK - prevbetaK)) > 0.0000001:
iterid += 1
if iterid > maxiter:
break
# Take Snapshots of Learned Params
Snapshots['Lscore'].append(Ltro)
Snapshots['DTCSum'].append(DocTopicCount.sum(axis=0))
Snapshots['DTCUsage'].append((DocTopicCount > 0.001).sum(axis=0))
Snapshots['beta'].append(betaK)
Snapshots['pos_trackActive'].append(trackActiveUIDs)
Snapshots['pos_trackEmpty'].append(trackEmptyUIDs)
Snapshots['beta_trackActive'].append(betaK[trackActiveUIDs])
Snapshots['beta_trackEmpty'].append(betaK[trackEmptyUIDs])
Snapshots['beta_trackRem'].append(1.0 - betaK.sum())
Snapshots['count_trackActive'].append(
DocTopicCount.sum(axis=0)[trackActiveUIDs])
Snapshots['count_trackEmpty'].append(
DocTopicCount.sum(axis=0)[trackEmptyUIDs])
# Sort by beta
didShuffle = 0
tlabel = '_t'
if iterid > 1 and canShuffle and canShuffle.lower().count('bybeta'):
bigtosmall = argsort_bigtosmall_stable(betaK)
if not np.allclose(bigtosmall, np.arange(K)):
trackActiveUIDs = mapToNewPos(trackActiveUIDs, bigtosmall)
trackEmptyUIDs = mapToNewPos(trackEmptyUIDs, bigtosmall)
rho, betaK = reorder_rho(rho, bigtosmall)
DocTopicCount = DocTopicCount[:, bigtosmall]
didShuffle = 1
tlabel = '_ts'
# Update theta
sumLogPiActiveVec, sumLogPiRemVec, LP = DocTopicCount_to_sumLogPi(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount,
alpha=alpha, gamma=gamma,
**kwargs)
# Show ELBO with freshly-optimized theta value.
Ltro = evalELBOandPrint(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount,
theta=LP['theta'],
thetaRem=LP['thetaRem'],
nDoc=nDoc,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
alpha=alpha, gamma=gamma, f=None,
msg=str(iterid) + tlabel,
)
LtroList.append(Ltro)
if not LtroList[-1] >= LtroList[-2]:
if didShuffle:
print('NOT MONOTONIC! just after theta update with SHUFFLE!')
else:
print('NOT MONOTONIC! just after theta standard update')
didELBODrop = 0
if canShuffle:
if canShuffle.lower().count('bysumlogpi'):
bigtosmall = argsort_bigtosmall_stable(
sumLogPiActiveVec)
elif canShuffle.lower().count('bycounts'):
bigtosmall = argsort_bigtosmall_stable(
DocTopicCount.sum(axis=0))
elif canShuffle.lower().count('byusage'):
estPi = DocTopicCount / DocTopicCount.sum(axis=1)[:,np.newaxis]
avgPi = np.sum(estPi, axis=0)
bigtosmall = argsort_bigtosmall_stable(avgPi)
else:
bigtosmall = np.arange(K)
if not np.allclose(bigtosmall, np.arange(K)):
trackActiveUIDs = mapToNewPos(trackActiveUIDs, bigtosmall)
trackEmptyUIDs = mapToNewPos(trackEmptyUIDs, bigtosmall)
rho, betaK = reorder_rho(rho, bigtosmall)
sumLogPiActiveVec = sumLogPiActiveVec[bigtosmall]
DocTopicCount = DocTopicCount[:,bigtosmall]
LP['theta'] = LP['theta'][:, bigtosmall]
didShuffle = 1
# Show ELBO with freshly-optimized rho value.
Ltro = evalELBOandPrint(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount,
theta=LP['theta'],
thetaRem=LP['thetaRem'],
nDoc=nDoc,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
alpha=alpha, gamma=gamma, f=None,
msg=str(iterid) + "_ss",
)
LtroList.append(Ltro)
if not LtroList[-1] >= LtroList[-2]:
print('NOT MONOTONIC! just after %s shuffle update!' % (
canShuffle))
didELBODrop = 1
prevrho[:] = rho
# Update rhoomega
if warmStart_rho:
initrho = rho
else:
initrho = None
rho, omega, f, Info = OptimizerRhoOmegaBetter.\
find_optimum_multiple_tries(
alpha=alpha,
gamma=gamma,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
nDoc=nDoc,
initrho=initrho,
initomega=omega,
approx_grad=1,
do_grad_omega=0,
)
prevbetaK[:] = betaK
betaK = rho2beta(rho, returnSize="K")
# Show ELBO with freshly-optimized rho value.
Ltro = evalELBOandPrint(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount,
theta=LP['theta'],
thetaRem=LP['thetaRem'],
nDoc=nDoc,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
alpha=alpha, gamma=gamma, f=f,
msg=str(iterid) + "_r",
)
LtroList.append(Ltro)
if not LtroList[-1] >= LtroList[-2]:
print('NOT MONOTONIC! just after rho update!')
if didELBODrop:
if LtroList[-1] >= LtroList[-3]:
print('Phew. Combined update of sorting then optimizing rho OK')
else:
print('WHOA! Combined update of sorting then' + \
' optimizing rho beta NOT MONOTONIC')
Snapshots['Lscore'].append(Ltro)
Snapshots['DTCSum'].append(DocTopicCount.sum(axis=0))
Snapshots['DTCUsage'].append((DocTopicCount > 0.001).sum(axis=0))
Snapshots['beta'].append(betaK)
Snapshots['pos_trackActive'].append(trackActiveUIDs)
Snapshots['pos_trackEmpty'].append(trackEmptyUIDs)
Snapshots['beta_trackActive'].append(betaK[trackActiveUIDs])
Snapshots['beta_trackEmpty'].append(betaK[trackEmptyUIDs])
Snapshots['beta_trackRem'].append(1.0 - betaK.sum())
Snapshots['count_trackActive'].append(
DocTopicCount.sum(axis=0)[trackActiveUIDs])
Snapshots['count_trackEmpty'].append(
DocTopicCount.sum(axis=0)[trackEmptyUIDs])
print('\nEmpty cluster ids (%d of %d)' % (
len(trackEmptyUIDs), len(emptyUIDs)))
print('-----------------')
print(' '.join(['% 10d' % (x) for x in trackEmptyUIDs]))
print('\nSelected active clusters to track')
print('---------------------------------')
print(' '.join(['% 10d' % (x) for x in trackActiveUIDs]))
print(' '.join(['% .3e' % (x) for x in avgPi[trackActiveUIDs]]))
print('\nDocTopicCount for %d of %d docs' % (nDocToDisplay, nDoc))
print('---------------------------------')
for n in range(nDocToDisplay):
print(' '.join([
'% 9.2f' % (x) for x in DocTopicCount[n, trackActiveUIDs]]))
print('\nFinal sumLogPiActiveVec')
print('---------------------------------')
print(' '.join(['% .3e' % (x) for x in sumLogPiActiveVec[trackActiveUIDs]]))
print('is sumLogPiActiveVec sorted?', \
is_sorted_bigtosmall(sumLogPiActiveVec))
return rho, omega, Snapshots
def evalELBOandPrint(nDoc=None,
theta=None, thetaRem=None,
DocTopicCount=None,
sumLogPiActiveVec=None,
sumLogPiRemVec=None,
alpha=None, gamma=None,
rho=None, omega=None, msg='', f=None, **kwargs):
''' Check on the objective.
'''
L = calcELBO_IgnoreTermsConstWRTrhoomegatheta(
nDoc=nDoc,
alpha=alpha,
gamma=gamma,
DocTopicCount=DocTopicCount,
theta=theta,
thetaRem=thetaRem,
sumLogPi=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
rho=rho,
omega=omega)
if sumLogPiActiveVec is None:
sumLogPiActiveVec, sumLogPiRemVec, LP = DocTopicCount_to_sumLogPi(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount, alpha=alpha, gamma=gamma)
Lrhoomega = OptimizerRhoOmegaBetter.negL_rhoomega_viaHDPTopicUtil(
nDoc=nDoc,
alpha=alpha,
gamma=gamma,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
rho=rho,
omega=omega)
if f is None:
f = Lrhoomega
assert np.allclose(f, Lrhoomega)
print("%10s Ltro= % .8e Lro= % .5e fro= % .5e" % (
msg, L, Lrhoomega, f))
return L
def DocTopicCount_to_sumLogPi(
rho=None, omega=None,
betaK=None, DocTopicCount=None, alpha=None, gamma=None, **kwargs):
'''
Returns
-------
f : scalar
'''
K = rho.size
if betaK is None:
betaK = rho2beta(rho, returnSize="K")
theta = DocTopicCount + alpha * betaK[np.newaxis,:]
thetaRem = alpha * (1 - np.sum(betaK))
assert np.allclose(theta.sum(axis=1) + thetaRem,
alpha + DocTopicCount.sum(axis=1))
digammaSum = digamma(theta.sum(axis=1) + thetaRem)
ElogPi = digamma(theta) - digammaSum[:,np.newaxis]
ElogPiRem = digamma(thetaRem) - digammaSum
sumLogPiActiveVec = np.sum(ElogPi, axis=0)
sumLogPiRemVec = np.zeros(K)
sumLogPiRemVec[-1] = np.sum(ElogPiRem)
LP = dict(
ElogPi=ElogPi,
ElogPiRem=ElogPiRem,
digammaSumTheta=digammaSum,
theta=theta,
thetaRem=thetaRem)
return sumLogPiActiveVec, sumLogPiRemVec, LP
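# Illustrative sketch (not part of the original script): shape conventions of
# DocTopicCount_to_sumLogPi for a made-up 5-document, 3-topic count matrix; the
# alpha/gamma values are arbitrary placeholders.
def _example_sumLogPi_shapes(K=3, nDoc=5, alpha=0.5, gamma=10.0):
    rho = OptimizerRhoOmegaBetter.make_initrho(K, nDoc, gamma)
    omega = OptimizerRhoOmegaBetter.make_initomega(K, nDoc, gamma)
    DocTopicCount = np.random.rand(nDoc, K) * 10
    sumActive, sumRem, LP = DocTopicCount_to_sumLogPi(
        rho=rho, omega=omega, DocTopicCount=DocTopicCount,
        alpha=alpha, gamma=gamma)
    # one entry per active topic; only the last entry of sumRem is non-zero
    return sumActive.shape, sumRem.shape, LP['theta'].shape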
def f_DocTopicCount(
rho=None, omega=None,
betaK=None, DocTopicCount=None, alpha=None, gamma=None, **kwargs):
''' Evaluate the objective f for rho/omega optimization.
Returns
-------
f : scalar
'''
K = rho.size
sumLogPiActiveVec, sumLogPiRemVec, LP = DocTopicCount_to_sumLogPi(
rho=rho, omega=omega,
DocTopicCount=DocTopicCount, alpha=alpha, gamma=gamma,
**kwargs)
f = OptimizerRhoOmegaBetter.negL_rhoomega(
rho=rho, omega=omega,
sumLogPiActiveVec=sumLogPiActiveVec,
sumLogPiRemVec=sumLogPiRemVec,
alpha=alpha, gamma=gamma,
nDoc=DocTopicCount.shape[0],
approx_grad=1)
return f
def makeDocTopicCount(
nDoc=10, K=5, Kempty=0,
seed=0, minK_d=1, maxK_d=5, **kwargs):
'''
Returns
-------
DocTopicCount : 2D array, nDoc x K
'''
    PRNG = np.random.RandomState(seed)
from scipy.optimize import curve_fit
from hydroDL.master import basins
from hydroDL.app import waterQuality, relaCQ
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
from hydroDL import utils
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
fileSiteNoLst = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst')
siteNoLst = pd.read_csv(fileSiteNoLst, header=None, dtype=str)[0].tolist()
dfHBN = pd.read_csv(os.path.join(kPath.dirData, 'USGS', 'inventory', 'HBN.csv'), dtype={
'siteNo': str}).set_index('siteNo')
siteNoHBN = [siteNo for siteNo in dfHBN.index.tolist() if siteNo in siteNoLst]
pdfArea = gageII.readData(varLst=['DRAIN_SQKM'], siteNoLst=siteNoHBN)
unitConv = 0.3048**3*365*24*60*60/1000**2
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoHBN)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
def func(x, a, b):
return a*1/(x/b+1)
# cal dw
code = '00955'
pMat2 = np.ndarray([len(siteNoHBN), 2])
for k, siteNo in enumerate(siteNoHBN):
area = pdfArea.loc[siteNo]['DRAIN_SQKM']
dfC = usgs.readSample(siteNo, codeLst=usgs.codeLst)
dfQ = usgs.readStreamflow(siteNo)
df = dfC.join(dfQ)
t = df.index.values
q = df['00060_00003'].values/area*unitConv
c = df[code].values
ceq, dw, y = relaCQ.kateModel2(q, c)
pMat2[k, 0] = ceq
pMat2[k, 1] = dw
def funcMap():
figM, axM = plt.subplots(1, 1, figsize=(8, 6))
axplot.mapPoint(axM, lat, lon, pMat2[:, 1], s=12)
figP, axP = plt.subplots(2, 1, figsize=(8, 6))
axP2 = np.array([axP[0], axP[0].twinx(), axP[1]])
return figM, axM, figP, axP2, lon, lat
def funcPoint(iP, axP):
    siteNo = siteNoHBN[iP]
    area = pdfArea.loc[siteNo]['DRAIN_SQKM']
    dfC = usgs.readSample(siteNo, codeLst=usgs.codeLst)
dfQ = usgs.readStreamflow(siteNo)
df = dfC.join(dfQ)
t = df.index.values
q = df['00060_00003'].values/area*unitConv
c = df[code].values
[q, c], ind = utils.rmNan([q, c])
t = t[ind]
qAll = dfQ['00060_00003'].values
qT = dfQ.index.values
axplot.plotTS(axP[0], qT, qAll, cLst='b', styLst='--')
axplot.plotTS(axP[1], t, c)
axP[2].plot(np.log(q), c, 'k*')
x = 10**np.linspace(np.log10(np.min(q[q > 0])),
                        np.log10(np.max(q[~np.isnan(q)])))
import datetime
import numpy as np
from xarray import DataArray
def make_simple_sample_data_2D(data_type='iris'):
"""
function creating a simple dataset to use in tests for tobac.
    The grid has a grid spacing of 1km in both horizontal directions and 100 grid cells in the x direction and 50 in the y direction.
    Time resolution is 1 minute and the total length of the dataset is 100 minutes around an arbitrary date (2000-01-01 12:00).
    The longitude and latitude coordinates are added as 2D aux coordinates and are arbitrary, but in a realistic range.
    The data contains a single blob travelling on a linear trajectory through the dataset for part of the time.
    :param data_type: 'iris' or 'xarray' to choose the type of dataset to produce
:return: sample dataset as an Iris.Cube.cube or xarray.DataArray
"""
from iris.cube import Cube
from iris.coords import DimCoord,AuxCoord
t_0=datetime.datetime(2000,1,1,12,0,0)
x=np.arange(0,100e3,1000)
y=np.arange(0,50e3,1000)
t=t_0+np.arange(0,100,1)*datetime.timedelta(minutes=1)
xx,yy=np.meshgrid(x,y)
t_temp=np.arange(0,60,1)
track1_t=t_0+t_temp*datetime.timedelta(minutes=1)
x_0_1=10e3
y_0_1=10e3
track1_x=x_0_1+30*t_temp*60
track1_y=y_0_1+14*t_temp*60
track1_magnitude=10*np.ones(track1_x.shape)
data=np.zeros((t.shape[0],y.shape[0],x.shape[0]))
for i_t,t_i in enumerate(t):
if np.any(t_i in track1_t):
x_i=track1_x[track1_t==t_i]
y_i=track1_y[track1_t==t_i]
mag_i=track1_magnitude[track1_t==t_i]
data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
t_start=datetime.datetime(1970,1,1,0,0)
t_points=(t-t_start).astype("timedelta64[ms]").astype(int) / 1000
t_coord=DimCoord(t_points,standard_name='time',var_name='time',units='seconds since 1970-01-01 00:00')
x_coord=DimCoord(x,standard_name='projection_x_coordinate',var_name='x',units='m')
y_coord=DimCoord(y,standard_name='projection_y_coordinate',var_name='y',units='m')
lat_coord=AuxCoord(24+1e-5*xx,standard_name='latitude',var_name='latitude',units='degree')
lon_coord=AuxCoord(150+1e-5*yy,standard_name='longitude',var_name='longitude',units='degree')
sample_data=Cube(data,dim_coords_and_dims=[(t_coord, 0),(y_coord, 1),(x_coord, 2)],aux_coords_and_dims=[(lat_coord, (1,2)),(lon_coord, (1,2))],var_name='w',units='m s-1')
if data_type=='xarray':
sample_data=DataArray.from_iris(sample_data)
return sample_data
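# Illustrative usage sketch (not part of the original module): build the simple test
# field either as an iris cube or as an xarray DataArray.
#
#   >>> cube = make_simple_sample_data_2D(data_type='iris')
#   >>> da = make_simple_sample_data_2D(data_type='xarray')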
def make_sample_data_2D_3blobs(data_type='iris'):
    """
    function creating a simple dataset to use in tests for tobac.
    The grid has a grid spacing of 1km in both horizontal directions and 100 grid cells in the x direction and 200 in the y direction.
    Time resolution is 1 minute and the total length of the dataset is 100 minutes around an arbitrary date (2000-01-01 12:00).
    The longitude and latitude coordinates are added as 2D aux coordinates and are arbitrary, but in a realistic range.
    The data contains three individual blobs travelling on a linear trajectory through the dataset for part of the time.
    :param data_type: 'iris' or 'xarray' to choose the type of dataset to produce
    :return: sample dataset as an Iris.Cube.cube or xarray.DataArray
    """
    from iris.cube import Cube
    from iris.coords import DimCoord,AuxCoord
t_0=datetime.datetime(2000,1,1,12,0,0)
x=np.arange(0,100e3,1000)
y=np.arange(0,200e3,1000)
t=t_0+np.arange(0,100,1)*datetime.timedelta(minutes=1)
xx,yy=np.meshgrid(x,y)
t_temp=np.arange(0,60,1)
track1_t=t_0+t_temp*datetime.timedelta(minutes=1)
x_0_1=10e3
y_0_1=10e3
track1_x=x_0_1+30*t_temp*60
track1_y=y_0_1+14*t_temp*60
track1_magnitude=10*np.ones(track1_x.shape)
t_temp=np.arange(0,30,1)
track2_t=t_0+(t_temp+40)*datetime.timedelta(minutes=1)
x_0_2=20e3
y_0_2=10e3
track2_x=x_0_2+24*(t_temp*60)**2/1000
track2_y=y_0_2+12*t_temp*60
track2_magnitude=20*np.ones(track2_x.shape)
t_temp=np.arange(0,20,1)
track3_t=t_0+(t_temp+50)*datetime.timedelta(minutes=1)
x_0_3=70e3
y_0_3=110e3
track3_x=x_0_3+20*(t_temp*60)**2/1000
track3_y=y_0_3+20*t_temp*60
track3_magnitude=15*np.ones(track3_x.shape)
data=np.zeros((t.shape[0],y.shape[0],x.shape[0]))
for i_t,t_i in enumerate(t):
if np.any(t_i in track1_t):
x_i=track1_x[track1_t==t_i]
y_i=track1_y[track1_t==t_i]
mag_i=track1_magnitude[track1_t==t_i]
data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
if np.any(t_i in track2_t):
x_i=track2_x[track2_t==t_i]
y_i=track2_y[track2_t==t_i]
mag_i=track2_magnitude[track2_t==t_i]
data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
if np.any(t_i in track3_t):
x_i=track3_x[track3_t==t_i]
y_i=track3_y[track3_t==t_i]
mag_i=track3_magnitude[track3_t==t_i]
            data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
t_start=datetime.datetime(1970,1,1,0,0)
t_points=(t-t_start).astype("timedelta64[ms]").astype(int) / 1000
t_coord=DimCoord(t_points,standard_name='time',var_name='time',units='seconds since 1970-01-01 00:00')
x_coord=DimCoord(x,standard_name='projection_x_coordinate',var_name='x',units='m')
y_coord=DimCoord(y,standard_name='projection_y_coordinate',var_name='y',units='m')
lat_coord=AuxCoord(24+1e-5*xx,standard_name='latitude',var_name='latitude',units='degree')
lon_coord=AuxCoord(150+1e-5*yy,standard_name='longitude',var_name='longitude',units='degree')
sample_data=Cube(data,dim_coords_and_dims=[(t_coord, 0),(y_coord, 1),(x_coord, 2)],aux_coords_and_dims=[(lat_coord, (1,2)),(lon_coord, (1,2))],var_name='w',units='m s-1')
if data_type=='xarray':
sample_data=DataArray.from_iris(sample_data)
return sample_data
def make_sample_data_2D_3blobs_inv(data_type='iris'):
"""
    function creating a version of the dataset created in the function make_sample_data_2D_3blobs, but with switched coordinate order for the horizontal coordinates,
    for tests to ensure that this does not affect the results
    :param data_type: 'iris' or 'xarray' to choose the type of dataset to produce
:return: sample dataset as an Iris.Cube.cube or xarray.DataArray
"""
from iris.cube import Cube
from iris.coords import DimCoord,AuxCoord
t_0=datetime.datetime(2000,1,1,12,0,0)
x=np.arange(0,100e3,1000)
y=np.arange(0,200e3,1000)
t=t_0+np.arange(0,100,1)*datetime.timedelta(minutes=1)
yy,xx=np.meshgrid(y,x)
t_temp=np.arange(0,60,1)
track1_t=t_0+t_temp*datetime.timedelta(minutes=1)
x_0_1=10e3
y_0_1=10e3
track1_x=x_0_1+30*t_temp*60
track1_y=y_0_1+14*t_temp*60
track1_magnitude=10*np.ones(track1_x.shape)
t_temp=np.arange(0,30,1)
track2_t=t_0+(t_temp+40)*datetime.timedelta(minutes=1)
x_0_2=20e3
y_0_2=10e3
track2_x=x_0_2+24*(t_temp*60)**2/1000
track2_y=y_0_2+12*t_temp*60
track2_magnitude=20*np.ones(track2_x.shape)
t_temp=np.arange(0,20,1)
track3_t=t_0+(t_temp+50)*datetime.timedelta(minutes=1)
x_0_3=70e3
y_0_3=110e3
track3_x=x_0_3+20*(t_temp*60)**2/1000
track3_y=y_0_3+20*t_temp*60
track3_magnitude=15*np.ones(track3_x.shape)
data=np.zeros((t.shape[0],x.shape[0],y.shape[0]))
for i_t,t_i in enumerate(t):
if np.any(t_i in track1_t):
x_i=track1_x[track1_t==t_i]
y_i=track1_y[track1_t==t_i]
mag_i=track1_magnitude[track1_t==t_i]
data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
if np.any(t_i in track2_t):
x_i=track2_x[track2_t==t_i]
y_i=track2_y[track2_t==t_i]
mag_i=track2_magnitude[track2_t==t_i]
data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
        if np.any(t_i in track3_t):
            x_i=track3_x[track3_t==t_i]
            y_i=track3_y[track3_t==t_i]
            mag_i=track3_magnitude[track3_t==t_i]
            data[i_t]=data[i_t]+mag_i*np.exp(-np.power(xx - x_i,2.) / (2 * np.power(10e3, 2.)))*np.exp(-np.power(yy - y_i, 2.) / (2 * np.power(10e3, 2.)))
import numpy as np
from rail.evaluation.metrics.pit import PIT, PITOutRate, PITKS, PITCvM, PITAD
from rail.evaluation.metrics.cdeloss import CDELoss
import qp
# values for metrics
OUTRATE = 0.0
KSVAL = 0.367384
CVMVAL = 20.63155
ADVAL_ALL = 82.51480
ADVAL_CUT = 1.10750
CDEVAL = -4.31200
def construct_test_ensemble():
np.random.seed(87)
nmax = 2.5
NPDF = 399
true_zs = np.random.uniform(high=nmax, size=NPDF)
locs = np.expand_dims(true_zs + np.random.normal(0.0, 0.01, NPDF), -1)
scales = np.ones((NPDF, 1)) * 0.1 + np.random.uniform(size=(NPDF, 1)) * .05
n_ens = qp.Ensemble(qp.stats.norm, data=dict(loc=locs, scale=scales))
zgrid = np.linspace(0, nmax, 301)
grid_ens = n_ens.convert_to(qp.interp_gen, xvals=zgrid)
return zgrid, true_zs, grid_ens
def test_pit_metrics():
zgrid, zspec, pdf_ens = construct_test_ensemble()
pit_obj = PIT(pdf_ens, zspec)
pit_vals = pit_obj._pit_samps
quant_grid = np.linspace(0, 1, 101)
quant_ens, metametrics = pit_obj.evaluate(quant_grid)
out_rate = PITOutRate(pit_vals, quant_ens).evaluate()
assert np.isclose(out_rate, OUTRATE)
ks_obj = PITKS(pit_vals, quant_ens)
ks_stat = ks_obj.evaluate().statistic
assert np.isclose(ks_stat, KSVAL)
cvm_obj = PITCvM(pit_vals, quant_ens)
cvm_stat = cvm_obj.evaluate().statistic
assert np.isclose(cvm_stat, CVMVAL)
ad_obj = PITAD(pit_vals, quant_ens)
all_ad_stat = ad_obj.evaluate().statistic
cut_ad_stat = ad_obj.evaluate(pit_min=0.6, pit_max=0.9).statistic
    assert np.isclose(all_ad_stat, ADVAL_ALL)
    assert np.isclose(cut_ad_stat, ADVAL_CUT)
#!/usr/bin/env python
"""
@package ion_functions.test.opt_functions
@file ion_functions/test/opt_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Unit tests for opt_functions module
"""
from nose.plugins.attrib import attr
from ion_functions.test.base_test import BaseUnitTestCase
import numpy as np
from ion_functions.data import opt_functions as optfunc
@attr('UNIT', group='func')
class TestOptFunctionsUnit(BaseUnitTestCase):
def test_opt_functions_OPTAA_sub_functions(self):
"""
Test the OPTAA function subroutines in the opt_functions.py module.
Values based on test data in DPSs and available on Alfresco.
OOI (2013). Data Product Specification for Optical Beam Attenuation
Coefficient. Document Control Number 1341-00690.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00690_Data_Product_SPEC_OPTATTN_OOI.pdf)
OOI (2013). Data Product Specification for Optical Absorption
Coefficient. Document Control Number 1341-00700.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00700_Data_Product_SPEC_OPTABSN_OOI.pdf)
OOI (2014). OPTAA Unit Test. 1341-00700_OPTABSN Artifact.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00700_OPTABSN >>
OPTAA_unit_test.xlsx)
Implemented by <NAME>, April 2013
Modified by <NAME>, 20-Feb-2014
The opt_scatter_corr function was modified to trap out commonly
occurring instances for when the scattering correction to absorption
should not be applied (in these cases the additive correction term is
set to 0). This requires that the unit test values for the abs and c
data be physically reasonable (c >= abs). Therefore the unit test c
data were changed by adding 1.0 to the c clear water offset.
"""
# test inputs
tbins = np.array([14.5036, 15.5200, 16.4706, 17.4833, 18.4831, 19.5196, 20.5565])
tarr = np.array([
[-0.004929, -0.004611, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004611, -0.004418, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -0.004418, -0.004355, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, -0.004355, -0.004131, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, -0.004131, -0.003422, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, -0.003422, -0.002442]
])
sig = np.array([50, 150, 250, 350, 450, 495])
ref = np.array([500, 500, 500, 500, 500, 500])
traw = np.array([48750, 48355, 47950, 47535, 47115, 46684])
wlngth = np.array([500., 550., 600., 650., 700., 715.])
Tcal = 20.
T = np.array([4., 8., 12., 16., 20., 24.])
PS = np.array([10., 15., 20., 25., 30., 35])
# clear water offsets
c_off = 1.01 # previous tests used 0.01
a_off = 0.1
# also test case of user selected reference wavelength for abs scatter correction
ref_wave = 695 # nearest wavelength (700nm) should become reference wavelength
# expected outputs
tint = np.array([15., 16., 17., 18., 19., 20.])
deltaT = np.array([-0.0048, -0.0045, -0.0044, -0.0042, -0.0038, -0.0030])
cpd = np.array([10.2251, 5.8304, 3.7870, 2.4409, 1.4352, 1.0532])
apd = np.array([9.3151, 4.9204, 2.8770, 1.5309, 0.5252, 0.1432])
cpd_ts = np.array([10.226025, 5.831245, 3.795494, 2.441203, 1.440651, 1.044652])
apd_ts = np.array([9.3155, 4.9206, 2.8848, 1.5303, 0.5297, 0.1338])
apd_ts_s_ref715 = np.array([9.181831, 4.786862, 2.751082, 1.396591, 0.396010, 0.000000])
apd_ts_s_ref700 = np.array([8.785990, 4.390950, 2.355159, 1.000592, 0.000000, -0.396015])
# compute beam attenuation and optical absorption values
dgC = np.zeros(6)
dT = np.zeros(6)
c = np.zeros(6)
c_ts = np.zeros(6)
a = np.zeros(6)
a_ts = np.zeros(6)
a_ts_sdef = np.zeros(6)
a_ts_s700 = np.zeros(6)
for i in range(6):
dgC[i] = optfunc.opt_internal_temp(traw[i])
c[i], dT[i] = optfunc.opt_pd_calc(ref[i], sig[i], c_off, dgC[i], tbins, tarr[i])
c_ts[i] = optfunc.opt_tempsal_corr('c', c[i], wlngth[i], Tcal, T[i], PS[i])
a[i], foo = optfunc.opt_pd_calc(ref[i], sig[i], a_off, dgC[i], tbins, tarr[i])
a_ts[i] = optfunc.opt_tempsal_corr('a', a[i], wlngth[i], Tcal, T[i], PS[i])
# the scatter-correction-to-absorption check must be done outside the loop:
# for each iteration within the loop, abs and c for one wavelength are
# processed. because there is only one wavelength of data, that wavelength
# also becomes the reference wavelength, in which case the scatter-corrected
# abs value is calculated to be a - (a/(c-a)) * (c-a) = identically 0 for
# each iteration wavelength.
# case: default reference wavelength (715nm) used for scatter correction to abs.
a_ts_sdef = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth)
# case: user-selected reference wavelength (695nm --> 700nm is closest)
a_ts_s700 = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth, ref_wave)
# compare calculated results to expected
np.testing.assert_allclose(dgC, tint, rtol=0.1, atol=0.1)
np.testing.assert_allclose(dT, deltaT, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(c, cpd, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(a, apd, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(c_ts, cpd_ts, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(a_ts, apd_ts, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(a_ts_sdef, apd_ts_s_ref715, rtol=1e-4, atol=1e-4)
np.testing.assert_allclose(a_ts_s700, apd_ts_s_ref700, rtol=1e-4, atol=1e-4)
# now test the traps set for the cases in which unphysical scatter correction
# calculations would be applied to the absorption data if not for the traps.
a_ts_save = np.copy(a_ts)
c_ts_save = np.copy(c_ts)
# case: if a_ref < 0, do not apply the scatter correction to abs.
# subcase: default reference wavelength
a_ts[-1] = -0.01
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# subcase: user selected reference wavelength
a_ts = np.copy(a_ts_save)
a_ts[-2] = -0.01
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth, ref_wave)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# case: if a_ref > 0 but c_ref - a_ref < 0, do not apply the scatter correction to abs.
# subcase: default reference wavelength
a_ts = np.copy(a_ts_save)
a_ts[-1] = 0.01
c_ts[-1] = 0.005
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# subcase: user selected reference wavelength
a_ts = np.copy(a_ts_save)
c_ts = np.copy(c_ts_save)
a_ts[-2] = 0.01
c_ts[-2] = 0.005
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth, ref_wave)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# case: when both a_ref < 0 and c_ref - a_ref < 0, the scatter ratio does have
# the correct sign; however, the scatter correction to absorption should not be
# applied. this test is included because it is possible to code for the two
# traps above without catching this case.
# subcase: default reference wavelength
a_ts = np.copy(a_ts_save)
c_ts = np.copy(c_ts_save)
a_ts[-1] = -0.01
c_ts[-1] = -0.02
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# subcase: user selected reference wavelength
a_ts = np.copy(a_ts_save)
c_ts = np.copy(c_ts_save)
a_ts[-2] = -0.01
c_ts[-2] = -0.02
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth, ref_wave)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# case: when c_ref - a_ref = 0, do not apply the scatter correction to abs.
# subcase: default reference wavelength
a_ts = np.copy(a_ts_save)
c_ts = np.copy(c_ts_save)
a_ts[-1] = 0.01
c_ts[-1] = 0.01
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
# subcase: user selected reference wavelength
a_ts = np.copy(a_ts_save)
c_ts = np.copy(c_ts_save)
a_ts[-2] = 0.01
c_ts[-2] = 0.01
a_ts_out = np.zeros(6)
a_ts_out = optfunc.opt_scatter_corr(a_ts, wlngth, c_ts, wlngth, ref_wave)
np.testing.assert_allclose(a_ts_out, a_ts, rtol=1e-8, atol=1e-8)
def test_opt_functions_OPTAA_wrapper_functions(self):
"""
Test the OPTAA wrapper functions in the opt_functions.py module.
Use realistically shaped ac-s data arrays in the calling arguments:
offsets are dimensioned at the number of wavelengths;
for internal temperature, L1 temperature, and salinity,
one value per a-c dataset.
Values based on test data in DPSs and available on Alfresco.
OOI (2013). Data Product Specification for Optical Beam Attenuation
Coefficient. Document Control Number 1341-00690.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00690_Data_Product_SPEC_OPTATTN_OOI.pdf)
OOI (2013). Data Product Specification for Optical Absorption
Coefficient. Document Control Number 1341-00700.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00700_Data_Product_SPEC_OPTABSN_OOI.pdf)
OOI (2014). OPTAA Unit Test. 1341-00700_OPTABSN Artifact.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00700_OPTABSN >>
OPTAA_unit_test.xlsx)
Implemented by <NAME>, 21-Feb-2014. Additional unit test data
constructed as documented in the OPTAA Unit Test document artifact
reference above.
2015-04-17: <NAME>. Changed signal and reference count input from float to fix.
"""
# test inputs:
tbins = np.array([14.5036, 15.5200, 16.4706, 17.4833, 18.4831, 19.5196, 20.5565])
# nrows of tarr = length(wlngth); ncols of tarr = length(tbins)
tarr = np.array([
[0.0, -0.004929, -0.004611, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004611, -0.004418, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004418, -0.004355, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004355, -0.004131, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004131, -0.003422, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.003422, -0.002442, 0.0, 0.0, 0.0, 0.0]
])
# a_off and c_off must have same dimensions as wlngth.
wlngth = np.array([500., 550., 600., 650., 700., 715.])
c_sig = np.array([150, 225, 200, 350, 450, 495])
c_ref = np.array([550, 540, 530, 520, 510, 500])
c_off = np.array([1.35, 1.30, 1.25, 1.20, 1.15, 1.10])
a_sig = np.array([250, 300, 210, 430, 470, 495])
a_ref = np.array([450, 460, 470, 480, 490, 500])
a_off = np.array([0.35, 0.30, 0.25, 0.20, 0.15, 0.10])
# traw, T, and PS must be scalar (before vectorization).
traw = 48355
Tcal = 20.0
T = 12.0
PS = 35.0
# also test case of user selected reference wavelength for abs scatter correction
ref_wave = 695 # nearest wavelength (700nm) should become reference wavelength
# expected outputs
cpd_ts = np.array([6.553646, 4.807949, 5.161655, 2.788220, 1.666362, 1.184530], ndmin=2)
apd_ts_s_ref715 = np.array([1.999989, 1.501766, 3.177048, 0.249944, 0.086438, 0.000000],
ndmin=2)
apd_ts_s_ref700 = np.array([1.750859, 1.320885, 3.068470, 0.111075, 0.000000, -0.064806],
ndmin=2)
# beam attenuation (beam c) wrapper test
c_ts = np.zeros(6)
c_ts = optfunc.opt_beam_attenuation(c_ref, c_sig, traw, wlngth, c_off, Tcal,
tbins, tarr, T, PS)
np.testing.assert_allclose(c_ts, cpd_ts, rtol=1e-6, atol=1e-6)
# absorption wrapper test
# case: default reference wavelength for scatter correction
a_ts_sdef = np.zeros(6)
a_ts_sdef = optfunc.opt_optical_absorption(a_ref, a_sig, traw, wlngth, a_off, Tcal,
tbins, tarr, cpd_ts, wlngth, T, PS)
np.testing.assert_allclose(a_ts_sdef, apd_ts_s_ref715, rtol=1e-6, atol=1e-6)
# case: user selected reference wavelength for scatter correction
a_ts_s700 = np.zeros(6)
a_ts_s700 = optfunc.opt_optical_absorption(a_ref, a_sig, traw, wlngth, a_off, Tcal,
tbins, tarr, cpd_ts, wlngth, T, PS, ref_wave)
np.testing.assert_allclose(a_ts_s700, apd_ts_s_ref700, rtol=1e-6, atol=1e-6)
def test_opt_functions_OPTAA_a_and_c_wavelength_sets(self):
"""
Test the OPTAA wrapper functions in the opt_functions.py module.
Test a-c dataset with different wavelength values for the absorption versus
beam c optical channels. Included to test that the c optical values are correctly
interpolated onto the abs wavelengths in the scatter correction algorithm.
OOI (2013). Data Product Specification for Optical Beam Attenuation
Coefficient. Document Control Number 1341-00690.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00690_Data_Product_SPEC_OPTATTN_OOI.pdf)
OOI (2013). Data Product Specification for Optical Absorption
Coefficient. Document Control Number 1341-00700.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00700_Data_Product_SPEC_OPTABSN_OOI.pdf)
OOI (2014). OPTAA Unit Test. 1341-00700_OPTABSN Artifact.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00700_OPTABSN >>
OPTAA_unit_test.xlsx)
Implemented by <NAME>, 26-Feb-2014. Additional unit test data
constructed as documented in the OPTAA Unit Test document artifact
reference above.
2015-04-17: <NAME>. Changed signal and reference count input from float to fix.
"""
# test inputs:
tbins = np.array([14.5036, 15.5200, 16.4706, 17.4833, 18.4831, 19.5196, 20.5565])
# nrows of tarr = length(wlngth); ncols of tarr = length(tbins)
tarr = np.array([
[0.0, -0.004929, -0.004611, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004611, -0.004418, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004418, -0.004355, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004355, -0.004131, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004131, -0.003422, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.003422, -0.002442, 0.0, 0.0, 0.0, 0.0]
])
# a_off and c_off must have the same dimensions as awlngth and cwlngth, respectively.
cwlngth = np.array([510., 540., 580., 630., 670., 710.])
awlngth = np.array([500., 550., 600., 650., 700., 715.])
c_sig = np.array([150, 225, 200, 350, 450, 495])
c_ref = np.array([550, 540, 530, 520, 510, 500])
c_off = np.array([1.35, 1.30, 1.25, 1.20, 1.15, 1.10])
a_sig = np.array([250, 300, 210, 430, 470, 495])
a_ref = np.array([450, 460, 470, 480, 490, 500])
a_off = np.array([0.35, 0.30, 0.25, 0.20, 0.15, 0.10])
# traw, T, and PS must be scalar (before vectorization).
traw = 48355
Tcal = 20.0
T = 12.0
PS = 35.0
# also test case of user selected reference wavelength for abs scatter correction
ref_wave = 695 # nearest abs wavelength (700nm) should become reference wavelength
# expected outputs
cpd_ts = np.array([6.553771, 4.807914, 5.156010, 2.788715, 1.655607, 1.171965], ndmin=2)
apd_ts_s_ref715 = np.array([1.990992, 1.479089, 3.350109, 0.350108, 0.152712, 0.000000],
ndmin=2)
apd_ts_s_ref700 = np.array([1.379858, 1.021574, 3.235057, 0.099367, 0.000000, -0.156972],
ndmin=2)
# beam attenuation (beam c) wrapper test
c_ts = np.zeros(6)
c_ts = optfunc.opt_beam_attenuation(c_ref, c_sig, traw, cwlngth, c_off, Tcal,
tbins, tarr, T, PS)
np.testing.assert_allclose(c_ts, cpd_ts, rtol=1e-6, atol=1e-6)
# absorption wrapper test
# case: default reference wavelength for scatter correction
a_ts_sdef = np.zeros(6)
a_ts_sdef = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS)
np.testing.assert_allclose(a_ts_sdef, apd_ts_s_ref715, rtol=1e-6, atol=1e-6)
# case: user selected reference wavelength for scatter correction
a_ts_s700 = np.zeros(6)
a_ts_s700 = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS, ref_wave)
np.testing.assert_allclose(a_ts_s700, apd_ts_s_ref700, rtol=1e-6, atol=1e-6)
def test_opt_functions_OPTAA_vectorization(self):
"""
Test the vectorization of the OPTAA wrapper functions in the opt_functions.py module.
OOI (2013). Data Product Specification for Optical Beam Attenuation
Coefficient. Document Control Number 1341-00690.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00690_Data_Product_SPEC_OPTATTN_OOI.pdf)
OOI (2013). Data Product Specification for Optical Absorption
Coefficient. Document Control Number 1341-00700.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00700_Data_Product_SPEC_OPTABSN_OOI.pdf)
OOI (2014). OPTAA Unit Test. 1341-00700_OPTABSN Artifact.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00700_OPTABSN >>
OPTAA_unit_test.xlsx)
Implemented by <NAME>, 05-Mar-2014. Additional unit test data
constructed as documented in the OPTAA Unit Test document artifact
reference above.
2015-04-17: <NAME>. Changed signal and reference count input from float to fix.
Replaced 'exec' statements.
2015-04-21: <NAME>. Corrected input data shapes to current CI implementation.
Added scalar time record case.
"""
### set test inputs: scalar (that is, 1) time record case
# native scalars will come into the DPA with shape (1,)
traw = np.array([48355])
Tcal = np.array([20.0])
T = np.array([12.0])
PS = np.array([35.0])
# native 1D arrays will come in as 2D row vectors
cwlngth = np.array([[510., 540., 580., 630., 670., 710.]])
awlngth = np.array([[500., 550., 600., 650., 700., 715.]])
c_sig = np.array([[150, 225, 200, 350, 450, 495]])
c_ref = np.array([[550, 540, 530, 520, 510, 500]])
c_off = np.array([[1.35, 1.30, 1.25, 1.20, 1.15, 1.10]])
a_sig = np.array([[250, 300, 210, 430, 470, 495]])
a_ref = np.array([[450, 460, 470, 480, 490, 500]])
a_off = np.array([[0.35, 0.30, 0.25, 0.20, 0.15, 0.10]])
tbins = np.array([[14.5036, 15.5200, 16.4706, 17.4833, 18.4831, 19.5196, 20.5565]])
# native 2D arrays will come in as 3D arrays, for this single time record case;
# a singleton dimension will be prepended to the native 2D array.
# native dimensions of tarr are (nwavelengths, ntempbins),
# so that tarr.shape = (1,6,7)
tarr = np.array([[
[0.0, -0.004929, -0.004611, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004611, -0.004418, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004418, -0.004355, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004355, -0.004131, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.004131, -0.003422, 0.0, 0.0, 0.0, 0.0],
[0.0, -0.003422, -0.002442, 0.0, 0.0, 0.0, 0.0]
]])
## expected outputs to scalar time record test: 2D row vectors
# beam attenuation (beam c) test
# expected:
cpd_ts = np.array([[6.553771, 4.807914, 5.156010, 2.788715, 1.655607, 1.171965]])
# calculated:
c_ts = optfunc.opt_beam_attenuation(c_ref, c_sig, traw, cwlngth, c_off, Tcal,
tbins, tarr, T, PS)
np.testing.assert_allclose(c_ts, cpd_ts, rtol=1e-6, atol=1e-6)
# absorption test
# case: default reference wavelength for scatter correction
apd_ts_s_ref715 = np.array([[1.990992, 1.479089, 3.350109, 0.350108, 0.152712, 0.000000]])
a_ts_sdef = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS)
np.testing.assert_allclose(a_ts_sdef, apd_ts_s_ref715, rtol=1e-6, atol=1e-6)
# absorption test
# case: user selected reference wavelength for scatter correction to abs values
ref_wave = 695 # nearest abs wavelength (700nm) should become reference wavelength
apd_ts_s_ref700 = np.array([[1.379858, 1.021574, 3.235057, 0.099367, 0.000000, -0.156972]])
a_ts_s700 = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS, ref_wave)
np.testing.assert_allclose(a_ts_s700, apd_ts_s_ref700, rtol=1e-6, atol=1e-6)
### multiple time records case
# replicate the inputs to represent 3 data packets
npackets = 3
[traw, Tcal, T, PS] = [np.tile(xarray, npackets) for xarray in [traw, Tcal, T, PS]]
[cwlngth, awlngth, c_sig, c_ref, c_off, a_sig, a_ref, a_off, tbins] = [
np.tile(xarray, (npackets, 1)) for xarray in [
cwlngth, awlngth, c_sig, c_ref, c_off, a_sig, a_ref, a_off, tbins]]
tarr = np.tile(tarr, (npackets, 1, 1))
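# Shape check (illustrative, not part of the original test): after tiling,
# the scalar-like inputs are (npackets,), the per-record 1D inputs such as
# cwlngth are (npackets, 6), tbins is (npackets, 7), and tarr is
# (npackets, 6, 7), i.e. one copy of every calibration array per time record.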
# replicate the expected output data products
cpd_ts = np.tile(cpd_ts, (npackets, 1))
apd_ts_s_ref715 = np.tile(apd_ts_s_ref715, (npackets, 1))
apd_ts_s_ref700 = np.tile(apd_ts_s_ref700, (npackets, 1))
# beam attenuation (beam c) test
c_ts = optfunc.opt_beam_attenuation(c_ref, c_sig, traw, cwlngth, c_off, Tcal,
tbins, tarr, T, PS)
np.testing.assert_allclose(c_ts, cpd_ts, rtol=1e-6, atol=1e-6)
# absorption test
# case: default reference wavelength for scatter correction
a_ts_sdef = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS)
np.testing.assert_allclose(a_ts_sdef, apd_ts_s_ref715, rtol=1e-6, atol=1e-6)
# case: user selected reference wavelength for scatter correction
a_ts_s700 = optfunc.opt_optical_absorption(a_ref, a_sig, traw, awlngth, a_off, Tcal,
tbins, tarr, cpd_ts, cwlngth, T, PS, ref_wave)
np.testing.assert_allclose(a_ts_s700, apd_ts_s_ref700, rtol=1e-6, atol=1e-6)
def test_opt_functions_OPTAA_vectorization_with_tscor_nans(self):
"""
Test the vectorized OPTAA wrapper functions in the opt_functions.py module;
include "a" and "c" test wavelengths outside of the (original) range of the
wavelength keys in the tscor.py file, which contains the dictionary of the
temperature and salinity correction coefficients. The dictionary keys have
been extended from [400.0 755.0] to [380.0 775.0] with entries of np.nan.
OOI (2013). Data Product Specification for Optical Beam Attenuation
Coefficient. Document Control Number 1341-00690.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00690_Data_Product_SPEC_OPTATTN_OOI.pdf)
OOI (2013). Data Product Specification for Optical Absorption
Coefficient. Document Control Number 1341-00700.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00700_Data_Product_SPEC_OPTABSN_OOI.pdf)
OOI (2014). OPTAA Unit Test. 1341-00700_OPTABSN Artifact.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00700_OPTABSN >>
OPTAA_unit_test.xlsx)
Initial code by <NAME>, 29-Apr-2014. Added tests for when OPTAA
wavelengths are outside the range of the empirically derived temperature
and salinity correction wavelength keys in the original tscor.py dictionary
by modifying def test_opt_functions_OPTAA_vectorization test data.
2015-04-17: <NAME>. Use np.nan instead of fill_value.
Changed signal and reference count input from float to fix.
Replaced 'exec' statements.
2015-04-21: <NAME>. Cosmetic statement re-ordering and shaping of input arrays.
"""
# test inputs:
# replicate the inputs to represent 3 data packets
npackets = 3
## natively scalar inputs
traw = np.array([48355])
Tcal = np.array([20.0])
T = np.array([12.0])
PS = np.array([35.0])
# time-vectorize
[traw, Tcal, T, PS] = [np.tile(xarray, npackets) for xarray in [traw, Tcal, T, PS]]
## natively 1D inputs
# valid tscor dictionary entries (those without nan values) are
# keyed at [400.0 755.0] nm. The dictionary has been extended to
# to [380.0 775.0] using np.nan as fill values. test:
cwlngth = np.array([[398.5, 540., 580., 630., 670., 710.]])
awlngth = np.array([[500., 550., 600., 650., 700., 761.2]])
c_sig = np.array([[150, 225, 200, 350, 450, 495]])
"""Preprocessing data methods."""
import random
import numpy as np
import pandas as pd
from autots.tools.impute import FillNA, df_interpolate
from autots.tools.seasonal import date_part, seasonal_int
class EmptyTransformer(object):
"""Base transformer returning raw data."""
def __init__(self, name: str = 'EmptyTransformer', **kwargs):
self.name = name
def _fit(self, df):
"""Learn behavior of data to change.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
def fit(self, df):
"""Learn behavior of data to change.
Args:
df (pandas.DataFrame): input dataframe
"""
self._fit(df)
return self
def transform(self, df):
"""Return changed data.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
def inverse_transform(self, df, trans_method: str = "forecast"):
"""Return data to original *or* forecast form.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
def fit_transform(self, df):
"""Fits and Returns *Magical* DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
return self._fit(df)
def __repr__(self):
"""Print."""
return 'Transformer ' + str(self.name) + ', uses standard .fit/.transform'
@staticmethod
def get_new_params(method: str = 'random'):
"""Generate new random parameters"""
if method == 'test':
return {'test': random.choice([1, 2])}
else:
return {}
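# Illustrative sketch (not part of the library): every transformer in this
# module follows the same fit / transform / inverse_transform contract, so a
# hypothetical caller can swap them freely. Assuming `df` is a DataFrame with
# a DatetimeIndex and numeric columns:
#
# transformer = EmptyTransformer()
# transformed = transformer.fit_transform(df)  # here: returns df unchanged
# restored = transformer.inverse_transform(transformed)
# assert restored.equals(df)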
def remove_outliers(df, std_threshold: float = 3):
"""Replace outliers with np.nan.
https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
Args:
df (pandas.DataFrame): DataFrame containing numeric data, DatetimeIndex
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]
return df
def clip_outliers(df, std_threshold: float = 3):
"""Replace outliers above threshold with that threshold. Axis = 0.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
lower = df_mean - (df_std * std_threshold)
upper = df_mean + (df_std * std_threshold)
df2 = df.clip(lower=lower, upper=upper, axis=1)
return df2
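# Illustrative sketch (not part of the library): contrast of the two outlier
# helpers on a toy frame. Values beyond the threshold become NaN with
# remove_outliers but are capped at the threshold with clip_outliers.
#
# toy = pd.DataFrame({'a': [1.0, 1.0, 1.0, 1.0, 100.0]})
# remove_outliers(toy, std_threshold=1)  # 100.0 -> NaN
# clip_outliers(toy, std_threshold=1)    # 100.0 -> mean + 1 * std ~= 65.07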
def simple_context_slicer(df, method: str = 'None', forecast_length: int = 30):
"""Condensed version of context_slicer with more limited options.
Args:
df (pandas.DataFrame): training data frame to slice
method (str): Option to slice dataframe
'None' - return unaltered dataframe
'HalfMax' - return half of dataframe
'ForecastLength' - return dataframe equal to length of forecast
'2ForecastLength' - return dataframe equal to twice length of forecast
(also takes 4, 6, 8, 10 in addition to 2)
'n' - any integer length to slice by
'-n' - full length less this amount
"0.n" - this percent of the full data
"""
if method in [None, "None"]:
return df
df = df.sort_index(ascending=True)
if 'forecastlength' in str(method).lower():
len_int = int([x for x in str(method) if x.isdigit()][0])
return df.tail(len_int * forecast_length)
elif method == 'HalfMax':
return df.tail(int(len(df.index) / 2))
elif str(method).replace("-", "").replace(".", "").isdigit():
method = float(method)
if method >= 1:
return df.tail(int(method))
elif method > -1:
return df.tail(int(df.shape[0] * abs(method)))
else:
return df.tail(int(df.shape[0] + method))
else:
print("Context Slicer Method not recognized")
return df
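# Illustrative sketch (not part of the library): how the string-coded options
# map onto df.tail() for a hypothetical 100-row history and forecast_length=10.
#
# simple_context_slicer(df, 'None', 10)             # all 100 rows
# simple_context_slicer(df, 'HalfMax', 10)          # last 50 rows
# simple_context_slicer(df, '2ForecastLength', 10)  # last 20 rows
# simple_context_slicer(df, '30', 10)               # last 30 rows
# simple_context_slicer(df, '0.2', 10)              # last 20 rows (20% of 100)
# simple_context_slicer(df, '-10', 10)              # last 90 rows (100 - 10)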
class Detrend(EmptyTransformer):
"""Remove a linear trend from the data."""
def __init__(self, model: str = 'GLS', **kwargs):
super().__init__(name='Detrend')
self.model = model
self.need_positive = ['Poisson', 'Gamma', 'Tweedie']
@staticmethod
def get_new_params(method: str = 'random'):
if method == "fast":
choice = random.choices(
[
"GLS",
"Linear",
],
[
0.5,
0.5,
],
k=1,
)[0]
else:
choice = random.choices(
[
"GLS",
"Linear",
"Poisson",
"Tweedie",
"Gamma",
"TheilSen",
"RANSAC",
"ARD",
],
[0.24, 0.2, 0.1, 0.1, 0.1, 0.02, 0.02, 0.02],
k=1,
)[0]
return {
"model": choice,
}
def _retrieve_detrend(self, detrend: str = "Linear"):
if detrend == 'Linear':
from sklearn.linear_model import LinearRegression
return LinearRegression(fit_intercept=True)
elif detrend == "Poisson":
from sklearn.linear_model import PoissonRegressor
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(
PoissonRegressor(fit_intercept=True, max_iter=200)
)
elif detrend == 'Tweedie':
from sklearn.linear_model import TweedieRegressor
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(TweedieRegressor(power=1.5, max_iter=200))
elif detrend == 'Gamma':
from sklearn.linear_model import GammaRegressor
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(
GammaRegressor(fit_intercept=True, max_iter=200)
)
elif detrend == 'TheilSen':
from sklearn.linear_model import TheilSenRegressor
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(TheilSenRegressor())
elif detrend == 'RANSAC':
from sklearn.linear_model import RANSACRegressor
return RANSACRegressor()
elif detrend == 'ARD':
from sklearn.linear_model import ARDRegression
from sklearn.multioutput import MultiOutputRegressor
return MultiOutputRegressor(ARDRegression())
else:
from sklearn.linear_model import LinearRegression
return LinearRegression()
def fit(self, df):
"""Fits trend for later detrending.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
Y = df.values
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
if self.model == 'GLS':
from statsmodels.regression.linear_model import GLS
self.trained_model = GLS(Y, X, missing='drop').fit()
else:
self.trained_model = self._retrieve_detrend(detrend=self.model)
if self.model in self.need_positive:
self.trnd_trans = PositiveShift(
log=False, center_one=True, squared=False
)
Y = pd.DataFrame(self.trnd_trans.fit_transform(df)).values
X = X.reshape((-1, 1))
self.trained_model.fit(X, Y)
self.shape = df.shape
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
if self.model != "GLS":
X = X.reshape((-1, 1))
# df = df.astype(float) - self.model.predict(X)
if self.model in self.need_positive:
temp = pd.DataFrame(
self.trained_model.predict(X), index=df.index, columns=df.columns
)
temp = self.trnd_trans.inverse_transform(temp)
df = df - temp
else:
df = df - self.trained_model.predict(X)
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
if self.model != "GLS":
X = X.reshape((-1, 1))
if self.model in self.need_positive:
temp = pd.DataFrame(
self.trained_model.predict(X), index=df.index, columns=df.columns
)
df = df + self.trnd_trans.inverse_transform(temp)
else:
df = df + self.trained_model.predict(X)
# df = df.astype(float) + self.trained_model.predict(X)
return df
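# Illustrative sketch (not part of the library): a linear-trend round trip.
# fit_transform removes the fitted trend and inverse_transform adds it back,
# so the composition is the identity up to numerical error.
#
# idx = pd.date_range('2020-01-01', periods=50, freq='D')
# df = pd.DataFrame({'y': np.arange(50, dtype=float)}, index=idx)
# det = Detrend(model='Linear')
# residual = det.fit_transform(df)            # roughly zero everywhere
# restored = det.inverse_transform(residual)
# np.allclose(restored.values, df.values)     # True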
class StatsmodelsFilter(EmptyTransformer):
"""Irreversible filters.
Args:
method (str): bkfilter or cffilter
"""
def __init__(self, method: str = 'bkfilter', **kwargs):
super().__init__(name="StatsmodelsFilter")
self.method = method
def fit(self, df):
"""Fits filter.
Args:
df (pandas.DataFrame): input dataframe
"""
return self
def fit_transform(self, df):
"""Fit and Return Detrended DataFrame.
Args:
df (pandas.DataFrame): input dataframe
"""
self.fit(df)
return self.transform(df)
def transform(self, df):
"""Return detrended data.
Args:
df (pandas.DataFrame): input dataframe
"""
try:
df = df.astype(float)
except Exception:
raise ValueError("Data Cannot Be Converted to Numeric Float")
if self.method == 'bkfilter':
from statsmodels.tsa.filters import bk_filter
cycles = bk_filter.bkfilter(df, K=1)
cycles.columns = df.columns
df = (df - cycles).fillna(method='ffill').fillna(method='bfill')
elif self.method == 'cffilter':
from statsmodels.tsa.filters import cf_filter
cycle, trend = cf_filter.cffilter(df)
cycle.columns = df.columns
df = df - cycle
return df
def inverse_transform(self, df):
"""Return data to original form.
Args:
df (pandas.DataFrame): input dataframe
"""
return df
class SinTrend(EmptyTransformer):
"""Modelling sin."""
def __init__(self, **kwargs):
super().__init__(name="SinTrend")
def fit_sin(self, tt, yy):
"""Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"
from user unsym @ https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
"""
import scipy.optimize
tt = np.array(tt)
yy = np.array(yy)
ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0])) # assume uniform spacing
Fyy = abs(np.fft.fft(yy))
import argparse
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import cifar10_data
from checkpoints import save_weights,load_weights
# settings
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1) #random seed for theano operation
parser.add_argument('--seed_data', type=int, default=1) #random seed for picking labeled data
parser.add_argument('--count', type=int, default=400) #number of labeled examples per class
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--base_RF_loss_weight', type=float, default=0.01) #weight for base random field loss, i.e. f-E[f]
parser.add_argument('--lrd', type=float, default=1e-3)
parser.add_argument('--lrg', type=float, default=1e-3)
parser.add_argument('--potential_control_weight', default=1e-3 ,type=float) #weight for confidence loss
parser.add_argument('--beta', default=0.5 ,type=float) #beta for SGHMC
parser.add_argument('--gradient_coefficient', default=0.003,type=float) #coefficient for gradient term of SGLD/SGHMC
parser.add_argument('--noise_coefficient', default=0,type=float) #coefficient for noise term of SGLD/SGHMC
parser.add_argument('--L', default=10 ,type=int) #revision steps
parser.add_argument('--max_e', default=600 ,type=int) #max number of epochs
parser.add_argument('--revison_method', default='revision_x_sghmc' ,type=str) #revision method
parser.add_argument('--load', default='' ,type=str) #file name to load trained model
parser.add_argument('--data_dir', type=str, default='data/cifar-10-python/') #data folder to load
args = parser.parse_args()
print(args)
# fixed random seeds
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))
# load CIFAR data
def rescale(mat):
return np.transpose(np.cast[th.config.floatX]((-127.5 + mat)/127.5),(3,2,0,1))
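# Illustrative note (layout is an assumption, not confirmed by this script):
# if the loader returns uint8 images in [0, 255] with axes (H, W, C, N),
# rescale maps values to [-1, 1] and reorders axes to (N, C, H, W), e.g.
#
# raw = np.zeros((32, 32, 3, 5), dtype=np.uint8)  # hypothetical (H, W, C, N) batch
# rescale(raw).shape                              # -> (5, 3, 32, 32)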
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
testx, testy = cifar10_data.load(args.data_dir, subset='test')
trainx_unl = np.array(trainx).copy()
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(np.ceil(float(testx.shape[0])/args.batch_size))
# specify random field
layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_1'),name='d_w1'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_2'),name='d_w2'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_3'),name='d_w3'))
layers.append(ll.MaxPool2DLayer(layers[-1],(2,2)))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_4'),name='d_w4'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_5'),name='d_w5'))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_6'),name='d_w6'))
layers.append(ll.MaxPool2DLayer(layers[-1],(2,2)))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(nn.weight_norm(ll.Conv2DLayer(layers[-1],512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_7'),name='d_w7'))
layers.append(nn.weight_norm(ll.NINLayer(layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_8'),name='d_w8'))
layers.append(nn.weight_norm(ll.NINLayer(layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu,name='d_9'),name='d_w9'))
layers.append(ll.GlobalPoolLayer(layers[-1]))
layers.append(nn.weight_norm(ll.DenseLayer(layers[-1], num_units=10, W=Normal(0.05), nonlinearity=None,name='d_10'), train_g=True, init_stdv=0.1,name='d_w10'))
labels = T.ivector()
x_lab = T.tensor4()
temp = ll.get_output(layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in layers for u in getattr(l,'init_updates',[])]
output_before_softmax_lab = ll.get_output(layers[-1], x_lab, deterministic=False)
logit_lab = output_before_softmax_lab[T.arange(T.shape(x_lab)[0]),labels]
u_lab = T.mean(nn.log_sum_exp(output_before_softmax_lab))
#cross entropy loss of labeled data
loss_lab = -T.mean(logit_lab) + u_lab
train_err = T.mean(T.neq(T.argmax(output_before_softmax_lab,axis=1),labels))
# test error
output_before_softmax = ll.get_output(layers[-1], x_lab, deterministic=True)
test_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),labels))
# Theano functions for training the random field
lr = T.scalar()
RF_params = ll.get_all_params(layers, trainable=True)
RF_param_updates = lasagne.updates.rmsprop(loss_lab, RF_params, learning_rate=lr)
train_RF = th.function(inputs=[x_lab,labels,lr], outputs=[loss_lab, train_err], updates=RF_param_updates)
#weight norm initialization
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
#prediction on test data
output_before_softmax = ll.get_output(layers[-1], x_lab, deterministic=True)
test_batch = th.function(inputs=[x_lab], outputs=output_before_softmax)
# select labeled data
rng_data = np.random.RandomState(args.seed_data)
inds = rng_data.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
txs = []
tys = []
for j in range(10):
txs.append(trainx[trainy==j][:args.count])
tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import tensorflow as tf
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import gpflow
from gpflow import settings
from gpflow.conditionals import uncertain_conditional
from gpflow.conditionals import feature_conditional
from gpflow.quadrature import mvnquad
from gpflow.test_util import session_context
class MomentMatchingSVGP(gpflow.models.SVGP):
@gpflow.params_as_tensors
def uncertain_predict_f_moment_matching(self, Xmu, Xcov):
return uncertain_conditional(
Xmu, Xcov, self.feature, self.kern, self.q_mu, self.q_sqrt,
mean_function=self.mean_function, white=self.whiten,
full_cov_output=self.full_cov_output)
def uncertain_predict_f_monte_carlo(self, Xmu, Xchol, mc_iter=int(1e6)):
rng = np.random.RandomState(0)
D_in = Xchol.shape[0]
X_samples = Xmu + np.reshape(
Xchol[None, :, :] @ rng.randn(mc_iter, D_in)[:, :, None], [mc_iter, D_in])
F_mu, F_var = self.predict_f(X_samples)
F_samples = F_mu + rng.randn(*F_var.shape) * (F_var ** 0.5)
mean = np.mean(F_samples, axis=0)
covar = np.cov(F_samples.T)
return mean, covar
def gen_L(rng, n, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(n)])
def gen_q_sqrt(rng, D_out, *shape):
return np.array([np.tril(rng.randn(*shape)) for _ in range(D_out)])
def mean_function_factory(rng, mean_function_name, D_in, D_out):
if mean_function_name == "Zero":
return gpflow.mean_functions.Zero(output_dim=D_out)
elif mean_function_name == "Constant":
return gpflow.mean_functions.Constant(c=rng.rand(D_out))
elif mean_function_name == "Linear":
return gpflow.mean_functions.Linear(
A=rng.rand(D_in, D_out), b=rng.rand(D_out))
else:
return None
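# Illustrative sketch (not part of the test suite): the factory dispatches on
# the name string and falls back to None for anything unrecognized.
#
# rng = np.random.RandomState(0)
# mean_function_factory(rng, "Constant", D_in=1, D_out=3)  # gpflow Constant mean
# mean_function_factory(rng, "NotAMeanFunction", 1, 3)     # None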
class Data:
N = 7
N_new = 2
D_out = 3
D_in = 1
rng = np.random.RandomState(1)
X = np.linspace(-5, 5, N)[:, None] + rng.randn(N, 1)
Y = np.hstack([np.sin(X), np.cos(X), X**2])
Xnew_mu = rng.randn(N_new, 1)
Xnew_covar = np.zeros((N_new, 1, 1))
class DataMC1(Data):
Y = np.hstack([np.sin(Data.X), np.sin(Data.X) * 2, Data.X ** 2])
class DataMC2(Data):
N = 7
N_new = 5
D_out = 4
D_in = 2
X = Data.rng.randn(N, D_in)
Y = np.hstack([np.sin(X), np.sin(X) * 2])  # D_out = 4 columns built from the 2 input dims
"""
A method that computes the constraint violations, where its considered a
violation if P(General|x) < P(Specific|x)
"""
from typing import Dict, List
from box_mlc.dataset_readers.hierarchy_readers.hierarchy_reader import (
HierarchyReader,
)
from torch.nn.parameter import Parameter
from allennlp.common import Registrable
import logging
import torch
import numpy as np
logger = logging.getLogger(__name__)
# TODO: Remove the parent class Module
# TODO: remove the extra useless parameter adjacency_matrix_param
class ConstraintViolation(torch.nn.Module,Registrable):
"""
Given a hierarchy in the form of an adjacency matrix or cooccurence
statistic in the adjacency matrix format, compute the average
constraint violation.
"""
def __init__(
self,
hierarchy_reader: HierarchyReader,
cooccurence_threshold: float = 1,
) -> None:
"""
Args:
hierarchy_reader: Creates the adjacency_matrix and the mask.
cooccurence_threshold: If the adjacency matrix captures co-occurrence stats, this threshold
determines whether an edge exists between labels. Row -> general, column -> specific.
"""
super().__init__() # type:ignore
#self.adjacency_matrix_param = torch.nn.Parameter(hierarchy_reader.adjacency_matrix, requires_grad=False) # This is useless but present only so that we can load old models.
self.adjacency_matrix = (
hierarchy_reader.adjacency_matrix.detach().cpu().numpy()
)
self.threshold = cooccurence_threshold
def get_true_mask(self, true_labels: np.ndarray) -> np.ndarray:
true_mask = true_labels.copy()
true_mask[true_mask == 1] = -100000
true_mask[true_mask == 0] = 1
return true_mask
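# Illustrative sketch (not part of the class): the mask pushes every label that
# is true for an example to a large negative value, so that after multiplying
# with the predicted probabilities only the edge's own two labels survive near
# the top of the ranking computed in __call__ below. Assuming `cv` is a
# constructed instance:
#
# true = np.array([[1, 0, 1, 0]])
# cv.get_true_mask(true)  # -> array([[-100000, 1, -100000, 1]])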
def __call__(
self, positive_probabilities: torch.Tensor, true_labels: torch.Tensor
) -> Dict:
"""
true_labels: (examples, labels). True labels for the given example.
Should follow same label indexing as the adj. matrix.
positive_probabilities: (examples, labels). Predicted probabilities by the model.
"""
positive_probabilities = positive_probabilities.detach().cpu().numpy()
true_labels = true_labels.detach().cpu().numpy()
edges_idx = np.argwhere(self.adjacency_matrix >= self.threshold)
true_mask = self.get_true_mask(true_labels)
# logger.info(f"Processing {len(edges_idx)} edges")
avg_number_of_violations: List = []
number_of_violations: List = []
extent_of_violations: List = []
frequency: List = []
distances: List = []
no_examples_edges_count: int = 0
for edge in edges_idx:
ind = np.logical_and(
true_labels[:, edge[0]], true_labels[:, edge[1]]
) # examples where the edge is present
true_subset = true_labels.copy()[ind]
if true_subset.shape[0] > 0:
frequency.append(true_subset.shape[0])
true_mask_subset = true_mask.copy()[ind]
true_mask_subset[:, edge[0]] = 1
true_mask_subset[:, edge[1]] = 1
positive_subset = positive_probabilities.copy()[
ind
] # (#subset_ex, num_labels)
extent_of_violations.append(
np.mean(
positive_subset[:, edge[0]]
- positive_subset[:, edge[1]]
)
)
sorted_ind = np.argsort(
-1 * positive_subset * true_mask_subset, axis=1
)
distance_g_s = (
np.argwhere(sorted_ind == edge[0])[:, -1]
- np.argwhere(sorted_ind == edge[1])[:, -1]
)
avg_number_of_violations.append(
np.sum(np.where(distance_g_s > 0, 1.0, 0.0))
/ true_subset.shape[0]
)
number_of_violations.append(
np.sum(np.where(distance_g_s > 0, 1, 0))
)
extent_of_violations.append(
np.mean(np.maximum(distance_g_s, 0))
)
#!/usr/bin/env python3
#
# PURPOSE: Read and reproject an ICESat-2 tiled mask HDF5 file
# for one northern or southern hemisphere area with all
# longitudes (0 - 359.95). Returns logical of land surf_type.
#
# FILES ACCESSED: H5 surf_type file (input)
#
# COMMENTS:
#
# Usage: surftype surf_type_filename hemisphere_flag
#
#
# HISTORY:
#
# YYYY-MM-DD AUID SCM Comment
# ---------- --------- ----- ------------------------------------------------
# 2019-11-19 bjelley M0265 Initial version for masking ATL11 tiles by surf_type
#
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import sh
import sys
import os
import time
import datetime
import h5py
import numpy as np
from osgeo import osr
import matplotlib
import matplotlib.pyplot as plt
PGE_NAME='create_surfmask'
PGE_VERS='Version 1.0'
PGE_INFO='Reads a composite HDF5 file, extracts "land" surf_type.'
#
# Error/Status constants
#
GE_NOERROR=0
GE_NOTICE=1
GE_WARNING=2
GE_FATAL=3
#
# Execution time
#
proc_start=time.time()
proc_end=time.time()
#
#==============================================================================
#
# NAME: msg
#
# PURPOSE: Prints a message in ASAS format
#
# FILES ACCESSED: stdout
#
# COMMENTS:
#
#------------------------------------------------------------------------------
#
def msg(i_res, mod, routine, msg):
#
# Create a timestamp (pulled from asas_common.py)
#
tstamp=datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
s_res='{:0>6}'.format(i_res)
if (i_res==GE_NOERROR):
mstatus="Status "
if (i_res==GE_NOTICE):
mstatus="Notice "
if (i_res==GE_WARNING):
mstatus="Warning"
if (i_res==GE_FATAL):
mstatus="ERROR "
print(tstamp+' | '+mstatus+' | '+s_res+' | '+routine+' | '+msg)
return
#enddef
#
#==============================================================================
#
# NAME: end_banner
#
# PURPOSE: Prints an ending banner
#
# FILES ACCESSED: stdout
#
# COMMENTS:
#
#------------------------------------------------------------------------------
#
def end_banner(i_res):
#
# Write ending banner
#
proc_end=time.time()
msg(GE_NOERROR, PGE_NAME, 'main', '---')
if (i_res == GE_NOERROR) :
msg(GE_NOERROR, PGE_NAME, 'main', "Successful execution")
else:
msg(GE_NOERROR, PGE_NAME, 'main', "Execution failed")
msg(GE_NOERROR, PGE_NAME, 'main', "Execution time: "+str(proc_end-proc_start))
msg(GE_NOERROR, PGE_NAME, 'main', "Result Code : "+str(i_res))
msg(GE_NOERROR, PGE_NAME, 'main', '---')
if (i_res == GE_FATAL):
sys.exit(GE_FATAL)
return
#enddef
#
#==============================================================================
#
# NAME: ibits
#
# PURPOSE: Return integer value of bits
#
# COMMENTS:
#
#------------------------------------------------------------------------------
def ibits(ival, ipos, ilen):
"""Same usage as Fortran ibits function."""
ones = ((1 << ilen)-1)
return (ival & (ones << ipos)) >> ipos
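# Illustrative sketch: ibits extracts ilen bits starting at bit position ipos
# (counting from the least significant bit), mirroring Fortran's IBITS. Bit 0
# is the "land" flag tested below.
#
# ibits(0b10110, 0, 1)  # -> 0  (bit 0)
# ibits(0b10110, 1, 3)  # -> 3  (bits 1-3 are 0b011)
# ibits(0b10110, 4, 1)  # -> 1  (bit 4)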
#
#==============================================================================
#
# NAME: read_hdf5
#
# PURPOSE: Read and reproject a HDF5 of surf_type tiles.
#
# FILES ACCESSED: Input- Composite HDF5; Return - Hemisphere specific surf_type
#
# COMMENTS:
#
#------------------------------------------------------------------------------
#
class landmask(object):
def __init__(self):
self.x=None
self.y=None
self.z=None
def read_surftype_h5(self,in_file,hemisphere=-1):
#
# Open file and read tile attributes
#
f_in = h5py.File(in_file, 'r')
g_in = f_in['TILE_INDEX']
lon0 = g_in.attrs['LON0']
lon1 = g_in.attrs['LON1']
lat0 = g_in.attrs['LAT0']
lat1 = g_in.attrs['LAT1']
lon_scale = g_in.attrs['LON_SCALE']
lat_scale = g_in.attrs['LAT_SCALE']
tile_name = g_in.attrs['NAME']
nlon = g_in.attrs['N_LON']
nlat = g_in.attrs['N_LAT']
#
# Read 1 tile, need dtype for initialization
#
tile = np.array(f_in[tile_name[0]])
#
# Initialize arrays
#
xsz = int(np.ceil((np.max(lon1) - np.min(lon0)) / lon_scale[0]))
ysz = int(np.ceil((np.max(lat1) - np.min(lat0)) / lat_scale[0]))
surf_type = np.empty([ysz,xsz],dtype=tile.dtype)
lons = np.full([ysz,xsz],np.inf)
lats = np.full([ysz,xsz],np.inf)
sum_tile = 0
#
# Loop through all tiles, capturing geolocation and tile data
#
for lat_tile in range(0,9):
for lon_tile in range(0,18):
tile_num = lon_tile * (lat_tile + 1)
if sum_tile >= np.size(nlon):
print("breaking loop for beyond array size, size(nlon):",np.size(nlon))
break
x = np.linspace(lon0[sum_tile], lon1[sum_tile] - lon_scale[sum_tile], nlon[sum_tile])
x = np.repeat(x[np.newaxis,:],nlon[sum_tile],0)
lons[lat_tile*nlat[0]:lat_tile*nlat[0]+nlat[0],lon_tile*nlon[0]:lon_tile*nlon[0]+nlon[0]] = x
y = np.linspace(lat0[sum_tile], lat1[sum_tile] - lat_scale[sum_tile], nlat[sum_tile])
y = np.repeat(y[:,np.newaxis],nlat[sum_tile],1)
lats[lat_tile*nlat[0]:lat_tile*nlat[0]+nlat[0],lon_tile*nlon[0]:lon_tile*nlon[0]+nlon[0]] = y
tile = np.array(f_in[tile_name[sum_tile]])
surf_type[lat_tile*nlat[0]:lat_tile*nlat[0]+nlat[0],lon_tile*nlon[0]:lon_tile*nlon[0]+nlon[0]] = tile
sum_tile+=1
#
# Resample surf_type by keeping every subset_size-th point (no need for full 5 m resolution)
#
subset_size = 3
lons = lons[0:np.shape(lons)[0]:subset_size,0:np.shape(lons)[1]:subset_size]
lats = lats[0:np.shape(lats)[0]:subset_size,0:np.shape(lats)[1]:subset_size]
surf_type = surf_type[0:np.shape(surf_type)[0]:subset_size,0:np.shape(surf_type)[1]:subset_size]
xsz = int(xsz/subset_size)
ysz = int(ysz/subset_size)
surfmask = np.full([ysz,xsz], False)
#
# Set mask True if land bit is set
#
for j in range(np.shape(surf_type)[1]):
for i in range(np.shape(surf_type)[0]):
if int(ibits(surf_type[i,j],0,1)) == 1:
surfmask[i,j] = True
#
# Subset down to where lats match hemisphere
# Assumes all longitudes included
# np.reshape(arr, (-1,int(360/dx))) provides for unknown number of lats
#
dx=lon_scale[0]
latlimit=-60.0
if hemisphere==-1:
lons = np.reshape(lons[lats <= latlimit], (-1,xsz))
#The DF of a tidal stream
import copy
import multiprocessing
import warnings
from pkg_resources import parse_version
import numpy
import scipy
from scipy import special, interpolate, integrate, optimize
_SCIPY_VERSION= parse_version(scipy.__version__)
if _SCIPY_VERSION < parse_version('0.10'): #pragma: no cover
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION < parse_version('0.19'): #pragma: no cover
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from ..orbit import Orbit
from .df import df
from ..util import coords, fast_cholesky_invert, \
conversion, multi, plot, stable_cho_factor, ars
from ..util.conversion import physical_conversion, _APY_UNITS, _APY_LOADED
from ..actionAngle.actionAngleIsochroneApprox import dePeriod
from ..potential import flatten as flatten_potential
from ..util import galpyWarning
if _APY_LOADED:
from astropy import units
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
# cast a wide net
_TWOPIWRAPS= numpy.arange(-4,5)*2.*numpy.pi
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(df):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,useTM=False,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
ro=None,vo=None,
Vnorm=None,Rnorm=None,
R0=8.,Zsun=0.0208,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False,nospreadsetup=False,
approxConstTrackFreq=False,useTMHessian=False,
custom_transform=None):
"""
NAME:
__init__
PURPOSE:
Initialize the DF of a tidal stream
INPUT:
sigv - radial velocity dispersion of the progenitor (can be Quantity)
tdisrupt= (5 Gyr) time since start of disruption (can be Quantity)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
useTM= (False) if set to an actionAngleTorus instance, use this to speed up calculations
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially (can be Quantity)
deltaAngleTrack= (None) angle to estimate the stream track over (rad; or can be Quantity)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
nospreadsetup= (False) if True, don't setup the spread around the stream track (only for nosetup is False)
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
vo= (220) circular velocity to normalize velocities with [used to be Vnorm; can be Quantity]
ro= (8) Galactocentric radius to normalize positions with [used to be Rnorm; can be Quantity]
R0= (8) Galactocentric radius of the Sun (kpc) [can be different from ro; can be Quantity]
Zsun= (0.0208) Sun's height above the plane (kpc; can be Quantity)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center) (can be Quantity)
custom_transform= (None) matrix implementing the rotation from (ra,dec) to a custom set of sky coordinates
approxConstTrackFreq= (False) if True, approximate the stream assuming that the frequency is constant along the stream (only works with useTM, for which this leads to a significant speed-up)
useTMHessian= (False) if True, compute the basic Hessian dO/dJ_prog using TM; otherwise use aA
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
if ro is None and not Rnorm is None:
warnings.warn("WARNING: Rnorm keyword input to streamdf is deprecated in favor of the standard ro keyword", galpyWarning)
ro= Rnorm
if vo is None and not Vnorm is None:
warnings.warn("WARNING: Vnorm keyword input to streamdf is deprecated in favor of the standard vo keyword", galpyWarning)
vo= Vnorm
df.__init__(self,ro=ro,vo=vo)
sigv= conversion.parse_velocity(sigv,vo=self._vo)
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./conversion.time_in_Gyr(self._vo,self._ro)
else:
self._tdisrupt= conversion.parse_time(tdisrupt,ro=self._ro,vo=self._vo)
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= flatten_potential(pot)
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
self._check_consistent_units()
if useTM:
self._useTM= True
self._aAT= useTM # confusing, no?
self._approxConstTrackFreq= approxConstTrackFreq
if not self._aAT._pot == self._pot:
raise IOError("Potential in useTM=actionAngleTorus instance does not appear to be the same as given potential pot")
else:
self._useTM= False
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading,useTMHessian)
sigangle= conversion.parse_angle(sigangle)
deltaAngleTrack= conversion.parse_angle(deltaAngleTrack)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
R0= conversion.parse_length_kpc(R0)
Zsun= conversion.parse_length_kpc(Zsun)
vsun= conversion.parse_velocity_kms(vsun)
vsun[0]= conversion.parse_velocity_kms(vsun[0])
vsun[1]= conversion.parse_velocity_kms(vsun[1])
vsun[2]= conversion.parse_velocity_kms(vsun[2])
self._setup_coord_transform(R0,Zsun,vsun,progenitor,custom_transform)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
if not nospreadsetup: self._determine_stream_spread()
return None
def _progenitor_setup(self,progenitor,leading,useTMHessian):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,
_firstFlip=(not leading),
use_physical=False)
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
if useTMHessian:
h, fr,fp,fz,e= self._aAT.hessianFreqs(self._progenitor_jr,
self._progenitor_lz,
self._progenitor_jz)
self._dOdJp= h
# Replace frequencies with TM frequencies
self._progenitor_Omegar= fr
self._progenitor_Omegaphi= fp
self._progenitor_Omegaz= fz
self._progenitor_Omega= numpy.array([self._progenitor_Omegar,
self._progenitor_Omegaphi,
self._progenitor_Omegaz]).reshape(3)
else:
self._dOdJp= calcaAJac(self._progenitor.vxvv[0],
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpInv= numpy.linalg.inv(self._dOdJp)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def _setup_coord_transform(self,R0,Zsun,vsun,progenitor,custom_transform):
#Set the coordinate-transformation parameters; check that these do not conflict with those in the progenitor orbit object; need to use the original, since this objects _progenitor has physical turned off
if progenitor._roSet \
and (numpy.fabs(self._ro-progenitor._ro) > 10.**-.8 \
or numpy.fabs(R0-progenitor._ro) > 10.**-8.):
warnings.warn("Warning: progenitor's ro does not agree with streamdf's ro and R0; this may have unexpected consequences when projecting into observables", galpyWarning)
if progenitor._voSet \
and numpy.fabs(self._vo-progenitor._vo) > 10.**-8.:
warnings.warn("Warning: progenitor's vo does not agree with streamdf's vo; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.fabs(Zsun-progenitor._zo) > 10.**-8.:
warnings.warn("Warning: progenitor's zo does not agree with streamdf's Zsun; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.any(numpy.fabs(vsun-numpy.array([0.,self._vo,0.])\
-progenitor._solarmotion) > 10.**-8.):
warnings.warn("Warning: progenitor's solarmotion does not agree with streamdf's vsun (after accounting for vo); this may have unexpected consequences when projecting into observables", galpyWarning)
self._R0= R0
self._Zsun= Zsun
self._vsun= vsun
self._custom_transform= custom_transform
return None
def _setup_progIsTrack(self):
"""If progIsTrack, the progenitor orbit that was passed to the
streamdf initialization is the track at zero angle separation;
this routine computes an actual progenitor position that gives
the desired track given the parameters of the streamdf"""
# We need to flip the sign of the offset, to go to the progenitor
self._sigMeanSign*= -1.
# Use _determine_stream_track_single to calculate the track-progenitor
# offset at zero angle separation
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
# Setup the new progenitor orbit
progenitor= Orbit(prog_stream_offset[3])
# Flip the offset sign again
self._sigMeanSign*= -1.
# Now re-do the previous setup
self._progenitor_setup(progenitor,self._leading,False)
self._offset_setup(self._sigangle,self._leading,
self._deltaAngleTrack)
return None
@physical_conversion('angle',pop=True)
def misalignment(self,isotropic=False,**kwargs):
"""
NAME:
misalignment
PURPOSE:
calculate the misalignment between the progenitor's frequency
and the direction along which the stream disrupts
INPUT:
isotropic= (False), if True, return the misalignment assuming an isotropic action distribution
OUTPUT:
misalignment in rad
HISTORY:
2013-12-05 - Written - Bovy (IAS)
2017-10-28 - Changed output unit to rad - Bovy (UofT)
"""
warnings.warn("In versions >1.3, the output unit of streamdf.misalignment has been changed to radian (from degree before)",galpyWarning)
if isotropic:
dODir= self._dOdJpEig[1][:,numpy.argmax(numpy.fabs(self._dOdJpEig[0]))]
else:
dODir= self._dsigomeanProgDirection
out= numpy.arccos(numpy.sum(self._progenitor_Omega*dODir)/numpy.sqrt(numpy.sum(self._progenitor_Omega**2.)))
if out > numpy.pi/2.: return out-numpy.pi
else: return out
def freqEigvalRatio(self,isotropic=False):
"""
NAME:
freqEigvalRatio
PURPOSE:
calculate the ratio between the largest and 2nd-to-largest (in abs)
eigenvalue of sqrt(dO/dJ^T V_J dO/dJ)
(if this is big, a 1D stream will form)
INPUT:
isotropic= (False), if True, return the ratio assuming an isotropic action distribution (i.e., just of dO/dJ)
OUTPUT:
ratio between eigenvalues of fabs(dO / dJ)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
sortedEig= sorted(numpy.fabs(self._dOdJpEig[0]))
return sortedEig[2]/sortedEig[1]
else:
return numpy.sqrt(self._sortedSigOEig)[2]\
/numpy.sqrt(self._sortedSigOEig)[1]
@physical_conversion('time',pop=True)
def estimateTdisrupt(self,deltaAngle):
"""
NAME:
estimateTdisrupt
PURPOSE:
estimate the time of disruption
INPUT:
deltaAngle- spread in angle since disruption
OUTPUT:
time in natural units
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
return deltaAngle\
/numpy.sqrt(numpy.sum(self._dsigomeanProg**2.))
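# Illustrative usage (not part of the class): with debris spread over 0.3 rad
# in angle and a mean frequency offset |Delta Omega| of, say, 0.1 in natural
# units, the estimate is 0.3 / 0.1 = 3 natural time units. Assuming `sdf` is a
# streamdf instance:
#
# sdf.estimateTdisrupt(0.3)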
def subhalo_encounters(self,venc=numpy.inf,sigma=150./220.,
nsubhalo=0.3,bmax=0.025,yoon=False):
"""
NAME:
subhalo_encounters
PURPOSE:
estimate the number of encounters with subhalos over the lifetime of this stream, using a formalism similar to that of Yoon et al. (2011)
INPUT:
venc= (numpy.inf) count encounters with (relative) speeds less than this (relative radial velocity in cylindrical stream frame, unless yoon is True) (can be Quantity)
sigma= (150/220) velocity dispersion of the DM subhalo population (can be Quantity)
nsubhalo= (0.3) spatial number density of subhalos (can be Quantity)
bmax= (0.025) maximum impact parameter (if larger than width of stream) (can be Quantity)
yoon= (False) if True, use erroneous Yoon et al. formula
OUTPUT:
number of encounters
HISTORY:
2016-01-19 - Written - Bovy (UofT)
"""
venc= conversion.parse_velocity(venc,vo=self._vo)
sigma= conversion.parse_velocity(sigma,vo=self._vo)
nsubhalo= conversion.parse_numdens(nsubhalo,ro=self._ro)
bmax= conversion.parse_length(bmax,ro=self._ro)
Ravg= numpy.mean(numpy.sqrt(self._progenitor.orbit[0,:,0]**2.
+self._progenitor.orbit[0,:,3]**2.))
if numpy.isinf(venc):
vencFac= 1.
elif yoon:
vencFac= (1.-(1.+venc**2./4./sigma**2.)\
*numpy.exp(-venc**2./4./sigma**2.))
else:
vencFac= (1.-numpy.exp(-venc**2./2./sigma**2.))
if yoon:
yoonFac= 2*numpy.sqrt(2.)
else:
yoonFac= 1.
# Figure out width of stream
w= self.sigangledAngle(self._meandO*self._tdisrupt,simple=True,
use_physical=False)
if bmax < w*Ravg/2.: bmax= w*Ravg/2.
return yoonFac/numpy.sqrt(2.)*numpy.sqrt(numpy.pi)*Ravg*sigma\
*self._tdisrupt**2.*self._meandO\
*bmax*nsubhalo*vencFac
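    # Illustrative call of subhalo_encounters (a sketch only; `sdf` and astropy.units
    # are assumed to be available, not set up here):
    #   >>> from astropy import units
    #   >>> sdf.subhalo_encounters(venc=200.*units.km/units.s,
    #   ...                        nsubhalo=0.3/units.kpc**3,
    #   ...                        bmax=1.*units.kpc)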
############################STREAM TRACK FUNCTIONS#############################
def plotTrack(self,d1='x',d2='z',interp=True,spread=0,simple=_USESIMPLE,
*args,**kwargs):
"""
NAME:
plotTrack
PURPOSE:
plot the stream track
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
interp= (True) if True, use the interpolated stream track
spread= (0) if int > 0, also plot the spread around the track as spread x sigma
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
simple= (False), if True, use a simple estimate for the spread in perpendicular angle
           galpy.util.plot.plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
if not hasattr(self,'_ObsTrackLB') and \
(d1.lower() == 'll' or d1.lower() == 'bb'
or d1.lower() == 'dist' or d1.lower() == 'pmll'
or d1.lower() == 'pmbb' or d1.lower() == 'vlos'
or d2.lower() == 'll' or d2.lower() == 'bb'
or d2.lower() == 'dist' or d2.lower() == 'pmll'
or d2.lower() == 'pmbb' or d2.lower() == 'vlos'):
self.calc_stream_lb()
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_track_dim(d1,interp=interp,phys=phys)
ty= self._parse_track_dim(d2,interp=interp,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
if spread:
addx, addy= self._parse_track_spread(d1,d2,interp=interp,phys=phys,
simple=simple)
if ('ls' in kwargs and kwargs['ls'] == 'none') \
or ('linestyle' in kwargs \
and kwargs['linestyle'] == 'none'):
kwargs.pop('ls',None)
kwargs.pop('linestyle',None)
spreadls= 'none'
else:
spreadls= '-.'
spreadmarker= kwargs.pop('marker',None)
spreadcolor= kwargs.pop('color',None)
spreadlw= kwargs.pop('lw',1.)
plot.plot(tx+spread*addx,ty+spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
plot.plot(tx-spread*addx,ty-spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
return None
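    # Typical call of plotTrack (a sketch only; `sdf` is assumed to be an already
    # set-up streamdf instance):
    #   >>> sdf.plotTrack(d1='ll',d2='dist',interp=True,spread=2,color='k')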
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
"""
NAME:
plotProgenitor
PURPOSE:
plot the progenitor orbit
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
galpy.util.plot.plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
tts= self._progenitor.t[self._progenitor.t \
< self._trackts[self._nTrackChunks-1]]
obs= [self._R0,0.,self._Zsun]
obs.extend(self._vsun)
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_progenitor_dim(d1,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
ty= self._parse_progenitor_dim(d2,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
return None
def _parse_track_dim(self,d1,interp=True,phys=False):
"""Parse the dimension to plot the stream track for"""
if interp: interpStr= 'interpolated'
else: interpStr= ''
if d1.lower() == 'x':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,0]
elif d1.lower() == 'y':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,1]
elif d1.lower() == 'z':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,2]
elif d1.lower() == 'r':
tx= self.__dict__['_%sObsTrack' % interpStr][:,0]
elif d1.lower() == 'phi':
tx= self.__dict__['_%sObsTrack' % interpStr][:,5]
elif d1.lower() == 'vx':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,3]
elif d1.lower() == 'vy':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,4]
elif d1.lower() == 'vz':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,5]
elif d1.lower() == 'vr':
tx= self.__dict__['_%sObsTrack' % interpStr][:,1]
elif d1.lower() == 'vt':
tx= self.__dict__['_%sObsTrack' % interpStr][:,2]
elif d1.lower() == 'll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,0]
elif d1.lower() == 'bb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,1]
elif d1.lower() == 'dist':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,2]
elif d1.lower() == 'pmll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,4]
elif d1.lower() == 'pmbb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,5]
elif d1.lower() == 'vlos':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,3]
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_progenitor_dim(self,d1,ts,ro=None,vo=None,obs=None,
phys=False):
"""Parse the dimension to plot the progenitor orbit for"""
if d1.lower() == 'x':
tx= self._progenitor.x(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'y':
tx= self._progenitor.y(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'z':
tx= self._progenitor.z(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'r':
tx= self._progenitor.R(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'phi':
tx= self._progenitor.phi(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vx':
tx= self._progenitor.vx(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vy':
tx= self._progenitor.vy(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vz':
tx= self._progenitor.vz(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vr':
tx= self._progenitor.vR(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vt':
tx= self._progenitor.vT(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'll':
tx= self._progenitor.ll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'bb':
tx= self._progenitor.bb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'dist':
tx= self._progenitor.dist(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmll':
tx= self._progenitor.pmll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmbb':
tx= self._progenitor.pmbb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vlos':
tx= self._progenitor.vlos(ts,ro=ro,vo=vo,obs=obs)
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_track_spread(self,d1,d2,interp=True,phys=False,
simple=_USESIMPLE):
"""Determine the spread around the track"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
okaySpreadR= ['r','vr','vt','z','vz','phi']
okaySpreadXY= ['x','y','z','vx','vy','vz']
okaySpreadLB= ['ll','bb','dist','vlos','pmll','pmbb']
#Determine which coordinate system we're in
coord= [False,False,False] #R, XY, LB
if d1.lower() in okaySpreadR and d2.lower() in okaySpreadR:
coord[0]= True
elif d1.lower() in okaySpreadXY and d2.lower() in okaySpreadXY:
coord[1]= True
elif d1.lower() in okaySpreadLB and d2.lower() in okaySpreadLB:
coord[2]= True
else:
raise NotImplementedError("plotting the spread for coordinates from different systems not implemented yet ...")
#Get the right 2D Jacobian
indxDict= {}
indxDict['r']= 0
indxDict['vr']= 1
indxDict['vt']= 2
indxDict['z']= 3
indxDict['vz']= 4
indxDict['phi']= 5
indxDictXY= {}
indxDictXY['x']= 0
indxDictXY['y']= 1
indxDictXY['z']= 2
indxDictXY['vx']= 3
indxDictXY['vy']= 4
indxDictXY['vz']= 5
indxDictLB= {}
indxDictLB['ll']= 0
indxDictLB['bb']= 1
indxDictLB['dist']= 2
indxDictLB['vlos']= 3
indxDictLB['pmll']= 4
indxDictLB['pmbb']= 5
if coord[0]:
relevantCov= self._allErrCovs
relevantDict= indxDict
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._vo,self._vo,
self._ro,self._vo,1.])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[1]:
relevantCov= self._allErrCovsXY
relevantDict= indxDictXY
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._ro,self._ro,
self._vo,self._vo,self._vo])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[2]:
relevantCov= self._allErrCovsLBUnscaled
relevantDict= indxDictLB
indx0= numpy.array([[relevantDict[d1.lower()],relevantDict[d1.lower()]],
[relevantDict[d2.lower()],relevantDict[d2.lower()]]])
indx1= numpy.array([[relevantDict[d1.lower()],relevantDict[d2.lower()]],
[relevantDict[d1.lower()],relevantDict[d2.lower()]]])
cov= relevantCov[:,indx0,indx1] #cov contains all nTrackChunks covs
if not interp:
out= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
out[ii]= minEigvec*numpy.sqrt(covEig[0][minIndx])
eigDir= minEigvec
else:
#We slerp the minor eigenvector and interpolate the eigenvalue
#First store all of the eigenvectors on the track
allEigval= numpy.empty(self._nTrackChunks)
allEigvec= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
allEigval[ii]= numpy.sqrt(covEig[0][minIndx])
allEigvec[ii]= minEigvec
eigDir= minEigvec
#Now interpolate where needed
interpEigval=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allEigval,k=3)
interpolatedEigval= interpEigval(self._interpolatedThetasTrack)
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
2))
for ii in range(self._nTrackChunks-1):
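                # standard slerp between consecutive eigenvectors: weights
                # sin((1-t)*Omega)/sin(Omega) and sin(t*Omega)/sin(Omega), with
                # Omega the angle between the two eigenvectors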
slerpOmega= numpy.arccos(numpy.sum(allEigvec[ii]*allEigvec[ii+1]))
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(2):
interpolatedEigvec[slerpIndx,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmega)*allEigvec[ii,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmega)*allEigvec[ii+1,jj])/numpy.sin(slerpOmega)
out= numpy.tile(interpolatedEigval.T,(2,1)).T*interpolatedEigvec
if coord[2]: #if LB, undo rescalings that were applied before
out[:,0]*= self._ErrCovsLBScale[relevantDict[d1.lower()]]
out[:,1]*= self._ErrCovsLBScale[relevantDict[d2.lower()]]
return (out[:,0],out[:,1])
def plotCompareTrackAAModel(self,**kwargs):
"""
NAME:
plotCompareTrackAAModel
PURPOSE:
plot the comparison between the underlying model's dOmega_perp vs. dangle_r (line) and the track in (x,v)'s dOmega_perp vs. dangle_r (dots; explicitly calculating the track's action-angle coordinates)
INPUT:
galpy.util.plot.plot kwargs
OUTPUT:
plot
HISTORY:
2014-08-27 - Written - Bovy (IAS)
"""
#First calculate the model
model_adiff= (self._ObsTrackAA[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
model_operp= numpy.dot(self._ObsTrackAA[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
#Then calculate the track's frequency-angle coordinates
if self._multi is None:
aatrack= numpy.empty((self._nTrackChunks,6))
for ii in range(self._nTrackChunks):
aatrack[ii]= self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[ii,:]),
use_physical=False)[3:]
else:
aatrack= numpy.reshape(\
multi.parallel_map(
(lambda x: self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[x,:]),use_physical=False)[3:]),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi])),(self._nTrackChunks,6))
track_adiff= (aatrack[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
track_operp= numpy.dot(aatrack[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
overplot= kwargs.pop('overplot',False)
yrange= kwargs.pop('yrange',
[0.,numpy.amax(numpy.hstack((model_operp,track_operp)))*1.1])
xlabel= kwargs.pop('xlabel',r'$\Delta \theta_R$')
ylabel= kwargs.pop('ylabel',r'$\Delta \Omega_\parallel$')
plot.plot(model_adiff,model_operp,'k-',overplot=overplot,
xlabel=xlabel,ylabel=ylabel,yrange=yrange,**kwargs)
plot.plot(track_adiff,track_operp,'ko',overplot=True,
**kwargs)
return None
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment(quantity=False)) < 1./180.*numpy.pi:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment(quantity=False)) >= 1./180.*numpy.pi \
and numpy.fabs(self.misalignment(quantity=False)) < 3./180.*numpy.pi:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment(quantity=False)) >= 3./180.*numpy.pi:
self.nTrackIterations= 2
return None
def _determine_stream_track(self,nTrackChunks):
"""Determine the track of the stream in real space"""
#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
if nTrackChunks is None:
#default is floor(self._deltaAngleTrack/0.15)+1
self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
else:
self._nTrackChunks= nTrackChunks
if self._nTrackChunks < 4: self._nTrackChunks= 4
if not hasattr(self,'nInterpolatedTrackChunks'):
self.nInterpolatedTrackChunks= 1001
dt= self._deltaAngleTrack\
/self._progenitor_Omega_along_dOmega
self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
if self._useTM:
return self._determine_stream_track_TM()
#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
auxiliaryTrack= Orbit(prog_stream_offset[3])
if dt < 0.:
self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
#Flip velocities before integrating
auxiliaryTrack= auxiliaryTrack.flip()
auxiliaryTrack.integrate(self._trackts,self._pot)
if dt < 0.:
#Flip velocities again
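            # only the velocity columns (indices 1, 2, 4: vR, vT, vz) change sign;
            # R, z, and phi are left untouched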
auxiliaryTrack.orbit[...,1]= -auxiliaryTrack.orbit[...,1]
auxiliaryTrack.orbit[...,2]= -auxiliaryTrack.orbit[...,2]
auxiliaryTrack.orbit[...,4]= -auxiliaryTrack.orbit[...,4]
#Calculate the actions, frequencies, and angle for this auxiliary orbit
acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),
use_physical=False)
        auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
auxiliary_Omega_along_dOmega= \
numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
allAcfsTrack= numpy.empty((self._nTrackChunks,9))
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
auxiliaryTrack,
self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Repeat the track calculation using the previous track, to get closer to it
for nn in range(self.nTrackIterations):
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
Orbit(ObsTrack[ii,:]),
0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x:self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Store the track
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._allAcfsTrack= allAcfsTrack
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
self._calc_ObsTrackXY()
return None
def _calc_ObsTrackXY(self):
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._ObsTrackXY= numpy.empty_like(self._ObsTrack)
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
self._ObsTrackXY[:,0]= TrackX
self._ObsTrackXY[:,1]= TrackY
self._ObsTrackXY[:,2]= TrackZ
self._ObsTrackXY[:,3]= TrackvX
self._ObsTrackXY[:,4]= TrackvY
self._ObsTrackXY[:,5]= TrackvZ
return None
def _determine_stream_track_TM(self):
# With TM, can get the track in a single shot
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
if self._approxConstTrackFreq:
alljacsTrack, allinvjacsTrack, ObsTrack, ObsTrackAA, detdOdJps= \
_determine_stream_track_TM_approxConstantTrackFreq(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack)
#Store the track, didn't compute _allAcfsTrack
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
self._calc_ObsTrackXY()
return None
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_TM_single(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[ii])
alljacsTrack[ii,:,:]= multiOut[0]
allinvjacsTrack[ii,:,:]= multiOut[1]
ObsTrack[ii,:]= multiOut[2]
ObsTrackAA[ii,:]= multiOut[3]
detdOdJps[ii]= multiOut[4]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_TM_single(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
alljacsTrack[ii,:,:]= multiOut[ii][0]
allinvjacsTrack[ii,:,:]= multiOut[ii][1]
ObsTrack[ii,:]= multiOut[ii][2]
ObsTrackAA[ii,:]= multiOut[ii][3]
detdOdJps[ii]= multiOut[ii][4]
#Store the track, didn't compute _allAcfsTrack
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._calc_ObsTrackXY()
return None
def _determine_stream_spread(self,simple=_USESIMPLE):
"""Determine the spread around the stream track, just sets matrices that describe the covariances"""
allErrCovs= numpy.empty((self._nTrackChunks,6,6))
if self._multi is None:
for ii in range(self._nTrackChunks):
allErrCovs[ii]= _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[ii],
lambda x: self.sigOmega(x,use_physical=False),
lambda y: self.sigangledAngle(y,simple=simple,use_physical=False),
self._allinvjacsTrack[ii])
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[x],
lambda x: self.sigOmega(x,use_physical=False),
lambda y: self.sigangledAngle(y,simple=simple,use_physical=False),
self._allinvjacsTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allErrCovs[ii]= multiOut[ii]
self._allErrCovs= allErrCovs
#Also propagate to XYZ coordinates
allErrCovsXY= numpy.empty_like(self._allErrCovs)
allErrCovsEigvalXY= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecXY= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjac= coords.cyl_to_rect_jac(*self._ObsTrack[ii])
allErrCovsXY[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovs[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsXY[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalXY[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecXY[ii]= teig[1][:,sortIndx]
self._allErrCovsXY= allErrCovsXY
#Interpolate the allErrCovsXY covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalXY=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalXY[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
interpolatedAllErrCovsXY= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalXY[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecXY[ii,:,jj]*allErrCovsEigvecXY[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecXY[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecXY[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsXY[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsXY= interpolatedAllErrCovsXY
#Also interpolate in l and b coordinates
self._determine_stream_spreadLB(simple=simple)
return None
def _determine_stream_spreadLB(self,simple=_USESIMPLE,
ro=None,vo=None,
R0=None,Zsun=None,vsun=None):
"""Determine the spread in the stream in observable coordinates"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
if ro is None:
ro= self._ro
if vo is None:
vo= self._vo
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
allErrCovsLB= numpy.empty_like(self._allErrCovs)
obs= [R0,0.,Zsun]
obs.extend(vsun)
obskwargs= {}
obskwargs['ro']= ro
obskwargs['vo']= vo
obskwargs['obs']= obs
obskwargs['quantity']= False
self._ErrCovsLBScale= [180.,90.,
self._progenitor.dist(**obskwargs),
numpy.fabs(self._progenitor.vlos(**obskwargs)),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.)]
allErrCovsEigvalLB= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecLB= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjacXY= coords.galcenrect_to_XYZ_jac(*self._ObsTrackXY[ii])
tjacLB= coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
tjacLB[:3,:]/= ro
tjacLB[3:,:]/= vo
for jj in range(6):
tjacLB[:,jj]*= self._ErrCovsLBScale[jj]
tjac= numpy.dot(numpy.linalg.inv(tjacLB),tjacXY)
allErrCovsLB[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovsXY[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsLB[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalLB[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecLB[ii]= teig[1][:,sortIndx]
self._allErrCovsLBUnscaled= allErrCovsLB
#Interpolate the allErrCovsLB covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalLB=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalLB[:,ii],
k=3) for ii in range(6)]
        #Now build the interpolated allErrCovsLB using slerp
interpolatedAllErrCovsLB= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalLB[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecLB[ii,:,jj]*allErrCovsEigvecLB[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecLB[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecLB[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsLB[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsLBUnscaled= interpolatedAllErrCovsLB
#Also calculate the (l,b,..) -> (X,Y,..) Jacobian at all of the interpolated and not interpolated points
trackLogDetJacLB= numpy.empty_like(self._thetasTrack)
interpolatedTrackLogDetJacLB=\
numpy.empty_like(self._interpolatedThetasTrack)
for ii in range(self._nTrackChunks):
tjacLB= coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
trackLogDetJacLB[ii]= numpy.log(numpy.linalg.det(tjacLB))
self._trackLogDetJacLB= trackLogDetJacLB
for ii in range(len(self._interpolatedThetasTrack)):
tjacLB=\
coords.lbd_to_XYZ_jac(*self._interpolatedObsTrackLB[ii],
degree=True)
interpolatedTrackLogDetJacLB[ii]=\
numpy.log(numpy.linalg.det(tjacLB))
self._interpolatedTrackLogDetJacLB= interpolatedTrackLogDetJacLB
return None
def _interpolate_stream_track(self):
"""Build interpolations of the stream track"""
if hasattr(self,'_interpolatedThetasTrack'):
return None #Already did this
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
#Interpolate
self._interpTrackX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackX,k=3)
self._interpTrackY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackY,k=3)
self._interpTrackZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackZ,k=3)
self._interpTrackvX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvX,k=3)
self._interpTrackvY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvY,k=3)
self._interpTrackvZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvZ,k=3)
#Now store an interpolated version of the stream track
self._interpolatedThetasTrack=\
numpy.linspace(0.,self._deltaAngleTrack,
self.nInterpolatedTrackChunks)
self._interpolatedObsTrackXY= numpy.empty((len(self._interpolatedThetasTrack),6))
self._interpolatedObsTrackXY[:,0]=\
self._interpTrackX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,1]=\
self._interpTrackY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,2]=\
self._interpTrackZ(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,3]=\
self._interpTrackvX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,4]=\
self._interpTrackvY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,5]=\
self._interpTrackvZ(self._interpolatedThetasTrack)
#Also in cylindrical coordinates
self._interpolatedObsTrack= \
numpy.empty((len(self._interpolatedThetasTrack),6))
tR,tphi,tZ= coords.rect_to_cyl(self._interpolatedObsTrackXY[:,0],
self._interpolatedObsTrackXY[:,1],
self._interpolatedObsTrackXY[:,2])
tvR,tvT,tvZ=\
coords.rect_to_cyl_vec(self._interpolatedObsTrackXY[:,3],
self._interpolatedObsTrackXY[:,4],
self._interpolatedObsTrackXY[:,5],
tR,tphi,tZ,cyl=True)
self._interpolatedObsTrack[:,0]= tR
self._interpolatedObsTrack[:,1]= tvR
self._interpolatedObsTrack[:,2]= tvT
self._interpolatedObsTrack[:,3]= tZ
self._interpolatedObsTrack[:,4]= tvZ
self._interpolatedObsTrack[:,5]= tphi
return None
def _interpolate_stream_track_aA(self):
"""Build interpolations of the stream track in action-angle coordinates"""
if hasattr(self,'_interpolatedObsTrackAA'):
return None #Already did this
#Calculate 1D meanOmega on a fine grid in angle and interpolate
if not hasattr(self,'_interpolatedThetasTrack'):
self._interpolate_stream_track()
dmOs= numpy.array([self.meanOmega(da,oned=True,use_physical=False)
for da in self._interpolatedThetasTrack])
self._interpTrackAAdmeanOmegaOneD=\
interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,dmOs,k=3)
#Build the interpolated AA
self._interpolatedObsTrackAA=\
numpy.empty((len(self._interpolatedThetasTrack),6))
for ii in range(len(self._interpolatedThetasTrack)):
self._interpolatedObsTrackAA[ii,:3]=\
self._progenitor_Omega+dmOs[ii]*self._dsigomeanProgDirection\
*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
self._progenitor_angle+self._interpolatedThetasTrack[ii]\
*self._dsigomeanProgDirection*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
numpy.mod(self._interpolatedObsTrackAA[ii,3:],2.*numpy.pi)
return None
def calc_stream_lb(self,
vo=None,ro=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
calc_stream_lb
PURPOSE:
convert the stream track to observational coordinates and store
INPUT:
Coordinate transformation inputs (all default to the instance-wide
values):
vo= circular velocity to normalize velocities with
ro= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(none)
HISTORY:
2013-12-02 - Written - Bovy (IAS)
"""
if vo is None:
vo= self._vo
if ro is None:
ro= self._ro
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
self._ObsTrackLB= numpy.empty_like(self._ObsTrack)
XYZ= coords.galcencyl_to_XYZ(self._ObsTrack[:,0]*ro,
self._ObsTrack[:,5],
self._ObsTrack[:,3]*ro,
Xsun=R0,Zsun=Zsun).T
vXYZ= coords.galcencyl_to_vxvyvz(self._ObsTrack[:,1]*vo,
self._ObsTrack[:,2]*vo,
self._ObsTrack[:,4]*vo,
self._ObsTrack[:,5],
vsun=vsun,Xsun=R0,Zsun=Zsun).T
slbd=coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],slbd[:,2],
degree=True)
self._ObsTrackLB[:,0]= slbd[:,0]
self._ObsTrackLB[:,1]= slbd[:,1]
self._ObsTrackLB[:,2]= slbd[:,2]
self._ObsTrackLB[:,3]= svlbd[:,0]
self._ObsTrackLB[:,4]= svlbd[:,1]
self._ObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_interpolatedObsTrackXY'):
#Do the same for the interpolated track
self._interpolatedObsTrackLB=\
numpy.empty_like(self._interpolatedObsTrackXY)
XYZ=\
coords.galcenrect_to_XYZ(\
self._interpolatedObsTrackXY[:,0]*ro,
self._interpolatedObsTrackXY[:,1]*ro,
self._interpolatedObsTrackXY[:,2]*ro,
Xsun=R0,Zsun=Zsun).T
vXYZ=\
coords.galcenrect_to_vxvyvz(\
self._interpolatedObsTrackXY[:,3]*vo,
self._interpolatedObsTrackXY[:,4]*vo,
self._interpolatedObsTrackXY[:,5]*vo,
vsun=vsun,Xsun=R0,Zsun=Zsun).T
slbd=coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
self._interpolatedObsTrackLB[:,0]= slbd[:,0]
self._interpolatedObsTrackLB[:,1]= slbd[:,1]
self._interpolatedObsTrackLB[:,2]= slbd[:,2]
self._interpolatedObsTrackLB[:,3]= svlbd[:,0]
self._interpolatedObsTrackLB[:,4]= svlbd[:,1]
self._interpolatedObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_allErrCovsLBUnscaled'):
#Re-calculate this
self._determine_stream_spreadLB(simple=_USESIMPLE,
vo=vo,ro=ro,
R0=R0,Zsun=Zsun,vsun=vsun)
return None
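    # Typical use (a sketch; `sdf` assumed set up): compute the track in observable
    # coordinates once and then read it off directly
    #   >>> sdf.calc_stream_lb()
    #   >>> sdf._ObsTrackLB[:,0]  # Galactic longitude in deg along the track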
def _find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""For backward compatibility"""
return self.find_closest_trackpoint(R,vR,vT,z,vz,phi,
interp=interp,xy=xy,
usev=usev)
def find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""
NAME:
find_closest_trackpoint
PURPOSE:
find the closest point on the stream track to a given point
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
xy= (False) if True, input is X,Y,Z,vX,vY,vZ in Galactocentric rectangular coordinates; if xy, some coordinates may be missing (given as None) and they will not be used
usev= (False) if True, also use velocities to find the closest point
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
if xy:
X= R
Y= vR
Z= vT
else:
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if xy and usev:
vX= z
vY= vz
vZ= phi
elif usev:
vX= vR*numpy.cos(phi)-vT*numpy.sin(phi)
vY= vR*numpy.sin(phi)+vT*numpy.cos(phi)
vZ= vz
present= [not X is None,not Y is None,not Z is None]
if usev: present.extend([not vX is None,not vY is None,not vZ is None])
present= numpy.array(present,dtype='float')
if X is None: X= 0.
if Y is None: Y= 0.
if Z is None: Z= 0.
if usev and vX is None: vX= 0.
if usev and vY is None: vY= 0.
if usev and vZ is None: vZ= 0.
if interp:
dist2= present[0]*(X-self._interpolatedObsTrackXY[:,0])**2.\
+present[1]*(Y-self._interpolatedObsTrackXY[:,1])**2.\
+present[2]*(Z-self._interpolatedObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._interpolatedObsTrackXY[:,3])**2.\
+present[4]*(vY-self._interpolatedObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._interpolatedObsTrackXY[:,5])**2.
else:
dist2= present[0]*(X-self._ObsTrackXY[:,0])**2.\
+present[1]*(Y-self._ObsTrackXY[:,1])**2.\
+present[2]*(Z-self._ObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._ObsTrackXY[:,3])**2.\
+present[4]*(vY-self._ObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._ObsTrackXY[:,5])**2.
return numpy.argmin(dist2)
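    # Example (a sketch; `sdf` assumed set up): closest interpolated track point to a
    # point given only by its Galactocentric rectangular position (velocities omitted)
    #   >>> sdf.find_closest_trackpoint(1.6,0.2,0.1,None,None,None,
    #   ...                             xy=True,usev=False)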
def _find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
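        """For backward compatibility"""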
return self.find_closest_trackpointLB(l,b,D,vlos,pmll,pmbb,
interp=interp,
usev=usev)
def find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
"""
NAME:
find_closest_trackpointLB
PURPOSE:
find the closest point on the stream track to a given point in (l,b,...) coordinates
INPUT:
l,b,D,vlos,pmll,pmbb- coordinates in (deg,deg,kpc,km/s,mas/yr,mas/yr)
interp= (True) if True, return the closest index on the interpolated track
usev= (False) if True, also use the velocity components (default is to only use the positions)
OUTPUT:
index of closest track point on the interpolated or not-interpolated track
HISTORY:
           2013-12-17 - Written - Bovy (IAS)
"""
if interp:
nTrackPoints= len(self._interpolatedThetasTrack)
else:
nTrackPoints= len(self._thetasTrack)
if l is None:
l= 0.
trackL= numpy.zeros(nTrackPoints)
elif interp:
trackL= self._interpolatedObsTrackLB[:,0]
else:
trackL= self._ObsTrackLB[:,0]
if b is None:
b= 0.
trackB= numpy.zeros(nTrackPoints)
elif interp:
trackB= self._interpolatedObsTrackLB[:,1]
else:
trackB= self._ObsTrackLB[:,1]
if D is None:
D= 1.
trackD= numpy.ones(nTrackPoints)
elif interp:
trackD= self._interpolatedObsTrackLB[:,2]
else:
trackD= self._ObsTrackLB[:,2]
if usev:
if vlos is None:
vlos= 0.
trackVlos= numpy.zeros(nTrackPoints)
elif interp:
trackVlos= self._interpolatedObsTrackLB[:,3]
else:
trackVlos= self._ObsTrackLB[:,3]
if pmll is None:
pmll= 0.
trackPmll= numpy.zeros(nTrackPoints)
elif interp:
trackPmll= self._interpolatedObsTrackLB[:,4]
else:
trackPmll= self._ObsTrackLB[:,4]
if pmbb is None:
pmbb= 0.
trackPmbb= numpy.zeros(nTrackPoints)
elif interp:
trackPmbb= self._interpolatedObsTrackLB[:,5]
else:
trackPmbb= self._ObsTrackLB[:,5]
#Calculate rectangular coordinates
XYZ= coords.lbd_to_XYZ(l,b,D,degree=True)
trackXYZ= coords.lbd_to_XYZ(trackL,trackB,trackD,degree=True)
if usev:
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vlos,pmll,pmbb,
XYZ[0],XYZ[1],XYZ[2],
XYZ=True)
trackvxvyvz= coords.vrpmllpmbb_to_vxvyvz(trackVlos,trackPmll,
trackPmbb,
trackXYZ[:,0],
trackXYZ[:,1],
trackXYZ[:,2],
XYZ=True)
#Calculate distance
dist2= (XYZ[0]-trackXYZ[:,0])**2.\
+(XYZ[1]-trackXYZ[:,1])**2.\
+(XYZ[2]-trackXYZ[:,2])**2.
if usev:
dist2+= (vxvyvz[0]-trackvxvyvz[:,0])**2.\
+(vxvyvz[1]-trackvxvyvz[:,1])**2.\
+(vxvyvz[2]-trackvxvyvz[:,2])**2.
return numpy.argmin(dist2)
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_find_closest_trackpointaA
PURPOSE:
find the closest point on the stream track to a given point in
frequency-angle coordinates
INPUT:
Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
#Calculate angle offset along the stream parallel to the stream track,
# finding first the angle among a few wraps where the point is
# closest to the parallel track and then the closest trackpoint to that
# point
da= numpy.stack(\
numpy.meshgrid(_TWOPIWRAPS+ar-self._progenitor_angle[0],
_TWOPIWRAPS+ap-self._progenitor_angle[1],
_TWOPIWRAPS+az-self._progenitor_angle[2],
indexing='xy')).T.reshape((len(_TWOPIWRAPS)**3,3))
dapar= self._sigMeanSign*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\
numpy.cross(da,self._dsigomeanProgDirection),axis=1))],
self._dsigomeanProgDirection)
if interp:
dist= numpy.fabs(dapar-self._interpolatedThetasTrack)
else:
dist= numpy.fabs(dapar-self._thetasTrack)
return numpy.argmin(dist)
#########DISTRIBUTION AS A FUNCTION OF ANGLE ALONG THE STREAM##################
def pOparapar(self,Opar,apar,tdisrupt=None):
"""
NAME:
pOparapar
PURPOSE:
return the probability of a given parallel (frequency,angle) offset pair
INPUT:
Opar - parallel frequency offset (array) (can be Quantity)
           apar - parallel angle offset along the stream (scalar) (can be Quantity)
           tdisrupt= (self._tdisrupt) maximum stripping time to assume
OUTPUT:
p(Opar,apar)
HISTORY:
2015-12-07 - Written - Bovy (UofT)
"""
Opar= conversion.parse_frequency(Opar,ro=self._ro,vo=self._vo)
apar= conversion.parse_angle(apar)
if tdisrupt is None: tdisrupt= self._tdisrupt
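        # Model: debris is released at a uniform rate over [0,tdisrupt] with a
        # Gaussian parallel-frequency distribution, so p(Opar,apar) is the
        # (unnormalized) Gaussian in Opar restricted to 0 <= apar/Opar < tdisrupt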
if isinstance(Opar,(int,float,numpy.float32,numpy.float64)):
Opar= numpy.array([Opar])
out= numpy.zeros(len(Opar))
# Compute ts
ts= apar/Opar
# Evaluate
out[(ts < tdisrupt)*(ts >= 0.)]=\
numpy.exp(-0.5*(Opar[(ts < tdisrupt)*(ts >= 0.)]-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out
def density_par(self,dangle,coord='apar',tdisrupt=None,
**kwargs):
"""
NAME:
density_par
PURPOSE:
calculate the density as a function of a parallel coordinate
INPUT:
dangle - parallel angle offset for this coordinate value
coord - coordinate to return the density in ('apar' [default],
                   'll','ra','customra','phi')
           tdisrupt= (self._tdisrupt) maximum stripping time to assume
OUTPUT:
density(angle)
HISTORY:
2015-11-17 - Written - Bovy (UofT)
"""
if coord.lower() != 'apar':
# Need to compute the Jacobian for this coordinate value
ddangle= dangle+10.**-7.
ddangle-= dangle
if coord.lower() == 'phi':
phi_h= coords.rect_to_cyl(\
self._interpTrackX(dangle+ddangle),
self._interpTrackY(dangle+ddangle),
self._interpTrackZ(dangle+ddangle))
phi= coords.rect_to_cyl(\
self._interpTrackX(dangle),
self._interpTrackY(dangle),
self._interpTrackZ(dangle))
jac= numpy.fabs(phi_h[1]-phi[1])/ddangle
elif coord.lower() == 'll' or coord.lower() == 'ra' \
or coord.lower() == 'customra':
XYZ_h= coords.galcenrect_to_XYZ(\
self._interpTrackX(dangle+ddangle)*self._ro,
self._interpTrackY(dangle+ddangle)*self._ro,
self._interpTrackZ(dangle+ddangle)*self._ro,
Xsun=self._R0,Zsun=self._Zsun)
lbd_h= coords.XYZ_to_lbd(XYZ_h[0],XYZ_h[1],XYZ_h[2],
degree=True)
XYZ= coords.galcenrect_to_XYZ(\
self._interpTrackX(dangle)*self._ro,
self._interpTrackY(dangle)*self._ro,
self._interpTrackZ(dangle)*self._ro,
Xsun=self._R0,Zsun=self._Zsun)
lbd= coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
if coord.lower() == 'll':
jac= numpy.fabs(lbd_h[0]-lbd[0])/ddangle
else:
radec_h= coords.lb_to_radec(lbd_h[0],
lbd_h[1],
degree=True)
radec= coords.lb_to_radec(lbd[0],
lbd[1],
degree=True)
if coord.lower() == 'ra':
jac= numpy.fabs(radec_h[0]-radec[0])/ddangle
else:
xieta_h= coords.radec_to_custom(\
radec_h[0],radec_h[1],T=self._custom_transform,
degree=True)
xieta= coords.radec_to_custom(\
radec[0],radec[1],T=self._custom_transform,
degree=True)
jac= numpy.fabs(xieta_h[0]-xieta[0])/ddangle
else:
raise ValueError('Coordinate input %s not supported by density_par' % coord)
else:
jac= 1.
return self._density_par(dangle,tdisrupt=tdisrupt,**kwargs)/jac
def _density_par(self,dangle,tdisrupt=None):
"""The raw density as a function of parallel angle"""
if tdisrupt is None: tdisrupt= self._tdisrupt
dOmin= dangle/tdisrupt
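        # The density is the fraction of the parallel-frequency Gaussian with
        # dO > dOmin, i.e., the debris that has had time to reach this angle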
# Normalize to 1 close to progenitor
return 0.5\
*(1.+special.erf((self._meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2])))
def length(self,threshold=0.2,phys=False,ang=False,tdisrupt=None,
**kwargs):
"""
NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
coord - coordinate to return the density in ('apar' [default],
'll','ra','customra','phi')
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT)
"""
peak_dens= self.density_par(0.1,tdisrupt=tdisrupt,**kwargs) # assume that this is the peak
try:
result=\
optimize.brentq(lambda x: self.density_par(x,
tdisrupt=tdisrupt,
**kwargs)\
-peak_dens*threshold,
0.1,self._deltaAngleTrack)
except RuntimeError: #pragma: no cover
raise RuntimeError('Length could not be returned, because length method failed to find the threshold value')
except ValueError:
raise ValueError('Length could not be returned, because length method failed to initialize')
if phys:
# Need to now integrate length
dXda= self._interpTrackX.derivative()
dYda= self._interpTrackY.derivative()
dZda= self._interpTrackZ.derivative()
result= integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.\
+dYda(da)**2.\
+dZda(da)**2.),
0.,result)[0]*self._ro
elif ang:
# Need to now integrate length
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,0],-1)
-self._interpolatedObsTrackLB[:,0]) > 0.:
ll= dePeriod(self._interpolatedObsTrackLB[:,0][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
ll= dePeriod(self._interpolatedObsTrackLB[::-1,0][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,1],-1)
-self._interpolatedObsTrackLB[:,1]) > 0.:
bb= dePeriod(self._interpolatedObsTrackLB[:,1][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
bb= dePeriod(self._interpolatedObsTrackLB[::-1,1][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
dlda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,ll,k=3).derivative()
dbda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,bb,k=3).derivative()
result= integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.\
+dbda(da)**2.),
0.,result)[0]
return result
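    # Typical calls of length (a sketch; `sdf` assumed set up):
    #   >>> sdf.length()           # length in parallel angle (rad)
    #   >>> sdf.length(phys=True)  # physical length in kpc
    #   >>> sdf.length(ang=True)   # sky arc length in deg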
@physical_conversion('frequency',pop=True)
def meanOmega(self,dangle,oned=False,offset_sign=None,
tdisrupt=None):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
           offset_sign= sign of the frequency offset (shouldn't be set)
           tdisrupt= (self._tdisrupt) maximum stripping time to assume
OUTPUT:
mean Omega
HISTORY:
2013-12-01 - Written - Bovy (IAS)
"""
if offset_sign is None: offset_sign= self._sigMeanSign
if tdisrupt is None: tdisrupt= self._tdisrupt
dOmin= dangle/tdisrupt
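        # Mean of the parallel-frequency Gaussian truncated below at dOmin:
        # only debris with dO > dOmin has had time to reach this angle offset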
meandO= self._meandO
dO1D= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO)
if oned: return dO1D
else:
return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\
*offset_sign
@physical_conversion('frequency',pop=True)
def sigOmega(self,dangle):
"""
NAME:
           sigOmega
PURPOSE:
calculate the 1D sigma in frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
OUTPUT:
sigma Omega
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
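        # Second moment of the same truncated Gaussian as in meanOmega; the
        # dispersion follows by subtracting the squared mean below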
sO1D2= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*(meandO+dOmin)\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO**2.+self._sortedSigOEig[2])
mO= self.meanOmega(dangle,oned=True,use_physical=False)
return numpy.sqrt(sO1D2-mO**2.)
def ptdAngle(self,t,dangle):
"""
NAME:
           ptdAngle
PURPOSE:
return the probability of a given stripping time at a given angle along the stream
INPUT:
t - stripping time
dangle - angle offset along the stream
OUTPUT:
p(td|dangle)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isinstance(t,(int,float,numpy.float32,numpy.float64)):
t= numpy.array([t])
out= numpy.zeros(len(t))
        # Only stripping times 0 < t < tdisrupt contribute; use a mask so that
        # array-valued t is handled correctly as well
        tindx= (t > 0.)*(t < self._tdisrupt)
        if not numpy.any(tindx):
            return out
        dO= dangle/t[tindx]
        #p(t|a) = \int dO p(O,t|a) = \int dO p(t|O,a) p(O|a) = \int dO delta (t-a/O)p(O|a) = O**2/a p(O|a); p(O|a) = \int dt p(a|O,t) p(O)p(t) = 1/O p(O)
        out[tindx]=\
            dO**2./dangle*numpy.exp(-0.5*(dO-self._meandO)**2.\
                                        /self._sortedSigOEig[2])/\
                                        numpy.sqrt(self._sortedSigOEig[2])
return out
@physical_conversion('time',pop=True)
def meantdAngle(self,dangle):
"""
NAME:
meantdAngle
PURPOSE:
calculate the mean stripping time at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
mean stripping time at this dangle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
num= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return self._tdisrupt
elif numpy.isnan(denom): return 0.
else: return num/denom
@physical_conversion('time',pop=True)
def sigtdAngle(self,dangle):
"""
NAME:
sigtdAngle
PURPOSE:
calculate the dispersion in the stripping times at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
dispersion in the stripping times at this angle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
numsig2= integrate.quad(lambda x: x**2.*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
nummean= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def pangledAngle(self,angleperp,dangle,smallest=False):
"""
NAME:
pangledAngle
PURPOSE:
return the probability of a given perpendicular angle at a given angle along the stream
INPUT:
angleperp - perpendicular angle
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
p(angle_perp|dangle)
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
out= numpy.zeros(len(angleperp))
out= numpy.array([\
integrate.quad(self._pangledAnglet,0.,self._tdisrupt,
(ap,dangle,smallest))[0] for ap in angleperp])
return out
@physical_conversion('angle',pop=True)
def meanangledAngle(self,dangle,smallest=False):
"""
NAME:
meanangledAngle
PURPOSE:
calculate the mean perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
OUTPUT:
mean perpendicular angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])\
*self._tdisrupt*5.,
self._sigangle])
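        # Note: both integrals below run from +aplow to -aplow; the resulting sign
        # flip cancels in the ratio num/denom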
num= integrate.quad(lambda x: x*self.pangledAngle(x,dangle,smallest),
aplow,-aplow)[0]
denom= integrate.quad(self.pangledAngle,aplow,-aplow,
(dangle,smallest))[0]
if denom == 0.: return numpy.nan
else: return num/denom
@physical_conversion('angle',pop=True)
def sigangledAngle(self,dangle,assumeZeroMean=True,smallest=False,
simple=False):
"""
NAME:
sigangledAngle
PURPOSE:
calculate the dispersion in the perpendicular angle at a given angle
INPUT:
dangle - angle offset along the stream
assumeZeroMean= (True) if True, assume that the mean is zero (should be)
smallest= (False) calculate for smallest eigenvalue direction rather than for middle
simple= (False), if True, return an even simpler estimate
OUTPUT:
dispersion in the perpendicular angle at this angle
HISTORY:
2013-12-06 - Written - Bovy (IAS)
"""
if smallest: eigIndx= 0
else: eigIndx= 1
if simple:
dt= self.meantdAngle(dangle,use_physical=False)
return numpy.sqrt(self._sigangle2
+self._sortedSigOEig[eigIndx]*dt**2.)
aplow= numpy.amax([numpy.sqrt(self._sortedSigOEig[eigIndx])*self._tdisrupt*5.,
self._sigangle])
numsig2= integrate.quad(lambda x: x**2.*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
if not assumeZeroMean:
nummean= integrate.quad(lambda x: x*self.pangledAngle(x,dangle),
aplow,-aplow)[0]
else:
nummean= 0.
denom= integrate.quad(self.pangledAngle,aplow,-aplow,(dangle,))[0]
if denom == 0.: return numpy.nan
else: return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
def _pangledAnglet(self,t,angleperp,dangle,smallest):
"""p(angle_perp|angle_par,time)"""
if smallest: eigIndx= 0
else: eigIndx= 1
if isinstance(angleperp,(int,float,numpy.float32,numpy.float64)):
angleperp= numpy.array([angleperp])
t= numpy.array([t])
out= numpy.zeros_like(angleperp)
tindx= t < self._tdisrupt
out[tindx]=\
numpy.exp(-0.5*angleperp[tindx]**2.\
/(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2))/\
numpy.sqrt(t[tindx]**2.*self._sortedSigOEig[eigIndx]+self._sigangle2)\
*self.ptdAngle(t[t < self._tdisrupt],dangle)
return out
################APPROXIMATE FREQUENCY-ANGLE TRANSFORMATION#####################
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True,cindx=None):
"""
NAME:
_approxaA
PURPOSE:
           return frequency-angle coordinates for a point, based on the linear
           approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT)
"""
if isinstance(R,(int,float,numpy.float32,numpy.float64)): #Scalar input
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if cindx is None:
closestIndx= [self._find_closest_trackpoint(X[ii],Y[ii],Z[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=True,usev=False)
for ii in range(len(R))]
else:
closestIndx= cindx
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
dmJacIndx= (X[ii]-self._ObsTrackXY[jacIndx,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx,2])**2.
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
else:
dm1= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
dm2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
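            # distance-weighted mix of the two nearest Jacobians: the weight of
            # jacIndx2 grows as the point approaches it and vanishes when the point
            # sits exactly on track point jacIndx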
ampJacIndx= numpy.sqrt(dmJacIndx)/(numpy.sqrt(dmJacIndx)\
+numpy.sqrt(dmJacIndx2))
#Make sure phi hasn't wrapped around
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
#Apply closest jacobians
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._alljacsTrack[jacIndx,:,:]
+ampJacIndx*self._alljacsTrack[jacIndx2,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out
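    # Illustrative sketch of _approxaA (the values and the instance name `sdf` are made up,
    # not from the source): for a single phase-space point it returns a (6,1) array with the
    # three frequencies in the first rows and the three angles in the last rows,
    #   >>> Or, Op, Oz, ar, ap, az = sdf._approxaA(1.05, 0.1, 1.1, 0.2, -0.05, 0.8, interp=True)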
def _approxaAInv(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_approxaAInv
PURPOSE:
return R,vR,... coordinates for a point based on the linear
approximation around the stream track
INPUT:
Or,Op,Oz,ar,ap,az - phase space coordinates in frequency-angle
space
interp= (True), if True, use the interpolated track
OUTPUT:
(R,vR,vT,z,vz,phi)
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
if isinstance(Or,(int,float,numpy.float32,numpy.float64)): #Scalar input
Or= numpy.array([Or])
Op= numpy.array([Op])
Oz= numpy.array([Oz])
ar= numpy.array([ar])
ap= numpy.array([ap])
az= numpy.array([az])
#Calculate apar, angle offset along the stream
closestIndx= [self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=interp)\
for ii in range(len(Or))]
out= numpy.empty((6,len(Or)))
for ii in range(len(Or)):
dOa= numpy.empty(6)
if interp:
dOa[0]= Or[ii]-self._interpolatedObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._interpolatedObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._interpolatedObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._interpolatedObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._interpolatedObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._interpolatedObsTrackAA[closestIndx[ii],5]
jacIndx= self._find_closest_trackpointaA(Or[ii],Op[ii],Oz[ii],
ar[ii],ap[ii],az[ii],
interp=False)
else:
dOa[0]= Or[ii]-self._ObsTrackAA[closestIndx[ii],0]
dOa[1]= Op[ii]-self._ObsTrackAA[closestIndx[ii],1]
dOa[2]= Oz[ii]-self._ObsTrackAA[closestIndx[ii],2]
dOa[3]= ar[ii]-self._ObsTrackAA[closestIndx[ii],3]
dOa[4]= ap[ii]-self._ObsTrackAA[closestIndx[ii],4]
dOa[5]= az[ii]-self._ObsTrackAA[closestIndx[ii],5]
jacIndx= closestIndx[ii]
# Find 2nd closest Jacobian point for smoothing
da= numpy.stack(\
numpy.meshgrid(_TWOPIWRAPS+ar[ii]-self._progenitor_angle[0],
_TWOPIWRAPS+ap[ii]-self._progenitor_angle[1],
_TWOPIWRAPS+az[ii]-self._progenitor_angle[2],
indexing='xy')).T\
.reshape((len(_TWOPIWRAPS)**3,3))
dapar= self._sigMeanSign\
*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\
numpy.cross(da,self._dsigomeanProgDirection),
axis=1))],
self._dsigomeanProgDirection)
dmJacIndx= numpy.fabs(dapar-self._thetasTrack[jacIndx])
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
else:
dm1= numpy.fabs(dapar-self._thetasTrack[jacIndx-1])
dm2= numpy.fabs(dapar-self._thetasTrack[jacIndx+1])
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= dmJacIndx/(dmJacIndx+dmJacIndx2)
#Make sure the angles haven't wrapped around
if dOa[3] > numpy.pi:
dOa[3]-= 2.*numpy.pi
elif dOa[3] < -numpy.pi:
dOa[3]+= 2.*numpy.pi
if dOa[4] > numpy.pi:
dOa[4]-= 2.*numpy.pi
elif dOa[4] < -numpy.pi:
dOa[4]+= 2.*numpy.pi
if dOa[5] > numpy.pi:
dOa[5]-= 2.*numpy.pi
elif dOa[5] < -numpy.pi:
dOa[5]+= 2.*numpy.pi
#Apply closest jacobian
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._allinvjacsTrack[jacIndx,:,:]
+ampJacIndx*self._allinvjacsTrack[jacIndx2,:,:],
dOa)
if interp:
out[:,ii]+= self._interpolatedObsTrack[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrack[closestIndx[ii]]
return out
################################EVALUATE THE DF################################
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the DF
INPUT:
Either:
a) R,vR,vT,z,vz,phi ndarray [nobjects]
b) (Omegar,Omegaphi,Omegaz,angler,anglephi,anglez) tuple if aAInput
where:
Omegar - radial frequency
Omegaphi - azimuthal frequency
Omegaz - vertical frequency
angler - radial angle
anglephi - azimuthal angle
anglez - vertical angle
c) Orbit instance or list thereof
log= if True, return the natural log
aaInput= (False) if True, option b above
OUTPUT:
value of DF
HISTORY:
2013-12-03 - Written - Bovy (IAS)
"""
#First parse log
log= kwargs.pop('log',True)
dOmega, dangle= self.prepData4Call(*args,**kwargs)
#Omega part
dOmega4dfOmega= dOmega\
-numpy.tile(self._dsigomeanProg.T,(dOmega.shape[1],1)).T
logdfOmega= -0.5*numpy.sum(dOmega4dfOmega*
numpy.dot(self._sigomatrixinv,
dOmega4dfOmega),
axis=0)-0.5*self._sigomatrixLogdet\
+numpy.log(numpy.fabs(numpy.dot(self._dsigomeanProgDirection,dOmega)))
#Angle part
dangle2= numpy.sum(dangle**2.,axis=0)
dOmega2= numpy.sum(dOmega**2.,axis=0)
dOmegaAngle= numpy.sum(dOmega*dangle,axis=0)
logdfA= -0.5/self._sigangle2*(dangle2-dOmegaAngle**2./dOmega2)\
-2.*self._lnsigangle-0.5*numpy.log(dOmega2)
#Finite stripping part
a0= dOmegaAngle/numpy.sqrt(2.)/self._sigangle/numpy.sqrt(dOmega2)
ad= numpy.sqrt(dOmega2)/numpy.sqrt(2.)/self._sigangle\
*(self._tdisrupt-dOmegaAngle/dOmega2)
loga= numpy.log((special.erf(a0)+special.erf(ad))/2.) #divided by 2 st 0 for well-within the stream
out= logdfA+logdfOmega+loga+self._logmeandetdOdJp
if log:
return out
else:
return numpy.exp(out)
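    # Illustrative sketch of evaluating the DF (made-up values; `sdf` stands for an initialized
    # instance of this class):
    #   >>> sdf(1.05, 0.1, 1.1, 0.2, -0.05, 0.8)             # natural log of the DF (default)
    #   >>> sdf(1.05, 0.1, 1.1, 0.2, -0.05, 0.8, log=False)  # the DF value itself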
def prepData4Call(self,*args,**kwargs):
"""
NAME:
prepData4Call
PURPOSE:
prepare stream data for the __call__ method
INPUT:
__call__ inputs
OUTPUT:
(dOmega,dangle); wrt the progenitor; each [3,nobj]
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
#First calculate the actionAngle coordinates if they're not given
#as such
freqsAngles= self._parse_call_args(*args,**kwargs)
dOmega= freqsAngles[:3,:]\
-numpy.tile(self._progenitor_Omega.T,(freqsAngles.shape[1],1)).T
dangle= freqsAngles[3:,:]\
-numpy.tile(self._progenitor_angle.T,(freqsAngles.shape[1],1)).T
#Assuming single wrap, resolve large angle differences (wraps should be marginalized over)
dangle[(dangle < -4.)]+= 2.*numpy.pi
dangle[(dangle > 4.)]-= 2.*numpy.pi
return (dOmega,dangle)
def _parse_call_args(self,*args,**kwargs):
"""Helper function to parse the arguments to the __call__ and related functions,
return [6,nobj] array of frequencies (:3) and angles (3:)"""
interp= kwargs.get('interp',self._useInterp)
if len(args) == 5:
raise IOError("Must specify phi for streamdf")
elif len(args) == 6:
if kwargs.get('aAInput',False):
if isinstance(args[0],(int,float,numpy.float32,numpy.float64)):
out= numpy.empty((6,1))
else:
out= numpy.empty((6,len(args[0])))
for ii in range(6):
out[ii,:]= args[ii]
return out
else:
return self._approxaA(*args,interp=interp)
elif isinstance(args[0],Orbit):
if len(args[0].shape) > 1:
raise RuntimeError("Evaluating streamdf with Orbit instances with multi-dimensional shapes is not supported") #pragma: no cover
o= args[0]
return self._approxaA(o.R(),o.vR(),o.vT(),o.z(),o.vz(),o.phi(),
interp=interp)
elif isinstance(args[0],list) and isinstance(args[0][0],Orbit):
if numpy.any([len(no) > 1 for no in args[0]]):
raise RuntimeError('Only single-object Orbit instances can be passed to DF instances at this point') #pragma: no cover
R, vR, vT, z, vz, phi= [], [], [], [], [], []
for o in args[0]:
R.append(o.R())
vR.append(o.vR())
vT.append(o.vT())
z.append(o.z())
vz.append(o.vz())
phi.append(o.phi())
return self._approxaA(numpy.array(R),numpy.array(vR),
numpy.array(vT),numpy.array(z),
numpy.array(vz),numpy.array(phi),
interp=interp)
def callMarg(self,xy,**kwargs):
"""
NAME:
callMarg
PURPOSE:
           evaluate the DF, marginalizing over some directions, in Galactocentric rectangular coordinates (or in observed l,b,D,vlos,pmll,pmbb coordinates)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
nsigma= (3) number of sigma to marginalize the DF over (approximate sigma)
ngl= (5) order of Gauss-Legendre integration
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the marginalized PDF in these coordinates is returned
vo= (220) circular velocity to normalize with when lb=True
ro= (8) Galactocentric radius to normalize with when lb=True
R0= (8) Galactocentric radius of the Sun (kpc)
Zsun= (0.0208) Sun's height above the plane (kpc)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
p(xy) marginalized over missing directions in xy
HISTORY:
2013-12-16 - Written - Bovy (IAS)
"""
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
if numpy.sum(coordGiven) == 6:
raise NotImplementedError("When specifying all coordinates, please use __call__ instead of callMarg")
#First construct the Gaussian approximation at this xy
gaussmean, gaussvar= self.gaussApprox(xy,**kwargs)
cholvar, chollower= stable_cho_factor(gaussvar)
#Now Gauss-legendre integrate over missing directions
ngl= kwargs.get('ngl',5)
nsigma= kwargs.get('nsigma',3)
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
coordEval= []
weightEval= []
jj= 0
baseX= (glx+1)/2.
baseX= list(baseX)
baseX.extend(-(glx+1)/2.)
baseX= numpy.array(baseX)
baseW= glw
baseW= list(baseW)
baseW.extend(glw)
baseW= numpy.array(baseW)
for ii in range(6):
if not coordGiven[ii]:
coordEval.append(nsigma*baseX)
weightEval.append(baseW)
jj+= 1
else:
coordEval.append(xy[ii]*numpy.ones(1))
weightEval.append(numpy.ones(1))
mgrid= numpy.meshgrid(*coordEval,indexing='ij')
mgridNotGiven= numpy.array([mgrid[ii].flatten() for ii in range(6)
if not coordGiven[ii]])
mgridNotGiven= numpy.dot(cholvar,mgridNotGiven)
jj= 0
if coordGiven[0]: iX= mgrid[0]
else:
iX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[1]: iY= mgrid[1]
else:
iY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[2]: iZ= mgrid[2]
else:
iZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[3]: ivX= mgrid[3]
else:
ivX= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[4]: ivY= mgrid[4]
else:
ivY= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
if coordGiven[5]: ivZ= mgrid[5]
else:
ivZ= mgridNotGiven[jj]+gaussmean[jj]
jj+= 1
iXw, iYw, iZw, ivXw, ivYw, ivZw=\
numpy.meshgrid(*weightEval,indexing='ij')
if kwargs.get('lb',False): #Convert to Galactocentric cylindrical coordinates
#Setup coordinate transformation kwargs
vo= kwargs.get('vo',self._vo)
ro= kwargs.get('ro',self._ro)
R0= kwargs.get('R0',self._R0)
Zsun= kwargs.get('Zsun',self._Zsun)
vsun= kwargs.get('vsun',self._vsun)
tXYZ= coords.lbd_to_XYZ(iX.flatten(),iY.flatten(),
iZ.flatten(),
degree=True)
iR,iphi,iZ= coords.XYZ_to_galcencyl(tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],
Xsun=R0,Zsun=Zsun).T
tvxvyvz= coords.vrpmllpmbb_to_vxvyvz(ivX.flatten(),
ivY.flatten(),
ivZ.flatten(),
tXYZ[:,0],tXYZ[:,1],
tXYZ[:,2],XYZ=True)
ivR,ivT,ivZ= coords.vxvyvz_to_galcencyl(tvxvyvz[:,0],
tvxvyvz[:,1],
tvxvyvz[:,2],
iR,iphi,iZ,
galcen=True,
vsun=vsun,
Xsun=R0,Zsun=Zsun).T
iR/= ro
iZ/= ro
ivR/= vo
ivT/= vo
ivZ/= vo
else:
#Convert to cylindrical coordinates
iR,iphi,iZ=\
coords.rect_to_cyl(iX.flatten(),iY.flatten(),iZ.flatten())
ivR,ivT,ivZ=\
coords.rect_to_cyl_vec(ivX.flatten(),ivY.flatten(),
ivZ.flatten(),
iR,iphi,iZ,cyl=True)
#Add the additional Jacobian dXdY/dldb... if necessary
if kwargs.get('lb',False):
#Find the nearest track point
interp= kwargs.get('interp',self._useInterp)
if not 'cindx' in kwargs:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Only l,b,d,... to Galactic X,Y,Z,... is necessary because going
#from Galactic to Galactocentric has Jacobian determinant 1
if interp:
addLogDet= self._interpolatedTrackLogDetJacLB[cindx]
else:
addLogDet= self._trackLogDetJacLB[cindx]
else:
addLogDet= 0.
logdf= self(iR,ivR,ivT,iZ,ivZ,iphi,log=True)
return logsumexp(logdf
+numpy.log(iXw.flatten())
+numpy.log(iYw.flatten())
+numpy.log(iZw.flatten())
+numpy.log(ivXw.flatten())
+numpy.log(ivYw.flatten())
+numpy.log(ivZw.flatten()))\
+0.5*numpy.log(numpy.linalg.det(gaussvar))\
+addLogDet
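    # Illustrative sketch of callMarg (made-up values; `sdf` stands for an initialized
    # instance): dimensions set to None are marginalized over,
    #   >>> sdf.callMarg([1.0, None, 0.2, None, None, None])            # PDF at fixed X and Z
    #   >>> sdf.callMarg([230., 20., None, None, None, None], lb=True)  # at fixed (l,b), observed coords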
def gaussApprox(self,xy,**kwargs):
"""
NAME:
gaussApprox
PURPOSE:
return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned
OUTPUT:
(mean,variance) of the approximate Gaussian DF for the missing directions in xy
HISTORY:
2013-12-12 - Written - Bovy (IAS)
"""
interp= kwargs.get('interp',self._useInterp)
lb= kwargs.get('lb',False)
#What are we looking for
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
nGiven= numpy.sum(coordGiven)
#First find the nearest track point
if not 'cindx' in kwargs and lb:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
elif not 'cindx' in kwargs and not lb:
cindx= self._find_closest_trackpoint(*xy,xy=True,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Get the covariance matrix
if interp and lb:
tcov= self._interpolatedAllErrCovsLBUnscaled[cindx]
tmean= self._interpolatedObsTrackLB[cindx]
elif interp and not lb:
tcov= self._interpolatedAllErrCovsXY[cindx]
tmean= self._interpolatedObsTrackXY[cindx]
elif not interp and lb:
tcov= self._allErrCovsLBUnscaled[cindx]
tmean= self._ObsTrackLB[cindx]
elif not interp and not lb:
tcov= self._allErrCovsXY[cindx]
tmean= self._ObsTrackXY[cindx]
if lb:#Apply scale factors
tcov= copy.copy(tcov)
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1))
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)).T
#Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in Appendix B of 0905.2979v1
V11indx0= numpy.array([[ii for jj in range(6-nGiven)] for ii in range(6) if not coordGiven[ii]])
V11indx1= numpy.array([[ii for ii in range(6) if not coordGiven[ii]] for jj in range(6-nGiven)])
V11= tcov[V11indx0,V11indx1]
V22indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if coordGiven[ii]])
V22indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(nGiven)])
V22= tcov[V22indx0,V22indx1]
V12indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if not coordGiven[ii]])
V12indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(6-nGiven)])
V12= tcov[V12indx0,V12indx1]
#Also get m1 and m2, again following Appendix B of 0905.2979v1
        m1= tmean[True^coordGiven]  # True^mask negates the boolean mask, selecting the missing dimensions
m2= tmean[coordGiven]
#conditional mean and variance
V22inv= numpy.linalg.inv(V22)
v2= numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]])
condMean= m1+numpy.dot(V12,numpy.dot(V22inv,v2-m2))
condVar= V11-numpy.dot(V12,numpy.dot(V22inv,V12.T))
return (condMean,condVar)
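    # Illustrative sketch of gaussApprox (made-up values): with four dimensions set to None,
    #   >>> mean, var = sdf.gaussApprox([1.0, None, 0.2, None, None, None])
    # mean has length 4 and var is 4x4, i.e. the conditional Gaussian over the missing dimensions.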
################################SAMPLE THE DF##################################
def sample(self,n,returnaAdt=False,returndt=False,interp=None,
xy=False,lb=False):
"""
NAME:
sample
PURPOSE:
sample from the DF
INPUT:
n - number of points to return
returnaAdt= (False) if True, return (Omega,angle,dt)
           returndt= (False) if True, also return the time since the star was stripped
interp= (object-wide default) use interpolation of the stream track
xy= (False) if True, return Galactocentric rectangular coordinates
lb= (False) if True, return Galactic l,b,d,vlos,pmll,pmbb coordinates
OUTPUT:
(R,vR,vT,z,vz,phi) of points on the stream in 6,N array
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
#First sample frequencies
Om,angle,dt= self._sample_aAt(n)
if returnaAdt:
if _APY_UNITS and self._voSet and self._roSet:
Om=\
units.Quantity(\
Om*conversion.freq_in_Gyr(self._vo,self._ro),
unit=1/units.Gyr)
angle= units.Quantity(angle,unit=units.rad)
dt= units.Quantity(\
dt*conversion.time_in_Gyr(self._vo,self._ro),
unit=units.Gyr)
return (Om,angle,dt)
if interp is None:
interp= self._useInterp
#Propagate to R,vR,etc.
RvR= self._approxaAInv(Om[0,:],Om[1,:],Om[2,:],
angle[0,:],angle[1,:],angle[2,:],
interp=interp)
if returndt and not xy and not lb:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(RvR[0]*self._ro,unit=units.kpc),
units.Quantity(RvR[1]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[2]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[3]*self._ro,unit=units.kpc),
units.Quantity(RvR[4]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[5],unit=units.rad),
units.Quantity(\
dt*conversion.time_in_Gyr(self._vo,self._ro),
unit=units.Gyr))
return (RvR[0],RvR[1],RvR[2],RvR[3],RvR[4],RvR[5],dt)
elif not xy and not lb:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(RvR[0]*self._ro,unit=units.kpc),
units.Quantity(RvR[1]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[2]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[3]*self._ro,unit=units.kpc),
units.Quantity(RvR[4]*self._vo,unit=units.km/units.s),
units.Quantity(RvR[5],unit=units.rad))
return RvR
if xy:
sX= RvR[0]*numpy.cos(RvR[5])
sY= RvR[0]*numpy.sin(RvR[5])
sZ= RvR[3]
svX, svY, svZ=\
coords.cyl_to_rect_vec(RvR[1],RvR[2],RvR[4],RvR[5])
out= numpy.empty((6,n))
out[0]= sX
out[1]= sY
out[2]= sZ
out[3]= svX
out[4]= svY
out[5]= svZ
if returndt:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(out[0]*self._ro,unit=units.kpc),
units.Quantity(out[1]*self._ro,unit=units.kpc),
units.Quantity(out[2]*self._ro,unit=units.kpc),
units.Quantity(out[3]*self._vo,unit=units.km/units.s),
units.Quantity(out[4]*self._vo,unit=units.km/units.s),
units.Quantity(out[5]*self._vo,unit=units.km/units.s),
units.Quantity(\
dt*conversion.time_in_Gyr(self._vo,self._ro),
unit=units.Gyr))
return (out[0],out[1],out[2],out[3],out[4],out[5],dt)
else:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(out[0]*self._ro,unit=units.kpc),
units.Quantity(out[1]*self._ro,unit=units.kpc),
units.Quantity(out[2]*self._ro,unit=units.kpc),
units.Quantity(out[3]*self._vo,unit=units.km/units.s),
units.Quantity(out[4]*self._vo,unit=units.km/units.s),
units.Quantity(out[5]*self._vo,unit=units.km/units.s))
return out
if lb:
vo= self._vo
ro= self._ro
R0= self._R0
Zsun= self._Zsun
vsun= self._vsun
XYZ= coords.galcencyl_to_XYZ(RvR[0]*ro,RvR[5],RvR[3]*ro,
Xsun=R0,Zsun=Zsun).T
vXYZ= coords.galcencyl_to_vxvyvz(RvR[1]*vo,RvR[2]*vo,RvR[4]*vo,
RvR[5],
vsun=vsun,Xsun=R0,Zsun=Zsun).T
slbd=coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
out= numpy.empty((6,n))
out[0]= slbd[:,0]
out[1]= slbd[:,1]
out[2]= slbd[:,2]
out[3]= svlbd[:,0]
out[4]= svlbd[:,1]
out[5]= svlbd[:,2]
if returndt:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(out[0],unit=units.deg),
units.Quantity(out[1],unit=units.deg),
units.Quantity(out[2],unit=units.kpc),
units.Quantity(out[3],unit=units.km/units.s),
units.Quantity(out[4],unit=units.mas/units.yr),
units.Quantity(out[5],unit=units.mas/units.yr),
units.Quantity(\
dt*conversion.time_in_Gyr(self._vo,self._ro),
unit=units.Gyr))
return (out[0],out[1],out[2],out[3],out[4],out[5],dt)
else:
if _APY_UNITS and self._voSet and self._roSet:
return (units.Quantity(out[0],unit=units.deg),
units.Quantity(out[1],unit=units.deg),
units.Quantity(out[2],unit=units.kpc),
units.Quantity(out[3],unit=units.km/units.s),
units.Quantity(out[4],unit=units.mas/units.yr),
units.Quantity(out[5],unit=units.mas/units.yr))
return out
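    # Illustrative sketch of sample (made-up values; `sdf` stands for an initialized instance):
    #   >>> R, vR, vT, z, vz, phi = sdf.sample(1000)                 # configuration-space draws
    #   >>> l, b, d, vlos, pmll, pmbb = sdf.sample(1000, lb=True)    # observed coordinates
    #   >>> Om, angle, dt = sdf.sample(1000, returnaAdt=True)        # frequencies, angles, strip times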
def _sample_aAt(self,n):
"""Sampling frequencies, angles, and times part of sampling"""
#Sample frequency along largest eigenvalue using ARS
dO1s=\
ars.ars([0.,0.],[True,False],
[self._meandO-numpy.sqrt(self._sortedSigOEig[2]),
                     self._meandO+numpy.sqrt(self._sortedSigOEig[2])
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
#
#
# $Id: test_interpolate.py,v 1.4 2007/07/24 17:30:40 vareille Exp $
from mglutil.math.rotax import interpolate3DTransform, rotax
from math import pi, sin, cos, sqrt
import numpy.oldnumeric as N
import unittest
degtorad = pi / 180.0
class Interpolate3DBaseTest(unittest.TestCase):
    def diff(self, res, expect):
        return abs(res - expect) < 1.0e-6  # close enough -> true
def test_interpolate3D(self):
mat1 = rotax([0, 0, 0], [0, 0, 1], 30.0 * degtorad)
mat2 = rotax([0, 0, 0], [0, 0, 1], 60.0 * degtorad)
mat3 = rotax([0, 0, 0], [0, 0, 1], 90.0 * degtorad)
# add translation (0,1,0) to mat2
mat2 = N.array(
[
[0.5, 0.86602539, 0.0, 0.0],
[-0.86602539, 0.5, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
],
"f",
)
matList = [mat1, mat2, mat3]
indexList = [0.33333333, 0.66666666667, 1.0]
data = [[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0], [2.0, 0.0, 0.0, 1.0]]
p = 0.5
M = interpolate3DTransform(matList, indexList, p)
        res = N.dot(data, M)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected infectious individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
        self.theta_E = theta_E if theta_E is not None else 0
        self.theta_I = theta_I if theta_I is not None else 0
        self.psi_E   = psi_E   if psi_E   is not None else 0
        self.psi_I   = psi_I   if psi_I   is not None else 0
        self.q       = q       if q       is not None else 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
        S, E, I, D_E, D_I, R, F = variables    # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
return True
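        # Checkpoints usage sketch (illustrative values): parameters can change over time by
        # giving per-checkpoint lists, e.g. an intervention lowering beta at t=20, lifted at t=100:
        #   checkpoints = {'t': [20, 100], 'beta': [0.03, 0.147]}
        #   model.run(T=300, checkpoints=checkpoints)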
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
        if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
        if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
            beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected infectious individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
                            'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':psi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_I = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of inidividuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_I = numpy.array(self.parameters['mu_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_I'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (numpy.array(self.parameters['gamma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D'], shape=(self.numNodes,1))) if self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (numpy.array(self.parameters['mu_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_D'], shape=(self.numNodes,1))) if self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_I = numpy.array(self.parameters['theta_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_I'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_I = numpy.array(self.parameters['phi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_I'], shape=(self.numNodes,1))
self.psi_E = numpy.array(self.parameters['psi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_E'], shape=(self.numNodes,1))
self.psi_I = numpy.array(self.parameters['psi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_I'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
if(self.beta_local.ndim == 1):
self.beta_local.reshape((self.numNodes, 1))
elif(self.beta_local.ndim == 2):
self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
if(self.beta_D_local.ndim == 1):
self.beta_D_local.reshape((self.numNodes, 1))
elif(self.beta_D_local.ndim == 2):
self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I)))
or (numpy.any(self.psi_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E))) )
self.tracing_scenario = ( (numpy.any(self.psi_E) and numpy.any(self.phi_E))
or (numpy.any(self.psi_I) and numpy.any(self.phi_I)) )
self.vitality_scenario = (numpy.any(self.mu_0) and numpy.any(self.nu))
self.resusceptibility_scenario = (numpy.any(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes,1))
if(numpy.any(self.numI[self.tidx])
and numpy.any(self.beta!=0)):
transmissionTerms_I = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_beta, self.X==self.I) )
transmissionTerms_DI = numpy.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and numpy.any(self.numD_I[self.tidx])
and numpy.any(self.beta_D)):
transmissionTerms_DI = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X==self.D_I) )
numContacts_D = numpy.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_I[self.tidx]))):
numContacts_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_I)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
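# Added note (not in the original source): the S->E propensity below mixes two transmission routes.
# With probability p a susceptible node mixes globally, so its hazard follows the population-level
# prevalence (beta*numI + q*beta_D*numD_I)/N; with probability (1-p) it is exposed only through its
# network neighbours, via the pre-multiplied adjacency terms normalised by node degree (degree==0 guarded).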
propensities_StoE = ( self.p*((self.beta*self.numI[self.tidx] + self.q*self.beta_D*self.numD_I[self.tidx])/self.N[self.tidx])
+ (1-self.p)*numpy.divide((transmissionTerms_I + transmissionTerms_DI), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0)
)*(self.X==self.S)
propensities_EtoI = self.sigma*(self.X==self.E)
propensities_ItoR = self.gamma*(self.X==self.I)
propensities_ItoF = self.mu_I*(self.X==self.I)
# propensities_EtoDE = ( self.theta_E + numpy.divide((self.phi_E*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.psi_E*(self.X==self.E)
# propensities_ItoDI = ( self.theta_I + numpy.divide((self.phi_I*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (self.theta_I + self.phi_I*numContacts_D)*self.psi_I*(self.X==self.I)
propensities_DEtoDI = self.sigma_D*(self.X==self.D_E)
propensities_DItoR = self.gamma_D*(self.X==self.D_I)
propensities_DItoF = self.mu_D*(self.X==self.D_I)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = numpy.hstack([propensities_StoE, propensities_EtoI,
propensities_ItoR, propensities_ItoF,
propensities_EtoDE, propensities_ItoDI, propensities_DEtoDI,
propensities_DItoR, propensities_DItoF,
propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoI', 'ItoR', 'ItoF', 'EtoDE', 'ItoDI', 'DEtoDI', 'DItoR', 'DItoF', 'RtoS', '_toS']
return propensities, columns
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def increase_data_series_length(self):
self.tseries= numpy.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = numpy.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = numpy.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI = numpy.pad(self.numI, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = numpy.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_I = numpy.pad(self.numD_I, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = numpy.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = numpy.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = numpy.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
self.Xseries = numpy.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = numpy.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI'] = numpy.pad(self.nodeGroupData[groupName]['numI'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = numpy.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_I'] = numpy.pad(self.nodeGroupData[groupName]['numD_I'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = numpy.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = numpy.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = numpy.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries= numpy.array(self.tseries, dtype=float)[:self.tidx+1]
self.numS = numpy.array(self.numS, dtype=float)[:self.tidx+1]
self.numE = numpy.array(self.numE, dtype=float)[:self.tidx+1]
self.numI = numpy.array(self.numI, dtype=float)[:self.tidx+1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_I = numpy.array(self.numD_I, dtype=float)[:self.tidx+1]
self.numR = numpy.array(self.numR, dtype=float)[:self.tidx+1]
self.numF = numpy.array(self.numF, dtype=float)[:self.tidx+1]
self.N = numpy.array(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.array(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = numpy.array(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI'] = numpy.array(self.nodeGroupData[groupName]['numI'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = numpy.array(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_I'] = numpy.array(self.nodeGroupData[groupName]['numD_I'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numR'] = numpy.array(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = numpy.array(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = numpy.array(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
if(self.tidx >= len(self.tseries)-1):
# Room has run out in the timeseries storage arrays; extend these arrays (by 5*numNodes entries) before proceeding:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terminate when probability of all events is 0:
if(propensities.sum() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order='F')
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
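# Added note: alpha is the total event rate, so the waiting time to the next event is Exp(alpha)-distributed;
# tau = (1/alpha)*ln(1/r1) is the standard inverse-transform draw using the uniform variate r1.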
tau = (1/alpha)*numpy.log(float(1/r1))
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
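# Added note: propensities are flattened column-major (order='F'), i.e. all nodes for transition type 0,
# then all nodes for type 1, etc. Hence node = transitionIdx % numNodes and type = transitionIdx // numNodes.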
transitionIdx = numpy.searchsorted(cumsum,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.S), a_min=0, a_max=self.numNodes)
self.numE[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.E), a_min=0, a_max=self.numNodes)
self.numI[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I), a_min=0, a_max=self.numNodes)
self.numD_E[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_E), a_min=0, a_max=self.numNodes)
self.numD_I[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_I), a_min=0, a_max=self.numNodes)
self.numR[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.R), a_min=0, a_max=self.numNodes)
self.numF[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.F), a_min=0, a_max=self.numNodes)
self.N[self.tidx] = numpy.clip((self.numS[self.tidx] + self.numE[self.tidx] + self.numI[self.tidx] + self.numD_E[self.tidx] + self.numD_I[self.tidx] + self.numR[self.tidx]), a_min=0, a_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][self.tidx] = numpy.clip((self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]), a_min=0, a_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terminate if tmax reached or num infectious and num exposed is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tmax or (self.numI[self.tidx]<1 and self.numE[self.tidx]<1 and self.numD_E[self.tidx]<1 and self.numD_I[self.tidx]<1)):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, numpy.ndarray)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
elif(not print_reset and (int(self.t) % 10 != 0)):
print_reset = True
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.numNodes if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI/self.numNodes if plot_percentages else self.numI
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.numNodes/100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
import numpy as np
import scipy.signal as sps
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def resample_data(gsrdata, prevSR, newSR):
'''Resamples the data
Function to resample the passed GSR data from the previous sample rate to the new sample rate
Parameters
----------
gsrdata : 1-d array
array containing the gsr data
prevSR : int or float
the previous sample rate of the data
newSR : int or float
the new sample rate of the data
Returns
-------
data : 1-d array
array containing the resampled data
'''
number_of_samples = int(round(len(gsrdata) * float(newSR) / prevSR))
data = sps.resample(gsrdata, number_of_samples)
return data
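# Example usage (illustrative sketch, not part of the original module; the sample values are made up):
# >>> raw = np.array([0.1, 0.2, 0.4, 0.3])
# >>> resample_data(raw, prevSR=4, newSR=8).shape
# (8,)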
def normalization(gsrdata):
'''Min Max normalization
Function to calculate normalized gsr data
Parameters
----------
gsrdata : 1-d array
array containing the gsr data
Returns
-------
n_gsrdata : 1-d array
normalized gsr data
'''
gsrdata = gsrdata - (np.min(gsrdata))
gsrdata /= (np.max(gsrdata) - np.min(gsrdata))
n_gsrdata = gsrdata
return n_gsrdata
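# Example usage (illustrative sketch, not part of the original module):
# >>> normalization(np.array([2.0, 4.0, 6.0]))
# array([0. , 0.5, 1. ])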
def rolling_mean(data, windowsize, sample_rate):
'''calculates rolling mean
Function to calculate moving average over the passed data
Parameters
----------
data : 1-d array
array containing the gsr data
windowsize : int or float
the moving average window size in seconds
sample_rate : int or float
the sample rate of the data set
Returns
-------
rol_mean : 1-d array
array containing computed rolling mean
'''
avg_hr = (np.mean(data))
data_arr = np.array(data)
t_windowsize = int(windowsize*sample_rate)
t_shape = data_arr.shape[:-1] + (data_arr.shape[-1] - t_windowsize + 1, t_windowsize)
t_strides = data_arr.strides + (data_arr.strides[-1],)
sep_win = np.lib.stride_tricks.as_strided(data_arr, shape=t_shape, strides=t_strides)
rol_mean = np.mean(sep_win, axis=1)
missing_vals = np.array([avg_hr for i in range(0, int(abs(len(data_arr) - len(rol_mean))/2))])
rol_mean = np.insert(rol_mean, 0, missing_vals)
rol_mean = np.append(rol_mean, missing_vals)
return rol_mean
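# Example usage (illustrative sketch, not part of the original module):
# >>> sig = np.arange(12, dtype=float)
# >>> rolling_mean(sig, windowsize=1, sample_rate=5).shape # 5-sample window, mean-padded on both ends
# (12,)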
import pyglet
from pyglet.gl import *
from .globs import *
from .constants import *
from . import config
import ctypes
import math
from .colors import _getColor, color, blue
try:
import numpy
npy = True
numpy.seterr(divide='ignore')
except:
npy = False
# exports
__all__ = ['PImage', 'loadImage', 'image', 'get', 'setScreen', 'save',
'createImage', 'loadPixels', 'updatePixels', 'screenFilter', 'blend']
# the PImage class
class PImage(object):
"""This basically wraps pyglet's AbstractImage with a Processing-like syntax."""
img = None # this is the actual AbstractImage
def __init__(self, *args):
"""Either creates a new image from scratch or wraps an AbstractImage.
Arguments are of the form
PImage()
PImage(width,height)
PImage(width,height,format)
PImage(img)
"""
if len(args) == 1 and isinstance(args[0], pyglet.image.AbstractImage):
# Wraps an AbstractImage
self.img = args[0]
elif len(args) in (2, 3):
# Creates an ImageData from width, height and type
if len(args) == 2:
# default
w, h = args
format = ARGB
else:
w, h, format = args
data = ctypes.create_string_buffer(w * h * len(format))
self.img = pyglet.image.ImageData(w, h, format, data.raw)
else:
assert (len(args) == 0)
# Do an initial loading of the pixels[] array
self.loadPixels()
self.updatePixels()
def loadPixels(self):
"""Gets the pixel data as an array of integers."""
n = self.width * self.height
self.buf = self.img.get_image_data().get_data('BGRA', -self.width * 4)
if npy:
self.pixels = numpy.fromstring(self.buf, dtype=ctypes.c_uint)
else:
self.pixels = ctypes.cast(self.buf, ctypes.POINTER(ctypes.c_uint))
def filter(self, mode, *args):
"""Applies a filter to the image.
The existant filters are: GRAY, INVERT, OPAQUE, THRESHOLD, POSTERIZE,
ERODE, DILATE and BLUR. This method requires numpy."""
if not npy:
raise ImportError("Numpy is required")
if mode == GRAY:
# Gray value = (77*(n>>16&0xff) + 151*(n>>8&0xff) + 28*(n&0xff)) >> 8
# Where n is the ARGB color of the pixel
lum1 = numpy.multiply(
numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff), 77)
lum2 = numpy.multiply(
numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff), 151)
lum3 = numpy.multiply(numpy.bitwise_and(self.pixels, 0xff), 28)
lum = numpy.right_shift(numpy.add(numpy.add(lum1, lum2), lum3), 8)
self.pixels = numpy.bitwise_and(self.pixels, 0xff000000)
self.pixels = numpy.bitwise_or(self.pixels,
numpy.left_shift(lum, 16))
self.pixels = numpy.bitwise_or(self.pixels,
numpy.left_shift(lum, 8))
self.pixels = numpy.bitwise_or(self.pixels, lum)
elif mode == INVERT:
# This is the same as applying an exclusive or with the maximum value
self.pixels = numpy.bitwise_xor(self.pixels, 0xffffff)
elif mode == BLUR:
if not args:
args = [3]
# Makes the image square by adding zeros.
# This avoids the convolution (via fourier transform multiplication)
# from jumping to another extreme of the image when a border is reached
if self.width > self.height:
dif = self.width - self.height
updif = numpy.zeros(self.width * dif / 2, dtype=numpy.uint32)
downdif = numpy.zeros(self.width * (dif - dif / 2),
dtype=numpy.uint32)
self.pixels = numpy.concatenate((updif, self.pixels, downdif))
size = self.width
elif self.width < self.height:
dif = self.height - self.width
leftdif = numpy.zeros(self.height * dif / 2, dtype=numpy.uint32)
rightdif = numpy.zeros(self.height * (dif - dif / 2),
dtype=numpy.uint32)
self.pixels = self.pixels.reshape(self.height, self.width)
self.pixels = numpy.transpose(self.pixels)
self.pixels = self.pixels.reshape(self.width * self.height)
self.pixels = numpy.concatenate(
(leftdif, self.pixels, rightdif))
self.pixels = self.pixels.reshape(self.height, self.height)
self.pixels = numpy.transpose(self.pixels)
self.pixels = self.pixels.reshape(self.height * self.height)
size = self.height
else:
size = self.height
# Creates a gaussian kernel of the image's size
_createKernel2d(args[0], size)
# Divides the image's R, G and B channels, reshapes them
# to square matrixes and applies two dimensional fourier transforms
red = numpy.bitwise_and(numpy.right_shift(self.pixels, 16), 0xff)
red = numpy.reshape(red, (size, size))
red = numpy.fft.fft2(red)
green = numpy.bitwise_and(numpy.right_shift(self.pixels, 8), 0xff)
green = numpy.reshape(green, (size, size))
green = numpy.fft.fft2(green)
blue = numpy.bitwise_and(self.pixels, 0xff)
blue = numpy.reshape(blue, (size, size))
blue = numpy.fft.fft2(blue)
# Does a element-wise multiplication of each channel matrix
# and the fourier transform of the kernel matrix
kernel = numpy.fft.fft2(weights)
red = numpy.multiply(red, kernel)
green = numpy.multiply(green, kernel)
blue = numpy.multiply(blue, kernel)
# Reshapes them back to arrays and converts to unsigned integers
red = numpy.reshape(numpy.fft.ifft2(red).real, size * size)
green = numpy.reshape(numpy.fft.ifft2(green).real, size * size)
blue = numpy.reshape(numpy.fft.ifft2(blue).real, size * size)
red = red.astype(numpy.uint32)
green = green.astype(numpy.uint32)
blue = blue.astype(numpy.uint32)
self.pixels = numpy.bitwise_or(numpy.left_shift(green, 8), blue)
self.pixels = numpy.bitwise_or(numpy.left_shift(red, 16),
self.pixels)
# Crops out the zeros added
if self.width > self.height:
self.pixels = self.pixels[
self.width * dif / 2:size * size - self.width * (
dif - dif / 2)]
elif self.width < self.height:
self.pixels = numpy.reshape(self.pixels, (size, size))
self.pixels = numpy.transpose(self.pixels)
self.pixels = numpy.reshape(self.pixels, size * size)
self.pixels = self.pixels[
self.height * dif / 2:size * size - self.height * (
dif - dif / 2)]
self.pixels = numpy.reshape(self.pixels,
(self.width, self.height))
self.pixels = numpy.transpose(self.pixels)
self.pixels = numpy.reshape(self.pixels,
self.height * self.width)
elif mode == OPAQUE:
# This is the same as applying an bitwise or with the maximum value
self.pixels = numpy.bitwise_or(self.pixels, 0xff000000)
elif mode == THRESHOLD:
# Maximum = max((n & 0xff0000) >> 16, max((n & 0xff00)>>8, (n & 0xff)))
# Broken down to Maximum = max(aux,aux2)
# The pixel will be white if its maximum is greater than the threshold
# value, and black if not. This was implemented via a boolean matrix
# multiplication.
if not args:
args = [0.5]
thresh = args[0] * 255
aux = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff00), 8)
aux = numpy.maximum(aux, numpy.bitwise_and(self.pixels, 0xff))
aux2 = numpy.right_shift(numpy.bitwise_and(self.pixels, 0xff0000),
16)
boolmatrix = numpy.greater_equal(numpy.maximum(aux, aux2), thresh)
self.pixels.fill(0xffffff)
self.pixels = numpy.multiply(self.pixels, boolmatrix)
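# Example usage of PImage.filter (illustrative sketch, not part of the original module;
# 'photo.png' is a made-up file name):
# img = loadImage('photo.png')
# img.filter(GRAY) # grayscale via the luminance weights used above
# img.updatePixels()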
import os # library for interacting with the operating system
import sys
import xml.etree.ElementTree as ET # library that reads XML as a tree; 'as ET' just abbreviates the library name
import shutil # module for file and directory operations (copying / moving files)
import random # random number generation
from xml.dom import minidom # functions for accessing XML
import operator # library used to compute the difference between lists
import math # math library used to compute the standard deviation
import numpy as np # numerical library, also used to compute the standard deviation
from matplotlib import pyplot as plt
import pandas as pd
from collections import Counter
import csv
import pickle
from collections import Counter # for counting duplicate values in a list
import time
start = time.time()
# sys.stdout = open('output.txt','a') # redirect print output to a text file
answer = ['car','person']
attr = ['width', 'height', 'box']
car_array = []
person_array = []
# zz=[]
# zz1=[]
car_count = 0
person_count = 0
total_box = 0
total_xml = 0
total_box_count = 0
total_car = 0
total_person = 0
person_w = []
person_h = []
car_w = []
car_h = []
other =0
c_w = []
c_h = []
p_w = []
p_h = []
p_b = []
c_b = []
c_height_list = []
c_width_list = []
c_box_len_list = []
p_height_list = []
p_width_list = []
p_box_len_list = []
path_list = []
file_list = []
name_list = []
dir_len = len(path_list)
root_path = "D:/backup/DataSet"
target_path = "D:/backup/DataSet"
parser = ET.XMLParser(encoding="utf-8") # XMLParser lets us pull the desired elements out of the XML
rootpath = r"D:\backup\DataSet"
xmlRootpath = r'D:\backup\DataSet'
xmlList = []
coun=0
for (path, dir, files) in os.walk(xmlRootpath):
for file in files:
if file.endswith(".xml"):
# for x in os.listdir(root_path):
# if x.endswith('xml'):
empty = os.path.join(root_path,path)
path_list.append(empty)
total_xml += 1
tree = ET.parse(os.path.join(empty, file)) # read the information for this xml file
root = tree.getroot() # points at the very top of the document, here <annotations>
#print(os.path.join(empty, file))
for child in root.findall("object"): # repeat for every object element under the root (annotations)
bndbox=child.find('bndbox')
name = child.find('name').text
total_box_count += 1
xmin = int(float(bndbox.find("xmin").text))
ymin = int(float(bndbox.find("ymin").text))
xmax = int(float(bndbox.find("xmax").text))
ymax = int(float(bndbox.find("ymax").text))
# print(name+' %s %s %s %s' %(xmin, ymin, xmax, ymax)) #5
if name == "person":
person_count += 1
total_person += 1
p_w = abs(float(xmin)-float(xmax))
p_h = abs(float(ymin)-float(ymax))
p_b = abs(float(p_w*p_h))
# print("person width=" + str(p_w), "person height" + str(p_h))
# print("label:person"+" xmin:"+str(xmin)+" ymin:"+str(ymin)+" xmax:"+str(xmax)+" ymax:"+str(ymax)+" width=" + str(p_w), " height" + str(p_h))
person_w.append(p_w)
person_h.append(p_h)
p_box_len_list.append(p_b)
elif name == "car":
car_count += 1
total_car += 1
c_w = abs(float(xmin)-float(xmax))
c_h = abs(float(ymin)-float(ymax))
c_b = abs(float(c_w * c_h))
# print("car width=" + str(c_w), "car height=" + str(c_h))
#print("label:car" + " xmin:" + str(xmin) + " ymin:" + str(ymin) + " xmax:" + str(xmax) + " ymax:" + str(ymax) + " width=" + str(c_w), " height" + str(c_h))
car_w.append(c_w)
car_h.append(c_h)
c_box_len_list.append(c_b)
else:
other += 1
# print("car num:"+str(car_count)+" person num:"+str(person_count))
person_array.append(person_count)
car_array.append(car_count)
car_count=0
person_count=0
person_np = np.array(person_array)
car_np = np.array(car_array)
# np.max(arr) gets the maximum value
x = np.array(person_w)#person_width
y = np.array(person_h)#person_height
z = x*y #person _w*h
zz = list(z)
# print(z)
x1 = np.array(car_w) #car width
y1 = np.array(car_h) #car height
z1 = x1*y1 #car_w*h
zz1 = list(z1)
a = round(np.average(x)) # average person-box width
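# (Sketch, not part of the original script.) The remaining summary statistics could follow the same
# pattern with numpy, e.g. for the person-box widths/heights collected above:
# person_width_std = round(np.std(x)) # standard deviation, matching the math/numpy imports above
# person_height_mean = round(np.average(y))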
"""
NCL_conOncon_2.py
=================
This script illustrates the following concepts:
- Overlaying two sets of contours on a map
- Drawing the zero contour line thicker
- Changing the center longitude for a cylindrical equidistant projection
- Using a blue-white-red color map
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/conOncon_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/conOncon_2_lg.png
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import geocat.datafiles as gdf
import matplotlib.pyplot as plt
################################################################################
# Import packages:
import numpy as np
import xarray as xr
from cartopy.mpl.gridliner import LatitudeFormatter, LongitudeFormatter
from geocat.viz import cmaps as gvcmaps
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
sst = xr.open_dataset(gdf.get("netcdf_files/sst8292a.nc"))
olr = xr.open_dataset(gdf.get("netcdf_files/olr7991a.nc"))
# Extract data for December 1982
sst = sst.isel(time=11, drop=True).SSTA
olr = olr.isel(time=47, drop=True).OLRA
# Fix the artifact of not-shown-data around 0 and 360-degree longitudes
sst = gvutil.xr_add_cyclic_longitudes(sst, 'lon')
olr = gvutil.xr_add_cyclic_longitudes(olr, 'lon')
###############################################################################
# Plot:
# Generate figure and axes
plt.figure(figsize=(8, 8))
# Set axes projection
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=-160))
ax.set_extent([100, 300, -60, 60], crs=ccrs.PlateCarree())
# Load in color map and specify contour levels
cmap = gvcmaps.BlWhRe
sst_levels = np.arange(-5.5, 6, 0.5)
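# (Sketch, not part of the original script.) These levels would typically feed xarray's contour plotting,
# e.g. filled SST contours with OLR line contours overlaid:
# sst.plot.contourf(ax=ax, transform=ccrs.PlateCarree(), levels=sst_levels, cmap=cmap, add_colorbar=False)
# olr.plot.contour(ax=ax, transform=ccrs.PlateCarree(), colors='black')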
import pytest
import numpy as np
import sys
import os
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from dtaidistance import dtw, dtw_c
def test_numpymatrix():
"""Passing a matrix instead of a list failed because the array is now a
view instead of the original data structure."""
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 0]])
m = dtw_c.distance_matrix_nogil(s)
m2 = dtw.distance_matrix(s)
correct = np.array([
[np.inf, 1.41421356, 1.73205081],
[np.inf, np.inf, 1.41421356],
[np.inf, np.inf, np.inf]])
assert m[0, 1] == pytest.approx(math.sqrt(2))
assert m2[0, 1] == pytest.approx(math.sqrt(2))
np.testing.assert_almost_equal(correct, m, decimal=4)
np.testing.assert_almost_equal(correct, m2, decimal=4)
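# Added note (not part of the original tests): the pairwise entry m[0, 1] matches the plain DTW distance
# between the first two series, i.e. dtw.distance(s[0], s[1]) == pytest.approx(math.sqrt(2)).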
def test_numpymatrix_transpose():
"""Passing a matrix instead of a list failed because the array is now a
view instead of the original data structure."""
s = np.array([
[0., 0., 1.,],
[0, 1, 2],
[1, 2, 0],
[2, 0, 0],
[1, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 0]
]).T
m = dtw_c.distance_matrix_nogil(s)
m2 = dtw.distance_matrix(s)
correct = np.array([
[np.inf, 1.41421356, 1.73205081],
[np.inf, np.inf, 1.41421356],
[np.inf, np.inf, np.inf]])
assert m[0, 1] == pytest.approx(math.sqrt(2))
assert m2[0, 1] == pytest.approx(math.sqrt(2))
np.testing.assert_almost_equal(correct, m, decimal=4)
np.testing.assert_almost_equal(correct, m2, decimal=4)
import os
import operator
import unittest
import numpy as np
from pandas.core.api import DataFrame, Index, notnull
from pandas.core.datetools import bday
from pandas.core.panel import (WidePanel, LongPanelIndex, LongPanel,
group_agg, pivot)
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.core.panel as panelm
import pandas.util.testing as common
class PanelTests(object):
def test_iter(self):
common.equalContents(list(self.panel), self.panel.items)
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_repr(self):
foo = repr(self.panel)
def test_set_values(self):
self.panel.values = np.array(self.panel.values, order='F')
assert(self.panel.values.flags.contiguous)
def _check_statistic(self, frame, name, alternative):
f = getattr(frame, name)
for i, ax in enumerate(['items', 'major', 'minor']):
result = f(axis=i)
assert_frame_equal(result, frame.apply(alternative, axis=ax))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_statistic(self.panel, 'count', f)
def test_sum(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.sum()
self._check_statistic(self.panel, 'sum', f)
def test_prod(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return np.prod(nona)
self._check_statistic(self.panel, 'prod', f)
def test_mean(self):
def f(x):
x = np.asarray(x)
return x[notnull(x)].mean()
self._check_statistic(self.panel, 'mean', f)
def test_median(self):
def f(x):
x = np.asarray(x)
return np.median(x[notnull(x)])
self._check_statistic(self.panel, 'median', f)
def test_min(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.min()
self._check_statistic(self.panel, 'min', f)
def test_max(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) == 0:
return np.NaN
else:
return nona.max()
self._check_statistic(self.panel, 'max', f)
def test_var(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.var(ddof=1)
self._check_statistic(self.panel, 'var', f)
def test_std(self):
def f(x):
x = np.asarray(x)
nona = x[notnull(x)]
if len(nona) < 2:
return np.NaN
else:
return nona.std(ddof=1)
self._check_statistic(self.panel, 'std', f)
def test_skew(self):
return
try:
from scipy.stats import skew
except ImportError:
return
def f(x):
x = np.asarray(x)
return skew(x[notnull(x)], bias=False)
self._check_statistic(self.panel, 'skew', f)
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
class TestWidePanel(unittest.TestCase, PanelTests):
def setUp(self):
self.panel = common.makeWidePanel()
common.add_nans(self.panel)
def test_get_axis(self):
assert(self.panel._get_axis(0) is self.panel.items)
assert(self.panel._get_axis(1) is self.panel.major_axis)
assert(self.panel._get_axis(2) is self.panel.minor_axis)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major')
self.assertEqual(self.panel._get_axis_name(2), 'minor')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major')
index, columns = self.panel._get_plane_axes('minor')
index, columns = self.panel._get_plane_axes(0)
def test_arith(self):
def test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
test_op(self.panel, operator.add)
test_op(self.panel, operator.sub)
test_op(self.panel, operator.mul)
test_op(self.panel, operator.div)
test_op(self.panel, operator.pow)
test_op(self.panel, lambda x, y: y + x)
test_op(self.panel, lambda x, y: y - x)
test_op(self.panel, lambda x, y: y * x)
test_op(self.panel, lambda x, y: y / x)
test_op(self.panel, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
def test_fromDict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A' : itema, 'B' : itemb[5:]}
wp = WidePanel.fromDict(d)
self.assert_(wp.major_axis.equals(self.panel.major_axis))
# intersect
wp = WidePanel.fromDict(d, intersect=True)
self.assert_(wp.major_axis.equals(itemb.index[5:]))
def test_keys(self):
common.equalContents(self.panel.keys(), self.panel.items)
def test_iteritems(self):
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
def test_values(self):
self.assertRaises(Exception, WidePanel, np.random.randn(5, 5, 5),
range(5), range(5), range(4))
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assert_('ItemA' not in self.panel.items)
del self.panel['ItemB']
self.assert_('ItemB' not in self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = WidePanel(values, range(3), range(3), range(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA']).toLong()
self.panel['ItemE'] = lp
lp = self.panel.filter(['ItemA', 'ItemB']).toLong()
self.assertRaises(Exception, self.panel.__setitem__,
'ItemE', lp)
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index,
columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = 1
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert(conformed.index.equals(self.panel.major_axis))
assert(conformed.columns.equals(self.panel.minor_axis))
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis)
assert(result.items is self.panel.items)
assert(result.major_axis is self.panel.major_axis)
assert(result.minor_axis is self.panel.minor_axis)
self.assertRaises(Exception, self.panel.reindex)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis,
fill_method='pad')
assert_frame_equal(larger.getMajorXS(self.panel.major_axis[1]),
smaller.getMajorXS(smaller_major[0]))
def test_fill(self):
filled = self.panel.fill(0)
self.assert_(np.isfinite(filled.values).all())
filled = self.panel.fill(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fill(method='backfill'))
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.getMajorXS(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.getMajorXS(idx),
op(self.panel.getMajorXS(idx), xs))
# minor
xs = self.panel.getMinorXS(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.getMinorXS(idx),
op(self.panel.getMinorXS(idx), xs))
check_op(operator.add, 'add')
check_op(operator.sub, 'subtract')
check_op(operator.mul, 'multiply')
check_op(operator.div, 'divide')
def test_combinePanel(self):
result = self.panel.add(self.panel)
assert_panel_equal(result, self.panel * 2)
long = self.panel.toLong(filter_observations=False)
result = self.panel.add(long)
assert_panel_equal(result, self.panel * 2)
def test_operators(self):
pass
def test_neg(self):
assert_panel_equal(-self.panel, self.panel * -1)
def test_getMajorXS(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.getMajorXS(idx)
assert_series_equal(xs['ItemA'], ref.getXS(idx))
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.getMajorXS, idx)
def test_getMinorXS(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.getMinorXS(idx)
assert_series_equal(xs['ItemA'], ref[idx])
# not contained
self.assertRaises(Exception, self.panel.getMinorXS, 'E')
def test_groupby(self):
grouped = self.panel.groupby({'ItemA' : 0, 'ItemB' : 0, 'ItemC' : 1},
axis='items')
agged = grouped.agg(np.mean)
self.assert_(np.array_equal(agged.items, [0, 1]))
grouped = self.panel.groupby(lambda x: x.month, axis='major')
agged = grouped.agg(np.mean)
self.assert_(np.array_equal(agged.major_axis, [1, 2]))
grouped = self.panel.groupby({'A' : 0, 'B' : 0, 'C' : 1, 'D' : 1},
axis='minor')
agged = grouped.agg(np.mean)
self.assert_(np.array_equal(agged.minor_axis, [0, 1]))
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assert_(result.items is self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assert_(result.items is self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assert_(result.major_axis is self.panel.minor_axis)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assert_(result.items is self.panel.major_axis)
# this should also work
self.assertRaises(Exception, self.panel.swapaxes, 'items', 'items')
def test_toLong(self):
# filtered
filtered = self.panel.toLong()
# unfiltered
unfiltered = self.panel.toLong(filter_observations=False)
assert_panel_equal(unfiltered.toWide(), self.panel)
def test_filter(self):
pass
def test_apply(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.getMajorXS(idx),
shifted.getMajorXS(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.getMinorXS(idx),
shifted.getMinorXS(idx_lag))
self.assertRaises(Exception, self.panel.shift, 1, axis='items')
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
class TestLongPanelIndex(unittest.TestCase):
def setUp(self):
major_axis = Index([1, 2, 3, 4])
minor_axis = Index([1, 2])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index = LongPanelIndex(major_axis, minor_axis,
major_labels, minor_labels)
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
self.incon = LongPanelIndex(major_axis, minor_axis,
major_labels, minor_labels)
def test_consistency(self):
self.assert_(self.index.consistent)
self.assert_(not self.incon.consistent)
# need to construct an overflow
major_axis = range(70000)
minor_axis = range(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(range(10), 7000)
index = LongPanelIndex(major_axis, minor_axis,
major_labels, minor_labels)
self.assert_(index.consistent)
def test_truncate(self):
result = self.index.truncate(before=1)
self.assert_(0 not in result.major_axis)
self.assert_(1 in result.major_axis)
result = self.index.truncate(after=1)
self.assert_(2 not in result.major_axis)
self.assert_(1 in result.major_axis)
result = self.index.truncate(before=1, after=2)
self.assertEqual(len(result.major_axis), 2)
def test_getMajorBounds(self):
pass
def test_getAxisBounds(self):
pass
def test_getLabelBounds(self):
pass
def test_bounds(self):
pass
def test_makeMask(self):
mask = self.index.mask
expected = np.array([True, True,
True, False,
False, True,
True, True], dtype=bool)
self.assert_(np.array_equal(mask, expected))
def test_dims(self):
pass
class TestLongPanel(unittest.TestCase):
def setUp(self):
panel = common.makeWidePanel()
common.add_nans(panel)
self.panel = panel.toLong()
self.unfiltered_panel = panel.toLong(filter_observations=False)
def test_pickle(self):
import cPickle
pickled = cPickle.dumps(self.panel)
unpickled = cPickle.loads(pickled)
assert_almost_equal(unpickled['ItemA'].values,
self.panel['ItemA'].values)
def test_len(self):
len(self.unfiltered_panel)
def test_constructor(self):
pass
def test_fromRecords_toRecords(self):
# structured array
K = 10
recs = np.zeros(K, dtype='O,O,f8,f8')
recs['f0'] = range(K / 2) * 2
recs['f1'] = np.arange(K) / (K / 2)
recs['f2'] = np.arange(K) * 2
recs['f3'] = np.arange(K)
lp = LongPanel.fromRecords(recs, 'f0', 'f1')
self.assertEqual(len(lp.items), 2)
lp = LongPanel.fromRecords(recs, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
torecs = lp.toRecords()
self.assertEqual(len(torecs.dtype.names), len(lp.items) + 2)
# DataFrame
df = DataFrame.fromRecords(recs)
lp = LongPanel.fromRecords(df, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
# dict of arrays
series = DataFrame.fromRecords(recs)._series
lp = LongPanel.fromRecords(series, 'f0', 'f1', exclude=['f2'])
self.assertEqual(len(lp.items), 1)
self.assert_('f2' in series)
self.assertRaises(Exception, LongPanel.fromRecords, np.zeros((3, 3)),
0, 1)
def test_factors(self):
# structured array
K = 10
recs = np.zeros(K, dtype='O,O,f8,f8,O,O')
recs['f0'] = ['one'] * 5 + ['two'] * 5
recs['f1'] = ['A', 'B', 'C', 'D', 'E'] * 2
recs['f2'] = np.arange(K) * 2
recs['f3'] = np.arange(K)
recs['f4'] = ['A', 'B', 'C', 'D', 'E'] * 2
recs['f5'] = ['foo', 'bar'] * 5
lp = LongPanel.fromRecords(recs, 'f0', 'f1')
def test_columns(self):
self.assert_(np.array_equal(self.panel.items, self.panel.columns))
self.assert_(np.array_equal(self.panel.items, self.panel.cols()))
def test_copy(self):
thecopy = self.panel.copy()
self.assert_(np.array_equal(thecopy.values, self.panel.values))
self.assert_(thecopy.values is not self.panel.values)
def test_values(self):
valslice = self.panel.values[:-1]
self.assertRaises(Exception, self.panel._set_values, valslice)
def test_getitem(self):
col = self.panel['ItemA']
def test_setitem(self):
self.panel['ItemE'] = self.panel['ItemA']
self.panel['ItemF'] = 1
wp = self.panel.toWide()
assert_frame_equal(wp['ItemA'], wp['ItemE'])
itemf = wp['ItemF'].values.ravel()
self.assert_((itemf[np.isfinite(itemf)] == 1).all())
# check exceptions raised
lp = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC', 'ItemE'])
self.assertRaises(Exception, lp.__setitem__, 'foo', lp2)
def test_combineFrame(self):
wp = self.panel.toWide()
result = self.panel.add(wp['ItemA'])
assert_frame_equal(result.toWide()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
wp = self.panel.toWide()
result = self.panel.add(self.panel)
wide_result = result.toWide()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
def test_operators(self):
wp = self.panel.toWide()
result = (self.panel + 1).toWide()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_sort(self):
        def is_sorted(arr):
            # every adjacent pair must be non-decreasing for the labels to count as sorted
            return (arr[1:] >= arr[:-1]).all()
sorted_minor = self.panel.sort(axis='minor')
self.assert_(is_sorted(sorted_minor.index.minor_labels))
sorted_major = sorted_minor.sort(axis='major')
self.assert_(is_sorted(sorted_major.index.major_labels))
def test_toWide(self):
pass
def test_toCSV(self):
self.panel.toCSV('__tmp__')
os.remove('__tmp__')
def test_toString(self):
from cStringIO import StringIO
buf = StringIO()
self.panel.toString(buf)
self.panel.toString(buf, col_space=12)
def test_swapaxes(self):
swapped = self.panel.swapaxes()
self.assert_(swapped.major_axis is self.panel.minor_axis)
# what else to test here?
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).toWide()
expected = self.panel.toWide()['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start).toWide()
expected = self.panel.toWide()['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end).toWide()
expected = self.panel.toWide()['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
def test_filter(self):
pass
def test_axis_dummies(self):
minor_dummies = self.panel.get_axis_dummies('minor')
self.assertEqual(len(minor_dummies.items),
len(self.panel.minor_axis))
major_dummies = self.panel.get_axis_dummies('major')
self.assertEqual(len(major_dummies.items),
len(self.panel.major_axis))
mapping = {'A' : 'one',
'B' : 'one',
'C' : 'two',
'D' : 'two'}
transformed = self.panel.get_axis_dummies('minor',
transform=mapping.get)
self.assertEqual(len(transformed.items), 2)
self.assert_(np.array_equal(transformed.items, ['one', 'two']))
# TODO: test correctness
def test_get_dummies(self):
self.panel['Label'] = self.panel.index.minor_labels
minor_dummies = self.panel.get_axis_dummies('minor')
dummies = self.panel.get_dummies('Label')
self.assert_(np.array_equal(dummies.values, minor_dummies.values))
def test_apply(self):
# ufunc
applied = self.panel.apply(np.sqrt)
self.assert_(assert_almost_equal(
applied.values, np.sqrt(self.panel.values)))
def test_mean(self):
means = self.panel.mean('major')
# test versus WidePanel version
wide_means = self.panel.toWide().mean('major')
assert_frame_equal(means, wide_means)
means_broadcast = self.panel.mean('major', broadcast=True)
self.assert_(isinstance(means_broadcast, LongPanel))
# how to check correctness?
def test_sum(self):
sums = self.panel.sum('major')
# test versus WidePanel version
wide_sums = self.panel.toWide().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
pass
def test_leftJoin(self):
pass
def test_merge(self):
pass
def test_addPrefix(self):
lp = self.panel.addPrefix('foo#')
self.assertEqual(lp.items[0], 'foo#ItemA')
def test_pivot(self):
df = pivot(np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
self.assertEqual(df['a'][1], 1)
self.assertEqual(df['b'][2], 2)
self.assertEqual(df['c'][3], 3)
self.assertEqual(df['d'][4], 5)
self.assertEqual(df['e'][5], 4)
# weird overlap
        df = pivot(np.array([1, 2, 3, 4, 4]),
# -*- coding: utf-8 -*-
"""Supports the Ion Velocity Meter (IVM) onboard the Communication
and Navigation Outage Forecasting System (C/NOFS) satellite, part
of the Coupled Ion Neutral Dynamics Investigation (CINDI). Downloads
data from the NASA Coordinated Data Analysis Web (CDAWeb) in CDF
format.
The IVM is composed of the Retarding Potential Analyzer (RPA) and
Drift Meter (DM). The RPA measures the energy of plasma along the
direction of satellite motion. By fitting these measurements
to a theoretical description of plasma, the number density, plasma
composition, plasma temperature, and plasma motion may be determined.
The DM directly measures the arrival angle of plasma. Using the reported
motion of the satellite, the angle is converted into ion motion along
two orthogonal directions, perpendicular to the satellite track.
References
----------
A brief discussion of the C/NOFS mission and instruments can be found at
de La Beaujardière, O., et al. (2004), C/NOFS: A mission to forecast
scintillations, J. Atmos. Sol. Terr. Phys., 66, 1573–1591,
doi:10.1016/j.jastp.2004.07.030.
Discussion of cleaning parameters for ion drifts can be found in:
Burrell, A. G., Equatorial topside magnetic field-aligned ion drifts
at solar minimum, The University of Texas at Dallas, ProQuest
Dissertations Publishing, 2012. 3507604.
Discussion of cleaning parameters for ion temperature can be found in:
Hairston, M. R., W. R. Coley, and R. A. Heelis (2010), Mapping the
duskside topside ionosphere with CINDI and DMSP, J. Geophys. Res., 115,
A08324, doi:10.1029/2009JA015051.
Properties
----------
platform
'cnofs'
name
'ivm'
tag
None supported
inst_id
None supported
Warnings
--------
- The sampling rate of the instrument changes on July 29th, 2010.
The rate is attached to the instrument object as .sample_rate.
- The cleaning parameters for the instrument are still under development.
"""
import datetime as dt
import functools
import numpy as np
from pysat import logger
from pysat.instruments.methods import general as mm_gen
from pysatNASA.instruments.methods import cnofs as mm_cnofs
from pysatNASA.instruments.methods import cdaweb as cdw
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'cnofs'
name = 'ivm'
tags = {'': ''}
inst_ids = {'': ['']}
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'': dt.datetime(2009, 1, 1)}}
# ----------------------------------------------------------------------------
# Instrument methods
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
logger.info(mm_cnofs.ackn_str)
self.acknowledgements = mm_cnofs.ackn_str
self.references = '\n'.join((mm_cnofs.refs['mission'],
mm_cnofs.refs['ivm']))
return
def preprocess(self):
"""Apply C/NOFS IVM default attributes
Note
----
The sample rate for loaded data is attached at inst.sample_rate
before any attached custom methods are executed.
"""
self.sample_rate = 1.0 if self.date >= dt.datetime(2010, 7, 29) else 2.0
return
def clean(self):
"""Routine to return C/NOFS IVM data cleaned to the specified level
Note
----
Supports 'clean', 'dusty', 'dirty'
"""
# Make sure all -999999 values are NaN
self.data = self.data.replace(-999999., np.nan)
# Set maximum flags
if self.clean_level == 'clean':
max_rpa_flag = 1
max_idm_flag = 0
elif self.clean_level == 'dusty':
max_rpa_flag = 3
max_idm_flag = 3
else:
max_rpa_flag = 4
max_idm_flag = 6
# Find bad drifts according to quality flags
idm_mask = self.data['driftMeterflag'] > max_idm_flag
rpa_mask = self.data['RPAflag'] > max_rpa_flag
# Also exclude RPA drifts where the velocity is set to zero
if (self.clean_level == 'clean') or (self.clean_level == 'dusty'):
if 'ionVelocityX' in self.data.columns:
# Possible unrealistic velocities - value may be set to zero
# in fit routine instead of using a flag
vel_mask = self.data['ionVelocityX'] == 0.0
rpa_mask = rpa_mask | vel_mask
# Replace bad drift meter values with NaNs
if idm_mask.any():
data_labels = ['ionVelocityY', 'ionVelocityZ']
for label in data_labels:
self.data[label] = np.where(idm_mask, np.nan, self.data[label])
# Only remove field-aligned drifts if IDM component is large enough
unit_vecs = {'ionVelmeridional': 'meridionalunitvector',
'ionVelparallel': 'parallelunitvector',
'ionVelzonal': 'zonalunitvector'}
for label in unit_vecs.keys():
for coord in ['Y', 'Z']:
coord_label = ''.join([unit_vecs[label], coord])
vec_mask = idm_mask & (np.abs(self.data[coord_label]) >= 0.01)
self.data[label] = np.where(vec_mask, np.nan, self.data[label])
# Replace bad rpa values with NaNs
if rpa_mask.any():
data_labels = ['ionVelocityX', 'sensPlanePot', 'sensPlanePotvar']
for label in data_labels:
self.data[label] = np.where(rpa_mask, np.nan, self.data[label])
# Only remove field-aligned drifts if RPA component is large enough
unit_vecs = {'ionVelmeridional': 'meridionalunitvectorX',
'ionVelparallel': 'parallelunitvectorX',
'ionVelzonal': 'zonalunitvectorX'}
for label in unit_vecs:
vec_mask = rpa_mask & (np.abs(self.data[unit_vecs[label]]) >= 0.01)
self.data[label] = np.where(vec_mask, np.nan, self.data[label])
# Replace non-velocity data values where fits are bad. This test is
# separate from the drifts, as confidence in the fitted values decreases
# as the complexity increases. Densities are the most robust, followed by
# composition and temperatures.
rpa_mask = self.data['RPAflag'] > 4
if rpa_mask.any():
data_labels = ['Ni', 'ionDensity', 'ionDensityvariance',
'ionTemperature', 'ionTemperaturevariance',
'ion1fraction', 'ion1variance',
'ion2fraction', 'ion2variance',
'ion3fraction', 'ion3variance',
'ion4fraction', 'ion4variance',
'ion5fraction', 'ion5variance']
for label in data_labels:
self.data[label] = np.where(rpa_mask, np.nan, self.data[label])
# Additional checks for clean and dusty data
if self.clean_level == 'dusty' or self.clean_level == 'clean':
# Low O+ concentrations for RPA Flag of 3 are suspect. Apply the O+
# concentration criteria from Burrell, 2012. Using the ion density
# from the RPA fit ('ionDensity') instead of the measurement from the
# zero volt current ('Ni').
n_oplus = self.data['ion1fraction'] * self.data['ionDensity']
low_odens_mask = (self.data['RPAflag'] == 3) & (n_oplus <= 3.0e4)
# 100% O+ creates a shallow fit region for the ram velocity
shallow_fit_mask = self.data['ion1fraction'] >= 1.0
# Exclude areas where either of these are true
oplus_mask = low_odens_mask | shallow_fit_mask
# Only remove data if RPA component of drift is greater than 1%
unit_vecs = {'ionVelmeridional': 'meridionalunitvectorX',
'ionVelparallel': 'parallelunitvectorX',
'ionVelzonal': 'zonalunitvectorX'}
for label in unit_vecs:
            omask = oplus_mask & (np.abs(self.data[unit_vecs[label]]) >= 0.01)
#!/usr/bin/env python
"""
Make images of the identified clusters in a cell.
Hazen 09/16
"""
import numpy
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import randomcolor
import sys
import tifffile
import storm_analysis.sa_library.i3dtype as i3dtype
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.simulator.draw_gaussians_c as dg
def clusterImages(mlist_name, title, min_size, image_max, output, image_size):
i3_data = readinsight3.loadI3GoodOnly(mlist_name)
print("Only coloring clusters with at least", min_size, "localizations.")
rand_color = randomcolor.RandomColor()
scale = 4
image_size[0] = scale * image_size[0]
image_size[1] = scale * image_size[1]
red_image = numpy.zeros(image_size)
grn_image = numpy.zeros(image_size)
blu_image = numpy.zeros(image_size)
sum_image = numpy.zeros(image_size)
labels = i3_data['lk']
    start = int(numpy.min(labels))
import numpy as np
import pytest
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
def test_MaxPool_1d_default():
x = np.random.randn(1, 3, 32).astype(np.float32)
y = np.empty((1, 3, 31), dtype=np.float32)
for idx in np.ndindex(y.shape):
y[idx] = max(x[idx], x[idx[:-1] + (idx[-1] + 1,)])
op = MaxPool(x, np.array([2]))
tf_op = TensorflowConverter().visit(op)
result = tf_op().numpy()
assert np.allclose(result, y)
op = MaxPool(Input((1, 3, 32), np.dtype(np.float32)), np.array([2]))
tf_op = TensorflowConverter().visit(op)
result = tf_op(x).numpy()
    assert np.allclose(result, y)
from __future__ import print_function, division, absolute_import
import warnings
import sys
import itertools
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import keypoints_equal, reseed
class Test_blur_gaussian_(unittest.TestCase):
def setUp(self):
reseed()
def test_integration(self):
backends = ["auto", "scipy", "cv2"]
nb_channels_lst = [None, 1, 3, 4, 5, 10]
gen = itertools.product(backends, nb_channels_lst)
for backend, nb_channels in gen:
with self.subTest(backend=backend, nb_channels=nb_channels):
image = np.zeros((5, 5), dtype=np.uint8)
if nb_channels is not None:
image = np.tile(image[..., np.newaxis], (1, 1, nb_channels))
image[2, 2] = 255
mask = image < 255
observed = iaa.blur_gaussian_(
np.copy(image), sigma=5.0, backend=backend)
assert observed.shape == image.shape
assert observed.dtype.name == "uint8"
assert np.all(observed[2, 2] < 255)
assert np.sum(observed[mask]) > (5*5-1)
if nb_channels is not None and nb_channels > 1:
for c in sm.xrange(1, observed.shape[2]):
assert np.array_equal(observed[..., c],
observed[..., 0])
def test_sigma_zero(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4, 1))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
image = np.arange(4*4*3).astype(np.uint8).reshape((4, 4, 3))
observed = iaa.blur_gaussian_(np.copy(image), 0)
assert np.array_equal(observed, image)
def test_eps(self):
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
observed_no_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=0)
observed_with_eps = iaa.blur_gaussian_(np.copy(image), 1.0, eps=1e10)
assert not np.array_equal(observed_no_eps, observed_with_eps)
assert np.array_equal(observed_with_eps, image)
def test_ksize(self):
def side_effect(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
sigmas = [5.0, 5.0]
ksizes = [None, 3]
ksizes_expected = [2.6*5.0, 3]
gen = zip(sigmas, ksizes, ksizes_expected)
for (sigma, ksize, ksize_expected) in gen:
with self.subTest(sigma=sigma, ksize=ksize):
mock_GaussianBlur = mock.Mock(side_effect=side_effect)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
observed = iaa.blur_gaussian_(
np.copy(image),
sigma=sigma,
ksize=ksize,
backend="cv2")
assert np.array_equal(observed, image+1)
cargs = mock_GaussianBlur.call_args
assert mock_GaussianBlur.call_count == 1
assert np.array_equal(cargs[0][0], image)
assert isinstance(cargs[0][1], tuple)
assert np.allclose(
np.float32(cargs[0][1]),
np.float32([ksize_expected, ksize_expected]))
assert np.isclose(cargs[1]["sigmaX"], sigma)
assert np.isclose(cargs[1]["sigmaY"], sigma)
assert cargs[1]["borderType"] == cv2.BORDER_REFLECT_101
def test_more_than_four_channels(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_gaussian_(np.copy(image), 1.0)
assert image_aug.shape == image.shape
def test_backends_called(self):
def side_effect_cv2(image, ksize, sigmaX, sigmaY, borderType):
return image + 1
def side_effect_scipy(image, sigma, mode):
return image + 1
mock_GaussianBlur = mock.Mock(side_effect=side_effect_cv2)
mock_gaussian_filter = mock.Mock(side_effect=side_effect_scipy)
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
with mock.patch('cv2.GaussianBlur', mock_GaussianBlur):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="cv2")
assert mock_GaussianBlur.call_count == 1
with mock.patch('scipy.ndimage.gaussian_filter', mock_gaussian_filter):
_observed = iaa.blur_gaussian_(
np.copy(image), sigma=1.0, eps=0, backend="scipy")
assert mock_gaussian_filter.call_count == 1
def test_backends_similar(self):
with self.subTest(nb_channels=None):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
with self.subTest(nb_channels=3):
size = 10
image = np.arange(
0, size*size).astype(np.uint8).reshape((size, size))
image = np.tile(image[..., np.newaxis], (1, 1, 3))
image[1] += 1
image[2] += 2
image_cv2 = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, ksize=20, backend="cv2")
image_scipy = iaa.blur_gaussian_(
np.copy(image), sigma=3.0, backend="scipy")
diff = np.abs(image_cv2.astype(np.int32)
- image_scipy.astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
for c in sm.xrange(3):
diff = np.abs(image_cv2[..., c].astype(np.int32)
- image_scipy[..., c].astype(np.int32))
assert np.average(diff) < 0.05 * (size * size)
def test_warnings(self):
# note that self.assertWarningRegex does not exist in python 2.7
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = iaa.blur_gaussian_(
np.zeros((1, 1), dtype=np.uint32),
sigma=3.0,
ksize=11,
backend="scipy")
assert len(caught_warnings) == 1
assert (
"but also provided 'ksize' argument"
in str(caught_warnings[-1].message))
def test_other_dtypes_sigma_0(self):
dtypes_to_test_list = [
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"],
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == image)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.all(image_aug == image)
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
for dtype in float_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0, backend=backend)
assert image_aug.dtype.name == dtype.name
assert np.allclose(image_aug, image)
def test_other_dtypes_sigma_075(self):
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
dtypes_to_test_list = [
# scipy
["bool",
"uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
# cv2
["bool",
"uint8", "uint16",
"int8", "int16", "int32",
"float16", "float32", "float64"]
]
gen = zip(["scipy", "cv2"], dtypes_to_test_list)
for backend, dtypes_to_test in gen:
# bool
if "bool" in dtypes_to_test:
with self.subTest(backend=backend, dtype="bool"):
image = np.zeros((5, 5), dtype=bool)
image[2, 2] = True
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.75, backend=backend)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == (mask > 0.5))
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32, np.uint64]
int_dts = [np.int8, np.int16, np.int32, np.int64]
for dtype in uint_dts + int_dts:
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if dtype.itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64, np.float128]
values = [5000, 1000**1, 1000**2, 1000**3]
for dtype, value in zip(float_dts, values):
dtype = np.dtype(dtype)
if dtype.name in dtypes_to_test:
with self.subTest(backend=backend, dtype=dtype.name):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = iaa.blur_gaussian_(
image, sigma=0.75, backend=backend)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1,
# 2, 4, 8 bytes, i.e. 8, 16, 32, 64 bit)
max_diff = (
np.dtype(dtype).itemsize
* 0.01
* np.float128(value))
assert np.max(diff) < max_diff
def test_other_dtypes_bool_at_sigma_06(self):
# --
# blur of bool input at sigma=0.6
# --
# here we use a special mask and sigma as otherwise the only values
# ending up with >0.5 would be the ones that
# were before the blur already at >0.5
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
for backend in ["scipy", "cv2"]:
image_aug = iaa.blur_gaussian_(
np.copy(image), sigma=0.6, backend=backend)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
class Test_blur_mean_shift_(unittest.TestCase):
@property
def image(self):
image = [
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203],
[1, 2, 3, 4, 200, 201, 202, 203]
]
image = np.array(image, dtype=np.uint8).reshape((4, 2*4, 1))
image = np.tile(image, (1, 1, 3))
return image
def test_simple_image(self):
image = self.image
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
assert 0 <= np.average(image[:, 0:4, :]) <= 5
assert 199 <= np.average(image[:, 4:, :]) <= 203
def test_hw_image(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_hw1_image(self):
image = self.image[:, :, 0:1]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0.5, 0.5)
assert image_blurred.ndim == 3
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_non_contiguous_image(self):
image = self.image
image_cp = np.copy(np.fliplr(image))
image = np.fliplr(image)
assert image.flags["C_CONTIGUOUS"] is False
image_blurred = iaa.blur_mean_shift_(image, 0.5, 0.5)
assert image_blurred.shape == image_cp.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image_cp)
def test_both_parameters_are_zero(self):
image = self.image[:, :, 0]
image_blurred = iaa.blur_mean_shift_(np.copy(image), 0, 0)
assert image_blurred.shape == image.shape
assert image_blurred.dtype.name == "uint8"
assert not np.array_equal(image_blurred, image)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.blur_mean_shift_(np.copy(image), 1.0, 1.0)
assert image_aug.shape == image.shape
class TestGaussianBlur(unittest.TestCase):
def setUp(self):
reseed()
def test_sigma_is_zero(self):
        # no blur, shouldn't change anything
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_low_sigma(self):
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
# weak blur of center pixel
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
# images as numpy array
observed = aug.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
# images as list
observed = aug.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
observed = aug_det.augment_images(images_list)
assert 100 < observed[0][1, 1] < 255
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 0).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 50).all()
def test_keypoints_dont_change(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 1))]
aug = iaa.GaussianBlur(sigma=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
def test_sigma_is_tuple(self):
# varying blur sigmas
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
aug = iaa.GaussianBlur(sigma=(0, 1))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
def test_other_dtypes_bool_at_sigma_0(self):
# bool
aug = iaa.GaussianBlur(sigma=0)
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
def test_other_dtypes_uint_int_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
def test_other_dtypes_float_at_sigma_0(self):
aug = iaa.GaussianBlur(sigma=0)
dts = [np.float16, np.float32, np.float64]
for dtype in dts:
_min_value, center_value, _max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = center_value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
def test_other_dtypes_bool_at_sigma_060(self):
# --
# blur of bool input at sigma=0.6
# --
# here we use a special mask and sigma as otherwise the only values
# ending up with >0.5 would be the ones that
# were before the blur already at >0.5
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[1, 0] = 255
# mask[2, 0] = 255
# mask[2, 2] = 255
# mask[2, 4] = 255
# mask[3, 0] = 255
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=0.6)
mask_bool = np.float64([
[ 57, 14, 2, 1, 1],
[142, 42, 29, 14, 28],
[169, 69, 114, 56, 114],
[142, 42, 29, 14, 28],
[ 57, 14, 2, 1, 1]
]) / 255.0
image = np.zeros((5, 5), dtype=bool)
image[1, 0] = True
image[2, 0] = True
image[2, 2] = True
image[2, 4] = True
image[3, 0] = True
image_aug = aug.augment_image(image)
expected = mask_bool > 0.5
assert image_aug.shape == mask_bool.shape
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
def test_other_dtypes_at_sigma_1(self):
# --
# blur of various dtypes at sigma=1.0
# and using an example value of 100 for int/uint/float and True for
# bool
# --
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.float64)
# mask[2, 2] = 100
# mask = ndimage.gaussian_filter(mask, 1.0, mode="mirror")
aug = iaa.GaussianBlur(sigma=1.0)
mask = np.float64([
[1, 2, 3, 2, 1],
[2, 5, 9, 5, 2],
[4, 9, 15, 9, 4],
[2, 5, 9, 5, 2],
[1, 2, 3, 2, 1]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
assert np.average(diff) <= 2
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4
assert np.average(diff) < 2.0
def test_other_dtypes_at_sigma_040(self):
# --
# blur of various dtypes at sigma=0.4
# and using an example value of 100 for int/uint/float and True for
# bool
# --
aug = iaa.GaussianBlur(sigma=0.4)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.uint8)
# mask[2, 2] = 100
# kernel = ndimage.gaussian_filter(mask, 0.4, mode="mirror")
mask = np.float64([
[0, 0, 0, 0, 0],
[0, 0, 3, 0, 0],
[0, 3, 83, 3, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0]
])
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 4
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = mask.astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
assert np.max(diff) < 4.0
def test_other_dtypes_at_sigma_075(self):
# --
# blur of various dtypes at sigma=0.75
# and values being half-way between center and maximum for each dtype
# The goal of this test is to verify that no major loss of resolution
# happens for large dtypes.
# Such inaccuracies appear for float64 if used.
# --
aug = iaa.GaussianBlur(sigma=0.75)
# prototype kernel, generated via:
# mask = np.zeros((5, 5), dtype=np.int32)
# mask[2, 2] = 1000 * 1000
# kernel = ndimage.gaussian_filter(mask, 0.75)
mask = np.float64([
[ 923, 6650, 16163, 6650, 923],
[ 6650, 47896, 116408, 47896, 6650],
[ 16163, 116408, 282925, 116408, 16163],
[ 6650, 47896, 116408, 47896, 6650],
[ 923, 6650, 16163, 6650, 923]
]) / (1000.0 * 1000.0)
# uint, int
uint_dts = [np.uint8, np.uint16, np.uint32]
int_dts = [np.int8, np.int16, np.int32]
for dtype in uint_dts + int_dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = max_value - min_value
value = int(center_value + 0.4 * max_value)
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
if np.dtype(dtype).itemsize <= 1:
assert np.max(diff) <= 4
else:
assert np.max(diff) <= 0.01 * dynamic_range
# float
float_dts = [np.float16, np.float32, np.float64]
values = [5000, 1000*1000, 1000*1000*1000]
for dtype, value in zip(float_dts, values):
image = np.zeros((5, 5), dtype=dtype)
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.shape == mask.shape
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
# i.e. 8, 16, 32, 64 bit)
max_diff = np.dtype(dtype).itemsize * 0.01 * np.float128(value)
assert np.max(diff) < max_diff
def test_failure_on_invalid_dtypes(self):
# assert failure on invalid dtypes
aug = iaa.GaussianBlur(sigma=1.0)
for dt in [np.float128]:
got_exception = False
try:
_ = aug.augment_image(np.zeros((1, 1), dtype=dt))
except Exception as exc:
assert "forbidden dtype" in str(exc)
got_exception = True
assert got_exception
class TestAverageBlur(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestAverageBlur, self).__init__(*args, **kwargs)
base_img = np.zeros((11, 11, 1), dtype=np.uint8)
base_img[5, 5, 0] = 200
base_img[4, 5, 0] = 100
base_img[6, 5, 0] = 100
base_img[5, 4, 0] = 100
base_img[5, 6, 0] = 100
blur3x3 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 11, 56, 67, 56, 11, 0, 0, 0],
[0, 0, 0, 11, 44, 56, 44, 11, 0, 0, 0],
[0, 0, 0, 0, 11, 11, 11, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur3x3 = np.array(blur3x3, dtype=np.uint8)[..., np.newaxis]
blur4x4 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 31, 38, 38, 31, 6, 0, 0],
[0, 0, 0, 6, 25, 31, 31, 25, 6, 0, 0],
[0, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur4x4 = np.array(blur4x4, dtype=np.uint8)[..., np.newaxis]
blur5x5 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 20, 24, 24, 24, 20, 4, 0, 0],
[0, 0, 4, 16, 20, 20, 20, 16, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
blur5x5 = np.array(blur5x5, dtype=np.uint8)[..., np.newaxis]
self.base_img = base_img
self.blur3x3 = blur3x3
self.blur4x4 = blur4x4
self.blur5x5 = blur5x5
def setUp(self):
reseed()
def test_kernel_size_0(self):
        # no blur, shouldn't change anything
aug = iaa.AverageBlur(k=0)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.base_img)
def test_kernel_size_3(self):
# k=3
aug = iaa.AverageBlur(k=3)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur3x3)
def test_kernel_size_5(self):
# k=5
aug = iaa.AverageBlur(k=5)
observed = aug.augment_image(self.base_img)
assert np.array_equal(observed, self.blur5x5)
def test_kernel_size_is_tuple(self):
# k as (3, 4)
aug = iaa.AverageBlur(k=(3, 4))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@1")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_with_wider_range(self):
# k as (3, 5)
aug = iaa.AverageBlur(k=(3, 5))
nb_iterations = 200
nb_seen = [0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur4x4):
nb_seen[1] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[2] += 1
else:
raise Exception("Unexpected result in AverageBlur@2")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.23 <= p_seen[0] <= 0.43
assert 0.23 <= p_seen[1] <= 0.43
assert 0.23 <= p_seen[2] <= 0.43
def test_kernel_size_is_stochastic_parameter(self):
# k as stochastic parameter
aug = iaa.AverageBlur(k=iap.Choice([3, 5]))
nb_iterations = 100
nb_seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
if np.array_equal(observed, self.blur3x3):
nb_seen[0] += 1
elif np.array_equal(observed, self.blur5x5):
nb_seen[1] += 1
else:
raise Exception("Unexpected result in AverageBlur@3")
p_seen = [v/nb_iterations for v in nb_seen]
assert 0.4 <= p_seen[0] <= 0.6
assert 0.4 <= p_seen[1] <= 0.6
def test_kernel_size_is_tuple_of_tuples(self):
# k as ((3, 5), (3, 5))
aug = iaa.AverageBlur(k=((3, 5), (3, 5)))
possible = dict()
for kh in [3, 4, 5]:
for kw in [3, 4, 5]:
key = (kh, kw)
if kh == 0 or kw == 0:
possible[key] = np.copy(self.base_img)
else:
possible[key] = cv2.blur(
self.base_img, (kh, kw))[..., np.newaxis]
nb_iterations = 250
nb_seen = dict([(key, 0) for key, val in possible.items()])
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(self.base_img)
for key, img_aug in possible.items():
if np.array_equal(observed, img_aug):
nb_seen[key] += 1
        # don't check sum here, because 0xX and Xx0 are all the same, i.e. much
# higher sum than nb_iterations
assert np.all([v > 0 for v in nb_seen.values()])
def test_more_than_four_channels(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.AverageBlur(k=3)(image=image)
assert image_aug.shape == image.shape
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_aug = iaa.AverageBlur(k=3)(image=image)
assert image_aug.shape == image.shape
def test_keypoints_dont_change(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=(11, 11, 1))]
aug = iaa.AverageBlur(k=3)
aug_det = aug.to_deterministic()
observed = aug.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(kpsoi)
expected = kpsoi
assert keypoints_equal(observed, expected)
def test_other_dtypes_k0(self):
aug = iaa.AverageBlur(k=0)
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == image)
# uint, int
uint_dts = [np.uint8, np.uint16]
int_dts = [np.int8, np.int16]
for dtype in uint_dts + int_dts:
_min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = int(center_value + 0.4 * max_value)
image[2, 2] = int(center_value + 0.4 * max_value)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == image)
# float
float_dts = [np.float16, np.float32, np.float64]
values = [5000, 1000*1000, 1000*1000*1000]
for dtype, value in zip(float_dts, values):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, image)
def test_other_dtypes_k3_value_100(self):
# --
# blur of various dtypes at k=3
# and using an example value of 100 for int/uint/float and True for
# bool
# --
aug = iaa.AverageBlur(k=3)
# prototype mask
# we place values in a 3x3 grid at positions (row=1, col=1) and
# (row=2, col=2) (beginning with 0)
# AverageBlur uses cv2.blur(), which uses BORDER_REFLECT_101 as its
# default padding mode,
# see https://docs.opencv.org/3.1.0/d2/de8/group__core__array.html
# the matrix below shows the 3x3 grid and the padded row/col values
# around it
# [1, 0, 1, 0, 1]
# [0, 0, 0, 0, 0]
# [1, 0, 1, 0, 1]
# [0, 0, 0, 1, 0]
# [1, 0, 1, 0, 1]
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
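        # (Worked check added for clarity: averaging the top-left 3x3 window of
        #  the padded grid above gives (1+0+1 + 0+0+0 + 1+0+1)/9 = 4/9, matching
        #  mask[0, 0]; the bottom-right window sums to 5, giving mask[2, 2] = 5/9.)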
# bool
image = np.zeros((3, 3), dtype=bool)
image[1, 1] = True
image[2, 2] = True
image_aug = aug.augment_image(image)
expected = mask > 0.5
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == expected)
# uint, int
uint_dts = [np.uint8, np.uint16]
int_dts = [np.int8, np.int16]
for dtype in uint_dts + int_dts:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100
image[2, 2] = 100
image_aug = aug.augment_image(image)
# cv2.blur() applies rounding for int/uint dtypes
expected = np.round(mask * 100).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.dtype.type == dtype
assert np.max(diff) <= 2
# float
float_dts = [np.float16, np.float32, np.float64]
for dtype in float_dts:
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = 100.0
image[2, 2] = 100.0
image_aug = aug.augment_image(image)
expected = (mask * 100.0).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.dtype.type == dtype
assert np.max(diff) < 1.0
def test_other_dtypes_k3_dynamic_value(self):
# --
# blur of various dtypes at k=3
# and values being half-way between center and maximum for each
        # dtype (bool is skipped as it doesn't make any sense here)
# The goal of this test is to verify that no major loss of resolution
# happens for large dtypes.
# --
aug = iaa.AverageBlur(k=3)
# prototype mask (see above)
mask = np.float64([
[4/9, 2/9, 4/9],
[2/9, 2/9, 3/9],
[4/9, 3/9, 5/9]
])
# uint, int
uint_dts = [np.uint8, np.uint16]
int_dts = [np.int8, np.int16]
for dtype in uint_dts + int_dts:
_min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = int(center_value + 0.4 * max_value)
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.int64)
- expected.astype(np.int64))
assert image_aug.dtype.type == dtype
# accepts difference of 4, 8, 16 (at 1, 2, 4 bytes, i.e. 8, 16,
# 32 bit)
assert np.max(diff) <= 2**(1 + np.dtype(dtype).itemsize)
# float
float_dts = [np.float16, np.float32, np.float64]
values = [5000, 1000*1000, 1000*1000*1000]
for dtype, value in zip(float_dts, values):
image = np.zeros((3, 3), dtype=dtype)
image[1, 1] = value
image[2, 2] = value
image_aug = aug.augment_image(image)
expected = (mask * value).astype(dtype)
diff = np.abs(image_aug.astype(np.float128)
- expected.astype(np.float128))
assert image_aug.dtype.type == dtype
# accepts difference of 2.0, 4.0, 8.0, 16.0 (at 1, 2, 4, 8 bytes,
# i.e. 8, 16, 32, 64 bit)
            assert np.max(diff)
"""
Programmer: <NAME>
Purpose: A variety of tools for computing self-similarity matrices (SSMs)
and cross-similarity matrices (CSMs), with a particular
emphasis on speeding up Euclidean SSMs/CSMs.
"""
import numpy as np
import scipy.misc
import scipy.interpolate
import matplotlib.pyplot as plt
import SequenceAlignment.SequenceAlignment as SA
import SequenceAlignment._SequenceAlignment as SAC
from SimilarityFusion import *
import time
from multiprocessing import Pool as PPool
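# Illustrative helper (an addition for this writeup, not part of the original
# module): the "fast Euclidean" idea the docstring refers to expands
# ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, so an entire SSM comes from a single
# matrix product instead of a double loop over frames.
def euclidean_ssm_example(X):
    """
    Compute a Euclidean self-similarity matrix for a feature sequence X (N x d).
    Minimal sketch for reference only.
    """
    norms = np.sum(X ** 2, axis=1)
    D2 = norms[:, None] + norms[None, :] - 2.0 * X.dot(X.T)
    return np.sqrt(np.maximum(D2, 0.0))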
def imresize(D, dims, kind='cubic', use_scipy=False):
"""
Resize a floating point image
Parameters
----------
D : ndarray(M1, N1)
Original image
dims : tuple(M2, N2)
The dimensions to which to resize
kind : string
The kind of interpolation to use
use_scipy : boolean
Fall back to scipy.misc.imresize. This is a bad idea
because it casts everything to uint8, but it's what I
was doing accidentally for a while
Returns
-------
D2 : ndarray(M2, N2)
A resized array
"""
if use_scipy:
return scipy.misc.imresize(D, dims)
else:
M, N = dims
x1 = np.array(0.5 + np.arange(D.shape[1]), dtype=np.float32)/D.shape[1]
y1 = np.array(0.5 + np.arange(D.shape[0]), dtype=np.float32)/D.shape[0]
x2 = np.array(0.5 + np.arange(N), dtype=np.float32)/N
        y2 = np.array(0.5 + np.arange(M), dtype=np.float32)/M
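        # The original body is truncated at this point in the dump. A typical
        # completion (illustrative sketch only, not the author's exact code)
        # would interpolate D from the old grid onto the new one, e.g.:
        #
        #     f = scipy.interpolate.interp2d(x1, y1, D, kind=kind)
        #     return f(x2, y2)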
"""
This file contains the files for computing the argmax oracle.
argmax_oracle is the general oracle to be called. We also implement individual argmax_oracle_<model> methods that are
specific to each type of model class.
"""
from sklearn import svm
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from src.dataset import *
import numpy as np
import torch
def argmax_oracle_single_helper(name, w, seed=12345, visualize=False, return_test_accuracy=True):
"""
w: a numpy array of weights that we take inner product with.
"""
torch.random.manual_seed(seed)
# Normalize and transform to weighted classification where each weight is in [0, 1].
w = torch.from_numpy(w).float()
w = w / torch.max(torch.abs(w))
label = (w > 0).float()
dataset = get_dataset()
# Depending on the function class, we call different argmax oracles.
w.abs_()
if name == "sklogistic":
return argmax_oracle_sklogistic(dataset, w, label, return_test_accuracy)
elif name == "svm":
return argmax_oracle_svm(dataset, w, label, return_test_accuracy)
elif name == "sklinear":
return argmax_oracle_sklinear(dataset, w, label, return_test_accuracy)
elif name == "2d_thresh":
return argmax_oracle_2d_thresh(dataset, w, label, return_test_accuracy)
def argmax_oracle_single(w, seed=12345, visualize=False, return_test_accuracy=True):
return argmax_oracle_single_helper(model_name, w, seed=seed, visualize=visualize,
return_test_accuracy=return_test_accuracy)
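# Example call (illustrative; assumes the module-level `model_name` referenced
# above has been set elsewhere, e.g. model_name = "sklinear", that get_dataset()
# can build the dataset, and that num_points matches the dataset size):
#
#     w = np.random.RandomState(0).randn(num_points)
#     preds, test_acc = argmax_oracle_single(w)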
def argmax_oracle_2d_thresh(dataset, w, label, return_test_accuracy):
# assuming only 0/1 in w
rnd = np.random.RandomState(12345)
hs = rnd.rand(10000, 2)
X = dataset["X"].numpy()
y = label.numpy()
X_test = dataset["X_test"].numpy()
y_test = dataset["Y_test"].numpy()
X_train = X[w != 0]
y_train = y[w != 0]
thresh = (hs[:, 0].reshape((-1, 1)) > X_train[:, 0]).astype(int) + (
hs[:, 1].reshape((-1, 1)) > X_train[:, 1]).astype(int)
thresh = (thresh == 2).astype(int)
best_thresh = np.argmax(np.sum((thresh == y_train.astype(int)).astype(int), axis=1))
# print(hs[best_thresh])
# import matplotlib.pyplot as plt
# plt.scatter(X_train[:, 0], X_train[:, 1], c=['red' if l == 1 else 'blue' for l in thresh[best_thresh]])
# plt.show()
pred = (hs[:, 0][best_thresh] > X[:, 0]).astype(int) + (hs[:, 1][best_thresh] > X[:, 1]).astype(int)
pred_test = (hs[:, 0][best_thresh] > X_test[:, 0]).astype(int) + (
hs[:, 1][best_thresh] > X_test[:, 1]).astype(int)
if return_test_accuracy:
return (pred == 2).astype(int), np.mean(((pred_test == 2).astype(int) == y_test).astype(float))
else:
return (pred == 2).astype(int)
def argmax_oracle_sklearn(model, dataset, w, label, return_test_accuracy):
X = dataset["X"].numpy()
y = label.numpy()
X_test = dataset["X_test"].numpy()
y_test = dataset["Y_test"].numpy()
if np.sum(y[w != 0]) == 0:
# All labels are 0.
if return_test_accuracy:
return np.zeros(y.shape), np.mean((y_test == 0).astype(float))
else:
return np.zeros(y.shape)
    elif np.sum(y[w != 0])
#!/usr/bin/env python3
"""Machine Learning module for ADNI capstone project.
This module contains functions for use with the ADNI dataset.
"""
if 'pd' not in globals():
import pandas as pd
if 'np' not in globals():
import numpy as np
if 'plt' not in globals():
import matplotlib.pyplot as plt
if 'sns' not in globals():
import seaborn as sns
if 'scipy.stats' not in globals():
import scipy.stats
if 'StandardScaler' not in globals():
from sklearn.preprocessing import StandardScaler, MinMaxScaler
if 'KNeighborsClassifier' not in globals():
from sklearn.neighbors import KNeighborsClassifier
if 'SVC' not in globals():
from sklearn.svm import SVC
if 'train_test_split' not in globals():
from sklearn.model_selection import train_test_split, GridSearchCV
if 'MultinomialNB' not in globals():
from sklearn.naive_bayes import MultinomialNB
if 'confusion_matrix' not in globals():
from sklearn.metrics import roc_auc_score, confusion_matrix, classification_report
if 'RandomForestClassifier' not in globals():
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
if 'linear_model' not in globals():
from sklearn import linear_model
if 'PCA' not in globals():
from sklearn.decomposition import PCA
sns.set()
def get_delta_scaled(final_exam, neg_one=False):
"""Take the final_exam dataframe and return datasets.
This function returns five numpy arrays: feature_names, X_delta_male,
X_delta_female, y_delta_male, and y_delta_female. The two X arrays hold
the feature data. The two y arrays hold the diagnosis group labels.
The feature_names array hold a list of the features. The neg_one
parameter allows you to specify -1 for the negative class (for SVM)."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=nc_idx),
pd.DataFrame({'dx_group': -1}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
deltas_df = final_exam.loc[labels.index]
deltas_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
deltas_df = pd.get_dummies(deltas_df, drop_first=True, columns=['PTGENDER'])
# extract the features for change in diagnosis
X_delta = deltas_df.reindex(columns=['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta',
'RAVLT_delta', 'Hippocampus_delta', 'Ventricles_delta',
'WholeBrain_delta', 'Entorhinal_delta', 'MidTemp_delta',
'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta', 'RAVLT_delta',
'Hippocampus_delta', 'Ventricles_delta', 'WholeBrain_delta',
'Entorhinal_delta', 'MidTemp_delta', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_delta)
# extract the labels
yd = np.array(deltas_df.dx_group)
# return the data
return feature_names, Xd, yd
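# Example usage (illustrative; assumes final_exam is the prepared ADNI dataframe):
#
#     feature_names, Xd, yd = get_delta_scaled(final_exam)
#     X_train, X_test, y_train, y_test = train_test_split(
#         Xd, yd, test_size=0.3, random_state=21, stratify=yd)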
def plot_best_k(X_train, X_test, y_train, y_test, kmax=9):
"""This function will create a plot to help choose the best k for k-NN.
Supply the training and test data to compare accuracy at different k values.
Specifying a max k value is optional."""
# Setup arrays to store train and test accuracies
# view the plot to help pick the best k to use
neighbors = np.arange(1, kmax)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
if kmax < 11:
s = 2
elif kmax < 21:
s = 4
elif kmax < 41:
s = 5
elif kmax < 101:
s = 10
else:
s = 20
# Generate plot
_ = plt.title('k-NN: Varying Number of Neighbors')
_ = plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
_ = plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
_ = plt.legend()
_ = plt.xlabel('Number of Neighbors')
_ = plt.ylabel('Accuracy')
_ = plt.xticks(np.arange(0,kmax,s))
plt.show()
def plot_f1_scores(k, s, r, b, l, n):
"""This function accepts six dictionaries containing classification reports.
This function is designed to work specifically with the six dictionaries created
in the 5-Machine_Learning notebook, as the second dictionary is SVM, which
uses classes of -1 and 1, whereas the other models use classes of 0 and 1."""
# extract the data and store in a dataframe
df = pd.DataFrame({'score': [k['0']['f1-score'], k['1']['f1-score'], s['-1']['f1-score'], s['1']['f1-score'],
r['0']['f1-score'], r['1']['f1-score'], b['0']['f1-score'], b['1']['f1-score'],
l['0']['f1-score'], l['1']['f1-score'], n['0']['f1-score'], n['1']['f1-score']],
'model': ['KNN', 'KNN', 'SVM', 'SVM', 'Random Forest', 'Random Forest',
'AdaBoost', 'AdaBoost', 'Log Reg', 'Log Reg', 'Naive Bayes', 'Naive Bayes'],
'group': ['Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD',
'Non AD', 'AD', 'Non AD', 'AD']})
# create the plot
ax = sns.barplot('model', 'score', hue='group', data=df)
_ = plt.setp(ax.get_xticklabels(), rotation=25)
_ = plt.title('F1 Scores for Each Model')
_ = plt.ylabel('F1 Score')
_ = plt.xlabel('Model')
_ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
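# Hypothetical call (the six report dictionaries are assumed to come from
# classification_report(..., output_dict=True) for each fitted model):
#     plot_f1_scores(knn_rep, svm_rep, rf_rep, ada_rep, logreg_rep, nb_rep)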
def get_bl_data(final_exam, neg_one=False):
"""This function extracts the baseline data features for machine learning.
Pass the final_exam dataframe; specify optional neg_one=True for SVM (sets
the non-AD class to -1 instead of 0). Returns feature_names, features (X),
and labels (y).
"""
# map the diagnosis group and assign to dx_group
non_ad_idx = final_exam[final_exam.DX != 'AD'].index
ad_idx = final_exam[final_exam.DX == 'AD'].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
bl_df = final_exam.loc[labels.index]
bl_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
bl_df = pd.get_dummies(bl_df, drop_first=True, columns=['PTGENDER'])
# extract the baseline features
X_bl = bl_df.reindex(columns=['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_bl)
# extract the labels
yd = np.array(bl_df.dx_group)
# return the data
return feature_names, Xd, yd
def run_clinical_models(final_exam, biomarkers):
"""This dataframe runs six machine learning models on only the clinical biomarkes.
A dataframe containing summary information will be returned."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
labeled_df = final_exam.loc[labels.index]
labeled_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
labeled_df = pd.get_dummies(labeled_df, drop_first=True, columns=['PTGENDER'])
if biomarkers == 'deltas':
# extract the features for change in diagnosis
X = labeled_df.reindex(columns=['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta',
'RAVLT_delta', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta', 'RAVLT_delta',
'PTGENDER_Male', 'AGE'])
elif biomarkers == 'baseline':
# extract the baseline features
X = labeled_df.reindex(columns=['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl',
'RAVLT_immediate_bl', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl',
'RAVLT_immediate_bl', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X)
# extract the labels
yd = np.array(labeled_df.dx_group)
# split into training and test data
Xd_train, Xd_test, yd_train, yd_test = train_test_split(Xd, yd, test_size=0.3,
random_state=21, stratify=yd)
# initialize dataframe to hold summary info for the models
columns = ['model', 'hyper_params', 'train_acc', 'test_acc', 'auc', 'tp', 'fn', 'tn', 'fp',
'precision', 'recall', 'fpr', 'neg_f1', 'AD_f1']
df = pd.DataFrame(columns=columns)
# knn model
param_grid = {'n_neighbors': np.arange(1, 50)}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=5)
knn_cv.fit(Xd_train, yd_train)
k = knn_cv.best_params_['n_neighbors']
hp = 'k: {}'.format(k)
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(Xd_train, yd_train)
y_pred = knn.predict(Xd_test)
train_acc = knn.score(Xd_train, yd_train)
test_acc = knn.score(Xd_test, yd_test)
y_pred_prob = knn.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
knn_df = pd.DataFrame({'model': 'knn', 'hyper_params': hp, 'train_acc': train_acc, 'test_acc': test_acc,
'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec, 'recall': recall,
'fpr': fpr, 'neg_f1': rep['0']['f1-score'], 'AD_f1': rep['1']['f1-score']}, index=[0])
df = df.append(knn_df, ignore_index=True, sort=False)
# SVM model
# map the svm labels
yd_train_svm = np.where(yd_train == 0, yd_train - 1, yd_train)
yd_test_svm = np.where(yd_test == 0, yd_test - 1, yd_test)
num_features = Xd_train.shape[1]
param_grid = {'C': [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.25, 1.5, 1.75],
'gamma': [(1/(num_features*Xd_train.var())), (1/num_features)]}
svm = SVC(class_weight='balanced', probability=True)
svm_cv = GridSearchCV(svm, param_grid, cv=5)
svm_cv.fit(Xd_train, yd_train_svm)
C = svm_cv.best_params_['C']
gamma = svm_cv.best_params_['gamma']
hp = 'C: {}'.format(C) + ', gamma: {:.4f}'.format(gamma)
svm = SVC(C=C, gamma=gamma, class_weight='balanced',
probability=True)
svm.fit(Xd_train, yd_train_svm)
y_pred = svm.predict(Xd_test)
train_acc = svm.score(Xd_train, yd_train_svm)
test_acc = svm.score(Xd_test, yd_test_svm)
y_pred_prob = svm.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test_svm, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test_svm, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test_svm, y_pred, output_dict=True)
svm_df = pd.DataFrame({'model': 'svm', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec,
'recall': recall, 'fpr': fpr, 'neg_f1': rep['-1']['f1-score'], 'AD_f1': rep['1']['f1-score']},
index=[1])
df = df.append(svm_df, ignore_index=True, sort=False)
# Random Forests Model
trees = [101, 111, 121, 131, 141, 151, 161, 171, 181, 191, 201, 211, 221]
max_f = [1, num_features, 'log2', 'sqrt']
param_grid = {'n_estimators': trees, 'max_features': max_f}
r_forest = RandomForestClassifier(class_weight='balanced', random_state=42)
r_forest_cv = GridSearchCV(r_forest, param_grid, cv=5)
r_forest_cv.fit(Xd_train, yd_train)
n_est = r_forest_cv.best_params_['n_estimators']
n_feat = r_forest_cv.best_params_['max_features']
hp = 'trees: {}'.format(n_est) + ', max_feats: {}'.format(n_feat)
rfc = RandomForestClassifier(n_estimators=n_est, max_features=n_feat,
class_weight='balanced', random_state=42)
rfc.fit(Xd_train, yd_train)
y_pred = rfc.predict(Xd_test)
train_acc = rfc.score(Xd_train, yd_train)
test_acc = rfc.score(Xd_test, yd_test)
y_pred_prob = rfc.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
rfc_df = pd.DataFrame({'model': 'RF', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[2])
df = df.append(rfc_df, ignore_index=True, sort=False)
# AdaBoost Classifier
est = [31, 41, 51, 61, 71, 81, 91, 101]
param_grid = {'n_estimators': est}
boost = AdaBoostClassifier(random_state=42)
boost_cv = GridSearchCV(boost, param_grid, cv=5)
boost_cv.fit(Xd_train, yd_train)
n_est = boost_cv.best_params_['n_estimators']
hp = 'num_estimators: {}'.format(n_est)
model = AdaBoostClassifier(n_estimators=n_est, random_state=0)
model.fit(Xd_train, yd_train)
y_pred = model.predict(Xd_test)
train_acc = model.score(Xd_train, yd_train)
test_acc = model.score(Xd_test, yd_test)
y_pred_prob = model.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
boost_df = pd.DataFrame({'model': 'AdaBoost', 'hyper_params': hp, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[3])
df = df.append(boost_df, ignore_index=True, sort=False)
# logistic regression
logreg = linear_model.LogisticRegression(solver='lbfgs', class_weight='balanced', random_state=42)
logreg.fit(Xd_train, yd_train)
y_pred = logreg.predict(Xd_test)
train_acc = logreg.score(Xd_train, yd_train)
test_acc = logreg.score(Xd_test, yd_test)
y_pred_prob = logreg.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
logreg_df = pd.DataFrame({'model': 'logreg', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[4])
df = df.append(logreg_df, ignore_index=True, sort=False)
# Naive Bayes
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(Xd_train)
# apply the same scaling to the test set so MultinomialNB only sees non-negative features
X_test_scaled = scaler.transform(Xd_test).clip(min=0)
model = MultinomialNB()
model.fit(X_scaled, yd_train)
y_pred = model.predict(X_test_scaled)
train_acc = model.score(X_scaled, yd_train)
test_acc = model.score(X_test_scaled, yd_test)
y_pred_prob = model.predict_proba(X_test_scaled)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
nb_df = pd.DataFrame({'model': 'bayes', 'hyper_params': None, 'train_acc': train_acc,
'test_acc': test_acc, 'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp,
'precision': prec, 'recall': recall, 'fpr': fpr, 'neg_f1': rep['0']['f1-score'],
'AD_f1': rep['1']['f1-score']}, index=[5])
df = df.append(nb_df, ignore_index=True, sort=False)
# return the dataframe
return df
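# Hypothetical usage (assumes `final_exam` is the prepared dataframe used throughout the
# notebook); `biomarkers` selects which clinical feature set is modelled:
#     summary_deltas = run_clinical_models(final_exam, biomarkers='deltas')
#     summary_baseline = run_clinical_models(final_exam, biomarkers='baseline')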
def run_models(Xd_train, Xd_test, yd_train, yd_test):
"""This function runs all of the classification data supplied through the models.
Supply the training and test data.
"""
# initialize dataframe to hold summary info for the models
columns = ['model', 'hyper_params', 'train_acc', 'test_acc', 'auc', 'tp', 'fn', 'tn', 'fp',
'precision', 'recall', 'fpr', 'neg_f1', 'AD_f1']
df = pd.DataFrame(columns=columns)
# knn model
param_grid = {'n_neighbors': np.arange(1, 50)}
knn = KNeighborsClassifier()
knn_cv = GridSearchCV(knn, param_grid, cv=5)
knn_cv.fit(Xd_train, yd_train)
k = knn_cv.best_params_['n_neighbors']
hp = 'k: {}'.format(k)
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(Xd_train, yd_train)
y_pred = knn.predict(Xd_test)
train_acc = knn.score(Xd_train, yd_train)
test_acc = knn.score(Xd_test, yd_test)
y_pred_prob = knn.predict_proba(Xd_test)[:,1]
auc = roc_auc_score(yd_test, y_pred_prob)
tn, fp, fn, tp = confusion_matrix(yd_test, y_pred).ravel()
prec = tp / (tp + fp)
recall = tp / (tp + fn)
fpr = fp / (tn + fp)
rep = classification_report(yd_test, y_pred, output_dict=True)
knn_df = pd.DataFrame({'model': 'knn', 'hyper_params': hp, 'train_acc': train_acc, 'test_acc': test_acc,
'auc': auc, 'tp': tp, 'fn': fn, 'tn': tn, 'fp': fp, 'precision': prec, 'recall': recall,
'fpr': fpr, 'neg_f1': rep['0']['f1-score'], 'AD_f1': rep['1']['f1-score']}, index=[0])
df = df.append(knn_df, ignore_index=True, sort=False)
# SVM model
# map the svm labels
yd_train_svm = np.where(yd_train == 0, yd_train - 1, yd_train)
#!/usr/bin/env python3
"""Compute a background mask for X-ray microscopy data.
Functions
---------
parse_args
Parse command line arguments.
initialize_cloudvolume
Create a new CloudVolume archive.
load_subvolume
Load a subvolume of images from CloudVolume.
find_bg_mask
Create a mask of background regions in x-ray microscopy.
write_image
Write an image to CloudVolume.
Dependencies
------------
cloud-volume
mpi4py
numpy
scipy
scikit-image
"""
import argparse
import logging
import os
import re
from cloudvolume import CloudVolume
from mpi4py import MPI
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths
from skimage.exposure import histogram
from skimage.filters import gaussian
from skimage.measure import label, regionprops
from skimage.morphology import remove_small_holes
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
LOGGER = logging.getLogger('create_background_mask.py')
syslog = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s Rank %(rank)s : %(message)s')
syslog.setFormatter(formatter)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(syslog)
LOGGER = logging.LoggerAdapter(LOGGER, {'rank': str(RANK)})
def parse_args():
"""Parse command line arguments."""
p = argparse.ArgumentParser()
p.add_argument('--input', type=str,
help='path to the CloudVolume archive')
p.add_argument('--output', type=str,
help='path to the bg mask CloudVolume archive')
p.add_argument('--resolution', type=int, nargs='*', default=[10, 10, 10],
help='resolution of the dataset')
p.add_argument('--mip', type=int, default=0,
help='number of mip levels to create')
p.add_argument('--chunk-size', type=int, nargs='*', default=[64, 64, 64],
help='size of each CloudVolume block file')
p.add_argument('--z-step', type=int, default=None)
p.add_argument('--factor', type=int, nargs='*', default=[2, 2, 2],
help='factor to scale between mip levels')
p.add_argument('--flip-xy', action='store_true',
help='pass to transplose the X and Y axes')
p.add_argument('--memory-limit', type=float, default=10000,
help='max memory available to CloudVolume')
p.add_argument('--offset', type=int, nargs='*', default=[0, 0, 0],
help='offset into the volume from the upper-left corner')
p.add_argument('--quiet', action='store_true',
help='pass to deactivate logging')
return p.parse_args()
def initialize_cloudvolume(path, resolution, offset, volume_size, chunk_size,
mip, factor):
"""Create a new CloudVolume archive.
Parameters
----------
path : str
Filepath to the location to write the archive.
resolution : tuple of int
Imaging resolution of the images in each dimension.
offset : tuple of int
Offset within the volume to the start of the archive.
volume_size : tuple of int
The dimensions of the volume in pixels.
chunk_size : tuple of int
The size of each CloudVolume block in pixels.
mip : int
The number of mip levels to include.
factor : tuple of int
The factor of change in each dimension across mip levels.
Returns
-------
cv_args : dict
The parameters needed to re-access the CloudVolume archive.
"""
# Set the parameters of the info file.
info = CloudVolume.create_new_info(
num_channels=1,
layer_type='segmentation',
data_type='uint32',
encoding='compressed_segmentation',
resolution=resolution,
voxel_offset=offset,
volume_size=volume_size[:-1],
chunk_size=chunk_size,
max_mip=0,
factor=factor
)
# Set up and initialize the CloudVolume object
cv_args = dict(
bounded=True, fill_missing=True, autocrop=False,
cache=False, compress_cache=None, cdn_cache=False,
progress=False, info=info, provenance=None, compress=True,
non_aligned_writes=True, parallel=1)
# for i in range(1, mip + 1):
# info['scales'][i]['compressed_segmentation_block_size'] = \
# info['scales'][0]['compressed_segmentation_block_size']
cv = CloudVolume(path, mip=0, **cv_args)
# Create the info file.
LOGGER.info('Initializing image layer with config {}'.format(cv_args))
cv.commit_info()
return cv_args
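# Hypothetical usage sketch (the path, volume size and resolution below are illustrative,
# not taken from the original pipeline). Note that `volume_size` carries a trailing channel
# dimension, which initialize_cloudvolume strips via volume_size[:-1]:
#     cv_args = initialize_cloudvolume(
#         'file:///tmp/bg_mask', resolution=[10, 10, 10], offset=[0, 0, 0],
#         volume_size=[2048, 2048, 512, 1], chunk_size=[64, 64, 64],
#         mip=0, factor=[2, 2, 2])
#     cv = CloudVolume('file:///tmp/bg_mask', mip=0, **cv_args)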
def load_subvolume(cv, z_start, z_end, flip_xy=False):
"""Load an image from CloudVolume.
Parameters
----------
cv : cloudvolume.CloudVolume
CloudVolume image layer to mask.
z_start : int
The index of the first image in the layer.
z_end : int
The index of the last image in the layer.
flip_xy : bool
CloudVolume reorders the dimension of image volumes, and the order of
the x and y dimensions can vary. If True, indicates that the CloudVolume
layer is saved in (Y, X, Z) order; otherwise it is saved as (X, Y, Z).
Returns
-------
subvol : numpy.ndarray
The subvolume with the dimensions reordered as (Z, Y, X).
"""
# Each entry in the z dimension represents one image. Extract an image.
subvol = cv[:, :, z_start:z_end, :]
subvol = np.squeeze(subvol)
# Transpose the dimensions back to (Z, Y, X).
if not flip_xy:
subvol = np.transpose(subvol, axes=[2, 1, 0])
LOGGER.info('Loaded subvolume with shape {}.'.format(subvol.shape))
return subvol
def find_bg_mask(img):
"""Create a mask of background regions in x-ray microscopy.
Parameters
----------
img : numpy.ndarray
X-ray microscopy image.
Returns
-------
bgmask : numpy.ndarray
Binary mask of the background of ``img``.
"""
if img.ndim == 2:
img = np.expand_dims(img, axis=0)
bgmask = np.zeros((3,) +img.shape, dtype=np.uint8)
for d in range(img.ndim):
for i in range(img.shape[d]):
if d == 0:
subimg = img[i, :, :]
elif d == 1:
subimg = img[:, i, :]
elif d == 2:
subimg = img[:, :, i]
# Blur the image to smooth any background artifacts.
LOGGER.info('Blurring image.')
blur = gaussian(subimg, sigma=5, preserve_range=True)
# Compute the image histogram and find the peaks.
LOGGER.info('Finding histogram peaks.')
hist, bins = histogram(blur)
peaks, properties = find_peaks(hist) # , height=(0.3 * img.size))
prominences = peak_prominences(hist, peaks)
widths = peak_widths(hist, peaks, rel_height=0.333,
prominence_data=prominences)
# Select the left-most peak (backgrounds are usually dark) and use the
# width of the peak to select a threshold value. Create a mask of all
# pixels less than or equal to the threshold.
ordered = np.argsort(peaks)
threshold = peaks[ordered[0]] + (widths[0][ordered[0]] / 2.0)
# threshold = peaks[0] + (widths[0][0] / 2.0)
LOGGER.info('Setting hard threshold {} for image.'.format(threshold))
mask = np.zeros(subimg.shape, dtype=np.uint8)
mask[np.where(subimg <= threshold)] = 1
# Perform some clean up and find the largest connected component.
LOGGER.info('Cleaning mask of image.')
# remove_small_holes(mask, area_threshold=30, connectivity=2,
# in_place=True)
labels = label(mask)
objs = regionprops(labels)
# bg = None
# for obj in objs:
# if obj.bbox_area >= 0.85 * img.size:
# coords = obj.coords
# break
# Select the connected component with the largest bounding box as the
# background mask.
objs.sort(key=lambda x: x.bbox_area, reverse=True)
# objs = [o for o in objs
# if np.any(np.asarray(o.bbox[:mask.ndim]) == np.asarray(mask.shape))
# or np.any(np.asarray(o.bbox[mask.ndim:]) == 0)]
print(len(objs))
if len(objs) > 0:
coords = tuple([objs[0].coords[:, j] for j in range(subimg.ndim)])
LOGGER.info('Setting background mask of image.')
if d == 0:
bgmask[d, i, coords[0], coords[1]] = 1
elif d == 1:
bgmask[d, coords[0], i, coords[1]] = 1
elif d == 2:
bgmask[d, coords[0], coords[1], i] = 1
LOGGER.info('Full background mask covers {} voxels.'.format(np.sum(bgmask)))
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DiscretizationFactory."""
import collections
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.aggregators import discretization
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
_test_struct_type_int = [tf.int32, (tf.int32, (2,)), (tf.int32, (3, 3))]
_test_struct_type_float = [tf.float32, (tf.float32, (2,)), (tf.float32, (3, 3))]
_test_nested_struct_type_float = collections.OrderedDict(
a=[tf.float32, [(tf.float32, (2, 2, 1))]], b=(tf.float32, (3, 3)))
def _make_test_nested_struct_value(value):
return collections.OrderedDict(
a=[
tf.constant(value, dtype=tf.float32),
[tf.constant(value, dtype=tf.float32, shape=[2, 2, 1])]
],
b=tf.constant(value, dtype=tf.float32, shape=(3, 3)))
def _discretization_sum(scale_factor=2,
stochastic=False,
beta=0,
prior_norm_bound=None):
return discretization.DiscretizationFactory(sum_factory.SumFactory(),
scale_factor, stochastic, beta,
prior_norm_bound)
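# Illustrative sketch (not part of the original test suite): the factory wraps an inner
# SumFactory, so a single aggregation round over three float clients looks like:
#     factory = _discretization_sum(scale_factor=4, stochastic=True)
#     process = factory.create(computation_types.to_type(tf.float32))
#     state = process.initialize()
#     output = process.next(state, [1.0, 2.0, 3.0])   # output.result is approximately 6.0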
def _named_test_cases_product(*args):
"""Utility for creating parameterized named test cases."""
named_cases = []
if len(args) == 2:
dict1, dict2 = args
for k1, v1 in dict1.items():
for k2, v2 in dict2.items():
named_cases.append(('_'.join([k1, k2]), v1, v2))
elif len(args) == 3:
dict1, dict2, dict3 = args
for k1, v1 in dict1.items():
for k2, v2 in dict2.items():
for k3, v3 in dict3.items():
named_cases.append(('_'.join([k1, k2, k3]), v1, v2, v3))
return named_cases
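# Illustrative example (not from the original suite): combining two parameter dicts yields
# one named test case per combination, e.g.
#     _named_test_cases_product({'a': 1, 'b': 2}, {'c': 3, 'd': 4})
#     -> [('a_c', 1, 3), ('a_d', 1, 4), ('b_c', 2, 3), ('b_d', 2, 4)]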
class DiscretizationFactoryComputationTest(test_case.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
('float', tf.float32),
('struct_list_float_scalars', [tf.float16, tf.float32, tf.float64]),
('struct_list_float_mixed', _test_struct_type_float),
('struct_nested', _test_nested_struct_type_float))
def test_type_properties(self, value_type):
factory = _discretization_sum()
value_type = computation_types.to_type(value_type)
process = factory.create(value_type)
self.assertIsInstance(process, aggregation_process.AggregationProcess)
server_state_type = computation_types.at_server(
collections.OrderedDict(
scale_factor=tf.float32,
prior_norm_bound=tf.float32,
inner_agg_process=()))
expected_initialize_type = computation_types.FunctionType(
parameter=None, result=server_state_type)
self.assert_types_equivalent(process.initialize.type_signature,
expected_initialize_type)
expected_measurements_type = computation_types.at_server(
collections.OrderedDict(discretize=()))
expected_next_type = computation_types.FunctionType(
parameter=collections.OrderedDict(
state=server_state_type,
value=computation_types.at_clients(value_type)),
result=measured_process.MeasuredProcessOutput(
state=server_state_type,
result=computation_types.at_server(value_type),
measurements=expected_measurements_type))
self.assert_types_equivalent(process.next.type_signature,
expected_next_type)
@parameterized.named_parameters(('bool', tf.bool), ('string', tf.string),
('int32', tf.int32), ('int64', tf.int64),
('int_nested', [tf.int32, [tf.int32]]))
def test_raises_on_bad_component_tensor_dtypes(self, value_type):
factory = _discretization_sum()
value_type = computation_types.to_type(value_type)
with self.assertRaisesRegex(TypeError, 'must all be floats'):
factory.create(value_type)
@parameterized.named_parameters(
('plain_struct', [('a', tf.int32)]),
('sequence', computation_types.SequenceType(tf.int32)),
('function', computation_types.FunctionType(tf.int32, tf.int32)),
('nested_sequence', [[[computation_types.SequenceType(tf.int32)]]]))
def test_raises_on_bad_tff_value_types(self, value_type):
factory = _discretization_sum()
value_type = computation_types.to_type(value_type)
with self.assertRaisesRegex(TypeError, 'Expected `value_type` to be'):
factory.create(value_type)
@parameterized.named_parameters(('negative', -1), ('zero', 0),
('string', 'lol'), ('tensor', tf.constant(3)))
def test_raises_on_bad_scale_factor(self, scale_factor):
with self.assertRaisesRegex(ValueError, '`scale_factor` should be a'):
_discretization_sum(scale_factor=scale_factor)
@parameterized.named_parameters(('number', 3.14), ('string', 'lol'),
('tensor', tf.constant(True)))
def test_raises_on_bad_stochastic(self, stochastic):
with self.assertRaisesRegex(ValueError, '`stochastic` should be a'):
_discretization_sum(stochastic=stochastic)
@parameterized.named_parameters(('negative', -1), ('too_large', 1),
('string', 'lol'),
('tensor', tf.constant(0.5)))
def test_raises_on_bad_beta(self, beta):
with self.assertRaisesRegex(ValueError, '`beta` should be a'):
_discretization_sum(beta=beta)
@parameterized.named_parameters(('negative', -0.5), ('zero', 0),
('string', 'lol'), ('tensor', tf.constant(1)))
def test_raises_on_bad_prior_norm_bound(self, prior_norm_bound):
with self.assertRaisesRegex(ValueError, '`prior_norm_bound` should be a'):
_discretization_sum(prior_norm_bound=prior_norm_bound)
class DiscretizationFactoryExecutionTest(test_case.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
('scalar', tf.float32, [1, 2, 3], 6, False),
('rank_1_tensor', (tf.float32, [7]),
[np.arange(7.), np.arange(7.) * 2], np.arange(7.) * 3, False),
('rank_2_tensor', (tf.float32, [1, 2]), [((1, 1),), ((2, 2),)],
((3, 3),), False), ('nested', _test_nested_struct_type_float, [
_make_test_nested_struct_value(123),
_make_test_nested_struct_value(456)
], _make_test_nested_struct_value(579), False),
('stochastic', tf.float32, [1, 2, 3], 6, True))
def test_sum(self, value_type, client_data, expected_sum, stochastic):
"""Integration test with sum."""
scale_factor = 3
factory = _discretization_sum(scale_factor, stochastic=stochastic)
process = factory.create(computation_types.to_type(value_type))
state = process.initialize()
for _ in range(3):
output = process.next(state, client_data)
self.assertEqual(output.state['scale_factor'], scale_factor)
self.assertEqual(output.state['prior_norm_bound'], 0)
self.assertEqual(output.state['inner_agg_process'], ())
self.assertEqual(output.measurements,
collections.OrderedDict(discretize=()))
# Use `assertAllClose` to compare structures.
self.assertAllClose(output.result, expected_sum, atol=0)
state = output.state
@parameterized.named_parameters(('int32', tf.int32), ('int64', tf.int64),
('float64', tf.float64))
def test_output_dtype(self, dtype):
"""Checks the tensor type gets casted during preprocessing."""
x = tf.range(8, dtype=dtype)
encoded_x = discretization._discretize_struct(
x, scale_factor=10, stochastic=False, beta=0, prior_norm_bound=0)
self.assertEqual(encoded_x.dtype, discretization.OUTPUT_TF_TYPE)
@parameterized.named_parameters(('int32', tf.int32), ('int64', tf.int64),
('float64', tf.float64))
def test_revert_to_input_dtype(self, dtype):
"""Checks that postprocessing restores the original dtype."""
x = tf.range(8, dtype=dtype)
encoded_x = discretization._discretize_struct(
x, scale_factor=1, stochastic=True, beta=0, prior_norm_bound=0)
decoded_x = discretization._undiscretize_struct(
encoded_x, scale_factor=1, tf_dtype_struct=dtype)
self.assertEqual(dtype, decoded_x.dtype)
class QuantizationTest(test_case.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
_named_test_cases_product(
{
'scale_factor_1': 0.1,
'scale_factor_2': 1,
'scale_factor_3': 314,
'scale_factor_4': 2**24
}, {
'stochastic_true': True,
'stochastic_false': False
}, {
'shape_1': (10,),
'shape_2': (10, 10),
'shape_3': (10, 5, 2)
}))
def test_error_from_rounding(self, scale_factor, stochastic, shape):
dtype = tf.float32
x = tf.random.uniform(shape=shape, minval=-10, maxval=10, dtype=dtype)
encoded_x = discretization._discretize_struct(
x, scale_factor, stochastic=stochastic, beta=0, prior_norm_bound=0)
decoded_x = discretization._undiscretize_struct(
encoded_x, scale_factor, tf_dtype_struct=dtype)
x, decoded_x = self.evaluate([x, decoded_x])
self.assertAllEqual(x.shape, decoded_x.shape)
# For stochastic rounding, errors are bounded by the effective bin width;
# for deterministic rounding, they are bounded by the half of the bin width.
quantization_atol = (1 if stochastic else 0.5) / scale_factor
self.assertAllClose(x, decoded_x, rtol=0.0, atol=quantization_atol)
class ScalingTest(test_case.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('scale_factor_1', 1), ('scale_factor_2', 97),
('scale_factor_3', 10**6))
def test_scaling(self, scale_factor):
# Integers to prevent rounding.
x = tf.random.stateless_uniform([100], (1, 1), -100, 100, dtype=tf.int32)
discretized_x = discretization._discretize_struct(
x, scale_factor, stochastic=True, beta=0, prior_norm_bound=0)
reverted_x = discretization._undiscretize_struct(
discretized_x, scale_factor, tf_dtype_struct=tf.int32)
x, discretized_x, reverted_x = self.evaluate([x, discretized_x, reverted_x])
self.assertAllEqual(x * scale_factor, discretized_x) # Scaling up.
self.assertAllEqual(x, reverted_x) # Scaling down.
class StochasticRoundingTest(test_case.TestCase, parameterized.TestCase):
def test_conditional_rounding_bounds_norm(self):
"""Compare avg rounded norm across different values of beta."""
num_trials = 500
x = tf.random.uniform([100], -100, 100, dtype=tf.float32)
rounded_norms = []
for beta in [0, 0.9]:
avg_rounded_norm_beta = tf.reduce_mean([
tf.norm(discretization._stochastic_rounding(x, beta=beta))
for i in range(num_trials)
])
rounded_norms.append(avg_rounded_norm_beta)
rounded_norms = self.evaluate(rounded_norms)
# Larger beta should give smaller average norms.
self.assertAllEqual(rounded_norms, sorted(rounded_norms, reverse=True))
@parameterized.named_parameters(('beta_1', 0.0), ('beta_2', 0.6))
def test_noop_on_integers(self, beta):
x = tf.range(100, dtype=tf.float32)
rounded_x = discretization._stochastic_rounding(x, beta=beta)
x, rounded_x = self.evaluate([x, rounded_x])
self.assertAllEqual(x, rounded_x)
self.assertEqual(rounded_x.dtype, np.float32)
@parameterized.named_parameters(
_named_test_cases_product({
'beta_1': 0.0,
'beta_2': 0.6
}, {
'value_1': 0.2,
'value_2': 42.6,
'value_3': -3.3
}))
def test_biased_inputs(self, beta, value):
num_trials = 5000
x = tf.constant(value, shape=[num_trials])
rounded_x = discretization._stochastic_rounding(x, beta=beta)
err_x = rounded_x - x
x, rounded_x, err_x = self.evaluate([x, rounded_x, err_x])
# Check errors match.
self.assertAllClose(np.mean(x), np.mean(rounded_x) - np.mean(err_x))
# Check expected value.
self.assertTrue(np.floor(value) < np.mean(rounded_x) < np.ceil(value))
# The rounding events are binomially distributed and we can compute the
# stddev of the error given the `num_trials` and allow for 4 stddevs as
# the tolerance to give ~0.006% probability of test failure.
decimal = np.modf(value)
import unittest
import numpy as np
from osd.masking import Mask
class TestMaskScalar(unittest.TestCase):
"""
Uses this as a standard test data set:
np.array([ 0., 1., nan, 3., nan, nan, nan, 7., 8., 9., 10., 11., 12.,
13., nan])
"""
def test_mask(self):
np.random.seed(1)
data = np.arange(15, dtype=float)
data[np.random.uniform(size=15) < 0.2] = np.nan
mask = Mask(~np.isnan(data))
u = mask.mask(data)
q = len(u)
# test that the length is correct
np.testing.assert_equal(np.sum(~np.isnan(data)), q)
# test that the values are correct
actual = np.array([ 0., 1., 3., 7., 8., 9., 10., 11., 12., 13.])
np.testing.assert_equal(u, actual)
def test_unmask(self):
np.random.seed(1)
data = np.arange(15, dtype=float)
data[np.random.uniform(size=15) < 0.2] = np.nan
# Simple interest rate processes - some examples
#
# - Brownian motion
# - AR(1)-model
# - Vasiček-model
# - Cox-Ingersoll-Ross-model
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
# Brownian motion / Random walk
def sim_brownian(steps, start=0, sigma=1, discretization=1):
steps_mod = int(steps / discretization)
noise = sqrt(discretization) * sigma * np.random.standard_normal(steps_mod)
return(start + np.cumsum(noise))
# AR(1)-model
def sim_ar1(length_out, start=0, sigma=1, phi=1):
noise = sigma * np.random.standard_normal(length_out)
ar1 = np.zeros(length_out)
for i in range(length_out - 1):
ar1[i + 1] = phi * ar1[i] + noise[i]
return(start + ar1)
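# Quick visual check (hypothetical example, not part of the original script): simulate and
# plot a few paths from the two processes defined above, e.g.
#     np.random.seed(0)
#     for _ in range(3):
#         plt.plot(sim_brownian(250, start=0.03, sigma=0.01), lw=1)
#         plt.plot(sim_ar1(250, start=0.03, sigma=0.005, phi=0.95), lw=1, ls='--')
#     plt.show()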
# Vasiček-model
def sim_vasicek(steps, start=0, sigma=1,
reversion_level=None, reversion_strength=1,
discretization=1):
if reversion_level is None:
reversion_level = start
steps_mod = int(steps / discretization)
v = np.zeros(steps_mod)
'''
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import ListedColormap
import matplotlib.collections as mcoll
import torch as torch
from matplotlib.patches import Ellipse
def gaussian(x, y, xmean, ymean, sigma):
# 2-D gaussian, used as a fitting function.
return np.exp(-((x-xmean) ** 2 + (y-ymean) ** 2) / (2 * sigma ** 2))
def draw_lines(output,output_i,linestyle='-',alpha=1,darker=False,linewidth=2):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
loc = np.array(output[output_i,:,:,0:2])
loc = np.transpose( loc, [1,2,0] )
x = loc[:,0,:]
y = loc[:,1,:]
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
max_range = max( y_max-y_min, x_max-x_min )
xmin = (x_min+x_max)/2-max_range/2-0.1
xmax = (x_min+x_max)/2+max_range/2+0.1
ymin = (y_min+y_max)/2-max_range/2-0.1
ymax = (y_min+y_max)/2+max_range/2+0.1
cmaps = [ 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds' ]
cmaps = [ matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps ]
cmaps = [ ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps ]
if darker:
cmaps = [ ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps ]
for i in range(loc.shape[-1]):
lc = colorline(loc[:,0,i], loc[:,1,i], cmap=cmaps[i],linestyle=linestyle,alpha=alpha,linewidth=linewidth)
return xmin, ymin, xmax, ymax
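# Hypothetical usage (array shapes are illustrative): `output` is indexed as
# [simulation, particle, timestep, feature] with x/y in the first two feature slots:
#     fake_output = np.cumsum(np.random.randn(1, 5, 49, 4), axis=2)
#     xmin, ymin, xmax, ymax = draw_lines(fake_output, 0)
#     plt.xlim(xmin, xmax); plt.ylim(ymin, ymax); plt.show()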
def draw_lines_animation(output,linestyle='-',alpha=1,darker=False,linewidth=2, animationtype = 'default'):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# animation for output used to show how physical and computational errors propagate through system
global xmin, xmax, ymin, ymax
# output here is of form [perturbation, particles, timestep,(x,y)]
import matplotlib.pyplot as plt
from matplotlib import animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# scaling of variables.
loc_new = np.array(output)
loc_new_x = loc_new[:, :, :, 0]
loc_new_y = loc_new[:, :, :, 1]
fig = plt.figure()
x_min = np.min(loc_new_x[:,:,0:100])
x_max = np.max(loc_new_x[:,:,0:100])
y_min = np.min(loc_new_y[:,:,0:100])
y_max = np.max(loc_new_y[:,:,0:100])
max_range = max( y_max-y_min, x_max-x_min )
xmin = (x_min+x_max)/2-max_range/2-0.1
xmax = (x_min+x_max)/2+max_range/2+0.1
ymin = (y_min+y_max)/2-max_range/2-0.1
ymax = (y_min+y_max)/2+max_range/2+0.1
# if x >= xmax - 1.00:
# p011.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p021.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p031.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p032.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# plots for animation
ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin,ymax))
ax.set_xlabel('x')
ax.set_ylabel('y')
line, = ax.plot([],[],lw = 1)
lines = []
cmaps = [ 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds' ]
cmaps = [ matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps ]
cmaps = [ ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps ]
if darker:
cmaps = [ ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps ]
for i in range(len(loc_new_x)):
for j in range(len(loc_new_x[i])):
colour = cmaps[j].colors[int(len(cmaps[j].colors)-1)]
lobj = ax.plot([],[], lw =1, color = colour)[0]
lines.append(lobj)
def init():
# initialise the lines to be plotted
for line in lines:
line.set_data([],[])
return lines
xdata, ydata = [], []
def animate(i, xmax, xmin, ymax, ymin):
# animation step
xlist = []
ylist = []
# for j in range(len(loc_new_x)):
# for k in range(len(loc_new_x[j])):
# x = loc_new_x[j][k]
# y = loc_new_y[j][k]
# xlist.append(x)
# ylist.append(y)
if (i<=50):
for j in range(len(loc_new_x)):
for k in range(len(loc_new_x[j])):
x = loc_new_x[j][k][0:i]
y = loc_new_y[j][k][0:i]
xlist.append(x)
ylist.append(y)
for lnum, line in enumerate(lines):
line.set_data(xlist[lnum], ylist[lnum])
else:
for j in range(len(loc_new_x)):
for k in range(len(loc_new_x[j])):
x = loc_new_x[j][k][i-50:i]
y = loc_new_y[j][k][i-50:i]
xlist.append(x)
ylist.append(y)
if (np.any(np.asarray(xlist) < xmin)) or (np.any(np.asarray(xlist) > xmax)) or (np.any(np.asarray(ylist) < ymin)) or (np.any(np.asarray(ylist) > ymax)):
x_min, x_max, y_min, y_max = np.amin(np.asarray(xlist)), np.amax(np.asarray(xlist)), np.amin(np.asarray(ylist)), np.amax(np.asarray(ylist))
max_range = max(y_max - y_min, x_max - x_min)
xmin = (x_min + x_max) / 2 - max_range / 2 - 0.4
xmax = (x_min + x_max) / 2 + max_range / 2 + 0.4
ymin = (y_min + y_max) / 2 - max_range / 2 - 0.4
ymax = (y_min + y_max) / 2 + max_range / 2 + 0.4
for lnum, line in enumerate(lines):
line.axes.set_xlim(xmin, xmax)
line.axes.set_ylim(ymin, ymax)
for lnum, line in enumerate(lines):
line.set_data(xlist[lnum], ylist[lnum])
return lines
anim = animation.FuncAnimation(fig, animate, init_func=init, frames = len(loc_new_x[0][0]),fargs= (xmax, xmin, ymax, ymin) ,interval = 10)
plt.show()
anim.save(animationtype + '.mp4', writer = writer)
def draw_lines_sigma(output,output_i,sigma_plot,ax, linestyle='-',alpha=1, darker=False,linewidth=2, plot_ellipses= False):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# plot the trajectories for sigma with ellipses of size sigma used to visualise the predictions of sigma we get out.
loc = np.array(output[output_i,:,:,0:2])
loc = np.transpose( loc, [1,2,0] )
# scaling of variables.
x = loc[:,0,:]
y = loc[:,1,:]
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
max_range = max( y_max-y_min, x_max-x_min )
xmin = (x_min+x_max)/2-max_range/2-0.1
xmax = (x_min+x_max)/2+max_range/2+0.1
ymin = (y_min+y_max)/2-max_range/2-0.1
ymax = (y_min+y_max)/2+max_range/2+0.1
cmaps = [ 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds' ]
cmaps = [ matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps ]
cmaps = [ ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps ]
if darker:
cmaps = [ ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps ]
# ensure we use the same colour for ellipses as for the particles, also use a small alpha to make it more transparent
for i in range(loc.shape[-1]):
lc = colorline(loc[:,0,i], loc[:,1,i], cmap=cmaps[i],linestyle=linestyle,alpha=alpha,linewidth=linewidth)
if plot_ellipses:
# isotropic therefore the ellipses become circles
colour = cmaps[i].colors[int(len(cmaps[i].colors)/4)]
positions = output[output_i,:,:,0:2]
sigma_plot_pos = sigma_plot[output_i, :,:,0:2]
ellipses = []
# get the first timestep component of (x,y)
ellipses.append(Ellipse((positions[i][0][0], positions[i][0][1]),
width=sigma_plot_pos[i][0][0],
height=sigma_plot_pos[i][0][0], angle=0.0, color = colour))
# if Deltax^2+Deltay^2>4*(DeltaSigmax^2+DeltaSigma^2) then plot, else do not plot
# keeps track of current plot value
l = 0
for k in range(len(positions[i]) - 1):
deltar = np.linalg.norm(positions[i][k + 1] - positions[i][l])
deltasigma = np.linalg.norm(sigma_plot_pos[i][l])
if (deltar > 2 * deltasigma):
# check that it is far away from others
isfarapart = True
for m in range(len(positions)):
for n in range(len(positions[m])):
if (m != i):
deltar = np.linalg.norm(positions[m][n] - positions[i][k + 1])
deltasigma = np.linalg.norm(sigma_plot_pos[i][k + 1])
if (deltar < deltasigma):
isfarapart = False
if isfarapart:
ellipses.append(Ellipse((positions[i][k + 1][0], positions[i][k + 1][1]),
width=sigma_plot_pos[i][k + 1][0],
height=sigma_plot_pos[i][k + 1][0], angle=0.0, color = colour))
# updates to new r0 : Deltar = r - r0:
l = k
# fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ellipses:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
return xmin, ymin, xmax, ymax
def draw_lines_anisotropic(output,output_i,sigma_plot, vel_plot, ax, linestyle='-',alpha=1, darker=False,linewidth=2, plot_ellipses= False):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# plot the trajectories for sigma with ellipses of size sigma used to visualise the predictions of sigma we get out.
# here we use anisotropic sigma case
loc = np.array(output[output_i,:,:,0:2])
loc = np.transpose( loc, [1,2,0] )
x = loc[:,0,:]
y = loc[:,1,:]
x_min = np.min(x)
x_max = np.max(x)
y_min = np.min(y)
y_max = np.max(y)
max_range = max( y_max-y_min, x_max-x_min )
xmin = (x_min+x_max)/2-max_range/2-0.1
xmax = (x_min+x_max)/2+max_range/2+0.1
ymin = (y_min+y_max)/2-max_range/2-0.1
ymax = (y_min+y_max)/2+max_range/2+0.1
cmaps = [ 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds' ]
cmaps = [ matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps ]
cmaps = [ ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps ]
if darker:
cmaps = [ ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps ]
# ensure we use the same colour for ellipses as for the particles, also use a small alpha to make it more transparent
for i in range(loc.shape[-1]):
lc = colorline(loc[:,0,i], loc[:,1,i], cmap=cmaps[i],linestyle=linestyle,alpha=alpha,linewidth=linewidth)
if plot_ellipses:
colour = cmaps[i].colors[int(len(cmaps[i].colors) / 4)]
positions = output[output_i, :, :, 0:2]
sigma_plot_pos = sigma_plot[output_i, :, :, 0:2]
indices_3 = torch.LongTensor([0])
if vel_plot.is_cuda:
indices_3 = indices_3.cuda()
# plots the uncertainty ellipses for gaussian case.
# iterate through each of the atoms
# need to get the angles of the terms to be plotted:
velnorm = vel_plot.norm(p=2, dim=3, keepdim=True)
normalisedvel = vel_plot.div(velnorm.expand_as(vel_plot))
normalisedvel[torch.isnan(normalisedvel)] = np.power(1 / 2, 1 / 2)
# v||.x is just the first term of the tensor
normalisedvelx = torch.index_select(normalisedvel, 3, indices_3)
# angle of rotation is Theta = acos(v||.x) for normalised v|| and x (need angle in degrees not radians)
angle = torch.acos(normalisedvelx).squeeze() * 180 / 3.14159
ellipses = []
ellipses.append(
Ellipse((positions[i][0][0], positions[i][0][1]),
width=sigma_plot_pos[i][0][0],
height=sigma_plot_pos[i][0][1], angle=angle.tolist()[output_i][i][0], color = colour))
# iterate through each of the atoms
# if Deltax^2+Deltay^2>4*(DeltaSigmax^2+DeltaSigma^2) then plot, else do not plot
# keeps track of current plot value
l = 0
for k in range(len(positions[i]) - 1):
deltar = np.linalg.norm(positions[i][k + 1] - positions[i][l])
deltasigma = np.linalg.norm(sigma_plot_pos[i][l])
if (deltar > 2 * deltasigma):
# check that it is far away from others
isfarapart = True
for m in range(len(positions)):
for n in range(len(positions[m])):
if (m != i):
deltar = np.linalg.norm(positions[m][n] - positions[i][k + 1])
deltasigma = np.linalg.norm(sigma_plot_pos[i][k + 1])
if (deltar < deltasigma):
isfarapart = False
if isfarapart:
ellipses.append(Ellipse(
(positions[i][k + 1][0], positions[i][k + 1][1]),
width=sigma_plot_pos[i][k + 1][0],
height=sigma_plot_pos[i][k + 1][0], angle=angle.tolist()[output_i][i][k + 1], color = colour))
# updates to new r0 : Deltar = r - r0:
l = k
for e in ellipses:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
return xmin, ymin, xmax, ymax
def draw_lines_sigma_animation(output_1, output_2, output_i, sigma_plot, vel_plot, alpha=1,darker=False,linewidth=2, animationtype = 'default'):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# output_1 = true output
# output_2 = predicted output
# animation for output used to show how physical and computational errors propagate through system
global xmin, xmax, ymin, ymax
# output here is of form [perturbation, particles, timestep,(x,y)]
import matplotlib.pyplot as plt
from matplotlib import animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# gets the locations of true and predicted trajectories
loc_true = np.array(output_1[output_i, :, :, 0:2])
loc_true = np.transpose(loc_true, [1, 2, 0])
loc_pred = np.array(output_2[output_i, :, :, 0:2])
loc_pred = np.transpose(loc_pred, [1, 2, 0])
# rescales the coordinates
fig = plt.figure()
x = loc_true[:, 0, :]
y = loc_true[:, 1, :]
x_pred = loc_pred[:, 0, :]
y_pred = loc_pred[:, 1, :]
x_min_true = np.min(x)
x_max_true = np.max(x)
y_min_true = np.min(y)
y_max_true = np.max(y)
x_min_pred = np.min(x_pred)
x_max_pred = np.max(x_pred)
y_min_pred = np.min(y_pred)
y_max_pred = np.max(y_pred)
x_min = np.minimum(x_min_true, x_min_pred)
x_max = np.maximum(x_max_true, x_max_pred)
y_min = np.minimum(y_min_true, y_min_pred)
y_max = np.maximum(y_max_true, y_max_pred)
max_range = max(y_max - y_min, x_max - x_min)
xmin = (x_min + x_max) / 2 - max_range / 2 - 0.1
xmax = (x_min + x_max) / 2 + max_range / 2 + 0.1
ymin = (y_min + y_max) / 2 - max_range / 2 - 0.1
ymax = (y_min + y_max) / 2 + max_range / 2 + 0.1
# if x >= xmax - 1.00:
# p011.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p021.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p031.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# p032.axes.set_xlim(x - xmax + 1.0, x + 1.0)
# plots for animation
ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin,ymax))
ax.set_xlabel('x')
ax.set_ylabel('y')
line, = ax.plot([],[],lw = 1)
lines = []
cmaps = ['Purples', 'Greens', 'Blues', 'Oranges', 'Reds', 'Purples', 'Greens', 'Blues', 'Oranges', 'Reds']
cmaps = [matplotlib.cm.get_cmap(cmap, 512) for cmap in cmaps]
cmaps = [ListedColormap(cmap(np.linspace(0., 0.8, 256))) for cmap in cmaps]
if darker:
cmaps = [ListedColormap(cmap(np.linspace(0.2, 0.8, 256))) for cmap in cmaps]
# ensure we use the same colour for ellipses as for the particles, also use a small alpha to make it more transparent
for i in range(loc_true.shape[-1]):
colour = cmaps[i].colors[int(len(cmaps[i].colors) / 4)]
lobj = mcoll.LineCollection([], cmap='hot', lw=2, alpha=0.6)
lines.append(lobj)
for j in range(loc_pred.shape[-1]):
colour = cmaps[j].colors[int(len(cmaps[j].colors) / 4)]
lobj = mcoll.LineCollection([], cmap='hot', lw=2, alpha=0.6)
lines.append(lobj)
for line in lines:
ax.add_collection(line)
def init():
# initialise the lines to be plotted
for line in lines:
line.set_data([],[])
return lines
xdata, ydata = [], []
def animate(i, xmax, xmin, ymax, ymin):
for j in range(loc_true.shape[-1]):
lc_true = colorline(loc_true[0:i, 0, j], loc_true[0:i, 1, j], cmap=cmaps[j], linestyle='-', alpha=alpha, linewidth=linewidth)
lc_pred = colorline(loc_pred[0:i, 0, j], loc_pred[0:i, 1, j], cmap=cmaps[j], linestyle=':', alpha=alpha, linewidth=linewidth)
colour = cmaps[j].colors[int(len(cmaps[j].colors) / 4)]
lines[j] = lc_true
lines[j+loc_true.shape[-1]] = lc_pred
positions = output_2[output_i, :, 0:i, 0:2]
sigma_plot_pos = sigma_plot[output_i, :, 0:i, 0:2]
indices_3 = torch.LongTensor([0])
if vel_plot.is_cuda:
indices_3 = indices_3.cuda()
# plots the uncertainty ellipses for gaussian case.
# iterate through each of the atoms
# need to get the angles of the terms to be plotted:
velnorm = vel_plot.norm(p=2, dim=3, keepdim=True)
normalisedvel = vel_plot.div(velnorm.expand_as(vel_plot))
normalisedvel[torch.isnan(normalisedvel)] = np.power(1 / 2, 1 / 2)
# v||.x is just the first term of the tensor
normalisedvelx = torch.index_select(normalisedvel, 3, indices_3)
# angle of rotation is Theta = acos(v||.x) for normalised v|| and x (need angle in degrees not radians)
angle = torch.acos(normalisedvelx).squeeze() * 180 / 3.14159
ellipses = []
ellipses.append(
Ellipse((positions[j][0][0], positions[j][0][1]),
width=sigma_plot_pos[j][0][0],
height=sigma_plot_pos[j][0][1], angle=angle.tolist()[output_i][j][0], color=colour))
# iterate through each of the atoms
# if Deltax^2+Deltay^2>4*(DeltaSigmax^2+DeltaSigma^2) then plot, else do not plot
# keeps track of current plot value
l = 0
for k in range(len(positions[j]) - 1):
deltar = np.linalg.norm(positions[j][k + 1] - positions[j][l])
deltasigma = np.linalg.norm(sigma_plot_pos[j][l])
if (deltar > 2 * deltasigma):
# check that it is far away from others
isfarapart = True
for m in range(len(positions)):
for n in range(len(positions[m])):
if (m != j):
deltar = np.linalg.norm(positions[m][n] - positions[j][k + 1])
deltasigma = np.linalg.norm(sigma_plot_pos[j][k + 1])
if (deltar < deltasigma):
isfarapart = False
if isfarapart:
ellipses.append(Ellipse(
(positions[j][k + 1][0], positions[j][k + 1][1]),
width=sigma_plot_pos[j][k + 1][0],
height=sigma_plot_pos[j][k + 1][0], angle=angle.tolist()[output_i][j][k + 1], color=colour))
# updates to new r0 : Deltar = r - r0:
l = k
for e in ellipses:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
return lines
anim = animation.FuncAnimation(fig, animate, init_func=init, frames = len(output_1[0][0]),fargs= (xmax, xmin, ymax, ymin) ,interval = 10)
plt.show()
anim.save(animationtype + '.mp4', writer = writer)
def colorline(
x, y, z=None, cmap='copper', norm=plt.Normalize(0.0, 1.0),
linewidth=2, alpha=0.8, linestyle='-'):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
if not hasattr(z, "__iter__"):
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha, linestyle=linestyle)
ax = plt.gca()
ax.add_collection(lc)
return lc
def make_segments(x, y):
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tarfile
import warnings
from collections import defaultdict
import numpy as np
import tensorflow as tf
from scipy import linalg
from six.moves import urllib
import pandas as pd
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
pool3 = None
pool3_mean_real = None
pool3_std_real = None
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_features(images):
assert ((images.shape[3]) == 3)
assert (np.max(images) > 10)
assert (np.min(images) >= 0.0)
images = images.astype(np.float32)
bs = 100
sess = tf.get_default_session()
preds = []
feats = []
for inp in np.array_split(images, round(images.shape[0] / bs)):
# sys.stdout.write(".")
# sys.stdout.flush()
[feat, pred] = sess.run([pool3, softmax], {'InputTensor:0': inp})
feats.append(feat.reshape(-1, 2048))
preds.append(pred)
feats = np.concatenate(feats, 0)
preds = np.concatenate(preds, 0)
return preds, feats
def update_fid_mean(images):
global pool3_mean_real
global pool3_std_real
preds, feats = get_features(images)
pool3_mean_real = np.mean(feats, axis=0)
pool3_std_real = np.cov(feats, rowvar=False)
def calc_scores(images, splits=10):
preds, feats = get_features(images)
# calc inception
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
inception_m = np.mean(scores)
inception_s = np.std(scores)
# fid
mu2 = np.mean(feats, axis=0)
sigma2 = np.cov(feats, rowvar=False)
fid = calculate_frechet_distance(pool3_mean_real, pool3_std_real, mu2, sigma2)
return inception_m, inception_s, fid
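# Hypothetical usage (assumes the inception graph has already been loaded into the default
# session and that `real_images` / `generated_images` are arrays of shape (N, H, W, 3) with
# values in [0, 255]):
#     update_fid_mean(real_images)   # cache pool3 statistics of the real data
#     is_mean, is_std, fid = calc_scores(generated_images, splits=10)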
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations of the pool_3 layer, precalculated
on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
# -*- coding: utf-8 -*-
import numpy as np
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
import os
class Pattern(object):
def __init__(self, x=None, y=None, name=''):
if x is None:
self._x = np.linspace(0.1, 15, 100)
else:
self._x = x
if y is None:
self._y = np.log(self._x ** 2) - (self._x * 0.2) ** 2
else:
self._y = y
self.name = name
self.offset = 0
self._scaling = 1
self.smoothing = 0
self.bkg_pattern = None
def load(self, filename, skiprows=0):
try:
if filename.endswith('.chi'):
skiprows = 4
data = np.loadtxt(filename, skiprows=skiprows)
self._x = data.T[0]
self._y = data.T[1]
self.name = os.path.basename(filename).split('.')[:-1][0]
except ValueError:
print('Wrong data format for pattern file! - ' + filename)
return -1
@staticmethod
def from_file(filename, skip_rows=0):
try:
if filename.endswith('.chi'):
skip_rows = 4
data = np.loadtxt(filename, skiprows=skip_rows)
x = data.T[0]
y = data.T[1]
name = os.path.basename(filename).split('.')[:-1][0]
return Pattern(x, y, name)
except ValueError:
print('Wrong data format for pattern file! - ' + filename)
return -1
def save(self, filename, header=''):
data = np.dstack((self._x, self._y))
np.savetxt(filename, data[0], header=header)
def set_background(self, pattern):
self.bkg_pattern = pattern
def reset_background(self):
self.bkg_pattern = None
def set_smoothing(self, amount):
self.smoothing = amount
def rebin(self, bin_size):
"""
Returns a new pattern which is a rebinned version of the current one.
"""
x, y = self.data
x_min = np.round(np.min(x) / bin_size) * bin_size
x_max = np.round(np.max(x) / bin_size) * bin_size
new_x = np.arange(x_min, x_max + 0.1 * bin_size, bin_size)
bins = np.hstack((x_min - bin_size * 0.5, new_x + bin_size * 0.5))
new_y = (np.histogram(x, bins, weights=y)[0] / np.histogram(x, bins)[0])
return Pattern(new_x, new_y)
@property
def data(self):
if self.bkg_pattern is not None:
# create background function
x_bkg, y_bkg = self.bkg_pattern.data
if not np.array_equal(x_bkg, self._x):
# the background will be interpolated
f_bkg = interp1d(x_bkg, y_bkg, kind='linear')
# find overlapping x and y values:
ind = np.where((self._x <= np.max(x_bkg)) & (self._x >= np.min(x_bkg)))
x = self._x[ind]
y = self._y[ind]
if len(x) == 0:
# if there is no overlapping between background and pattern, raise an error
raise BkgNotInRangeError(self.name)
y = y * self._scaling + self.offset - f_bkg(x)
else:
                # if pattern and bkg share the same x basis we can simply subtract y_bkg
x, y = self._x, self._y * self._scaling + self.offset - y_bkg
else:
x, y = self.original_data
if self.smoothing > 0:
y = gaussian_filter1d(y, self.smoothing)
return x, y
@data.setter
def data(self, data):
(x, y) = data
self._x = x
self._y = y
self.scaling = 1
self.offset = 0
@property
def original_data(self):
return self._x, self._y * self._scaling + self.offset
@property
def x(self):
return self._x
@x.setter
def x(self, new_value):
self._x = new_value
@property
def y(self):
return self._y
@y.setter
def y(self, new_y):
self._y = new_y
@property
def scaling(self):
return self._scaling
@scaling.setter
def scaling(self, value):
if value < 0:
self._scaling = 0
else:
self._scaling = value
def limit(self, x_min, x_max):
x, y = self.data
return Pattern(x[np.where((x_min < x) & (x < x_max))],
y[np.where((x_min < x) & (x < x_max))])
def extend_to(self, x_value, y_value):
"""
Extends the current pattern to a specific x_value by filling it with the y_value. Does not modify inplace but
returns a new filled Pattern
        :param x_value: x-value up to which the pattern should be extended; it has to lie below the lowest
            or above the highest x-value already present in the pattern
:param y_value: number to fill the pattern with
:return: extended Pattern
"""
x_step = np.mean(np.diff(self.x))
x_min = np.min(self.x)
x_max = np.max(self.x)
if x_value < x_min:
x_fill = np.arange(x_min - x_step, x_value-x_step*0.5, -x_step)[::-1]
y_fill = np.zeros(x_fill.shape)
y_fill.fill(y_value)
new_x = np.concatenate((x_fill, self.x))
new_y = np.concatenate((y_fill, self.y))
elif x_value > x_max:
x_fill = np.arange(x_max + x_step, x_value+x_step*0.5, x_step)
y_fill = np.zeros(x_fill.shape)
y_fill.fill(y_value)
new_x = np.concatenate((self.x, x_fill))
new_y = np.concatenate((self.y, y_fill))
else:
return self
return Pattern(new_x, new_y)
def plot(self, show=False, *args, **kwargs):
import matplotlib.pyplot as plt
plt.plot(self.x, self.y, *args, **kwargs)
if show:
plt.show()
# Operators:
def __sub__(self, other):
orig_x, orig_y = self.data
other_x, other_y = other.data
if orig_x.shape != other_x.shape:
            # todo: subtraction of spectra with different shapes seems to fail somehow...
            # the other pattern will be interpolated onto this pattern's x values
            other_fcn = interp1d(other_x, other_y, kind='linear')
# find overlapping x and y values:
ind = np.where((orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)))
x = orig_x[ind]
y = orig_y[ind]
if len(x) == 0:
# if there is no overlapping between background and pattern, raise an error
raise BkgNotInRangeError(self.name)
return Pattern(x, y - other_fcn(x))
else:
return Pattern(orig_x, orig_y - other_y)
def __add__(self, other):
orig_x, orig_y = self.data
other_x, other_y = other.data
if orig_x.shape != other_x.shape:
# the background will be interpolated
            other_fcn = interp1d(other_x, other_y, kind='linear')
# find overlapping x and y values:
ind = np.where((orig_x <= np.max(other_x)) & (orig_x >= np.min(other_x)))
x = orig_x[ind]
y = orig_y[ind]
if len(x) == 0:
# if there is no overlapping between background and pattern, raise an error
raise BkgNotInRangeError(self.name)
return Pattern(x, y + other_fcn(x))
else:
return Pattern(orig_x, orig_y + other_y)
def __rmul__(self, other):
orig_x, orig_y = self.data
        return Pattern(np.copy(orig_x), np.copy(orig_y) * other)
# coding: utf-8
# In[1]:
# This code can be downloaded as a Python script and run as:
# python full_vs_EM_any_dataset.py random_state dataset_name test_proportion val_proportion M_method M_alpha M_beta
# test_proportion: The test proportion is from all the available true labels
# val_proportion: The validation proportion is from the remaining training proportion with the true labels
def is_interactive():
import __main__ as main
return not hasattr(main, '__file__')
import sys
import argparse
import numpy
import matplotlib
import os
import glob
import pandas
import keras
from keras import backend as K
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.utils import shuffle
from wlc.WLweakener import computeM, generateWeak, weak_to_index, binarizeWeakLabels
from experiments.visualizations import plot_history
from experiments.visualizations import plot_multilabel_scatter
cmap = plt.cm.get_cmap('tab20')
from experiments.utils import compute_friedmanchisquare
from experiments.utils import rankings_to_latex
dataset_name = 'mnist'
def statistical_tests(table, filename):
# Friedman test
ftest = compute_friedmanchisquare(table)
df_rankings = pandas.DataFrame(table.rank(axis=1).mean(axis=0).sort_index()).T
with open(filename + '.tex', 'w') as tf:
tf.write('''\\centering\n\\caption{{Average rankings. Friedman test {:.2f}, p-value
{:.2e}}}\n'''.format(ftest.statistic,
ftest.pvalue) +
df_rankings.to_latex(float_format='%.2f',
column_format='c'*(1 +
df_rankings.shape[1])))
def generate_summary(errorbar=True, zoom=False):
cmap = plt.cm.get_cmap('tab20')
from cycler import cycler
default_cycler = (cycler(color=['darkred', 'forestgreen', 'darkblue', 'violet', 'darkorange', 'saddlebrown']) +
cycler(linestyle=['-', '--', '-.', '-', '--', '-.']) +
cycler(marker=['o', 'v', 'x', '*', '+', '.']) +
cycler(lw=[2, 1.8, 1.6, 1.4, 1.2, 1]))
plt.rcParams['figure.figsize'] = (5, 2.5)
plt.rcParams["figure.dpi"] = 100
plt.rc('lines', linewidth=1)
plt.rc('axes', prop_cycle=default_cycler)
files_list = glob.glob("./Example_13*summary.csv")
print('List of files to aggregate')
print(files_list)
list_ = []
for file_ in files_list:
df = pandas.read_csv(file_,index_col=0, header=None, quotechar='"').T
list_.append(df)
df = pandas.concat(list_, axis = 0, ignore_index = True)
df = df[df['dataset_name'] == dataset_name]
# TODO: need to sort this number out
df.weak_true_prop = df.weak_true_prop.astype(float)
df.n_samples_train = df.n_samples_train.astype(float)
del df['dataset_name']
df_grouped = df.groupby(['alpha', 'M_method_list', 'weak_true_prop'])
for name, df_ in df_grouped:
print(name)
true_labels = round(min(df_.n_samples_train))
filename = 'Example_13_{}_{}true'.format(dataset_name, true_labels)
n_iterations = len(df_['random_state'].unique())
columns = df_['models'].iloc[0].split(',')
# TODO: To be added
#statistical_tests(df_[columns], filename)
columns.append('n_samples_train')
df_ = df_[columns]
df_ = df_.apply(pandas.to_numeric)
df_.index = df_['n_samples_train']
del df_['n_samples_train']
df_.sort_index(inplace=True)
df_mean = df_.groupby(df_.index).mean()
df_std = df_.groupby(df_.index).std()
df_count = df_.groupby(df_.index).count()
min_repetitions = df_count.min().min()
max_repetitions = df_count.max().max()
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
for column in sorted(df_mean.columns):
if errorbar:
markers, caps, bars = ax.errorbar(df_mean.index, df_mean[column],
yerr=df_std[column], label=column, elinewidth=0.5,
capsize=2.0)
# loop through bars and caps and set the alpha value
[bar.set_alpha(0.5) for bar in bars]
[cap.set_alpha(0.7) for cap in caps]
else:
ax.plot(df_mean.index, df_mean[column], label=column)
#ax.set_title('dataset {}, alpha = {}'.format(dataset_name, name[0]))
ax.grid(color='lightgrey')
ax.set_ylabel('Mean acc. (#rep [{}-{}])'.format(min_repetitions,
max_repetitions))
ax.set_xlabel('Number of training samples')
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3,
mode="expand", borderaxespad=0., fontsize=8)
ax.set_ylim([0.0, 0.925])
ax.set_xlim([0, 36000])
fig.tight_layout()
fig.savefig(filename + '.svg')
ax.spines['bottom'].set_visible(False)
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False,
linestyle='-', marker=',', lw=1.2 )
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax.plot((-d, +d), (-d-d, +d-d), **kwargs) # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
ax.plot((1 - d, 1 + d), (-d-d, +d-d), **kwargs) # top-right diagonal
ax.set_ylim([0.886, 0.925])
fig.tight_layout()
fig.savefig(filename + '_zoom.svg')
if is_interactive():
get_ipython().magic(u'matplotlib inline')
sys.path.append('../')
# Define all the variables for this experiment
random_state = 0
train_val_test_proportions = numpy.array([0.5, 0.2, 0.3]) # Train, validation and test proportions
w_wt_drop_proportions = numpy.array([0.9, 0.1]) # Train set: for weak, for true [the rest to drop]
M_method_list = ['complementary'] # Weak labels in training
alpha = 0.0 # alpha = 0 (all noise), alpha = 1 (no noise)
beta = 1 - alpha # beta = 1 (all noise), beta = 0 (no noise)
max_epochs = 1000 # Upper limit on the number of epochs
else:
parser = argparse.ArgumentParser(description='''Example 13''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--random-state', type=int,
default=42,
help='''Random seed.''')
parser.add_argument('-w', '--weak-prop', type=float, default=0.5,
help='Proportion of weak labels')
parser.add_argument('-t', '--weak-true-prop', type=float,
default=0.5,
help='''Proportion of true labels in the weak label set''')
parser.add_argument('-p', '--plot-only',
default=False, action='store_true',
help='''Generate the summary plots and exit''')
args = parser.parse_args()
random_state = args.random_state
weak_prop = args.weak_prop
weak_true_prop = args.weak_true_prop
train_val_test_proportions = numpy.array([0.5, 0.2, 0.3]) # Train, validation and test proportions
w_wt_drop_proportions = numpy.array([weak_prop*( 1 - weak_true_prop), weak_true_prop]) # Train set: for weak, for true [the rest to drop]
M_method_list = ['complementary'] # Weak labels in training
alpha = 0.0 # alpha = 0 (all noise), alpha = 1 (no noise)
beta = 1 - alpha # beta = 1 (all noise), beta = 0 (no noise)
max_epochs = 1000 # Upper limit on the number of epochs
matplotlib.use('Agg')
if args.plot_only:
generate_summary()
exit()
# # 1. Generation of a dataset
# ## 1.a. Obtain dataset with true labels
# In[2]:
from keras.datasets import cifar10, mnist
# cifar100.load_data(label_mode='fine')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
X = numpy.concatenate((x_train, x_test))
y = numpy.concatenate((y_train, y_test)).flatten()
X = X.astype('float32')
X /= 255
X, y = shuffle(X, y)
n_samples = X.shape[0]
n_features = sum(X[0].shape)
n_classes = 10
Y = label_binarize(y, range(n_classes))
print('n_samples = {}'.format(n_samples))
print('n_features = {}'.format(n_features))
# ## 1.b. Divide into training, validation and test
#
# - Validation and test will always have only true labels, while the training may have weak labels as well
#
# - $S_{train} = \{S_{wt-train}, S_{w-train}\} = [\{(x_i, b_i, y_i), i = 1,...,n\} \in X x Z x C, \{(x_i, b_i), i = 1,...,n\} \in X x Z]$
# - $S_{val} = \{(x_i, y_i), i = 1,...,n\} \in X x C$
# - $S_{test} = \{(x_i, y_i), i = 1,...,n\} \in X x C$
# In[3]:
#train_val_test_proportions = numpy.array([0.5, 0.2, 0.3])
print('Original proportions for the 3 partitions (train, validation and test)')
print(train_val_test_proportions)
# Ensure that all proportions sum to 1
train_val_test_proportions /= train_val_test_proportions.sum()
print('Proportions where to split')
train_val_test_proportions = numpy.cumsum(train_val_test_proportions)
print(train_val_test_proportions)
print('Indices where to split (from a total of {} samples)'.format(X.shape[0]))
indices = (train_val_test_proportions*X.shape[0]).astype(int)[:-1]
print(indices)
# # Divide into training, validation and test
X_train, X_val, X_test = numpy.array_split(X, indices)
Y_train, Y_val, Y_test = numpy.array_split(Y, indices)
y_train, y_val, y_test = numpy.array_split(y, indices)
print('Final sizes')
print('Training samples = {}'.format(X_train.shape[0]))
print('Validation samples = {}'.format(X_val.shape[0]))
print('Test samples = {}'.format(X_test.shape[0]))
# ## 1.c. Generate weakening processes
#
# - This will generate weak labels given the specified mixing process.
# - It will also show 3 plots with the true labels, weak labels and the corresponding rows of the mixing matrix M.
# - In all the mixing processes we remove the unlabeled option, as it can be seen as the all-labels option (if we assume that every sample belongs to one class)
# In[4]:
#M_method_list = ['odd_even', 'random_weak', 'noisy', 'random_noise', 'IPL', 'quasi_IPL']
#alpha = 0.1
#beta = 1 - alpha
M_list = []
for i, key in enumerate(M_method_list):
M_list.append(computeM(n_classes, alpha=alpha, beta=beta, method=key, seed=random_state,
unsupervised=False))
print('\nMixing matrix for set {} of type {} and shape = {}\n{}'.format(
i, key, M_list[-1].shape, numpy.round(M_list[-1], decimals=2)))
# ## 1.d. Divide training into weak portions
#
# - Currently every weak partition is of the same size
# - We will assume that a proportion of each weak set has been annotated with the true labels
# In[5]:
#w_wt_drop_proportions = numpy.array([0.1, 0.1]) # for weak, for true [the rest to drop]
cut_indices = (w_wt_drop_proportions.cumsum()*X_train.shape[0]).astype(int)
print('Indices for the cuts = {}'.format(cut_indices))
X_w_train, X_wt_train, _ = numpy.array_split(X_train, cut_indices)
y_w_train, y_wt_train, _ = numpy.array_split(y_train, cut_indices)
Y_w_train, Y_wt_train, _ = numpy.array_split(Y_train, cut_indices)
print('Portion with only weak labels = {}'.format(X_w_train.shape[0]))
print('Portion with weak and true labels = {}'.format(X_wt_train.shape[0]))
X_w_train_list = numpy.array_split(X_w_train, len(M_method_list))
y_w_train_list = numpy.array_split(y_w_train, len(M_method_list))
Y_w_train_list = numpy.array_split(Y_w_train, len(M_method_list))
Z_w_train_list = []
z_w_train_list = []
print('## Portion with only weak labels ##')
for i, M in enumerate(M_list):
print('Generating weak labels for set {} with mixing process {}'.format(i, M_method_list[i]))
z_w_train_list.append(generateWeak(y_w_train_list[i], M))
Z_w_train_list.append(binarizeWeakLabels(z_w_train_list[i], n_classes))
print('Total shape = {}'.format(z_w_train_list[-1].shape))
print('Sample of z labels\n{}'.format(z_w_train_list[-1][:3]))
print('Sample of Z labels\n{}'.format(Z_w_train_list[-1][:3]))
X_wt_train_list = numpy.array_split(X_wt_train, len(M_method_list))
y_wt_train_list = numpy.array_split(y_wt_train, len(M_method_list))
Y_wt_train_list = numpy.array_split(Y_wt_train, len(M_method_list))
Z_wt_train_list = []
z_wt_train_list = []
print('## Portion with both weak and true labels ##')
for i, M in enumerate(M_list):
print('Generating weak labels for set {} with mixing process {}'.format(i, M_method_list[i]))
z_wt_train_list.append(generateWeak(y_wt_train_list[i], M))
Z_wt_train_list.append(binarizeWeakLabels(z_wt_train_list[i], n_classes))
print('Total shape = {}'.format(z_wt_train_list[-1].shape))
print('Sample of z labels\n{}'.format(z_wt_train_list[-1][:3]))
print('Sample of Z labels\n{}'.format(Z_wt_train_list[-1][:3]))
# In[6]:
from experiments.visualizations import plot_multilabel_scatter
fig = plt.figure(figsize=(6, len(z_wt_train_list)*3))
j = 1
for i in range(len(Z_wt_train_list)):
X_sample = X_wt_train_list[i][:100].reshape((100, -1))
ax = fig.add_subplot(len(Z_wt_train_list), 2, j)
_ = plot_multilabel_scatter(X_sample, Z_wt_train_list[i][:100], fig=fig,
ax=ax, title='Weak labels set {}'.format(i), cmap=cmap)
ax.set_ylabel('M {}'.format(M_method_list[i]))
ax = fig.add_subplot(len(Z_wt_train_list), 2, j+1)
_ = plot_multilabel_scatter(X_sample, Y_wt_train_list[i][:100], fig=fig,
ax=ax, title='True labels set {}'.format(i), cmap=cmap)
j += 2
fig.tight_layout()
# # Define a common model
# In[7]:
from keras.callbacks import EarlyStopping, Callback
from keras import regularizers
def log_loss(y_true, y_pred):
y_pred = K.clip(y_pred, K.epsilon(), 1.0-K.epsilon())
out = -y_true*K.log(y_pred)
return K.mean(out, axis=-1)
#max_epochs = 1000
# Callback to show performance per epoch in the same line
class EpochCallback(Callback):
def on_epoch_end(self, epoch, logs={}):
print('\rEpoch {}, val_loss = {:.2e}, val_acc = {:.2f}'.format(epoch, logs['val_loss'], logs['val_acc']), end=' ')
# Callback for early stopping
epoch_callback = EpochCallback()
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=int(max_epochs/20),
verbose=2, mode='auto', baseline=None,
restore_best_weights=True)
def make_model(loss, l2=0.0):
    # Careful: this uses global variables for the input and output shapes
numpy.random.seed(random_state)
model = keras.models.Sequential()
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(Y.shape[1], input_shape=X[0].shape,
kernel_regularizer=regularizers.l2(l2),
activation='softmax'))
model.compile(optimizer='adam', loss=loss, metrics=['ce', 'mse', 'acc'])
return model
# Keyword arguments for the fit function
fit_kwargs = dict(validation_data=(X_val, Y_val), epochs=max_epochs, verbose=0,
callbacks=[early_stopping, epoch_callback], shuffle=True)
# Save the final model for each method
final_models = {}
# # Fully supervised (upperbound)
#
# Train with all true labels
# In[8]:
train_method = 'Supervised'
# In this dataset the best l2 parameter is 0.0
l2_list = numpy.array([0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
l2_list = numpy.array([1e-9])
model_supervised_list = []
val_losses = numpy.zeros_like(l2_list)
for i, l2 in enumerate(l2_list):
print('Evaluating l2 regularization = {}'.format(l2))
model = make_model(log_loss, l2=l2)
history = model.fit(numpy.concatenate((*X_w_train_list, *X_wt_train_list)),
numpy.concatenate((*Y_w_train_list, *Y_wt_train_list)),
**fit_kwargs)
plot_history(history, model, X_test, y_test)
model_supervised_list.append(model)
best_epoch = numpy.argmin(model.history.history['val_loss'])
val_losses[i] = model.history.history['val_loss'][best_epoch]
plt.show()
best_supervised = numpy.argmin(val_losses)
final_models[train_method] = model_supervised_list[best_supervised]
l2 = l2_list[best_supervised]
print('Best l2 = {}'.format(l2))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogx(l2_list, val_losses, 'o-')
ax.scatter(l2, val_losses[best_supervised], color='gold',
edgecolor='black', marker='*', s=150, zorder=3)
# # Our method with EM and original M
#
# Train EM with all weak labels
# In[ ]:
def EM_log_loss(y_true, y_pred):
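    # Here y_true holds, for each sample, the row of the mixing matrix M (the "virtual"
    # label V) associated with its observed weak label. The E-step computes the posterior
    # over true classes (Q, normalized over classes); the M-step minimizes the cross-entropy
    # against that posterior, which is excluded from the gradient via stop_gradient.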
y_pred = K.clip(y_pred, K.epsilon(), 1.0-K.epsilon())
Q = y_true * y_pred
Z_em_train = Q / K.sum(Q, axis=-1, keepdims=True)
out = -K.stop_gradient(Z_em_train)*K.log(y_pred)
return K.mean(out, axis=-1)
model = make_model(EM_log_loss, l2=l2)
M_true_list = []
n_samples_train = X_w_train.shape[0] + X_wt_train.shape[0]
# Add weak samples
for i, M in enumerate(M_list):
q = (X_w_train_list[i].shape[0]/n_samples_train)
M_true_list.append(M * q)
print('q_{} weak = {:.3f}'.format(i, q))
# Add true samples
M_supervised = computeM(n_classes, method='supervised')
for i, M in enumerate(M_list):
q = (X_wt_train_list[i].shape[0]/n_samples_train)
M_true_list.append(M_supervised * q)
print('q_{} true = {:.3f}'.format(i, q))
M_true = numpy.concatenate(M_true_list)
last_index = 0
Z_train_index_list = []
V_train_list = []
# Add weak samples
for i in range(len(M_method_list)):
Z_train_index_list.append(last_index + weak_to_index(Z_w_train_list[i], method=M_method_list[i]))
last_index += len(M_list[i])
V_train_list.append(M_true[Z_train_index_list[-1]])
# Add true samples
for i in range(len(M_method_list)):
Z_train_index_list.append(last_index + weak_to_index(Y_wt_train_list[i], method='supervised'))
last_index += n_classes
V_train_list.append(M_true[Z_train_index_list[-1]])
history = model.fit(numpy.concatenate((*X_w_train_list, *X_wt_train_list)),
numpy.concatenate(V_train_list),
**fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['EM original M'] = model
# # Our method with EM and estimated M
# In[ ]:
from wlc.WLweakener import estimate_M
model = make_model(EM_log_loss, l2=l2)
M_estimated_list = []
n_samples_train = X_w_train.shape[0] + X_wt_train.shape[0]
# Add weak samples
for i in range(len(M_list)):
M = estimate_M(Z_wt_train_list[i], Y_wt_train_list[i],
range(n_classes), reg='Partial', Z_reg=Z_w_train_list[i], alpha=1)
q = (X_w_train_list[i].shape[0]/n_samples_train)
M_estimated_list.append(M * q)
print('q_{} weak = {:.3f}'.format(i, q))
# Add true samples
M_supervised = computeM(n_classes, method='supervised')
for i in range(len(M_list)):
q = (X_wt_train_list[i].shape[0]/n_samples_train)
M_estimated_list.append(M_supervised * q)
print('q_{} true = {:.3f}'.format(i, q))
M_estimated = numpy.concatenate(M_estimated_list)
last_index = 0
Z_train_index_list = []
V_train_list = []
# Add weak samples
for i in range(len(M_method_list)):
Z_train_index_list.append(last_index + weak_to_index(Z_w_train_list[i], method='random_weak'))
last_index += 2**n_classes
V_train_list.append(M_estimated[Z_train_index_list[-1]])
# Add true samples
for i in range(len(M_method_list)):
Z_train_index_list.append(last_index + weak_to_index(Y_wt_train_list[i], method='supervised'))
last_index += n_classes
V_train_list.append(M_estimated[Z_train_index_list[-1]])
history = model.fit(numpy.concatenate((*X_w_train_list, *X_wt_train_list)),
numpy.concatenate(V_train_list),
**fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['EM estimated M'] = model
# In[ ]:
for i, (m1, m2) in enumerate(zip(M_true_list, M_estimated_list)):
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1,3,1)
ax.set_title('True M')
cax = ax.imshow(m1, interpolation='nearest', aspect='auto')
fig.colorbar(cax, orientation="horizontal")
ax = fig.add_subplot(1,3,2)
ax.set_title('Estimated M')
cax = ax.imshow(m2, interpolation='nearest', aspect='auto')
fig.colorbar(cax, orientation="horizontal")
if m1.shape == m2.shape:
mse = numpy.power(m1 - m2, 2).sum()
ax = fig.add_subplot(1,3,3)
ax.set_title('MSE = {:.2f}'.format(mse))
cax = ax.imshow(numpy.power(m1 - m2, 2), interpolation='nearest', aspect='auto')
fig.colorbar(cax, orientation="horizontal")
# # Weak (lowerbound)
# In[ ]:
model = make_model(log_loss, l2=l2)
history = model.fit(numpy.concatenate((*X_w_train_list, *X_wt_train_list)),
                    numpy.concatenate((*Z_w_train_list, *Y_wt_train_list)),
                    **fit_kwargs)
import numpy as np
import pytest
import cubepy
def integrand_ellipsoid_v(x, r):
rho = np.asarray(x[0])
# phi = np.array(x[1])
theta = np.asarray(x[2])
return np.prod(r, axis=0) * rho ** 2 * np.sin(theta)
def test_brick():
def integrand_brick(x):
return np.ones_like(x)
def exact_brick(r):
return np.prod(r, axis=0)
value, error = cubepy.integrate(integrand_brick, 0.0, 1.0)
assert np.allclose(value, exact_brick(1.0))
assert np.all(error < 1e-6)
lo = [0, 0, 0]
hi = [1, 2, 3]
value, error = cubepy.integrate(integrand_brick, lo, hi, is_1d=False)
assert np.allclose(value, exact_brick(hi))
assert np.all(error < 1e-6)
def test_sphere():
def integrand_sphere(x):
r, _, phi = x
return r ** 2 * np.sin(phi)
def exact_sphere(r):
return (4.0 / 3.0) * np.pi * r ** 3
value, error = cubepy.integrate(
integrand_sphere, [0.0, 0.0, 0.0], [1.0, 2.0 * np.pi, np.pi]
)
assert np.allclose(value, exact_sphere(1.0))
assert np.all(error < 1e-5)
def test_ellipsoid():
def integrand_ellipsoid(x, a, b, c):
rho, _, theta = x
return a * b * c * rho ** 2 * np.sin(theta)
def exact_ellipsoid(axes):
return (4 / 3) * np.pi * np.prod(axes, axis=0)
value, error = cubepy.integrate(
integrand_ellipsoid, [0.0, 0.0, 0.0], [1.0, 2.0 * np.pi, np.pi], args=(1, 2, 3)
)
assert np.allclose(value, exact_ellipsoid([1, 2, 3]))
assert np.all(error < 1e-5)
def test_multi():
def integrand(x):
return 1 + 8 * x[0] * x[1]
low = np.array(
[
1000000 * [0.0],
1000000 * [1.0],
]
)
high = np.array(
[
1000000 * [3.0],
1000000 * [2.0],
]
)
value, error = cubepy.integrate(integrand, low, high)
assert np.allclose(value, 57)
assert np.all(error < 1e-6)
def test_van_dooren_de_riddler_simple_1():
lo = np.array([0.0, 0.0, 0.0, -1.0, -1.0, -1.0])
hi = np.array([2.0, 1.0, (np.pi / 2.0), 1.0, 1.0, 1.0])
value, error = cubepy.integrate(
lambda x: (x[0] * x[1] ** 2 * np.sin(x[2])) / (4 + x[3] + x[4] + x[5]), lo, hi
)
assert np.allclose(value, 1.434761888397263)
assert np.all(error < 1e-2)
def test_van_dooren_de_riddler_simple_2():
value, error = cubepy.integrate(
lambda x: x[2] ** 2 * x[3] * np.exp(x[2] * x[3]) * (1 + x[0] + x[1]) ** -2,
np.array([0.0, 0.0, 0.0, 0.0]),
np.array([1.0, 1.0, 1.0, 2.0]),
)
assert np.allclose(value, 0.5753641449035616)
assert np.all(error < 1e-4)
def test_van_dooren_de_riddler_simple_3():
value, error = cubepy.integrate(
lambda x: 8 / (1 + 2 * (np.sum(x, axis=0))),
np.array([0.0, 0.0, 0.0]),
np.array([1.0, 1.0, 1.0]),
)
    assert np.allclose(value, 2.152142832595894)
import numpy as np
import matplotlib.pyplot as plt
from mnist import mnist_data
class Perceptron(object):
def __init__(self, num_inputs, epochs=500, learning_rate=0.1):
self.weights = np.random.uniform(low=-1., high=1., size=(num_inputs + 1) * 10).reshape(257, 10)
self.weights[-1:, :] = 1
self.epochs = epochs
self.learning_rate = learning_rate
self.train_history = []
self.test_history = []
def predict(self, inputs):
# Now the matrix mult with the weights to get the output
output = np.dot(inputs, self.weights)
# Now need to choose which one is the largest to give the output for this one
activation = np.argmax(output)
# Return the activation
return activation
def predict_on_set(self, test_inputs, labels, verbose=False):
training_inputs = np.c_[test_inputs, np.ones(test_inputs.shape[0])] # Add biases
combined = np.asarray(list(zip(training_inputs, labels)))
right = 0
wrong = 0
for inputs, label in combined:
prediction = self.predict(inputs)
if prediction == label:
right += 1
else:
wrong += 1
if verbose:
print((right / (wrong + right)))
return (right / (wrong + right))
def train(self, training_inputs, labels, shuffle=False, neg_pred=10, pos_pred=1):
training_inputs = np.c_[training_inputs, np.ones(training_inputs.shape[0])] # Add biases
combined = np.asarray(list(zip(training_inputs, labels)))
for _ in range(self.epochs):
right = 0
wrong = 0
self.test_history.append(self.predict_on_set(x_test, y_test))
if shuffle:
np.random.shuffle(combined)
for inputs, label in combined:
ldir = np.zeros((10, 1))
prediction = self.predict(inputs)
label = label[0]
if prediction == label:
ldir[label] += pos_pred
else:
ldir[label] += neg_pred
ldir[prediction] -= neg_pred
if prediction == label:
right += 1
else:
wrong += 1
self.weights[:-1] += self.learning_rate * (ldir * inputs[:-1]).T
self.weights[-1:] += self.learning_rate * ldir.T
self.train_history.append(self.acc(right, wrong))
self.plot_training(neg_pred, pos_pred)
return self.train_history, self.test_history
def plot_training(self, neg_val, pos_val):
timesteps = [i for i in range(len(self.train_history))]
plt.plot(timesteps, self.train_history, label='Training Set')
plt.plot(timesteps, self.test_history, label='Test Set')
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.title("Accuracy over Epoch Neg: {} Pos: {}".format(np.round(neg_val, 6), np.round(pos_val, 6)))
plt.legend(loc='best')
plt.savefig("Neg_{}_Pos_{}_Test_{}_Train_{}.png".format(neg_val, pos_val, self.test_history[-1],
self.train_history[-1]), dpi=300)
plt.cla()
def acc(self, right, wrong):
return right / (right + wrong)
x_train, y_train, x_test, y_test = mnist_data("data")
# Reshape to 256 elements
x_train = x_train.reshape((-1, 256))
x_test = x_test.reshape((-1, 256))
np.random.seed(1337)
network = Perceptron(256)
network.predict_on_set(x_test, y_test)
network.train(x_train, y_train, shuffle=True)
network.predict_on_set(x_test, y_test)
trained_precdictions = []
test_predictions = []
for neg_val in np.logspace(-5, 5, num=10):
for pos_val in np.logspace(-5, 5, num=10):
print("Neg Val: {} Pos Val: {}".format(neg_val, pos_val))
np.random.seed(1337)
network = Perceptron(256)
network.predict_on_set(x_test, y_test, verbose=True)
train_hist, test_hist = network.train(x_train, y_train, shuffle=True, neg_pred=neg_val, pos_pred=pos_val)
trained_precdictions.append(train_hist)
test_predictions.append(test_hist)
# Now have all the predictions
# Plot them all
vals = np.logspace(-5, 5, num=10)
fig, axes = plt.subplots(10, 10, sharex="all", sharey="all", figsize=(20, 20))
fig.subplots_adjust(wspace=0)
# Now go through and plot everything
for neg_val in range(10):
for pos_val in range(10):
        # index into the flattened 10x10 grid of runs (row-major: neg index * 10 + pos index)
        axes[neg_val, pos_val].plot([i for i in range(len(trained_precdictions[neg_val * 10 + pos_val]))],
                                    trained_precdictions[neg_val * 10 + pos_val])
        axes[neg_val, pos_val].plot([i for i in range(len(test_predictions[neg_val * 10 + pos_val]))],
                                    test_predictions[neg_val * 10 + pos_val])
fig.text(0.5, 0.005, 'Epoch, pos_pred value', ha='center', va='center')
fig.text(0.005, 0.5, 'Fraction Correct, neg_pred value', ha='center', va='center', rotation='vertical')
for index, val in enumerate(vals):
    axes[index, 0].set_ylabel(str(np.round(val, 4)))
import numpy as np
import copy
from generative_playground.codec.codec import get_codec
from generative_playground.molecules.model_settings import get_settings
from rdkit import Chem
from math import floor
class GraphEnvironment:
def __init__(self,
mask_gen,
reward_fun=None,
batch_size=1,
save_dataset=None):
self.mask_gen = mask_gen
self.codec = mask_gen.grammar
self.action_dim = self.codec.feature_len()
self.state_dim = self.action_dim
self._max_episode_steps = mask_gen.MAX_LEN
self.reward_fun = reward_fun
self.batch_size = batch_size
self.save_dataset = save_dataset
self.smiles = None
self.seq_len = None
self.valid = None
self.done_rewards = None
self.reset()
def reset(self):
self.mask_gen.reset()
next_action = (None, [None for _ in range(self.batch_size)])
graphs, node_mask, full_logit_priors = self.mask_gen.step(next_action)
self.done_rewards = [None for _ in range(self.batch_size)]
self.smiles = [None for _ in range(self.batch_size)]
self.seq_len = np.zeros([self.batch_size])
self.valid = np.zeros([self.batch_size])
return copy.deepcopy(graphs), node_mask, full_logit_priors
def step(self, full_action):
'''
Convention says environment outputs np.arrays
        :param rule_action: LongTensor(batch_size), or np.array(batch_size) of ints, the last discrete rule_action chosen
:return:
'''
if type(full_action)==tuple and len(full_action) == 2:
# old-style inputs, separate node and rule selections
node_action, rule_action = full_action
else: # new-style inputs, node and rule encoded in one int
node_action = [floor(a.item() / self.action_dim) for a in full_action]
rule_action = [a.item() % self.action_dim for a in full_action]
next_state = self.mask_gen.step((node_action, rule_action))
graphs, node_mask, full_logit_priors = next_state
# TODO: the rest here is just bookkeeping + reward calculation
if self.mask_gen.t < self._max_episode_steps:
done = self.codec.is_padding(np.array(rule_action))# max index is padding, by convention
else:
done = np.ones_like(rule_action) == 1
reward = np.zeros_like(rule_action, dtype=np.float)
# for those sequences just computed, calculate the reward
for i in range(len(rule_action)):
if self.done_rewards[i] is None and done[i]:
this_graph = graphs[i]
self.smiles[i] = this_graph.to_smiles()
this_mol = Chem.MolFromSmiles(self.smiles[i])
if this_mol is None:
print(self.smiles[i])
self.valid[i] = 0
else:
self.valid[i] = 1
this_reward = self.reward_fun([self.smiles[i]])[0]
self.done_rewards[i] = this_reward
reward[i] = this_reward
self.seq_len[i] = self.mask_gen.t
#TODO: put the special string handling into the hdf5 wrapper
if self.save_dataset is not None:
import h5py
dt = h5py.special_dtype(vlen=str) # PY3 hdf5 datatype for variable-length Unicode strings
if len(self.actions) == self._max_episode_steps:
# dump the whole batch to disk
append_data = {'smiles': np.array(self.smiles, dtype=dt),
'actions': np.concatenate(self.actions, axis=1),
'seq_len': self.seq_len}
self.save_dataset.append(append_data)
return next_state, reward, done, (self.smiles, self.valid)
def seed(self, random_seed):
return random_seed
class SequenceEnvironment:
def __init__(self,
molecules=True,
grammar=True,
reward_fun=None,
batch_size=1,
max_steps=None,
save_dataset=None):
settings = get_settings(molecules, grammar)
self.codec = get_codec(molecules, grammar, settings['max_seq_length'])
self.action_dim = self.codec.feature_len()
self.state_dim = self.action_dim
if max_steps is None:
self._max_episode_steps = settings['max_seq_length']
else:
self._max_episode_steps = max_steps
self.reward_fun = reward_fun
self.batch_size = batch_size
self.save_dataset = save_dataset
self.smiles = None
self.seq_len = None
self.valid = None
self.actions = None
self.done_rewards = None
self.reset()
def reset(self):
self.actions = []
self.done_rewards = [None for _ in range(self.batch_size)]
self.smiles = [None for _ in range(self.batch_size)]
self.seq_len = np.zeros([self.batch_size])
self.valid = np.zeros([self.batch_size])
return [None]*self.batch_size
def step(self, action):
'''
Convention says environment outputs np.arrays
        :param action: LongTensor(batch_size), or np.array(batch_size) of ints, the last discrete action chosen
:return:
'''
try: # in case action is a torch.Tensor
            action = action.cpu().numpy()
except:
pass
self.actions.append(action[:,None])
next_state = action
if len(self.actions) < self._max_episode_steps:
done = self.codec.is_padding(action) # max index is padding, by convention
else:
done = np.ones_like(action) == 1
        reward = np.zeros_like(action, dtype=np.float)
import numpy as np
import matplotlib.pyplot as plt
def input_xydim():
"""
Specify the parking lot dimensions in x,y directions by the user
return 1*2 np.array 'xy_dim'
the first element of 'xy_dim' is the dimension in x_direction
the second element of 'xy_dim' is the dimension in y_direction
"""
print('Please specify the parking lot dimensions:')
# Input needs to be integers
x_dim = np.intp(input('In x-direction:'))
y_dim = np.intp(input('In y-direction:'))
xy_dim = np.array([x_dim,y_dim])
return xy_dim
def gdim_frm_xydim(xy_dim):
"""
Obtain the parking lot dimension for the grid index from xy_dim
return 'g_dim'
"""
g_dim = np.intp(xy_dim[0]*xy_dim[1])
return g_dim
def input_pkgidx(g_dim):
"""
Specify the parking spots index by the user
return 1*pk_dim np.array 'pk_g_idx' where pk_dim is the number of spots
"""
#print('Please specify the num of parking spots:')
pk_dim = np.int(input('Please specify the num of parking spots:'))
while pk_dim >= g_dim:
print('Too many parking spots!')
pk_dim = np.int(input('Please specify the num of parking spots:'))
pk_g_idx = -np.ones(pk_dim, dtype = int)
for idx in range(pk_dim):
print('Input as grid index ranging from 0 to',g_dim-1)
spot_idx = np.int(input())
while (spot_idx < 0) or (spot_idx >= g_dim):
print('Invalid input!')
print('Input as grid index ranging from 0 to',g_dim-1)
spot_idx = np.int(input())
while spot_idx in pk_g_idx:
print('Repeated input!')
print('Input as grid index ranging from 0 to',g_dim-1)
spot_idx = np.int(input())
while (spot_idx < 0) or (spot_idx >= g_dim):
print('Invalid input!')
print('Input as grid index ranging from 0 to',g_dim-1)
spot_idx = np.int(input())
pk_g_idx[idx] = spot_idx
pk_g_idx.sort()
return pk_g_idx
def input_target_speed():
"""
Specify the desired target speed in km/h
return the desired target speed in km/h
"""
target_speed = np.float32(input('Please specify the desired speed in km/h: '))/3.6
return target_speed
def gidx_frm_xycrd(xy_crd, xy_dim):
"""
Obtain the grid idex from grid xy_cord
return 'g_idx', starting from 0. Example for a 3*2 grid
1 | 3 | 5
---------
0 | 2 | 4
"""
g_idx = np.intp((xy_crd[0]-1)*xy_dim[1]+xy_crd[1]-1)
return g_idx
def xycrd_frm_gidx(g_idx, xy_dim):
"""
Obtain the xy_cord from grid_idx
return 'xy_cord', starting from [1,1]. Example for a 3*2 grid
[1,2] | [2,2] | [3,2]
---------------------
[1,1] | [2,1] | [3,1]
"""
    x_crd = np.floor_divide(g_idx, xy_dim[1])
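    # NOTE: the source file is truncated here. Below is a hedged completion consistent
    # with gidx_frm_xycrd() and the docstring above (coordinates start at [1, 1]); the
    # "+ 1" offsets are inferred, not taken from the original code.
    x_crd = x_crd + 1
    y_crd = np.mod(g_idx, xy_dim[1]) + 1
    xy_crd = np.array([x_crd, y_crd])
    return xy_crd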
import numpy as np
from glob import glob
import os
import json
from neuralparticles.tools.param_helpers import *
from neuralparticles.tools.data_helpers import particle_radius
from neuralparticles.tools.shell_script import *
from neuralparticles.tools.uniio import writeParticlesUni, writeNumpyRaw, readNumpyOBJ, writeNumpyOBJ, readParticlesUni, writeUni
from neuralparticles.tools.particle_grid import ParticleIdxGrid
from neuralparticles.tensorflow.losses.tf_approxmatch import approx_vel, emd_loss
from scipy import optimize
import random
import math
from collections import OrderedDict
import time
import imageio
import keras.backend as K
def _approx_vel(pos, npos, h=0.5, it=1):
"""cost = np.linalg.norm(np.expand_dims(pos, axis=1) - np.expand_dims(npos, axis=0), axis=-1)
idx = optimize.linear_sum_assignment(cost)
vel = np.zeros_like(pos)
vel[idx[0]] = npos[idx[1]] - pos[idx[0]]"""
vel = K.eval(approx_vel(K.constant(np.expand_dims(pos, 0)), K.constant(np.expand_dims(npos, 0))))[0]
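    # Smooth the matched velocities: each particle's velocity is repeatedly averaged over
    # its neighbours, weighted by exp(-distance / h), for `it` iterations.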
dist = np.linalg.norm(np.expand_dims(pos, axis=0) - np.expand_dims(pos, axis=1), axis=-1)
dist = np.exp(-dist/h)
w = np.clip(np.sum(dist, axis=1, keepdims=True), 1, 10000)
for i in range(it):
vel = np.dot(dist, vel)/w
return vel
def project(n, v):
return v - np.dot(n,v) * n
def deviation(n, v0, v1):
t = project(n, v0)
return t/np.dot(t,v0)
def viewWorldM(rot, pos):
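    # Build a 3x4 view matrix [R | -pos]; the rotation block follows the standard
    # quaternion-to-rotation-matrix formula, with rot apparently ordered as (w, x, y, z).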
m = np.zeros((3,4))
m[0,0] = 1 - 2*rot[2]**2 - 2*rot[3]**2
m[0,1] = 2*rot[1]*rot[2] - 2*rot[3]*rot[0]
m[0,2] = 2*rot[1]*rot[3] + 2*rot[2]*rot[0]
m[1,0] = 2*rot[1]*rot[2] + 2*rot[3]*rot[0]
m[1,1] = 1 - 2*rot[1]**2 - 2*rot[3]**2
m[1,2] = 2*rot[2]*rot[3] - 2*rot[1]*rot[0]
m[2,0] = 2*rot[1]*rot[3] - 2*rot[2]*rot[0]
m[2,1] = 2*rot[2]*rot[3] + 2*rot[1]*rot[0]
m[2,2] = 1 - 2*rot[1]**2 - 2*rot[2]**2
m[0:3,3] = -pos
return m
A_l = np.array([
[+1.,+0.,+0.,+0.],
[+0.,+0.,+1.,+0.],
[-3.,+3.,-2.,-1.],
[+2.,-2.,+1.,+1.]
])
A_r = np.array([
[+1.,+0.,-3.,+2.],
[+0.,+0.,+3.,-2.],
[+0.,+1.,-2.,+1.],
[+0.,+0.,-1.,+1.]
])
data_path = getParam("data", "data/")
mesh_path = getParam("mesh", "")
config_path = getParam("config", "config/version_00.txt")
debug = int(getParam("debug", 0)) != 0
res = int(getParam("res", -1))
eval = int(getParam("eval", 0)) != 0
test = int(getParam("test", 0)) != 0
t_end = int(getParam("t_end", -1))
gpu = getParam("gpu", "-1")
min_v = np.asarray(getParam("min_v", "-2,-2,2").split(","), dtype="float32")
max_v = np.asarray(getParam("max_v", "2,2,-2").split(","), dtype="float32")
scale = np.abs(max_v - min_v)
checkUnusedParams()
if gpu != "-1":
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
if mesh_path == "":
mesh_path = data_path
with open(config_path, 'r') as f:
config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['data'], 'r') as f:
data_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['preprocess'], 'r') as f:
pre_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['train'], 'r') as f:
train_config = json.loads(f.read())
sub_res = data_config['sub_res']
dim = data_config['dim']
vert_area_ratio = sub_res ** 2
if res < 0:
res = data_config['res']
bnd = data_config['bnd']
min_n = pre_config['min_n']
factor = pre_config['factor']
factor_d = math.pow(factor,1/dim)
lres = int(res/factor_d)
search_r = res/lres * (1/sub_res) * 0.77 if factor > 1 else 0
off = data_config['data_count'] if eval else 0
discretize = data_config['disc']
max_z = data_config['max_z']
t_int = data_config['t_int']
culling = data_config["cull"]
scan = data_config["scan"]
random.seed(data_config['seed'])
np.random.seed(data_config['seed'])
if test:
src_path = "%sreal/%s_%s_" % (data_path, data_config['prefix'], data_config['id']) + "d%03d_%03d"
if not os.path.exists(data_path + "real/"):
os.makedirs(data_path + "real/")
t_int = 1
obj_cnt = len(glob(mesh_path + "*"))
else:
ref_path = "%sreference/%s_%s_" % (data_path, data_config['prefix'], data_config['id']) + "d%03d_%03d"
src_path = "%ssource/%s_%s-%s_" % (data_path, data_config['prefix'], data_config['id'], pre_config['id']) + "d%03d_var00_%03d"
if not os.path.exists(data_path + "reference/"):
os.makedirs(data_path + "reference/")
if not os.path.exists(data_path + "source/"):
os.makedirs(data_path + "source/")
obj_cnt = len(glob(mesh_path + "objs/*"))
frame_cnt = data_config['frame_count'] if t_end < 0 else t_end
#if (eval and obj_cnt != data_config['test_count']) or (not eval and obj_cnt != data_config['data_count']):
# print("Mismatch between obj count and 'data_count'/'test_count' in config file!")
# exit()
if debug:
ref_path = data_path + "debug/ref/d%03d_%03d"
src_path = data_path + "debug/src/d%03d_%03d"
if not os.path.exists(data_path + "debug/ref/"):
os.makedirs(data_path + "debug/ref/")
if not os.path.exists(data_path + "debug/src/"):
os.makedirs(data_path + "debug/src/")
obj_cnt = 1
frame_cnt = 3
for d in range(obj_cnt):
print("Load dataset %d/%d" % (d+1, obj_cnt))
cam_cnt = 1
if scan:
scan_path = mesh_path + "scans/%04d/" % d
with open(scan_path + "cam_data.json") as f:
cam_data = json.loads(f.read())
cam_cnt = len(cam_data['transform'])
near, width, height = cam_data['near'], cam_data['width'], cam_data['height']
scan_path += "%04d"
if not test:
obj_path = mesh_path + "objs/%04d/" % d + "%04d.obj"
vertices = None
normals = None
faces = None
for t in range(frame_cnt*t_int):
obj = readNumpyOBJ(obj_path%t)
if t == 0:
vertices = np.empty((frame_cnt*t_int, obj[0].shape[0], 3))
normals = np.empty((frame_cnt*t_int,), dtype=object)
faces = np.empty((frame_cnt*t_int, obj[2].shape[0], 2, 4),dtype=int)
vertices[t] = obj[0]
normals[t] = obj[1]
faces[t] = obj[2]
min_v = np.min(vertices,axis=(0,1))
max_v = np.max(vertices,axis=(0,1))
scale = np.abs(max_v - min_v)
vertices -= min_v + [0.5,0,0.5] * scale
vertices *= (res - 4 * bnd) / np.max(scale)
vertices += [res/2, bnd*2, res/2]
print(np.min(vertices,axis=(0,1)))
print(np.max(vertices,axis=(0,1)))
bary_coord = np.empty((len(faces[0]),),dtype=object)
data_cnt = 0
d_idx = None
prev_ref = None
prev_src = None
prev_idx = None
hdrsdf = OrderedDict([ ('dimX',lres),
('dimY',lres),
('dimZ',1 if dim == 2 else lres),
('gridType', 16),
('elementType',1),
('bytesPerElement',4),
('info',b'\0'*252),
('dimT',1),
('timestamp',(int)(time.time()*1e6))])
hdr = OrderedDict([ ('dim',0),
('dimX',res),
('dimY',res),
('dimZ',1 if dim == 2 else res),
('elementType',0),
('bytesPerElement',16),
('info',b'\0'*256),
('timestamp',(int)(time.time()*1e6))])
hdrv = hdr.copy()
hdrv['elementType'] = 1
hdrv['bytesPerElement'] = 12
for ci in range(cam_cnt):
print("Load cam: %d/%d" % (ci+1, cam_cnt))
if scan:
viewWorld = np.array(cam_data['transform'][ci])
if os.path.isfile(scan_path%ci + ".npz"):
scan_data = np.load(scan_path%ci + ".npz")['arr_0']
if discretize:
scan_data = np.floor(np.clip(scan_data / max_z, 0, 1) * 256)/256 * max_z
else:
scan_img_path = scan_path%ci + "/%04d.png"
tmp = max_z - imageio.imread(scan_img_path%0)[::-1,:,:1]/256 * max_z
scan_data = np.empty((frame_cnt*t_int, tmp.shape[0], tmp.shape[1], 1))
scan_data[0] = tmp
for t in range(1, frame_cnt*t_int):
scan_data[t] = max_z - imageio.imread(scan_img_path%t)[::-1,:,:1]/256 * max_z
viewV = np.dot(viewWorld[:3,:3], np.array([0,0,-1]))
viewV = np.dot(np.array([[1,0,0],[0,0,1],[0,-1,0]]), viewV)
for ti in range(1 if eval else t_int):
print("Time Intervall: %d/%d" % (ti+1, t_int))
d_idx = ti + (ci + d*cam_cnt)*(1 if eval else t_int) + off
print("Dataset: %d" % d_idx)
for t in range(frame_cnt):
t_off = ti+t*t_int
print("Load mesh: %d/%d (t_off: %d/%d)" % (t+1, frame_cnt, t_off, frame_cnt*t_int))
if not test:
if t == 0:
data_cnt = 0
for fi, f in enumerate(faces[t_off]):
v = vertices[t_off,f[0]]
area = np.linalg.norm(np.cross(v[1]-v[0], v[2]-v[0]))/2
area += np.linalg.norm(np.cross(v[2]-v[0], v[3]-v[0]))/2
par_cnt = vert_area_ratio * area
par_cnt = int(par_cnt) + int(np.random.random() < par_cnt % 1)
bary_coord[fi] = np.random.random((par_cnt, 2))
data_cnt += par_cnt
data = np.empty((data_cnt, 3))
di = 0
fltr_idx = np.zeros((data_cnt,), dtype="int32")
fltr_i = 0
for fi, f in enumerate(faces[t_off]):
v = vertices[t_off,f[0]]
n = normals[t_off][f[1]]
x01 = (v[1] - v[0])
x01 /= np.linalg.norm(x01,axis=-1,keepdims=True)
x32 = (v[2] - v[3])
x32 /= np.linalg.norm(x32,axis=-1,keepdims=True)
y12 = (v[2] - v[1])
y12 /= np.linalg.norm(y12,axis=-1,keepdims=True)
y03 = (v[3] - v[0])
y03 /= np.linalg.norm(y03,axis=-1,keepdims=True)
A_f = np.zeros((4,4,3))
A_f[0,0] = v[0]
A_f[0,1] = v[3]
A_f[1,0] = v[1]
A_f[1,1] = v[2]
A_f[0,2] = deviation(n[0], y03, x01)
A_f[0,3] = deviation(n[3], y03, x32)
A_f[1,2] = deviation(n[1], y12, x01)
A_f[1,3] = deviation(n[2], y12, x32)
A_f[2,0] = deviation(n[0], x01, -y03)
A_f[2,1] = deviation(n[3], x32, -y03)
A_f[3,0] = deviation(n[1], x01, -y12)
A_f[3,1] = deviation(n[2], x32, -y12)
A_f[2,2] = (A_f[2,1] - A_f[2,0])/np.linalg.norm(y03)
A_f[2,3] = (A_f[2,1] - A_f[2,0])/np.linalg.norm(y03)
                        A_f[3,2] = (A_f[3,1] - A_f[3,0])/np.linalg.norm(y12)
import numpy as np
import typing as ty
import importlib
import pytest
import pickle as pk
from ..submitter import Submitter
from ..core import Workflow
from ...mark import task, annotate
from .utils import identity
from ..helpers import hash_value
if importlib.util.find_spec("numpy") is None:
pytest.skip("can't find numpy library", allow_module_level=True)
@task
@annotate({"return": {"b": ty.Any}})
def arrayout(val):
return np.array([val, val])
def test_multiout(tmpdir):
""" testing a simple function that returns a numpy array"""
wf = Workflow("wf", input_spec=["val"], val=2)
wf.add(arrayout(name="mo", val=wf.lzin.val))
wf.set_output([("array", wf.mo.lzout.b)])
wf.cache_dir = tmpdir
with Submitter(plugin="cf", n_procs=2) as sub:
sub(runnable=wf)
results = wf.result(return_inputs=True)
assert results[0] == {"wf.val": 2}
assert np.array_equal(results[1].output.array, np.array([2, 2]))
def test_multiout_st(tmpdir):
""" testing a simple function that returns a numpy array, adding splitter"""
wf = Workflow("wf", input_spec=["val"], val=[0, 1, 2])
wf.add(arrayout(name="mo", val=wf.lzin.val))
wf.mo.split("val").combine("val")
wf.set_output([("array", wf.mo.lzout.b)])
wf.cache_dir = tmpdir
with Submitter(plugin="cf", n_procs=2) as sub:
sub(runnable=wf)
results = wf.result(return_inputs=True)
assert results[0] == {"wf.val": [0, 1, 2]}
for el in range(3):
assert np.array_equal(results[1].output.array[el], np.array([el, el]))
def test_numpy_hash_1():
"""hashing check for numeric numpy array"""
A = np.array([1, 2])
A_pk = pk.loads(pk.dumps(A))
assert (A == A_pk).all()
assert hash_value(A) == hash_value(A_pk)
def test_numpy_hash_2():
"""hashing check for numpy array of type object"""
A = np.array([["NDAR"]], dtype=object)
A_pk = pk.loads(pk.dumps(A))
assert (A == A_pk).all()
assert hash_value(A) == hash_value(A_pk)
def test_task_numpyinput_1(tmpdir):
""" task with numeric numpy array as an input"""
nn = identity(name="NA", x=[np.array([1, 2]), np.array([3, 4])])
nn.cache_dir = tmpdir
nn.split("x")
# checking the results
results = nn()
    assert (results[0].output.out == np.array([1, 2])).all()
import cv2
import torch
import numpy as np
import torchvision.transforms as transforms
from torch.autograd import Variable
from XuelangYOLOv1ByBobo.config import opt
def predict_result(model,image_name,root_path=''):
'''
    Predict detections for a single test image.
'''
result = []
image = cv2.imread(root_path+image_name)
h,w,_ = image.shape
    # resize the image to (224,224)
img = cv2.resize(image,(224,224))
    # convert to RGB
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
mean = (123,117,104)#RGB
    # subtract the channel means
img = img - np.array(mean,dtype=np.float32)
    # convert the image to a tensor
transform = transforms.Compose([transforms.ToTensor(),])
img = transform(img)
    # volatile is equivalent to requires_grad=False and stores no intermediate results; used for pure inference only
img = Variable(img[None,:,:,:],volatile=True)
if opt.use_gpu:
img = img.cuda()
pred = model(img) #1x7x7x30
pred = pred.cpu()
    # decode the network output into boxes, class indices and probabilities
boxes,cls_indexs,probs = decoder(pred)
    # iterate over all predicted candidate boxes for this image
for i,box in enumerate(boxes):
x1 = int(box[0]*w)
x2 = int(box[2]*w)
y1 = int(box[1]*h)
y2 = int(box[3]*h)
cls_index = cls_indexs[i]
cls_index = int(cls_index) # convert LongTensor to int
prob = probs[i]
prob = float(prob)
result.append([(x1,y1),(x2,y2),opt.VOC_CLASSES[cls_index],image_name,prob])
return result
def decoder(pred):
'''
    Decode the raw network output.
pred (tensor) 1x7x7x30
return (tensor) box[[x1,y1,x2,y2]] label[...]
'''
boxes=[]
cls_indexs=[]
probs = []
cell_size = 1./7
pred = pred.data
pred = pred.squeeze(0) #7x7x30
contain1 = pred[:,:,4].unsqueeze(2)
contain2 = pred[:,:,9].unsqueeze(2)
contain = torch.cat((contain1,contain2),2)
    mask1 = contain > 0.9  # above the confidence threshold
    mask2 = (contain==contain.max())  # we always select the best contain_prob, whether or not it is > 0.9
mask = (mask1+mask2).gt(0)
    min_score,min_index = torch.min(mask,2)  # each cell keeps only the box with the highest probability
for i in range(7):
for j in range(7):
for b in range(2):
index = min_index[i,j]
mask[i,j,index] = 0
if mask[i,j,b] == 1:
#print(i,j,b)
box = pred[i,j,b*5:b*5+4]
contain_prob = torch.FloatTensor([pred[i,j,b*5+4]])
                    xy = torch.FloatTensor([j,i])*cell_size  # upper-left corner of the cell
box[:2] = box[:2]*cell_size + xy # return cxcy relative to image
                    box_xy = torch.FloatTensor(box.size())  # convert [cx,cy,w,h] to [x1,y1,x2,y2]
box_xy[:2] = box[:2] - 0.5*box[2:]
box_xy[2:] = box[:2] + 0.5*box[2:]
max_prob,cls_index = torch.max(pred[i,j,10:],0)
boxes.append(box_xy.view(1,4))
cls_indexs.append(cls_index)
probs.append(contain_prob)
boxes = torch.cat(boxes,0) #(n,4)
probs = torch.cat(probs,0) #(n,)
cls_indexs = torch.cat(cls_indexs,0) #(n,)
keep = nms(boxes,probs)
return boxes[keep],cls_indexs[keep],probs[keep]
def nms(bboxes,scores,threshold=0.5):
'''
bboxes(tensor) [N,4]
scores(tensor) [N,]
'''
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
areas = (x2-x1) * (y2-y1)
_,order = scores.sort(0,descending=True)
keep = []
while order.numel() > 0:
i = order[0]
keep.append(i)
if order.numel() == 1:
break
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2-xx1).clamp(min=0)
h = (yy2-yy1).clamp(min=0)
inter = w*h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
ids = (ovr<=threshold).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids+1]
return torch.LongTensor(keep)
def voc_ap(rec, prec, use_07_metric=False):
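    # Compute average precision from recall/precision arrays. With use_07_metric the
    # 11-point interpolation of PASCAL VOC 2007 is used; otherwise the area under the
    # monotonically-corrected precision-recall curve is integrated.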
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct ap caculation
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(preds, target, VOC_CLASSES=opt.VOC_CLASSES, threshold=0.5, use_07_metric=False, ):
'''
preds {'cat':[[image_id,confidence,x1,y1,x2,y2],...],'dog':[[],...]}
target {(image_id,class):[[],]}
    For example:
preds = {
'cat': [['image01', 0.9, 20, 20, 40, 40], ['image01', 0.8, 20, 20, 50, 50], ['image02', 0.8, 30, 30, 50, 50]],
'dog': [['image01', 0.78, 60, 60, 90, 90]]}
target = {('image01', 'cat'): [[20, 20, 41, 41]], ('image01', 'dog'): [[60, 60, 91, 91]],
('image02', 'cat'): [[30, 30, 51, 51]]}
'''
aps = []
    # iterate over all classes
for i, class_ in enumerate(VOC_CLASSES):
pred = preds[class_] # [[image_id,confidence,x1,y1,x2,y2],...]
        if len(pred) == 0:  # corner case: not a single detection for this class
ap = -1
print('---class {} ap {}---'.format(class_, ap))
aps += [ap]
break
# print(pred)
image_ids = [x[0] for x in pred]
confidence = np.array([float(x[1]) for x in pred])
BB = np.array([x[2:] for x in pred])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
npos = 0.
for (key1, key2) in target:
if key2 == class_:
                npos += len(target[(key1, key2)])  # count the ground-truth positives for this class here so none are missed
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d, image_id in enumerate(image_ids):
            bb = BB[d]  # predicted box
if (image_id, class_) in target:
BBGT = target[(image_id, class_)] # [[],]
for bbgt in BBGT:
# compute overlaps
# intersection
ixmin = np.maximum(bbgt[0], bb[0])
iymin = np.maximum(bbgt[1], bb[1])
ixmax = np.minimum(bbgt[2], bb[2])
iymax = np.minimum(bbgt[3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
union = (bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (bbgt[2] - bbgt[0] + 1.) * (
bbgt[3] - bbgt[1] + 1.) - inters
if union == 0:
print(bb, bbgt)
overlaps = inters / union
if overlaps > threshold:
tp[d] = 1
                        BBGT.remove(bbgt)  # this ground-truth box is now matched and cannot be matched again
if len(BBGT) == 0:
                            del target[(image_id, class_)]  # delete keys that have no boxes left
break
fp[d] = 1 - tp[d]
else:
fp[d] = 1
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
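        # NOTE: the source file is truncated at this point. The remainder of voc_eval
        # presumably computes the per-class AP from rec/prec and aggregates it; a hedged
        # sketch (an assumption, not the original code) would be:
        # ap = voc_ap(rec, prec, use_07_metric)
        # print('---class {} ap {}---'.format(class_, ap))
        # aps += [ap]
        # ...followed by reporting the mean AP over all classes, e.g. np.mean(aps).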
#!/usr/bin/env python
# coding: utf-8
# # Crypto Currency Analysis
#
# Cryptocurrency exchanges are websites that enable the purchase, sale, and exchange of crypto and traditional currencies. These exchanges serve the essential functions of providing liquidity for owners and establishing the relative value of these currencies. As of this writing (mid-2022), [it is estimated](https://www.statista.com/statistics/730876/cryptocurrency-maket-value/) that cryptocurrencies have a collective market capitalization of more than 2 trillion USD.
#
# The purpose of this notebook is to explore the efficiency of these exchanges by testing for arbitrage opportunities. An arbitrage exists on an exchange if, through a risk-free series of trades, a customer could realize a net profit. The efficient market hypothesis assumes any sustained arbitrage opportunities would be identified by investors and, as a result of their trading, quickly wiped out as prices reach a new equilibrium. In an efficient market, any arbitrage would be small and fleeting. Still, the market has to get to equilibrium somehow, so perhaps, with real-time data and rapid execution, a trader could put themselves in a position to profit from these fleeting market adjustments.
# ## Bibliographic Notes
#
# Crytocurrency markets are still a relatively new and relatively few academic papers are available that specifically address arbitrage on those markets. Early studies, such as the following, reported periods of large, recurrent arbitrage opportunities that exist across exchanges, and that can persist for several days or weeks.
#
# > <NAME>., & <NAME>. (2020). Trading and arbitrage in cryptocurrency markets. Journal of Financial Economics, 135(2), 293-319.
#
# Subsequent work reports these prices differentials do exist, but only at a fraction of the values previously reported, and only for fleeting periods of time.
#
# > <NAME>., & <NAME>. (2020). Arbitrage in the Market for Cryptocurrencies. Available at SSRN 3606053. https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3606053
#
# The use of network algorithms to identify cross-exchange arbitrage has appeared in the academic literature, and in numerous web sites demonstrating optimization and network applications. Representative examples are cited below.
#
# > <NAME>., <NAME>., & <NAME>. (2021, September). JACK THE RIPPLER: Arbitrage on the Decentralized Exchange of the XRP Ledger. In 2021 3rd Conference on Blockchain Research & Applications for Innovative Networks and Services (BRAINS) (pp. 1-2). IEEE. https://arxiv.org/pdf/2106.16158.pdf
#
# > <NAME>., & <NAME>. (2022). Network analysis on Bitcoin arbitrage opportunities. The North American Journal of Economics and Finance, 59, 101562. https://doi.org/10.1016/j.najef.2021.101562
#
# > <NAME>., & <NAME>. (2022). Dataset for Bitcoin arbitrage in different cryptocurrency exchanges. Data in Brief, 40, 107731.
#
# The work in this notebook is related to materials found in the following web resources.
#
# > https://anilpai.medium.com/currency-arbitrage-using-bellman-ford-algorithm-8938dcea56ea
#
# > [Crypto Trading and Arbitrage Identification Strategies](https://nbviewer.org/github/rcroessmann/sharing_public/blob/master/arbitrage_identification.ipynb)
#
# A more complete analysis of trading and exploiting arbitrage opportunities in decentralized finance markets is available in the following paper and thesis.
#
# > <NAME>. An Exploration of Novel Trading and Arbitrage Methods within Decentralised Finance. https://www.scss.tcd.ie/Donal.OMahony/bfg/202021/StephenByrneDissertation.pdf
#
# > <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Intelligent System for Arbitrage Situations Searching in the Cryptocurrency Market. In CEUR Workshop Proceedings (pp. 407-440). http://ceur-ws.org/Vol-2917/paper32.pdf
#
# In addition to the analysis of arbitrage opportunities, convex optimization may also have an important role in the development of trading algorithms for cryptocurrency exchanges.
#
# > <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2021). Constant function market makers: Multi-asset trades via convex optimization. arXiv preprint arXiv:2107.12484. https://baincapitalcrypto.com/constant-function-market-makers-multi-asset-trades-via-convex-optimization/ and https://arxiv.org/pdf/2107.12484.pdf
#
#
#
#
# ## Installations and Imports
#
# This notebook requires multiple libraries. The following cell performs the required installations on Google Colab. To run the notebook on your own device you will need to install the `pyomo`, `ccxt`, and `graphviz` Python libraries, the graphviz executables, and a linear solver for Pyomo.
# In[20]:
import sys
if "google.colab" in sys.modules:
get_ipython().system('pip install -q ccxt')
get_ipython().system('pip install -q netgraph')
get_ipython().system('wget -N -q https://raw.githubusercontent.com/jckantor/MO-book/main/tools/install_on_colab.py ')
get_ipython().run_line_magic('run', 'install_on_colab.py')
# In[21]:
import os
from time import time
from timeit import default_timer as timer
import numpy as np
import pandas as pd
import pyomo.environ as pyo
# ## Cryptocurrency Exchanges
#
# The [open-source library `ccxt`](https://github.com/ccxt/ccxt) currently supports real-time APIs for 114 exchanges on which cryptocurrencies are traded. Here we import the library and list current exchanges supported by `ccxt`.
# In[22]:
import ccxt
print(ccxt.exchanges)
# ## Exchange markets and symbols
#
# Each of the exchanges supported by `ccxt` offers multiple markets, each market consisting of trade between two (or occasionally more) currencies. `ccxt` labels each market with a symbol and a market id. The market id is used for HTTP request-response purposes and is not relevant to this analysis. The symbol, however, is common across exchanges and suitable for arbitrage and other cross-exchange analyses.
#
# Each symbol is an upper case string with names for a pair of traded currencies separated by a slash. The first name is the base currency, the second name is the quote currency.
#
# The following cell creates a directed graph to visualize the markets available on a single exchange. The directed graph consists of nodes representing currencies and edges representing trading between those currencies. The symbol for each market forms an edge with its source at the quote currency and its destination at the base currency.
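# For instance (illustrative only), the symbol "BTC/USD" splits into base currency
# "BTC" and quote currency "USD", so that market becomes an edge directed from USD
# to BTC in the graph constructed below.
base, quote = "BTC/USD".split("/")
print(f"base = {base}, quote = {quote}")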
# In[29]:
import ccxt
import matplotlib.pyplot as plt
import networkx as nx
from netgraph import Graph
# global variables used in subsequent cells
exchange = ccxt.binanceus()
markets = exchange.load_markets()
symbols = exchange.symbols
def symbols_to_dg(symbols, in_degree=1):
dg = nx.DiGraph()
for base, quote in [symbol.split("/") for symbol in symbols]:
dg.add_edge(quote, base)
for node in dg.nodes():
if dg.out_degree(node) > 0:
dg.nodes[node]["color"] = "gold"
else:
dg.nodes[node]["color"] = "lightblue"
remove_nodes = []
for node in [node for node in dg.nodes() if dg.out_degree(node) == 0]:
if dg.in_degree(node) <= in_degree:
remove_nodes.append(node)
dg.remove_nodes_from(remove_nodes)
return dg
dg_symbols = symbols_to_dg(symbols, 2)
# In[30]:
def netgraph_dg(dg):
fig = plt.figure(figsize=(12, 18))
Graph(
dg,
arrows=True,
node_layout="dot",
node_labels=True,
node_size=3,
node_color={node: dg.nodes[node]["color"] for node in dg.nodes()},
edge_width=0.5,
edge_alpha=0.4,
)
netgraph_dg(dg_symbols)
# Nodes of a directed graph are characterized by incoming and outgoing edges. A node's in-degree refers to the number of incoming edges, and its out-degree to the number of outgoing edges. In this case, nodes with outgoing edges are highlighted because they represent currencies used to quote the price of other currencies. The remaining currency nodes have only incoming edges. Nodes with only one incoming edge are not candidates for arbitrage. The parameter `in_degree` specifies a minimum threshold value for these nodes to be retained for further analysis.
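# A quick sanity check of the statement above (assumes dg_symbols from the earlier cell):
# the currencies used to quote other currencies are exactly the nodes with positive out-degree.
quote_currencies = sorted(n for n in dg_symbols.nodes() if dg_symbols.out_degree(n) > 0)
print("quote currencies:", quote_currencies)
print("largest in-degree:", max(d for _, d in dg_symbols.in_degree()))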
# In[31]:
def draw_dg(dg):
print(f"Number of nodes = {len(dg.nodes()):3d}")
print(f"Number of edges = {len(dg.edges()):3d}")
fig = plt.figure(figsize=(12, 12))
pos = nx.circular_layout(dg)
nx.draw(
dg,
pos,
with_labels=True,
node_color=[dg.nodes[node]["color"] for node in dg.nodes()],
node_size=1000,
font_size=8,
arrowsize=15,
connectionstyle="arc3, rad=0.1",
)
# nx.draw_networkx_edge_labels(
# G, pos, edge_labels={(src, dst): f"{src}/{dst}" for src, dst in dg.edges()}
# )
draw_dg(dg_symbols)
# ## An Exchange Order Book
# A currency exchange order book presents a real-time inventory of trading orders.
#
# A **bid** is an order to buy some amount of the base currency at a price given in the quote currency. The buyer will receive the base currency if a transaction occurs. The price paid may be less than the bid price if the exchange matches the bid to a previous offer to sell at a lower price. The size of the transaction is specified in terms of the base currency that is bought or sold.
#
# An **ask** is an offer to sell an amount of the base currency at a quoted price. If a transaction occurs, then the seller receives the quote currency at a unit price that may be higher than the ask if the exchange matches the ask order to a higher bid.
#
# The exchange order book maintains a list of all active orders for all symbols traded on the exchange. The highest bid will be below the lowest ask. Incoming bids above the lowest ask, or incoming asks below the highest bid, will be matched and the corresponding transactions executed following exchange rules.
#
# The following cell fetches the highest bid and lowest ask from the order book for each trading symbol retained in the exchange graph.
# In[32]:
import pandas as pd
def fetch_order_book(dg):
# get trading symbols from exchange graph
trade_symbols = ["/".join([base, quote]) for quote, base in dg.edges()]
def fetch_order_book_symbol(symbol, limit=1, exchange=exchange):
"""return order book data for a specified symbol"""
start_time = timer()
result = exchange.fetch_order_book(symbol, limit)
result["base"], result["quote"] = symbol.split("/")
result["run_time"] = timer() - start_time
result["timestamp"] = exchange.milliseconds()
if result["bids"]:
result["bid_price"] = result["bids"][0][0]
result["bid_volume"] = result["bids"][0][1]
if result["asks"]:
result["ask_price"] = result["asks"][0][0]
result["ask_volume"] = result["asks"][0][1]
return result
# fetch order book data and store in a dictionary
order_book = {symbol: fetch_order_book_symbol(symbol) for symbol in trade_symbols}
# convert to pandas dataframe
order_book = pd.DataFrame(order_book).T
order_book.drop(columns=["datetime", "symbol"], inplace=True)
order_book["timestamp"] = pd.to_datetime(order_book["timestamp"], unit="ms")
return order_book
order_book = fetch_order_book(dg_symbols)
display(order_book)
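# Illustration only: the relative spread implied by the top of each order book.
# The bid_price and ask_price columns come from the dataframe above; symbols with a
# missing bid or ask simply produce NaN entries here.
ask = pd.to_numeric(order_book["ask_price"], errors="coerce")
bid = pd.to_numeric(order_book["bid_price"], errors="coerce")
relative_spread = 100 * (ask - bid) / ask
display(relative_spread.rename("spread (%)").sort_values(ascending=False).head())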
# ## Order Book as a Directed Graph
# Here we visualize the order book as a directed graph. Each order for a particular symbol is represented as an edge. Consider a symbol with base currency $b$ and quote currency $q$, with a corresponding trading symbol $b/q$.
#
# The appearance of a sell order for symbol $b/q$ in the order book presents an opportunity to purchase an amount of currency $b$ at the specified ask price. This opportunity is represented on the directed graph by a directed edge from the quote currency to the base currency with a 'conversion' value $a_{q\rightarrow b}$ equal to the inverse of the ask price. The conversion means that one unit of the quote currency can be converted to $a_{q\rightarrow b}$ units of the base currency. The capacity of the edge is the ask volume multiplied by the ask price.
#
# A buy order for symbol $b/q$ presents an opportunity to sell an amount of currency $b$ at the specified bid price. This is represented by a directed edge from the base currency to the quote currency with a conversion value $a_{b\rightarrow q}$ equal to the bid price. The conversion means that one unit of the base currency can be converted to $a_{b\rightarrow q}$ units of the quote currency. The capacity of the edge is equal to the bid volume.
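# A worked example of the conversions just described, using invented numbers: suppose
# the BTC/USD book shows an ask of 20,000 USD for 0.5 BTC and a bid of 19,990 USD for 0.4 BTC.
ask_price, ask_volume = 20_000.0, 0.5
bid_price, bid_volume = 19_990.0, 0.4
a_usd_to_btc = 1 / ask_price             # ask order: edge USD -> BTC, one USD buys 5e-05 BTC
cap_usd_to_btc = ask_volume * ask_price  # that edge can absorb 10,000 USD
a_btc_to_usd = bid_price                 # bid order: edge BTC -> USD, one BTC sells for 19,990 USD
cap_btc_to_usd = bid_volume              # that edge can absorb 0.4 BTC
print(a_usd_to_btc, cap_usd_to_btc, a_btc_to_usd, cap_btc_to_usd)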
# In[33]:
# dictionary of edges for (src, dst) tuples
# type: 'bid' or 'ask'
# conv: 1 unit of src currency produces conv units of dst currency
# log10_conv: log10 of conv
def order_book_to_dg(order_book):
edges = dict()
for symbol in order_book.index:
#
if not np.isnan(order_book.at[symbol, "bid_volume"]):
src = order_book.at[symbol, "base"]
if src == "USD":
src = "USD-SRC"
dst = order_book.at[symbol, "quote"]
if dst == "USD":
dst = "USD-DST"
edges[(src, dst)] = {
"type": "bid",
"conv": order_book.at[symbol, "bid_price"],
"log10_conv": np.log10(order_book.at[symbol, "bid_price"]),
"capacity": order_book.at[symbol, "bid_volume"],
}
        if not np.isnan(order_book.at[symbol, "ask_volume"]):
            src = order_book.at[symbol, "quote"]
            if src == "USD":
                src = "USD-SRC"
            dst = order_book.at[symbol, "base"]
            if dst == "USD":
                dst = "USD-DST"
            # an ask lets one unit of the quote currency buy 1/ask_price units of
            # the base currency; the edge capacity in quote units is volume * price
            edges[(src, dst)] = {
                "type": "ask",
                "conv": 1.0 / order_book.at[symbol, "ask_price"],
                "log10_conv": np.log10(1.0 / order_book.at[symbol, "ask_price"]),
                "capacity": order_book.at[symbol, "ask_volume"]
                * order_book.at[symbol, "ask_price"],
            }
"""
Tests for SimpleFFCEngine
"""
from __future__ import division
from unittest import TestCase
from itertools import product
from numpy import (
full,
isnan,
nan,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
MultiIndex,
rolling_mean,
Series,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from testfixtures import TempDirectory
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.synthetic import (
ConstantLoader,
MultiColumnLoader,
NullAdjustmentReader,
SyntheticDailyBarWriter,
)
from zipline.data.ffc.frame import (
DataFrameFFCLoader,
MULTIPLY,
)
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
USEquityPricingLoader,
)
from zipline.finance.trading import TradingEnvironment
from zipline.modelling.engine import SimpleFFCEngine
from zipline.modelling.factor import TestingFactor
from zipline.modelling.factor.technical import (
MaxDrawdown,
SimpleMovingAverage,
)
from zipline.utils.lazyval import lazyval
from zipline.utils.test_utils import (
make_rotating_asset_info,
make_simple_asset_info,
product_upper_triangle,
check_arrays,
)
class RollingSumDifference(TestingFactor):
window_length = 3
inputs = [USEquityPricing.open, USEquityPricing.close]
def from_windows(self, open, close):
return (open - close).sum(axis=0)
def assert_product(case, index, *levels):
"""Assert that a MultiIndex contains the product of `*levels`."""
case.assertIsInstance(index, MultiIndex, "%s is not a MultiIndex" % index)
case.assertEqual(set(index), set(product(*levels)))
class ConstantInputTestCase(TestCase):
def setUp(self):
self.constants = {
# Every day, assume every stock starts at 2, goes down to 1,
# goes up to 4, and finishes at 3.
USEquityPricing.low: 1,
USEquityPricing.open: 2,
USEquityPricing.close: 3,
USEquityPricing.high: 4,
}
self.assets = [1, 2, 3]
self.dates = date_range('2014-01-01', '2014-02-01', freq='D', tz='UTC')
self.loader = ConstantLoader(
constants=self.constants,
dates=self.dates,
assets=self.assets,
)
self.asset_info = make_simple_asset_info(
self.assets,
start_date=self.dates[0],
end_date=self.dates[-1],
)
environment = TradingEnvironment()
environment.write_data(equities_df=self.asset_info)
self.asset_finder = environment.asset_finder
def test_bad_dates(self):
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
msg = "start_date must be before end_date .*"
with self.assertRaisesRegexp(ValueError, msg):
engine.factor_matrix({}, self.dates[2], self.dates[1])
with self.assertRaisesRegexp(ValueError, msg):
engine.factor_matrix({}, self.dates[2], self.dates[2])
def test_single_factor(self):
loader = self.loader
finder = self.asset_finder
assets = self.assets
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
result_shape = (num_dates, num_assets) = (5, len(assets))
dates = self.dates[10:10 + num_dates]
factor = RollingSumDifference()
result = engine.factor_matrix({'f': factor}, dates[0], dates[-1])
self.assertEqual(set(result.columns), {'f'})
assert_product(self, result.index, dates, finder.retrieve_all(assets))
assert_array_equal(
result['f'].unstack().values,
full(result_shape, -factor.window_length),
)
def test_multiple_rolling_factors(self):
loader = self.loader
finder = self.asset_finder
assets = self.assets
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
shape = num_dates, num_assets = (5, len(assets))
dates = self.dates[10:10 + num_dates]
short_factor = RollingSumDifference(window_length=3)
long_factor = RollingSumDifference(window_length=5)
high_factor = RollingSumDifference(
window_length=3,
inputs=[USEquityPricing.open, USEquityPricing.high],
)
results = engine.factor_matrix(
{'short': short_factor, 'long': long_factor, 'high': high_factor},
dates[0],
dates[-1],
)
self.assertEqual(set(results.columns), {'short', 'high', 'long'})
assert_product(self, results.index, dates, finder.retrieve_all(assets))
        # row-wise sum over an array whose values are all (2 - 3)
assert_array_equal(
results['short'].unstack().values,
full(shape, -short_factor.window_length),
)
assert_array_equal(
results['long'].unstack().values,
full(shape, -long_factor.window_length),
)
        # row-wise sum over an array whose values are all (2 - 4)
assert_array_equal(
results['high'].unstack().values,
full(shape, -2 * high_factor.window_length),
)
def test_numeric_factor(self):
constants = self.constants
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
num_dates = 5
dates = self.dates[10:10 + num_dates]
high, low = USEquityPricing.high, USEquityPricing.low
open, close = USEquityPricing.open, USEquityPricing.close
high_minus_low = RollingSumDifference(inputs=[high, low])
open_minus_close = RollingSumDifference(inputs=[open, close])
avg = (high_minus_low + open_minus_close) / 2
results = engine.factor_matrix(
{
'high_low': high_minus_low,
'open_close': open_minus_close,
'avg': avg,
},
dates[0],
dates[-1],
)
high_low_result = results['high_low'].unstack()
expected_high_low = 3.0 * (constants[high] - constants[low])
assert_frame_equal(
high_low_result,
DataFrame(
expected_high_low,
index=dates,
columns=self.assets,
)
)
open_close_result = results['open_close'].unstack()
expected_open_close = 3.0 * (constants[open] - constants[close])
assert_frame_equal(
open_close_result,
DataFrame(
expected_open_close,
index=dates,
columns=self.assets,
)
)
avg_result = results['avg'].unstack()
expected_avg = (expected_high_low + expected_open_close) / 2.0
assert_frame_equal(
avg_result,
DataFrame(
expected_avg,
index=dates,
columns=self.assets,
)
)
class FrameInputTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
day = cls.env.trading_day
cls.assets = Int64Index([1, 2, 3])
cls.dates = date_range(
'2015-01-01',
'2015-01-31',
freq=day,
tz='UTC',
)
asset_info = make_simple_asset_info(
cls.assets,
start_date=cls.dates[0],
end_date=cls.dates[-1],
)
cls.env.write_data(equities_df=asset_info)
cls.asset_finder = cls.env.asset_finder
@classmethod
def tearDownClass(cls):
del cls.env
del cls.asset_finder
def setUp(self):
self.dates = FrameInputTestCase.dates
self.assets = FrameInputTestCase.assets
@lazyval
def base_mask(self):
return self.make_frame(True)
def make_frame(self, data):
return DataFrame(data, columns=self.assets, index=self.dates)
def test_compute_with_adjustments(self):
dates, assets = self.dates, self.assets
low, high = USEquityPricing.low, USEquityPricing.high
apply_idxs = [3, 10, 16]
def apply_date(idx, offset=0):
return dates[apply_idxs[idx] + offset]
adjustments = DataFrame.from_records(
[
dict(
kind=MULTIPLY,
sid=assets[1],
value=2.0,
start_date=None,
end_date=apply_date(0, offset=-1),
apply_date=apply_date(0),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=3.0,
start_date=None,
end_date=apply_date(1, offset=-1),
apply_date=apply_date(1),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=5.0,
start_date=None,
end_date=apply_date(2, offset=-1),
apply_date=apply_date(2),
),
]
)
low_base = DataFrame(self.make_frame(30.0))
low_loader = DataFrameFFCLoader(low, low_base.copy(), adjustments=None)
# Pre-apply inverse of adjustments to the baseline.
high_base = DataFrame(self.make_frame(30.0))
high_base.iloc[:apply_idxs[0], 1] /= 2.0
high_base.iloc[:apply_idxs[1], 1] /= 3.0
high_base.iloc[:apply_idxs[2], 1] /= 5.0
high_loader = DataFrameFFCLoader(high, high_base, adjustments)
loader = MultiColumnLoader({low: low_loader, high: high_loader})
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
for window_length in range(1, 4):
low_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.low],
window_length=window_length,
)
high_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.high],
window_length=window_length,
)
bounds = product_upper_triangle(range(window_length, len(dates)))
for start, stop in bounds:
results = engine.factor_matrix(
{'low': low_mavg, 'high': high_mavg},
dates[start],
dates[stop],
)
self.assertEqual(set(results.columns), {'low', 'high'})
iloc_bounds = slice(start, stop + 1) # +1 to include end date
low_results = results.unstack()['low']
assert_frame_equal(low_results, low_base.iloc[iloc_bounds])
high_results = results.unstack()['high']
assert_frame_equal(high_results, high_base.iloc[iloc_bounds])
class SyntheticBcolzTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.first_asset_start = Timestamp('2015-04-01', tz='UTC')
cls.env = TradingEnvironment()
cls.trading_day = cls.env.trading_day
cls.asset_info = make_rotating_asset_info(
num_assets=6,
first_start=cls.first_asset_start,
frequency=cls.trading_day,
periods_between_starts=4,
asset_lifetime=8,
)
cls.all_assets = cls.asset_info.index
cls.all_dates = date_range(
start=cls.first_asset_start,
end=cls.asset_info['end_date'].max(),
freq=cls.trading_day,
)
cls.env.write_data(equities_df=cls.asset_info)
cls.finder = cls.env.asset_finder
cls.temp_dir = TempDirectory()
cls.temp_dir.create()
cls.writer = SyntheticDailyBarWriter(
asset_info=cls.asset_info[['start_date', 'end_date']],
calendar=cls.all_dates,
)
table = cls.writer.write(
cls.temp_dir.getpath('testdata.bcolz'),
cls.all_dates,
cls.all_assets,
)
cls.ffc_loader = USEquityPricingLoader(
BcolzDailyBarReader(table),
NullAdjustmentReader(),
)
@classmethod
def tearDownClass(cls):
del cls.env
cls.temp_dir.cleanup()
def test_SMA(self):
engine = SimpleFFCEngine(
self.ffc_loader,
self.env.trading_days,
self.finder,
)
dates, assets = self.all_dates, self.all_assets
window_length = 5
SMA = SimpleMovingAverage(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
results = engine.factor_matrix(
{'sma': SMA},
dates[window_length],
dates[-1],
)
raw_closes = self.writer.expected_values_2d(dates, assets, 'close')
expected_sma_result = rolling_mean(
raw_closes,
window_length,
min_periods=1,
)
        expected_sma_result[isnan(raw_closes)] = nan
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME> and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import numpy as np
from scipy.stats.stats import pearsonr
import torch
def nanpearsonr(real, pred):
'''Compute Pearson's correlation, omit NAN
Args:
real (ndarray): original value
pred (ndarray): predicted value
Returns:
ndarray: Correlation value
'''
n = real.shape[1]
res = np.zeros((n))
for i in range(n):
tmp = np.logical_not(np.isnan(real[:, i]))
res[i] = pearsonr(real[tmp, i], pred[tmp, i])[0]
return res
def cod_znormed(real, pred):
'''Compute COD (Coefficient of Determination) for z normed data
Args:
real (ndarray): original value
pred (ndarray): predicted value
Returns:
float: COD value
'''
tot = np.sum((real)**2, axis=-1)
res = np.sum((real - pred)**2, axis=-1)
return 1 - res / tot
def nancod_znormed(real, pred):
'''Compute COD (Coefficient of Determination) for z normed data, omit NAN
Args:
real (ndarray): original value
pred (ndarray): predicted value
Returns:
ndarray: COD value
'''
tot = np.nansum((real)**2, axis=-2)
res = np.nansum((real - pred)**2, axis=-2)
return 1 - np.divide(res, tot)
def split_tra_val_with_y(split, y_test):
'''split K subjects into training and validation
Args:
        split (ndarray): array that indicates K subjects
y_test (ndarray): test data to avoid same value after split train
and validation data
Returns:
Tuple: split array for training and validation data
'''
n = split.shape[0]
n_rng = 1
while n_rng != 0:
k = np.where(split == 0)[0]
m = k.shape[0]
m_tra = int(m * 0.8)
k = np.random.permutation(k)
split_tra = np.zeros((n))
split_tra[k[:m_tra]] = 1
split_tra = split_tra.astype(bool)
split_val = np.zeros((n))
split_val[k[m_tra:]] = 1
split_val = split_val.astype(bool)
y_test_tra = y_test[split_tra]
if np.unique(y_test_tra).shape[0] > 1:
n_rng = 0
else:
np.random.seed(100 + n_rng)
n_rng += 1
return split_tra, split_val
def mics_z_norm(train_y, valid_y, test_y=None):
'''z normalize y of training, validation and test set based on training set
Args:
train_y (ndarray): training y data
valid_y (ndarray): validation y data
test_y (ndarray, optional): testing y data
Returns:
Tuple: contains z-normed y data and std of training y data
'''
# subtract mean of y of training set
t_mu = np.nanmean(train_y, axis=0, keepdims=True)
train_y = train_y - t_mu
valid_y = valid_y - t_mu
    if test_y is not None:
test_y = test_y - t_mu
# divide std of y of training set
t_sigma = np.nanstd(train_y, axis=0)
if train_y.ndim == 2:
t_sigma_d = t_sigma[np.newaxis, :]
else:
t_sigma_d = t_sigma
if t_sigma == 0:
print('t_sigma is 0, pass divide std')
return [train_y, valid_y, test_y, t_sigma]
train_y = train_y / t_sigma_d
valid_y = valid_y / t_sigma_d
    if test_y is not None:
test_y = test_y / t_sigma_d
# return processed y and std for future MAE calculation
return [train_y, valid_y, test_y, t_sigma]
def mics_infer_metric(dataloader,
net,
criterion,
device,
t_sigma=None,
need_value=False,
output_size=1):
    '''Perform inference with net on data from dataloader and calculate
    metrics
Args:
dataloader: dataloader to load data for PyTorch framework
net: PyTorch deep learning network
criterion: criterion for loss calculation
device: torch device indicate which GPU is running
        t_sigma (float, optional): std of training y data, only used if sex is
            not the behavioral measure
need_value (bool, optional): whether return record of real and
predicted value
output_size (int, optional): size of network output
Returns:
Tuple: if t_sigma is not None, correlation, MAE and loss are returned.
            If t_sigma is None, accuracy and loss are returned. If need_value
set to True, tuple returned also returns record of real and
predicted y value alongside the metrics. If need_value is false,
only metrics are returned.
'''
# initialize variable for record
record_loss = 0.0
if t_sigma is None:
record_correct = 0.0 # count of correct prediction
record_total = 0.0 # count of total prediction
record_real = np.zeros((0))
record_pred = np.zeros((0, 2))
else:
record_real = np.zeros((0, output_size)) # real value
record_pred = np.zeros((0, output_size)) # prediction value
# perform inference
for (x, y) in dataloader:
x, y = x.to(device), y.to(device)
outputs = net(x)
loss = criterion(outputs, y)
record_loss += loss.item()
record_real = np.concatenate((record_real, y.data.cpu().numpy()),
axis=0)
record_pred = np.concatenate((record_pred, outputs.data.cpu().numpy()),
axis=0)
if t_sigma is None:
_, predicted = torch.max(outputs.data, 1)
record_total += y.size(0)
record_correct += (predicted == y.data).sum()
# metric calculation
loss = record_loss / len(dataloader)
if t_sigma is None:
aucc = record_correct.to(torch.float) / record_total
if need_value:
return aucc, loss, record_real, record_pred
else:
return aucc, loss
else:
corr = nanpearsonr(record_real, record_pred)
cod = nancod_znormed(record_real, record_pred)
mae = np.nanmean(np.abs(record_real - record_pred), 0) * t_sigma
if need_value:
return corr, cod, mae, loss, record_real, record_pred
else:
return corr, cod, mae, loss
def mics_log(model_name, out_path, metric='cor', index=None, **kwargs):
'''function to calculate the final result and save the record
Args:
model_name (str): name of network/model
out_path (str): path to save the log
metric (str, optional): metric to select best validation
index (int, optional): index of optimal epoch
**kwargs: record of training, validation and test value
Returns:
None
'''
if index is None:
val_record = kwargs['val_' + metric + '_record']
temp = np.mean(np.mean(val_record, axis=0), axis=1)
temp = np.convolve(temp, np.ones(3, dtype=int), 'valid') / 3
index = np.nanargmax(temp)
index = index + 1
print('\nBest validation at index: ', index)
val_cor_record = kwargs['val_cor_record']
val_cod_record = kwargs['val_cod_record']
val_mae_record = kwargs['val_mae_record']
# save record value for future use
file_str = model_name + '_base.npz'
name_str = os.path.join(out_path, file_str)
os.makedirs(out_path, exist_ok=True)
np.savez(name_str, **kwargs)
print('file saved:', name_str)
# get average result for validation and test data
print('Average validation corr:',
np.nanmean(np.nanmean(val_cor_record[:, index, :], axis=0)),
', COD:', np.nanmean(
np.nanmean(val_cod_record[:, index, :], axis=0)), ', MAE:',
          np.nanmean(np.nanmean(val_mae_record[:, index, :], axis=0)))
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://gist.github.com/ajdawson/dd536f786741e987ae4e
from copy import copy
import cartopy.crs as ccrs
import numpy as np
import shapely.geometry as sgeom
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
import os
### Define colorbar colors
champ = 255.
# tot. precipitable water (grey scale)
no1 = np.array([255,255,255])/champ
no2 = np.array([231,231,231])/champ
no3 = np.array([201,201,201])/champ
no4 = np.array([171,171,171])/champ
no5 = np.array([140,140,140])/champ
no6 = np.array([110,110,110])/champ
no7 = np.array([80,80,80])/champ
# 250 hPa wind speed (colored scale)
no11 = np.array([255,255,255])/champ
no12 = np.array([196,225,255])/champ
no13 = np.array([131,158,255])/champ
no14 = np.array([255,209,177])/champ
no15 = np.array([255,118,86])/champ
no16 = np.array([239,102,178])/champ
no17 = np.array([243,0,146])/champ
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def find_yx(lat, lon, point_lat, point_lon):
abs_lat = abs(lat - point_lat)
abs_lon = abs(lon - point_lon)
c = np.maximum(abs_lat, abs_lon)
y, x = np.where(c == c.min())
y = y[0]
x = x[0]
xx = lat[y, x].x
yy = lon[y, x].y
return(xx, yy)
def find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)],}
return sgeom.LineString(points[side])
def lambert_xticks(ax, ticks):
"""Draw ticks on the bottom x-axis of a Lambert Conformal projection."""
te = lambda xy: xy[0]
lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)
ax.xaxis.tick_bottom()
ax.set_xticks(xticks)
ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
def lambert_yticks(ax, ticks):
"""Draw ricks on the left y-axis of a Lamber Conformal projection."""
te = lambda xy: xy[1]
lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)
ax.yaxis.tick_left()
ax.set_yticks(yticks)
ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels])
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
"""Get the tick locations and labels for an axis of a Lambert Conformal projection."""
outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
axis = find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
def plt_Jet_Thick_MSLP(fnx, u_250, mslp, Z_thickness, pw, andenes_x, andenes_y):
projection = ccrs.LambertConformal(central_longitude =fnx.projection_lambert.longitude_of_central_meridian,
central_latitude =fnx.projection_lambert.latitude_of_projection_origin,
standard_parallels = fnx.projection_lambert.standard_parallel)
f, ax = plt.subplots(subplot_kw={'projection' : projection}, )
#ax.set_title('Ensemble mean %s')# %s' %(_dm.time))
ax.coastlines(resolution = '50m')
###################################################
levels_u = np.arange(40,120,10)
levels_th1 = np.arange(402,546,6)
levels_th2 = np.arange(546,650,6)
levels_p = np.arange(800,1100,4)
# levels_pw = np.arange(22,78,8)
# levels_pw = np.arange(14,62,8)
levels_pw = np.arange(0,62,8)
###################################################
# Plot contour lines for 250-hPa wind and fill
U_map = colors.ListedColormap([no11, no12, no13, no14, no15, no16, no17])
norm = colors.BoundaryNorm(boundaries = levels_u, ncolors=U_map.N)
_U_250 = u_250.plot.pcolormesh(ax = ax,
transform = projection,
levels = levels_u,
cmap = U_map,
norm = norm,
add_colorbar = False,
extend = 'both'
)
cb_U_250 = plt.colorbar(_U_250, ax=ax, orientation="vertical",extend='both',
shrink = 0.5)
cb_U_250.set_label(label='250$\,$hPa Wind (m$\,$s$^{-1}$)', #size='large',
weight='bold')
###################################################
# Plot MSL pressure every 4 hPa
CS_p = mslp.plot.contour(ax = ax,
transform = projection,
levels = levels_p,
colors = 'k',
linewidths = 1.8)
ax.clabel(CS_p, levels_p[::2],
inline=1, fmt='%1.0f', #fontsize=10
)
###################################################
# Plot the 1000-500 hPa thickness
CS_th1 = Z_thickness.where(Z_thickness < 546).plot.contour(ax = ax,
transform = projection,
levels = levels_th1,
colors = 'b',
linewidths = 2.,
linestyles = '--')
ax.clabel(CS_th1, levels_th1, inline = 1, fmt = '%1.0f')
try:
CS_th2 = Z_thickness.plot.contour(ax = ax,
transform = projection,
levels = levels_th2,
colors = 'r',
linewidths = 2.,
linestyles = '--')
ax.clabel(CS_th2, levels_th2, inline = 1, fmt = '%1.0f')
except (ValueError):
pass
# labels = ['line1']
# CS_th1.collections[0].set_label(labels[0])
# plt.legend(bbox_to_anchor=(1.1, 1.05))
###################################################
# Plot contourf for precipitable water
PW_map = colors.ListedColormap([no1, no2, no3, no4, no5, no6, no7])
PW_norm = colors.BoundaryNorm(boundaries = levels_pw, ncolors=PW_map.N)
_PW = pw.plot.pcolormesh(ax = ax,
transform = projection,
                             levels = levels_pw,
cmap = PW_map,
norm = PW_norm,
add_colorbar = False,
extend = 'both'
)
cb_PW = plt.colorbar(_PW, ax=ax, orientation="vertical",extend='both',
shrink = 0.5)
cb_PW.set_label(label='Precipitable water (m)', #size='large',
weight='bold')
###################################################
ax.plot([andenes_x], [andenes_y], color = 'red', marker = "^", transform=projection, markersize = 22 )
###################################################
map_design(f,ax)
def plt_700_humidity(fnx, geop, temp, RH,andenes_x, andenes_y):
projection = ccrs.LambertConformal(central_longitude =fnx.projection_lambert.longitude_of_central_meridian,
central_latitude =fnx.projection_lambert.latitude_of_projection_origin,
standard_parallels = fnx.projection_lambert.standard_parallel)
f, ax = plt.subplots(subplot_kw={'projection' : projection}, )
#ax.set_title('Ensemble mean %s')# %s' %(_dm.time))
ax.coastlines(resolution = '50m')
###################################################
# Geopotential
    levels_g = np.arange(200,300,4)
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 12 15:59:10 2021
@author: joelr
Numpy library study notes
"""
import numpy as np
my_array = np.array([[1, 2, 3, 4 , 5], [6, 7, 8, 9, 10 ]])
my_array = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
x = my_array.copy()
print(my_array)
print(my_array.shape[0]) # shows the number of elements along axis 0
print(np.arange(10)) # creates a sequential vector
print(my_array[1,1])
my_array = np.empty([2,5]) # creates an uninitialized array (arbitrary values)
for i in range(2):
for j in range(5):
my_array[i,j] = i*j
print(my_array[i,j])
my_array = np.zeros([2,5]) # creates a matrix of zeros
my_array = np.ones([2,5]) # creates a matrix of ones
aleatorio = np.random.random()
my_array = np.random.random([2,5])
print(my_array[1,0:4])
print(my_array[:,3])
print(my_array[1][2])
print(type(my_array))
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
soma = a+b
difference = a-b
product = a*b
quotient = a/b
matrix_product = a.dot(b)
for i in range(my_array.shape[0]):
for j in range(my_array.shape[1]):
for k in range(my_array.shape[2]):
print(my_array[i,j,k])
my_array = np.array([1, 2, 3, 4], ndmin=5)
print(my_array.ndim)
print(my_array[1, -1]) # a negative index accesses the end of the array
print(my_array[:,0:4:2])
print(my_array[:,-3:-1])
print(my_array[:,::2])
my_array2 = np.array([1, 2, 3, 4], dtype='S')
my_array2= np.array(['banana', 'maçã','3'])
print(my_array.dtype)
for i in my_array:
for j in i:
print(j)
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
for x in arr:
print("x represents the 2-D array:")
print(x)
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.concatenate((arr1, arr2))
print(arr)
arr1 = np.array([[1, 2], [3, 4]])
arr2 = np.array([[5, 6], [7, 8]])
arr = np.concatenate((arr1, arr2), axis=1)
print(arr)
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.stack((arr1, arr2), axis=1)
print(arr)
word = "apartamento"
for i in range(len(word)-1,-1,-1):
print(word[i])
np.array_split(my_array,2)
x = np.where(my_array == 4) # tuple with the row and column indices
x = np.where(my_array[0,:]%2 == 0) # positions of the even numbers
x = np.where(my_array[0,:]%2 == 1) # positions of the odd numbers
arr = np.array(['banana', 'cherry', 'apple'])
arr = np.array([True, False, True])
arr = np.array([[3, 2, 4], [5, 0, 1]])
print(np.sort(arr))
# Filters
arr = np.array([41, 42, 43, 44])#mask
x = [True, False, True, False]
newarr = arr[x]
print(newarr)
my_array = np.array([41, 42, 43, 44])
# Create an empty list
filter_arr = []
# go through each element in arr
for element in arr:
# if the element is higher than 42, set the value to True, otherwise False:
if element > 42:
filter_arr.append(True)
else:
filter_arr.append(False)
new_array = my_array[filter_arr]
print(filter_arr)
print(new_array)
arr = np.array([41, 42, 43, 44])
filter_arr = arr > 42
newarr = arr[filter_arr]
print(filter_arr)
print(newarr)
# Random commands
from numpy import random
x = random.randint(100) # random integer
x = random.rand()
x = random.rand(5)
x = random.rand(3,5)
x=random.randint(100, size=(5,5))
x = random.choice([3, 5, 7, 9]) # returns one value from the array
x = random.choice([3,4,5, 6,7,8], size=(3,5)) # builds the matrix by drawing numbers from the given set
# sets the probability of choosing each number
x = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(100))
x = random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(3, 5))
my_array = np.array([1, 2, 3, 4, 5])
random.shuffle(my_array) # shuffles the numbers in place, modifying the array
random.permutation(my_array) # shuffles the numbers, returning a new array
import matplotlib.pyplot as plt
import seaborn as sns
sns.distplot([0, 1, 2, 3, 4, 5], hist = True)
plt.show()
# Normal distribution (continuous)
# loc - (Mean) where the peak of the bell exists.
# scale - (Standard Deviation) how flat the graph distribution should be.
# size - The shape of the returned array.
x = random.normal(size=(100, 1000))
x = random.normal(loc=0, scale=5, size=(100, 100))
sns.distplot(x, hist = True)
plt.show()
# Binomial distribution (discrete counterpart of the normal)
# n - number of trials.
# p - probability of occurrence of each trial (e.g. for toss of a coin 0.5 each).
# size - The shape of the returned array.
x = random.binomial(n=10, p=0.5, size=10)
x = random.binomial(n=10, p=0.9, size=1000)
sns.distplot(x, hist=True, kde=False)
plt.show()
# comparison between the distributions
sns.distplot(random.normal(loc=50, scale=5, size=1000), hist=False, label='normal')
sns.distplot(random.binomial(n=100, p=0.5, size=1000), hist=False, label='binomial')
plt.show()
# Poisson Distribution is a Discrete Distribution.
# It estimates how many times an event can happen in a specified time. e.g. If someone eats twice a day what is probability he will eat thrice?
# It has two parameters:
# lam - rate or known number of occurences e.g. 2 for above problem.
# size - The shape of the returned array.
x = random.poisson(lam=2, size=10)
sns.distplot(random.poisson(lam=7, size=10000), kde=False)
plt.show()
# comparison between normal and poisson
sns.distplot(random.normal(loc=50, scale=7, size=1000), hist=False, label='normal')
sns.distplot(random.poisson(lam=50, size=1000), hist=False, label='poisson')
plt.show()
# Uniform Distribution
# Used to describe probability where every event has equal chances of occurring.
# E.g. Generation of random numbers.
# It has three parameters:
# a - lower bound - default 0.0.
# b - upper bound - default 1.0.
# size - The shape of the returned array.
sns.distplot(random.uniform(size=100), hist=False)
plt.show()
# Logistic Distribution
# Logistic Distribution is used to describe growth.
# Used extensively in machine learning in logistic regression, neural networks etc.
# It has three parameters:
# loc - mean, where the peak is. Default 0.
# scale - standard deviation, the flatness of distribution. Default 1.
# size - The shape of the returned array.
sns.distplot(random.logistic(size=1000), hist=False)
plt.show()
# gaussian vs logistic distribution
sns.distplot(random.normal(scale=2, size=1000), hist=False, label='normal')
sns.distplot(random.logistic(size=1000), hist=False, label='logistic')
plt.show()
# Pareto Distribution
# A distribution following Pareto's law i.e. 80-20 distribution (20% factors cause 80% outcome).
# It has two parameter:
# a - shape parameter.
# size - The shape of the returned array.
sns.distplot(random.pareto(a=2, size=1000), kde=False)
plt.show()
### Functions
# Adding elements between vectors
x = [1, 2, 3, 4] # list
y = [4, 5, 6, 7]
x = np.array([1, 2, 3])
y = np.array([5, 6, 7])
z = np.add(x, y)
z = np.sum([x,y])
z = np.subtract(x, y)
z = np.multiply(x, y)
z = np.divide(x, y)
z = np.power(x,y)
z = np.mod(x,y)
z = np.remainder(x,y)
z = np.divmod(x,y)
z = np.absolute(x,y)
print(z)
x = np.array([-1, 2.555, 3.9])
z = np.trunc(x)
z = np.fix(x)
z = np.around(x)
z = np.floor(x)
z = np.ceil(x)
x = np.arange(1,10) # does not include 10
z = np.log2(x)
z = np.log10(x)
z = np.log(x)
x = np.array([1, 2, 3])
z = np.prod(x)
z = np.prod([x,x])
# Finding LCM (Lowest Common Multiple)
num1 = 4
num2 = 6
x = np.lcm(num1, num2)
#!/usr/bin/env python3
# TAMV version 2.0RC1
# Python Script to align multiple tools on Jubilee printer with Duet3d Controller
# Using images from USB camera and finding circles in those images
#
# TAMV originally Copyright (C) 2020 <NAME> all rights reserved.
# TAMV 2.0 Copyright (C) 2021 <NAME> all rights reserved.
# Released under The MIT License. Full text available via https://opensource.org/licenses/MIT
#
# Requires OpenCV to be installed on Pi
# Requires running via the OpenCV installed python (that is why no shebang)
# Requires network connection to Duet based printer running Duet/RepRap V2 or V3
#
# GUI imports
from PyQt5.QtWidgets import (
QAction,
QApplication,
    QCheckBox,
QComboBox,
QDesktopWidget,
QDialog,
QDialogButtonBox,
QGridLayout,
QGroupBox,
QHBoxLayout,
QHeaderView,
QInputDialog,
QLabel,
QLineEdit,
QMainWindow,
QMenu,
QMenuBar,
QMessageBox,
QPushButton,
QSlider,
QSpinBox,
QStatusBar,
QStyle,
QTableWidget,
QTableWidgetItem,
QTextEdit,
QVBoxLayout,
QWidget
)
from PyQt5.QtGui import QPixmap, QImage, QPainter, QColor, QIcon
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread, QMutex, QPoint, QSize
# Core imports
import os
import sys
import cv2
import numpy as np
import math
import DuetWebAPI as DWA
import KlipperAPI as KA
from time import sleep, time
import datetime
import json
import time
# graphing imports
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as patches
from matplotlib.ticker import FormatStrFormatter
# styles
global style_green, style_red, style_disabled, style_orange
style_green = 'background-color: green; color: white;'
style_red = 'background-color: red; color: white;'
style_disabled = 'background-color: #cccccc; color: #999999; border-style: solid;'
style_orange = 'background-color: dark-grey; color: orange;'
class CPDialog(QDialog):
def __init__(self,
parent=None,
title='Set Controlled Point',
summary='<b>Instructions:</b><br>Jog until controlled point is centered in the window.<br>Click OK to save and return to main window.',
disabled = False):
super(CPDialog,self).__init__(parent=parent)
self.setWindowFlag(Qt.WindowContextHelpButtonHint,False)
self.setWindowTitle(title)
QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
self.buttonBox = QDialogButtonBox(QBtn)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.layout = QGridLayout()
self.layout.setSpacing(3)
# add information panel
self.cp_info = QLabel(summary)
# add jogging grid
self.buttons={}
buttons_layout = QGridLayout()
# X
self.button_x1 = QPushButton('-1')
self.button_x2 = QPushButton('-0.1')
self.button_x3 = QPushButton('-0.01')
self.button_x4 = QPushButton('+0.01')
self.button_x5 = QPushButton('+0.1')
self.button_x6 = QPushButton('+1')
# set X sizes
self.button_x1.setFixedSize(60,60)
self.button_x2.setFixedSize(60,60)
self.button_x3.setFixedSize(60,60)
self.button_x4.setFixedSize(60,60)
self.button_x5.setFixedSize(60,60)
self.button_x6.setFixedSize(60,60)
# attach actions
'''
self.button_x1.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X-1 G90'))
self.button_x2.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X-0.1 G90'))
self.button_x3.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X-0.01 G90'))
self.button_x4.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X0.01 G90'))
self.button_x5.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X0.1 G90'))
self.button_x6.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 X1 G90'))
'''
self.button_x1.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X-1','G90']))
self.button_x2.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X-0.1','G90']))
self.button_x3.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X-0.01','G90']))
self.button_x4.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X0.01','G90']))
self.button_x5.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X0.1','G90']))
self.button_x6.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 X1','G90']))
# add buttons to window
x_label = QLabel('X')
buttons_layout.addWidget(x_label,0,0)
buttons_layout.addWidget(self.button_x1,0,1)
buttons_layout.addWidget(self.button_x2,0,2)
buttons_layout.addWidget(self.button_x3,0,3)
buttons_layout.addWidget(self.button_x4,0,4)
buttons_layout.addWidget(self.button_x5,0,5)
buttons_layout.addWidget(self.button_x6,0,6)
# Y
self.button_y1 = QPushButton('-1')
self.button_y2 = QPushButton('-0.1')
self.button_y3 = QPushButton('-0.01')
self.button_y4 = QPushButton('+0.01')
self.button_y5 = QPushButton('+0.1')
self.button_y6 = QPushButton('+1')
# set X sizes
self.button_y1.setFixedSize(60,60)
self.button_y2.setFixedSize(60,60)
self.button_y3.setFixedSize(60,60)
self.button_y4.setFixedSize(60,60)
self.button_y5.setFixedSize(60,60)
self.button_y6.setFixedSize(60,60)
# attach actions
'''
self.button_y1.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y-1 G90'))
self.button_y2.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y-0.1 G90'))
self.button_y3.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y-0.01 G90'))
self.button_y4.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y0.01 G90'))
self.button_y5.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y0.1 G90'))
self.button_y6.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Y1 G90'))
'''
self.button_y1.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y-1','G90']))
self.button_y2.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y-0.1','G90']))
self.button_y3.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y-0.01','G90']))
self.button_y4.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y0.01','G90']))
self.button_y5.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y0.1','G90']))
self.button_y6.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Y1','G90']))
# add buttons to window
y_label = QLabel('Y')
buttons_layout.addWidget(y_label,1,0)
buttons_layout.addWidget(self.button_y1,1,1)
buttons_layout.addWidget(self.button_y2,1,2)
buttons_layout.addWidget(self.button_y3,1,3)
buttons_layout.addWidget(self.button_y4,1,4)
buttons_layout.addWidget(self.button_y5,1,5)
buttons_layout.addWidget(self.button_y6,1,6)
# Z
self.button_z1 = QPushButton('-1')
self.button_z2 = QPushButton('-0.1')
self.button_z3 = QPushButton('-0.01')
self.button_z4 = QPushButton('+0.01')
self.button_z5 = QPushButton('+0.1')
self.button_z6 = QPushButton('+1')
# set X sizes
self.button_z1.setFixedSize(60,60)
self.button_z2.setFixedSize(60,60)
self.button_z3.setFixedSize(60,60)
self.button_z4.setFixedSize(60,60)
self.button_z5.setFixedSize(60,60)
self.button_z6.setFixedSize(60,60)
# attach actions
'''
self.button_z1.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z-1 G90'))
self.button_z2.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z-0.1 G90'))
self.button_z3.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z-0.01 G90'))
self.button_z4.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z0.01 G90'))
self.button_z5.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z0.1 G90'))
self.button_z6.clicked.connect(lambda: self.parent().printer.gCode('G91 G1 Z1 G90'))
'''
self.button_z1.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z-1','G90']))
self.button_z2.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z-0.1','G90']))
self.button_z3.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z-0.01','G90']))
self.button_z4.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z0.01','G90']))
self.button_z5.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z0.1','G90']))
self.button_z6.clicked.connect(lambda: self.parent().printer.gCodeBatch(['G91','G1 Z1','G90']))
# add buttons to window
z_label = QLabel('Z')
buttons_layout.addWidget(z_label,2,0)
buttons_layout.addWidget(self.button_z1,2,1)
buttons_layout.addWidget(self.button_z2,2,2)
buttons_layout.addWidget(self.button_z3,2,3)
buttons_layout.addWidget(self.button_z4,2,4)
buttons_layout.addWidget(self.button_z5,2,5)
buttons_layout.addWidget(self.button_z6,2,6)
#self.macro_field = QLineEdit()
#self.button_macro = QPushButton('Run macro')
#buttons_layout.addWidget(self.button_macro,3,1,2,1)
#buttons_layout.addWidget(self.macro_field,3,2,1,-1)
# Set up items on dialog grid
self.layout.addWidget(self.cp_info,0,0,1,-1)
self.layout.addLayout(buttons_layout,1,0,3,7)
# OK/Cancel buttons
self.layout.addWidget(self.buttonBox)
# apply layout
self.setLayout(self.layout)
def setSummaryText(self, message):
self.cp_info.setText(message)
class DebugDialog(QDialog):
def __init__(self,parent=None, message=''):
super(DebugDialog,self).__init__(parent=parent)
self.setWindowFlag(Qt.WindowContextHelpButtonHint,False)
self.setWindowTitle('Debug Information')
# Set layout details
self.layout = QGridLayout()
self.layout.setSpacing(3)
# text area
self.textarea = QTextEdit()
self.textarea.setAcceptRichText(False)
self.textarea.setReadOnly(True)
self.layout.addWidget(self.textarea,0,0)
# apply layout
self.setLayout(self.layout)
temp_text = ''
try:
if self.parent().video_thread.isRunning():
temp_text += 'Video thread running\n'
except Exception as e1:
None
if len(message) > 0:
temp_text += '\nCalibration Debug Messages:\n' + message
self.textarea.setText(temp_text)
class CameraSettingsDialog(QDialog):
def __init__(self,parent=None, message=''):
super(CameraSettingsDialog,self).__init__(parent=parent)
self.setWindowFlag(Qt.WindowContextHelpButtonHint,False)
self.setWindowTitle('Camera Settings')
#QBtn = QDialogButtonBox.Close
#self.buttonBox = QDialogButtonBox(QBtn)
#self.buttonBox.accepted.connect(self.accept)
#self.buttonBox.rejected.connect(self.reject)
# Get camera settings from video thread
try:
(brightness_input, contrast_input, saturation_input, hue_input) = self.parent().video_thread.getProperties()
except Exception as set1:
self.updateStatusbar('Error fetching camera parameters.')
print('ERROR: Camera Settings: ' + str(set1))
# Set layout details
self.layout = QVBoxLayout()
self.layout.setSpacing(3)
# apply layout
self.setLayout(self.layout)
# Camera Combobox
self.camera_combo = QComboBox()
camera_description = str(video_src) + ': ' \
+ str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) \
+ 'x' + str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ' @ ' \
+ str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FPS)) + 'fps'
self.camera_combo.addItem(camera_description)
#self.camera_combo.currentIndexChanged.connect(self.parent().video_thread.changeVideoSrc)
# Get cameras button
self.camera_button = QPushButton('Get cameras')
self.camera_button.clicked.connect(self.getCameras)
if self.parent().video_thread.alignment:
self.camera_button.setDisabled(True)
else: self.camera_button.setDisabled(False)
#self.getCameras()
# Brightness slider
self.brightness_slider = QSlider(Qt.Horizontal)
self.brightness_slider.setMinimum(0)
self.brightness_slider.setMaximum(255)
self.brightness_slider.setValue(int(brightness_input))
self.brightness_slider.valueChanged.connect(self.changeBrightness)
self.brightness_slider.setTickPosition(QSlider.TicksBelow)
self.brightness_slider.setTickInterval(1)
self.brightness_label = QLabel(str(int(brightness_input)))
# Contrast slider
self.contrast_slider = QSlider(Qt.Horizontal)
self.contrast_slider.setMinimum(0)
self.contrast_slider.setMaximum(255)
self.contrast_slider.setValue(int(contrast_input))
self.contrast_slider.valueChanged.connect(self.changeContrast)
self.contrast_slider.setTickPosition(QSlider.TicksBelow)
self.contrast_slider.setTickInterval(1)
self.contrast_label = QLabel(str(int(contrast_input)))
# Saturation slider
self.saturation_slider = QSlider(Qt.Horizontal)
self.saturation_slider.setMinimum(0)
self.saturation_slider.setMaximum(255)
self.saturation_slider.setValue(int(saturation_input))
self.saturation_slider.valueChanged.connect(self.changeSaturation)
self.saturation_slider.setTickPosition(QSlider.TicksBelow)
self.saturation_slider.setTickInterval(1)
self.saturation_label = QLabel(str(int(saturation_input)))
# Hue slider
self.hue_slider = QSlider(Qt.Horizontal)
self.hue_slider.setMinimum(0)
self.hue_slider.setMaximum(8)
self.hue_slider.setValue(int(hue_input))
self.hue_slider.valueChanged.connect(self.changeHue)
self.hue_slider.setTickPosition(QSlider.TicksBelow)
self.hue_slider.setTickInterval(1)
self.hue_label = QLabel(str(int(hue_input)))
# Reset button
self.reset_button = QPushButton("Reset to defaults")
self.reset_button.setToolTip('Reset camera settings to defaults.')
self.reset_button.clicked.connect(self.resetDefaults)
# Save button
self.save_button = QPushButton('Save and Close')
self.save_button.setToolTip('Save current parameters to settings.json file')
self.save_button.clicked.connect(self.sendUserParameters)
self.save_button.setObjectName('active')
# Close button
self.close_button = QPushButton('Cancel and close')
self.close_button.setToolTip('Cancel changes and return to main program.')
self.close_button.clicked.connect(self.closeCPWindow)
self.close_button.setObjectName('terminate')
# Layout objects
# Camera drop-down
self.camera_box = QGroupBox('Camera')
self.layout.addWidget(self.camera_box)
cmbox = QHBoxLayout()
self.camera_box.setLayout(cmbox)
cmbox.addWidget(self.camera_combo)
cmbox.addWidget(self.camera_button)
# Brightness
self.brightness_box =QGroupBox('Brightness')
self.layout.addWidget(self.brightness_box)
bvbox = QHBoxLayout()
self.brightness_box.setLayout(bvbox)
bvbox.addWidget(self.brightness_slider)
bvbox.addWidget(self.brightness_label)
# Contrast
self.contrast_box =QGroupBox('Contrast')
self.layout.addWidget(self.contrast_box)
cvbox = QHBoxLayout()
self.contrast_box.setLayout(cvbox)
cvbox.addWidget(self.contrast_slider)
cvbox.addWidget(self.contrast_label)
# Saturation
self.saturation_box =QGroupBox('Saturation')
self.layout.addWidget(self.saturation_box)
svbox = QHBoxLayout()
self.saturation_box.setLayout(svbox)
svbox.addWidget(self.saturation_slider)
svbox.addWidget(self.saturation_label)
# Hue
self.hue_box =QGroupBox('Hue')
self.layout.addWidget(self.hue_box)
hvbox = QHBoxLayout()
self.hue_box.setLayout(hvbox)
hvbox.addWidget(self.hue_slider)
hvbox.addWidget(self.hue_label)
# Reset button
self.layout.addWidget(self.reset_button)
self.layout.addWidget(self.save_button)
self.layout.addWidget(self.close_button)
# OK Cancel buttons
#self.layout.addWidget(self.buttonBox)
def resetDefaults(self):
self.parent().video_thread.resetProperties()
(brightness_input, contrast_input, saturation_input, hue_input) = self.parent().video_thread.getProperties()
brightness_input = int(brightness_input)
contrast_input = int(contrast_input)
saturation_input = int(saturation_input)
hue_input = int(hue_input)
self.brightness_slider.setValue(brightness_input)
self.brightness_label.setText(str(brightness_input))
self.contrast_slider.setValue(contrast_input)
self.contrast_label.setText(str(contrast_input))
self.saturation_slider.setValue(saturation_input)
self.saturation_label.setText(str(saturation_input))
self.hue_slider.setValue(hue_input)
self.hue_label.setText(str(hue_input))
def changeBrightness(self):
parameter = int(self.brightness_slider.value())
try:
self.parent().video_thread.setProperty(brightness=parameter)
except:
None
self.brightness_label.setText(str(parameter))
def changeContrast(self):
parameter = int(self.contrast_slider.value())
try:
self.parent().video_thread.setProperty(contrast=parameter)
except:
None
self.contrast_label.setText(str(parameter))
def changeSaturation(self):
parameter = int(self.saturation_slider.value())
try:
self.parent().video_thread.setProperty(saturation=parameter)
except:
None
self.saturation_label.setText(str(parameter))
def changeHue(self):
parameter = int(self.hue_slider.value())
try:
self.parent().video_thread.setProperty(hue=parameter)
except:
None
self.hue_label.setText(str(parameter))
def getCameras(self):
# checks the first 6 indexes.
i = 6
index = 0
self.camera_combo.clear()
_cameras = []
original_camera_description = str(video_src) + ': ' \
+ str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) \
+ 'x' + str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ' @ ' \
+ str(self.parent().video_thread.cap.get(cv2.CAP_PROP_FPS)) + 'fps'
_cameras.append(original_camera_description)
while i > 0:
if index != video_src:
tempCap = cv2.VideoCapture(index)
if tempCap.read()[0]:
api = tempCap.getBackendName()
camera_description = str(index) + ': ' \
+ str(tempCap.get(cv2.CAP_PROP_FRAME_WIDTH)) \
+ 'x' + str(tempCap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ' @ ' \
+ str(tempCap.get(cv2.CAP_PROP_FPS)) + 'fps'
_cameras.append(camera_description)
tempCap.release()
index += 1
i -= 1
#cameras = [line for line in allOutputs if float(line['propmode']) > -1 ]
_cameras.sort()
for camera in _cameras:
self.camera_combo.addItem(camera)
self.camera_combo.setCurrentText(original_camera_description)
def sendUserParameters(self):
_tempSrc = self.camera_combo.currentText()
_tempSrc = _tempSrc[:_tempSrc.find(':')]
self.parent().saveUserParameters(cameraSrc=_tempSrc)
self.close()
def closeCPWindow(self):
self.parent().updateStatusbar('Camera changes discarded.')
self.close()
class OverlayLabel(QLabel):
def __init__(self):
super(OverlayLabel, self).__init__()
self.display_text = 'Welcome to TAMV. Enter your printer address and click \"Connect..\" to start.'
def paintEvent(self, event):
super(OverlayLabel, self).paintEvent(event)
pos = QPoint(10, 470)
painter = QPainter(self)
painter.setBrush(QColor(204,204,204,230))
painter.setPen(QColor(255, 255, 255,0))
painter.drawRect(0,450,640,50)
painter.setPen(QColor(0, 0, 0))
painter.drawText(pos, self.display_text)
def setText(self, textToDisplay):
self.display_text = textToDisplay
class CalibrateNozzles(QThread):
# Signals
status_update = pyqtSignal(str)
message_update = pyqtSignal(str)
change_pixmap_signal = pyqtSignal(np.ndarray)
calibration_complete = pyqtSignal()
detection_error = pyqtSignal(str)
result_update = pyqtSignal(object)
alignment = False
_running = False
display_crosshair = False
detection_on = False
align_one_tool = False
def __init__(self, parent=None, th1=1, th2=50, thstep=1, minArea=600, minCircularity=0.8,numTools=0,cycles=1, align=False):
super(QThread,self).__init__(parent=parent)
# transformation matrix
self.transform_matrix = []
self.xray = False
self.loose = False
self.detector_changed = False
self.detect_th1 = th1
self.detect_th2 = th2
self.detect_thstep = thstep
self.detect_minArea = minArea
self.detect_minCircularity = minCircularity
self.numTools = numTools
self.toolNames = []
self.cycles = cycles
self.alignment = align
self.message_update.emit('Detector created, waiting for tool..')
# start with detection off
self.display_crosshair = False
self.detection_on = False
self.align_one_tool = False
# Video Parameters
self.brightness_default = 0
self.contrast_default = 0
self.saturation_default = 0
self.hue_default = 0
self.brightness = -1
self.contrast = -1
self.saturation = -1
self.hue = -1
# Start Video feed
self.cap = cv2.VideoCapture(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.brightness_default = self.cap.get(cv2.CAP_PROP_BRIGHTNESS)
self.contrast_default = self.cap.get(cv2.CAP_PROP_CONTRAST)
self.saturation_default = self.cap.get(cv2.CAP_PROP_SATURATION)
self.hue_default = self.cap.get(cv2.CAP_PROP_HUE)
self.ret, self.cv_img = self.cap.read()
if self.ret:
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
else:
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.ret, self.cv_img = self.cap.read()
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
def toggleXray(self):
if self.xray:
self.xray = False
else: self.xray = True
def toggleLoose(self):
self.detector_changed = True
if self.loose:
self.loose = False
else: self.loose = True
def setProperty(self,brightness=-1, contrast=-1, saturation=-1, hue=-1):
try:
if int(brightness) >= 0:
self.brightness = brightness
self.cap.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
except Exception as b1:
print('Brightness exception: ', b1 )
try:
if int(contrast) >= 0:
self.contrast = contrast
self.cap.set(cv2.CAP_PROP_CONTRAST,self.contrast)
except Exception as c1:
print('Contrast exception: ', c1 )
try:
if int(saturation) >= 0:
self.saturation = saturation
self.cap.set(cv2.CAP_PROP_SATURATION,self.saturation)
except Exception as s1:
print('Saturation exception: ', s1 )
try:
if int(hue) >= 0:
self.hue = hue
self.cap.set(cv2.CAP_PROP_HUE,self.hue)
except Exception as h1:
print('Hue exception: ', h1 )
def getProperties(self):
return (self.brightness_default, self.contrast_default, self.saturation_default,self.hue_default)
def resetProperties(self):
self.setProperty(brightness=self.brightness_default, contrast = self.contrast_default, saturation=self.saturation_default, hue=self.hue_default)
def run(self):
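        # Main thread loop, interleaving three modes:
        #   1) alignment and detection_on: run the full tool calibration cycle
        #   2) detection_on only: detect nozzles and annotate the preview
        #   3) otherwise: plain video passthrough to the GUI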
self.createDetector()
while True:
if self.detection_on:
if self.alignment:
try:
if self.loose:
self.detect_minCircularity = 0.3
else: self.detect_minCircularity = 0.8
if self.detector_changed:
self.createDetector()
self.detector_changed = False
self._running = True
while self._running:
self.cycles = self.parent().cycles
for rep in range(self.cycles):
# if self.align_one_tool == False:
# _tools_to_align = self.parent().toolNames
# else:
# _tools_to_align = []
# for button in self.toolButtons:
# if button.isChecked():
# _tools_to_align.append(button)
for j,tool in enumerate(self.toolNames):
# tool = int(button.text().replace('T', ''))
#for tool in range(self.parent().num_tools):
# process GUI events
app.processEvents()
# Update status bar
self.status_update.emit('Calibrating T' + tool + ', cycle: ' + str(rep+1) + '/' + str(self.cycles))
# Load next tool for calibration
self.parent().printer.gCode('T'+tool)
# Move tool to CP coordinates
self.parent().printer.gCode('G1 X' + str(self.parent().cp_coords['X']))
self.parent().printer.gCode('G1 Y' + str(self.parent().cp_coords['Y']))
self.parent().printer.gCode('G1 Z' + str(self.parent().cp_coords['Z']))
# Wait for moves to complete
#while self.parent().printer.getStatus() not in 'idle':
while self.parent().printer.getStatus() not in 'ready':
# process GUI events
app.processEvents()
self.ret, self.cv_img = self.cap.read()
if self.ret:
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
else:
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.ret, self.cv_img = self.cap.read()
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
continue
# Update message bar
self.message_update.emit('Searching for nozzle..')
# Process runtime algorithm changes
if self.loose:
self.detect_minCircularity = 0.3
else: self.detect_minCircularity = 0.8
if self.detector_changed:
self.createDetector()
self.detector_changed = False
# Analyze frame for blobs
(c, transform, mpp) = self.calibrateTool(int(tool), rep)
# process GUI events
app.processEvents()
# apply offsets to machine
#self.parent().printer.gCode( 'G10 P' + tool + ' X' + str(c['X']) + ' Y' + str(c['Y']) )
# signal end of execution
self._running = False
# Update status bar
self.status_update.emit('Calibration complete: Resetting machine.')
# HBHBHB
# Update debug window with results
# self.parent().debugString += '\nCalibration output:\n'
#self.printer.gCode('T-1')
#self.parent().printer.gCode('TOOL_DROPOFF')
self.parent().printer.gCode('G1 X' + str(self.parent().cp_coords['X']))
self.parent().printer.gCode('G1 Y' + str(self.parent().cp_coords['Y']))
self.parent().printer.gCode('G1 Z' + str(self.parent().cp_coords['Z']))
self.status_update.emit('Calibration complete: Done.')
self.alignment = False
self.detection_on = False
self.display_crosshair = False
self._running = False
self.calibration_complete.emit()
except Exception as mn1:
self.alignment = False
self.detection_on = False
self.display_crosshair = False
self._running = False
self.detection_error.emit(str(mn1))
self.cap.release()
else:
# don't run alignment - fetch frames and detect only
try:
if self.loose:
self.detect_minCircularity = 0.3
else: self.detect_minCircularity = 0.8
self._running = True
# transformation matrix
#self.transform_matrix = []
while self._running and self.detection_on:
# Update status bar
#self.status_update.emit('Detection mode: ON')
# Process runtime algorithm changes
if self.loose:
self.detect_minCircularity = 0.3
else: self.detect_minCircularity = 0.8
if self.detector_changed:
self.createDetector()
self.detector_changed = False
# Run detection and update output
self.analyzeFrame()
# process GUI events
app.processEvents()
except Exception as mn1:
self._running = False
self.detection_error.emit(str(mn1))
self.cap.release()
else:
while not self.detection_on:
try:
self.ret, self.cv_img = self.cap.read()
if self.ret:
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
else:
# reset capture
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.ret, self.cv_img = self.cap.read()
if self.ret:
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
continue
app.processEvents()
except Exception as mn2:
                        self.status_update.emit( 'Error: ' + str(mn2) )
print('Error: ' + str(mn2))
self.cap.release()
self.detection_on = False
self._running = False
exit()
app.processEvents()
app.processEvents()
continue
self.cap.release()
def analyzeFrame(self):
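        # Grab frames and look for exactly one circular blob (the nozzle bore).
        # Once found, returns (blob pixel xy, image-center target, machine coordinates
        # at capture time, blob radius).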
# Placeholder coordinates
xy = [0,0]
# Counter of frames with no circle.
nocircle = 0
# Random time offset
rd = int(round(time.time()*1000))
# reset capture
#self.cap.open(video_src)
#self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
#self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
#self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
        # default so the return below is always defined when alignment is off
        toolCoordinates = None
        while self.detection_on:
app.processEvents()
self.ret, self.frame = self.cap.read()
if not self.ret:
# reset capture
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
continue
if self.alignment:
try:
# capture tool location in machine space before processing
toolCoordinates = self.parent().printer.getCoords()
except Exception as c1:
toolCoordinates = None
# capture first clean frame for display
cleanFrame = self.frame
# apply nozzle detection algorithm
# Detection algorithm 1:
# gamma correction -> use Y channel from YUV -> GaussianBlur (7,7),6 -> adaptive threshold
gammaInput = 1.2
self.frame = self.adjust_gamma(image=self.frame, gamma=gammaInput)
yuv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2YUV)
yuvPlanes = cv2.split(yuv)
yuvPlanes[0] = cv2.GaussianBlur(yuvPlanes[0],(7,7),6)
yuvPlanes[0] = cv2.adaptiveThreshold(yuvPlanes[0],255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,35,1)
self.frame = cv2.cvtColor(yuvPlanes[0],cv2.COLOR_GRAY2BGR)
target = [int(np.around(self.frame.shape[1]/2)),int(np.around(self.frame.shape[0]/2))]
# Process runtime algorithm changes
if self.loose:
self.detect_minCircularity = 0.3
else: self.detect_minCircularity = 0.8
if self.detector_changed:
self.createDetector()
self.detector_changed = False
# run nozzle detection for keypoints
keypoints = self.detector.detect(self.frame)
# draw the timestamp on the frame AFTER the circle detector! Otherwise it finds the circles in the numbers.
if self.xray:
cleanFrame = self.frame
# check if we are displaying a crosshair
if self.display_crosshair:
self.frame = cv2.line(cleanFrame, (target[0], target[1]-25), (target[0], target[1]+25), (0, 255, 0), 1)
self.frame = cv2.line(self.frame, (target[0]-25, target[1] ), (target[0]+25, target[1] ), (0, 255, 0), 1)
else: self.frame = cleanFrame
# update image
local_img = self.frame
self.change_pixmap_signal.emit(local_img)
if(nocircle> 25):
self.message_update.emit( 'Error in detecting nozzle.' )
nocircle = 0
continue
num_keypoints=len(keypoints)
if (num_keypoints == 0):
if (25 < (int(round(time.time() * 1000)) - rd)):
nocircle += 1
self.frame = self.putText(self.frame,'No circles found',offsety=3)
self.message_update.emit( 'No circles found.' )
local_img = self.frame
self.change_pixmap_signal.emit(local_img)
continue
if (num_keypoints > 1):
if (25 < (int(round(time.time() * 1000)) - rd)):
self.message_update.emit( 'Too many circles found. Please stop and clean the nozzle.' )
self.frame = self.putText(self.frame,'Too many circles found '+str(num_keypoints),offsety=3, color=(255,255,255))
self.frame = cv2.drawKeypoints(self.frame, keypoints, np.array([]), (255,255,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
local_img = self.frame
self.change_pixmap_signal.emit(local_img)
continue
# Found one and only one circle. Put it on the frame.
nocircle = 0
xy = np.around(keypoints[0].pt)
r = np.around(keypoints[0].size/2)
# draw the blobs that look circular
self.frame = cv2.drawKeypoints(self.frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Note its radius and position
ts = 'U{0:3.0f} V{1:3.0f} R{2:2.0f}'.format(xy[0],xy[1],r)
xy = np.uint16(xy)
#self.frame = self.putText(self.frame, ts, offsety=2, color=(0, 255, 0), stroke=2)
self.message_update.emit(ts)
# show the frame
local_img = self.frame
self.change_pixmap_signal.emit(local_img)
rd = int(round(time.time() * 1000))
#end the loop
break
# and tell our parent.
if self.detection_on:
return (xy, target, toolCoordinates, r)
else:
return
def calibrateTool(self, tool, rep):
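        # Two-phase routine: states 0..9 move the carriage through a small circle to fit
        # the camera-to-machine transform, then state 200 iteratively jogs the tool until
        # the detected nozzle sits at the camera center.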
        # timestamp for calculating tool calibration runtime
self.startTime = time.time()
# average location of keypoints in frame
self.average_location=[0,0]
# current location
self.current_location = {'X':0,'Y':0}
# guess position used for camera calibration
self.guess_position = [1,1]
# current keypoint location
self.xy = [0,0]
# previous keypoint location
self.oldxy = self.xy
# Tracker flag to set which state algorithm is running in
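        #   0 .. len(calibrationCoordinates)-1 : camera calibration moves
        #   200                                : nozzle centering / offset search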
self.state = 0
# detected blob counter
self.detect_count = 0
# Save CP coordinates to local class
self.cp_coordinates = self.parent().cp_coords
# number of average position loops
self.position_iterations = 5
# calibration move set (0.5mm radius circle over 10 moves)
self.calibrationCoordinates = [ [0,-0.5], [0.294,-0.405], [0.476,-0.155], [0.476,0.155], [0.294,0.405], [0,0.5], [-0.294,0.405], [-0.476,0.155], [-0.476,-0.155], [-0.294,-0.405] ]
# Check if camera calibration matrix is already defined
if len(self.transform_matrix) > 1:
# set state flag to Step 2: nozzle alignment stage
self.state = 200
self.parent().debugString += '\nCalibrating T'+str(tool)+':C'+str(rep)+': '
# Space coordinates
self.space_coordinates = []
self.camera_coordinates = []
self.calibration_moves = 0
while True:
time.sleep(1)
(self.xy, self.target, self.tool_coordinates, self.radius) = self.analyzeFrame()
# analyzeFrame has returned our target coordinates, average its location and process according to state
self.average_location[0] += self.xy[0]
self.average_location[1] += self.xy[1]
self.detect_count += 1
# check if we've reached our number of detections for average positioning
if self.detect_count >= self.position_iterations:
# calculate average X Y position from detection
self.average_location[0] /= self.detect_count
self.average_location[1] /= self.detect_count
# round to 3 decimal places
self.average_location = np.around(self.average_location,3)
# get another detection validated
(self.xy, self.target, self.tool_coordinates, self.radius) = self.analyzeFrame()
#### Step 1: camera calibration and transformation matrix calculation
if self.state == 0:
self.parent().debugString += 'Calibrating camera...\n'
# Update GUI thread with current status and percentage complete
self.status_update.emit('Calibrating camera..')
self.message_update.emit('Calibrating rotation.. (10%)')
# Save position as previous location
self.oldxy = self.xy
# Reset space and camera coordinates
self.space_coordinates = []
self.camera_coordinates = []
# save machine coordinates for detected nozzle
self.space_coordinates.append( (self.tool_coordinates['X'], self.tool_coordinates['Y']) )
# save camera coordinates
self.camera_coordinates.append( (self.xy[0],self.xy[1]) )
# move carriage for calibration
self.offsetX = self.calibrationCoordinates[0][0]
self.offsetY = self.calibrationCoordinates[0][1]
#self.parent().printer.gCode('G91 G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000 G90 ')
self.parent().printer.gCodeBatch(['G91','G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000','G90'])
# Update state tracker to second nozzle calibration move
self.state = 1
continue
# Check if camera is still being calibrated
elif self.state >= 1 and self.state < len(self.calibrationCoordinates):
# Update GUI thread with current status and percentage complete
self.status_update.emit('Calibrating camera..')
self.message_update.emit('Calibrating rotation.. (' + str(self.state*10) + '%)')
# check if we've already moved, and calculate mpp value
if self.state == 1:
distance = self.getDistance(self.oldxy[0],self.oldxy[1],self.xy[0],self.xy[1])
if distance == 0:
                            self.parent().debugString += 'Camera calibration failed: no movement detected. The nozzle may be too close to the camera, or the camera may be too slow.\n'
                            print('Camera calibration failed: no movement detected. The nozzle may be too close to the camera, or the camera may be too slow.')
print('self.oldxy[0]:' + str(self.oldxy[0]))
print('self.oldxy[1]:' + str(self.oldxy[1]))
print('self.xy[0]:' + str(self.xy[0]))
print('self.xy[1]:' + str(self.xy[1]))
break
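                        # the first calibration move is 0.5 mm, so mm-per-pixel = 0.5 / measured pixel distance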
self.mpp = np.around(0.5/distance,4)
# save position as previous position
self.oldxy = self.xy
# save machine coordinates for detected nozzle
self.space_coordinates.append( (self.tool_coordinates['X'], self.tool_coordinates['Y']) )
# save camera coordinates
self.camera_coordinates.append( (self.xy[0],self.xy[1]) )
# return carriage to relative center of movement
self.offsetX = -1*self.offsetX
self.offsetY = -1*self.offsetY
#self.parent().printer.gCode('G91 G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000 G90 ')
self.parent().printer.gCodeBatch(['G91','G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000','G90'])
# move carriage a random amount in X&Y to collect datapoints for transform matrix
self.offsetX = self.calibrationCoordinates[self.state][0]
self.offsetY = self.calibrationCoordinates[self.state][1]
#self.parent().printer.gCode('G91 G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000 G90 ')
self.parent().printer.gCodeBatch(['G91','G1 X' + str(self.offsetX) + ' Y' + str(self.offsetY) +' F3000','G90'])
# increment state tracker to next calibration move
self.state += 1
continue
# check if final calibration move has been completed
elif self.state == len(self.calibrationCoordinates):
calibration_time = np.around(time.time() - self.startTime,1)
self.parent().debugString += 'Camera calibration completed in ' + str(calibration_time) + ' seconds.\n'
self.parent().debugString += 'Millimeters per pixel: ' + str(self.mpp) + '\n\n'
print('Millimeters per pixel: ' + str(self.mpp))
print('Camera calibration completed in ' + str(calibration_time) + ' seconds.')
# Update GUI thread with current status and percentage complete
self.message_update.emit('Calibrating rotation.. (100%) - MPP = ' + str(self.mpp))
self.status_update.emit('Calibrating T' + str(tool) + ', cycle: ' + str(rep+1) + '/' + str(self.cycles))
# save position as previous position
self.oldxy = self.xy
# save machine coordinates for detected nozzle
self.space_coordinates.append( (self.tool_coordinates['X'], self.tool_coordinates['Y']) )
# save camera coordinates
self.camera_coordinates.append( (self.xy[0],self.xy[1]) )
# calculate camera transformation matrix
self.transform_input = [(self.space_coordinates[i], self.normalize_coords(camera)) for i, camera in enumerate(self.camera_coordinates)]
self.transform_matrix, self.transform_residual = self.least_square_mapping(self.transform_input)
# define camera center in machine coordinate space
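                    # evaluating the quadratic fit at the normalized image center (0,0)
                    # leaves only the constant term, i.e. the camera center in machine space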
self.newCenter = self.transform_matrix.T @ np.array([0, 0, 0, 0, 0, 1])
self.guess_position[0]= np.around(self.newCenter[0],3)
self.guess_position[1]= np.around(self.newCenter[1],3)
#self.parent().printer.gCode('G90 G1 X{0:-1.3f} Y{1:-1.3f} F1000 G90 '.format(self.guess_position[0],self.guess_position[1]))
self.parent().printer.gCodeBatch(['G90','G1 X{0:-1.3f} Y{1:-1.3f} F1000'.format(self.guess_position[0],self.guess_position[1]),'G90'])
# update state tracker to next phase
self.state = 200
# start tool calibration timer
self.startTime = time.time()
self.parent().debugString += '\nCalibrating T'+str(tool)+':C'+str(rep)+': '
continue
#### Step 2: nozzle alignment stage
elif self.state == 200:
# Update GUI thread with current status and percentage complete
self.message_update.emit('Tool calibration move #' + str(self.calibration_moves))
self.status_update.emit('Calibrating T' + str(tool) + ', cycle: ' + str(rep+1) + '/' + str(self.cycles))
# increment moves counter
self.calibration_moves += 1
# nozzle detected, frame rotation is set, start
self.cx,self.cy = self.normalize_coords(self.xy)
self.v = [self.cx**2, self.cy**2, self.cx*self.cy, self.cx, self.cy, 0]
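                    # evaluate the fitted mapping (constant term dropped) at the nozzle's normalized
                    # position to get its machine-space displacement from the camera center, then
                    # command a damped corrective move; the 0.55 factor appears to prevent overshoot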
self.offsets = -1*(0.55*self.transform_matrix.T @ self.v)
self.offsets[0] = np.around(self.offsets[0],3)
self.offsets[1] = np.around(self.offsets[1],3)
# Move it a bit
#self.parent().printer.gCode( 'M564 S1' )
#self.parent().printer.gCode( 'G91 G1 X{0:-1.3f} Y{1:-1.3f} F1000 G90 '.format(self.offsets[0],self.offsets[1]) )
self.parent().printer.gCodeBatch(['G91','G1 X{0:-1.3f} Y{1:-1.3f} F1000'.format(self.offsets[0],self.offsets[1]),'G90'])
# save position as previous position
self.oldxy = self.xy
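                    # convergence: the rounded correction is exactly zero on both axes,
                    # i.e. the remaining error is below ~0.0005 mm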
if ( self.offsets[0] == 0.0 and self.offsets[1] == 0.0 ):
self.parent().debugString += str(self.calibration_moves) + ' moves.\n'
self.parent().printer.gCode( 'G1 F13200' )
# Update GUI with progress
# calculate final offsets and return results
self.tool_offsets = self.parent().printer.getG10ToolOffset(tool)
old_final_x = np.around( (self.cp_coordinates['X'] + self.tool_offsets['X']) - self.tool_coordinates['X'], 3 )
print('T'+str(tool) + ' final_x=' + str(self.cp_coordinates['X']) + ' + ' + str(self.tool_offsets['X']) + ' - ' + str(self.tool_coordinates['X']) )
old_final_y = np.around( (self.cp_coordinates['Y'] + self.tool_offsets['Y']) - self.tool_coordinates['Y'], 3 )
print('T'+str(tool) + ' final_y=' + str(self.cp_coordinates['Y']) + ' + ' + str(self.tool_offsets['Y']) + ' - ' + str(self.tool_coordinates['Y']) )
old_string_final_x = "{:.3f}".format(old_final_x)
old_string_final_y = "{:.3f}".format(old_final_y)
final_x = np.around( ( self.tool_coordinates['X'] + self.tool_offsets['X'] ) - self.cp_coordinates['X'], 3 )
final_y = np.around( ( self.tool_coordinates['Y'] + self.tool_offsets['Y'] ) - self.cp_coordinates['Y'], 3 )
alt_final_x = np.around( self.cp_coordinates['X'] - self.tool_offsets['X'] - self.tool_coordinates['X'], 3 )
alt_final_y = np.around( self.cp_coordinates['Y'] - self.tool_offsets['Y'] - self.tool_coordinates['Y'], 3 )
alt_string_final_x = "{:.3f}".format(alt_final_x)
alt_string_final_y = "{:.3f}".format(alt_final_y)
string_final_x = "{:.3f}".format(final_x)
string_final_y = "{:.3f}".format(final_y)
# Save offset to output variable
# HBHBHBHB
_return = {}
_return['X'] = final_x
_return['Y'] = final_y
_return['MPP'] = self.mpp
_return['time'] = np.around(time.time() - self.startTime,1)
self.message_update.emit('Nozzle calibrated: offset coordinates X' + str(_return['X']) + ' Y' + str(_return['Y']) )
self.parent().debugString += 'T' + str(tool) + ', cycle ' + str(rep+1) + ' completed in ' + str(_return['time']) + ' seconds.\n'
print('T' + str(tool) + ', cycle ' + str(rep+1) + ' completed in ' + str(_return['time']) + ' seconds.')
self.message_update.emit('T' + str(tool) + ', cycle ' + str(rep+1) + ' completed in ' + str(_return['time']) + ' seconds.')
self.parent().printer.gCode( 'G1 F13200' )
self.parent().debugString += 'G10 P' + str(tool) + ' X' + string_final_x + ' Y' + string_final_y + '\n'
self.parent().debugString += 'alt_G10 P' + str(tool) + ' X' + alt_string_final_x + ' Y' + alt_string_final_y + '\n'
self.parent().debugString += 'old_G10 P' + str(tool) + ' X' + old_string_final_x + ' Y' + old_string_final_y + '\n'
x_tableitem = QTableWidgetItem(string_final_x)
x_tableitem.setBackground(QColor(100,255,100,255))
y_tableitem = QTableWidgetItem(string_final_y)
y_tableitem.setBackground(QColor(100,255,100,255))
print('G10 P' + str(tool) + ' X' + string_final_x + ' Y' + string_final_y + '\n')
print('altG10 P' + str(tool) + ' X' + alt_string_final_x + ' Y' + alt_string_final_y + '\n')
print('oldG10 P' + str(tool) + ' X' + old_string_final_x + ' Y' + old_string_final_y + '\n')
row_no = 0
for row in range(self.parent().offsets_table.rowCount()):
header = self.parent().offsets_table.verticalHeaderItem(row)
print('Row:' + str(row_no) + ', header.text():' + str(header.text()))
if str(header.text()) == str('T'+str(tool)):
row_no = row
print('Found on row:' + str(row_no))
# items = self.parent().offsets_table.findItems('T'+str(tool), Qt.MatchExactly)
# print('len(items):' + str(len(items)))
# if len(items) == 1 : # we have found our row
# item = items[0] # take the first
# print('items.row:' + str(items[0].row))
# row_no = item.row
self.parent().offsets_table.setItem(row_no,0,x_tableitem)
self.parent().offsets_table.setItem(row_no,1,y_tableitem)
self.result_update.emit({
'tool': str(tool),
'cycle': str(rep),
'mpp': str(self.mpp),
'X': string_final_x,
'Y': string_final_y
})
return(_return, self.transform_matrix, self.mpp)
else:
self.state = 200
continue
self.avg = [0,0]
self.location = {'X':0,'Y':0}
self.count = 0
def normalize_coords(self,coords):
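        # map pixel coordinates to a [-0.5, 0.5] range centered on the image center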
xdim, ydim = camera_width, camera_height
return (coords[0] / xdim - 0.5, coords[1] / ydim - 0.5)
def least_square_mapping(self,calibration_points):
        # Fit a least-squares quadratic mapping (6 coefficients per axis) from
        # normalized camera coordinates to machine-space coordinates.
n = len(calibration_points)
real_coords, pixel_coords = np.empty((n,2)),np.empty((n,2))
for i, (r,p) in enumerate(calibration_points):
real_coords[i] = r
pixel_coords[i] = p
x,y = pixel_coords[:,0],pixel_coords[:,1]
A = np.vstack([x**2,y**2,x * y, x,y,np.ones(n)]).T
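        # each row of A is [x^2, y^2, x*y, x, y, 1]; least squares gives a 6x2
        # coefficient matrix mapping normalized camera coordinates to machine space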
transform = np.linalg.lstsq(A, real_coords, rcond = None)
return transform[0], transform[1].mean()
def getDistance(self, x1, y1, x0, y0 ):
x1_float = float(x1)
x0_float = float(x0)
y1_float = float(y1)
y0_float = float(y0)
x_dist = (x1_float - x0_float) ** 2
y_dist = (y1_float - y0_float) ** 2
retVal = np.sqrt((x_dist + y_dist))
return np.around(retVal,3)
def stop(self):
self._running = False
self.detection_on = False
try:
# tempCoords = self.printer.getCoords()
            if self.parent().printer.isIdle():
                self.parent().changeTool(None)
# #self.printer.gCode('T-1')
# self.printer.gCode('TOOL_DROPOFF')
# self.parent().printer.gCode('G1 X' + str(tempCoords['X']) + ' Y' + str(tempCoords['Y']))
# #while self.parent().printer.getStatus() not in 'idle':
while self.parent().printer.getStatus() not in 'ready':
time.sleep(1)
        except Exception: pass
self.cap.release()
self.exit()
def createDetector(self):
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Thresholds
params.minThreshold = self.detect_th1
params.maxThreshold = self.detect_th2
params.thresholdStep = self.detect_thstep
# Area
params.filterByArea = True # Filter by Area.
params.minArea = self.detect_minArea
# Circularity
params.filterByCircularity = True # Filter by Circularity
params.minCircularity = self.detect_minCircularity
params.maxCircularity= 1
# Convexity
params.filterByConvexity = True # Filter by Convexity
params.minConvexity = 0.3
params.maxConvexity = 1
# Inertia
params.filterByInertia = True # Filter by Inertia
params.minInertiaRatio = 0.3
# create detector
self.detector = cv2.SimpleBlobDetector_create(params)
def adjust_gamma(self, image, gamma=1.2):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype('uint8')
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
def putText(self, frame,text,color=(0, 0, 255),offsetx=0,offsety=0,stroke=1): # Offsets are in character box size in pixels.
if (text == 'timestamp'): text = datetime.datetime.now().strftime('%m-%d-%Y %H:%M:%S')
fontScale = 1
if (frame.shape[1] > 640): fontScale = stroke = 2
if (frame.shape[1] < 640):
fontScale = 0.8
stroke = 1
offpix = cv2.getTextSize('A', cv2.FONT_HERSHEY_SIMPLEX ,fontScale, stroke)
textpix = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX ,fontScale, stroke)
offsety=max(offsety, (-frame.shape[0]/2 + offpix[0][1])/offpix[0][1]) # Let offsety -99 be top row
offsetx=max(offsetx, (-frame.shape[1]/2 + offpix[0][0])/offpix[0][0]) # Let offsetx -99 be left edge
offsety=min(offsety, (frame.shape[0]/2 - offpix[0][1])/offpix[0][1]) # Let offsety 99 be bottom row.
offsetx=min(offsetx, (frame.shape[1]/2 - offpix[0][0])/offpix[0][0]) # Let offsetx 99 be right edge.
cv2.putText(frame, text,
(int(offsetx * offpix[0][0]) + int(frame.shape[1]/2) - int(textpix[0][0]/2)
,int(offsety * offpix[0][1]) + int(frame.shape[0]/2) + int(textpix[0][1]/2)),
cv2.FONT_HERSHEY_SIMPLEX, fontScale, color, stroke)
return(frame)
    def changeVideoSrc(self, newSrc=-1):
        # update the module-level source so later reopen attempts use the new camera
        global video_src
        self.cap.release()
        video_src = newSrc
# Start Video feed
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.brightness_default = self.cap.get(cv2.CAP_PROP_BRIGHTNESS)
self.contrast_default = self.cap.get(cv2.CAP_PROP_CONTRAST)
self.saturation_default = self.cap.get(cv2.CAP_PROP_SATURATION)
self.hue_default = self.cap.get(cv2.CAP_PROP_HUE)
self.ret, self.cv_img = self.cap.read()
if self.ret:
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
else:
self.cap.open(video_src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE,1)
#self.cap.set(cv2.CAP_PROP_FPS,25)
self.ret, self.cv_img = self.cap.read()
local_img = self.cv_img
self.change_pixmap_signal.emit(local_img)
class App(QMainWindow):
cp_coords = {}
numTools = 0
current_frame = np.ndarray
mutex = QMutex()
debugString = ''
calibrationResults = []
def __init__(self, parent=None):
super().__init__()
self.setWindowFlag(Qt.WindowContextHelpButtonHint,False)
self.setWindowTitle('TAMV')
self.setWindowIcon(QIcon('jubilee.png'))
global display_width, display_height
screen = QDesktopWidget().availableGeometry()
self.small_display = False
# HANDLE DIFFERENT DISPLAY SIZES
# 800x600 display - fullscreen app
        if int(screen.width()) >= 800 and int(screen.height()) >= 550 and int(screen.height()) < 600:
self.small_display = True
print('800x600 desktop detected')
display_width = 512
display_height = 384
self.setWindowFlag(Qt.FramelessWindowHint)
self.showFullScreen()
self.setGeometry(0,0,700,500)
app_screen = self.frameGeometry()
# 848x480 display - fullscreen app
elif int(screen.width()) >= 800 and int(screen.height()) < 550:
self.small_display = True
print('848x480 desktop detected')
display_width = 448
display_height = 336
self.setWindowFlag(Qt.FramelessWindowHint)
self.showFullScreen()
self.setGeometry(0,0,700,400)
app_screen = self.frameGeometry()
# larger displays - normal window
else:
self.small_display = False
display_width = 640
display_height = 480
self.setGeometry(QStyle.alignedRect(Qt.LeftToRight,Qt.AlignHCenter,QSize(800,600),screen))
app_screen = self.frameGeometry()
app_screen.moveCenter(screen.center())
self.move(app_screen.topLeft())
# SET UP STYLESHEETS FOR GUI ELEMENTS
self.setStyleSheet(
'\
QPushButton {\
border: 1px solid #adadad;\
border-style: outset;\
border-radius: 4px;\
font: 14px;\
padding: 6px;\
}\
QPushButton:hover,QPushButton:enabled:hover,QPushButton:enabled:!checked:hover {\
background-color: #27ae60;\
border: 1px solid #aaaaaa;\
}\
QPushButton:pressed,QPushButton:enabled:pressed,QPushButton:enabled:checked {\
background-color: #ae2776;\
border: 1px solid #aaaaaa;\
}\
QPushButton:enabled {\
background-color: green;\
color: white;\
}\
QPushButton#debug,QMessageBox > #debug {\
background-color: blue;\
color: white;\
}\
QPushButton#debug:hover, QMessageBox > QAbstractButton#debug:hover {\
background-color: green;\
color: white;\
}\
QPushButton#debug:pressed, QMessageBox > QAbstractButton#debug:pressed {\
background-color: #ae2776;\
border-style: inset;\
color: white;\
}\
QPushButton#active, QMessageBox > QAbstractButton#active {\
background-color: green;\
color: white;\
}\
QPushButton#active:pressed,QMessageBox > QAbstractButton#active:pressed {\
background-color: #ae2776;\
}\
QPushButton#terminate {\
background-color: red;\
color: white;\
}\
QPushButton#terminate:pressed {\
background-color: #c0392b;\
}\
QPushButton:disabled, QPushButton#terminate:disabled {\
background-color: #cccccc;\
color: #999999;\
}\
QInputDialog QDialogButtonBox > QPushButton:enabled, QDialog QPushButton:enabled,QPushButton[checkable="true"]:enabled {\
background-color: none;\
color: black;\
border: 1px solid #adadad;\
border-style: outset;\
border-radius: 4px;\
font: 14px;\
padding: 6px;\
}\
QPushButton:enabled:checked {\
background-color: #ae2776;\
border: 1px solid #aaaaaa;\
}\
QInputDialog QDialogButtonBox > QPushButton:pressed, QDialog QPushButton:pressed {\
background-color: #ae2776;\
}\
QInputDialog QDialogButtonBox > QPushButton:hover:!pressed, QDialog QPushButton:hover:!pressed {\
background-color: #27ae60;\
}\
'
)
# LOAD USER SAVED PARAMETERS OR CREATE DEFAULTS
self.loadUserParameters()
# GUI ELEMENTS DEFINITION
# Menubar
if not self.small_display:
self._createActions()
self._createMenuBar()
self._connectActions()
self.centralWidget = QWidget()
self.setCentralWidget(self.centralWidget)
# create the label that holds the image
self.image_label = OverlayLabel()
self.image_label.setFixedSize( display_width, display_height )
pixmap = QPixmap( display_width, display_height )
self.image_label.setPixmap(pixmap)
# create a status bar
self.statusBar = QStatusBar()
self.statusBar.showMessage('Loading up video feed and libraries..',5000)
self.setStatusBar( self.statusBar )
# CP location on statusbar
self.cp_label = QLabel('<b>CP:</b> <i>undef</i>')
self.statusBar.addPermanentWidget(self.cp_label)
self.cp_label.setStyleSheet(style_red)
# Connection status on statusbar
self.connection_status = QLabel('Disconnected')
self.connection_status.setStyleSheet(style_red)
self.statusBar.addPermanentWidget(self.connection_status)
# BUTTONS
# Connect
self.connection_button = QPushButton('Connect..')
        self.connection_button.setToolTip('Connect to a Klipper machine..')
self.connection_button.clicked.connect(self.connectToPrinter)
self.connection_button.setFixedWidth(170)
# Disconnect
self.disconnection_button = QPushButton('STOP / DISCONNECT')
self.disconnection_button.setToolTip('End current operation,\nunload tools, and return carriage to CP\nthen disconnect.')
self.disconnection_button.clicked.connect(self.disconnectFromPrinter)
self.disconnection_button.setFixedWidth(170)
self.disconnection_button.setObjectName('terminate')
self.disconnection_button.setDisabled(True)
# Controlled point
self.cp_button = QPushButton('Set Controlled Point..')
self.cp_button.setToolTip('Define your origin point\nto calculate all tool offsets from.')
self.cp_button.clicked.connect(self.controlledPoint)
self.cp_button.setFixedWidth(170)
#self.cp_button.setStyleSheet(style_disabled)
self.cp_button.setDisabled(True)
# Calibration Auto
self.calibration_button = QPushButton('Auto Tool Alignment')
self.calibration_button.setToolTip('Start alignment process.\nMAKE SURE YOUR CARRIAGE IS CLEAR TO MOVE ABOUT WITHOUT COLLISIONS!')
self.calibration_button.clicked.connect(self.runCalibration)
#self.calibration_button.setStyleSheet(style_disabled)
self.calibration_button.setDisabled(True)
self.calibration_button.setFixedWidth(170)
# Calibration One Tool
self.calibrate_button = QPushButton('Tool Alignment loaded tool')
self.calibrate_button.setToolTip('Start alignment process for only currently loaded tool.\nMAKE SURE YOUR CARRIAGE IS CLEAR TO MOVE ABOUT WITHOUT COLLISIONS!')
self.calibrate_button.clicked.connect(self.runCalibration)
#self.calibrate_button.setStyleSheet(style_disabled)
self.calibrate_button.setDisabled(True)
self.calibrate_button.setFixedWidth(200)
# Jog Panel
self.jogpanel_button = QPushButton('Jog Panel')
self.jogpanel_button.setToolTip('Open a control panel to move carriage.')
self.jogpanel_button.clicked.connect(self.displayJogPanel)
self.jogpanel_button.setDisabled(True)
self.jogpanel_button.setFixedWidth(170)
# Debug Info
self.debug_button = QPushButton('Debug Information')
self.debug_button.setToolTip('Display current debug info for troubleshooting\nand to display final G10 commands')
self.debug_button.clicked.connect(self.displayDebug)
self.debug_button.setFixedWidth(170)
self.debug_button.setObjectName('debug')
# Exit
self.exit_button = QPushButton('Quit')
self.exit_button.setToolTip('Unload tools, disconnect, and quit TAMV.')
self.exit_button.clicked.connect(self.close)
self.exit_button.setFixedWidth(170)
# OTHER ELEMENTS
# Repeat spinbox
self.repeat_label = QLabel('Cycles: ')
self.repeat_label.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.repeatSpinBox = QSpinBox()
self.repeatSpinBox.setValue(1)
self.repeatSpinBox.setMinimum(1)
self.repeatSpinBox.setSingleStep(1)
self.repeatSpinBox.setDisabled(True)
# Offsets table
self.offsets_box = QGroupBox("Tool Offsets")
self.offsets_box.setMaximumWidth(170)
self.offsets_table = QTableWidget()
self.offsets_table.setColumnCount(2)
self.offsets_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.offsets_table.setColumnWidth(0,50)
self.offsets_table.setColumnWidth(1,50)
self.offsets_table.setHorizontalHeaderItem(0, QTableWidgetItem("X"))
self.offsets_table.setHorizontalHeaderItem(1, QTableWidgetItem("Y"))
self.offsets_table.resizeRowsToContents()
vbox = QVBoxLayout()
vbox.setSpacing(1)
self.offsets_box.setLayout(vbox)
vbox.addWidget(self.offsets_table)
self.offsets_box.setVisible(False)
# Tool buttons table
self.toolBoxLayout = QHBoxLayout()
self.toolBoxLayout.setSpacing(1)
self.toolBox = QGroupBox()
self.toolBoxLayout.setContentsMargins(0,0,0,0)
self.toolBox.setLayout(self.toolBoxLayout)
self.toolBox.setVisible(False)
self.toolButtons = []
# Xray checkbox
self.xray_box = QCheckBox('X-ray')
self.xray_box.setChecked(False)
self.xray_box.stateChanged.connect(self.toggle_xray)
self.xray_box.setDisabled(True)
self.xray_box.setVisible(False)
# Loose checkbox
self.loose_box = QCheckBox('Loose detection')
self.loose_box.setChecked(False)
self.loose_box.stateChanged.connect(self.toggle_loose)
self.loose_box.setDisabled(True)
self.loose_box.setVisible(False)
# Detection checkbox
self.detect_box = QCheckBox('Detect ON')
self.detect_box.setChecked(False)
self.detect_box.stateChanged.connect(self.toggle_detect)
# create a grid box layout
grid = QGridLayout()
grid.setSpacing(3)
# add elements to grid
# FIRST ROW
grid.addWidget(self.connection_button,1,1,Qt.AlignLeft)
grid.addWidget(self.detect_box,1,2,1,1)
grid.addWidget(self.xray_box,1,3,1,1)
grid.addWidget(self.loose_box,1,4,1,1)
grid.addWidget(self.toolBox,1,5,1,1)
grid.addWidget(self.disconnection_button,1,7,1,-1,Qt.AlignLeft)
# SECOND ROW
# THIRD ROW
# main image viewer
grid.addWidget(self.image_label,3,1,4,6)
grid.addWidget(self.jogpanel_button,3,7,1,1)
grid.addWidget(self.offsets_box,4,7,1,1)
if self.small_display:
grid.addWidget(self.exit_button,5,7,1,1)
grid.addWidget(self.debug_button,6,7,1,1)
# FOURTH ROW
grid.addWidget(self.cp_button,7,1,1,1)
grid.addWidget(self.calibration_button,7,2,1,1)
grid.addWidget(self.calibrate_button,7,3,1,1)
grid.addWidget(self.repeat_label,7,4,1,1)
grid.addWidget(self.repeatSpinBox,7,5,1,1)
# set the grid layout as the widgets layout
self.centralWidget.setLayout(grid)
# start video feed
self.startVideo()
# flag to draw circle
self.crosshair = False
def toggle_detect(self):
self.video_thread.display_crosshair = not self.video_thread.display_crosshair
self.video_thread.detection_on = not self.video_thread.detection_on
if self.video_thread.detection_on:
self.xray_box.setDisabled(False)
self.xray_box.setVisible(True)
self.loose_box.setDisabled(False)
self.loose_box.setVisible(True)
else:
self.xray_box.setDisabled(True)
self.xray_box.setVisible(False)
self.loose_box.setDisabled(True)
self.loose_box.setVisible(False)
self.updateStatusbar('Detection: OFF')
def cleanPrinterURL(self, inputString='/tmp/klippy_uds'):
if not isinstance(inputString, str):
return( 1, 'Invalid printer Local Unix domain socket', '' )
elif len(inputString) < 2:
return( 1, 'Invalid printer Local Unix domain socket', '' )
else:
return( 0, '', inputString )
'''
_errCode = 0
_errMsg = ''
#_printerURL = 'http://localhost'
_printerURL = inputString
from urllib.parse import urlparse
u = urlparse(inputString)
scheme = u[0]
netlocation = u[1]
if len(scheme) < 4 or scheme.lower() not in ['http']:
_errCode = 1
_errMsg = 'Invalid scheme. Please only use http connections.'
elif len(netlocation) < 1:
_errCode = 2
_errMsg = 'Invalid IP/network address.'
elif scheme.lower() in ['https']:
_errCode = 3
_errMsg = 'Cannot use https connections for Duet controllers'
else:
_printerURL = scheme + '://' + netlocation
return( _errCode, _errMsg, _printerURL )
'''
def loadUserParameters(self):
global camera_width, camera_height, video_src
try:
with open('settings.json','r') as inputfile:
options = json.load(inputfile)
camera_settings = options['camera'][0]
camera_height = int( camera_settings['display_height'] )
camera_width = int( camera_settings['display_width'] )
video_src = camera_settings['video_src']
if len(str(video_src)) == 1: video_src = int(video_src)
printer_settings = options['printer'][0]
tempURL = printer_settings['address']
( _errCode, _errMsg, self.printerURL ) = self.cleanPrinterURL(tempURL)
if _errCode > 0:
# invalid input
print('Invalid printer Local Unix domain socket detected in settings.json!')
print('Defaulting to \"/tmp/klippy_uds"...')
self.printerURL = '/tmp/klippy_uds'
except FileNotFoundError:
# create parameter file with standard parameters
options = {}
options['camera'] = []
options['camera'].append( {
'video_src': 0,
'display_width': '640',
'display_height': '480'
} )
options['printer'] = []
options['printer'].append( {
'address': '/tmp/klippy_uds',
'name': 'Hermoine'
} )
try:
camera_width = 640
camera_height = 480
                video_src = 0
with open('settings.json','w') as outputfile:
json.dump(options, outputfile)
except Exception as e1:
print('Error writing user settings file.')
print(e1)
def saveUserParameters(self, cameraSrc=-2):
global camera_width, camera_height, video_src
        cameraSrc = int(cameraSrc)
        # remember the source currently in use so we only restart the video feed on an actual change
        previous_src = video_src
try:
if cameraSrc > -2:
video_src = cameraSrc
options = {}
options['camera'] = []
options['camera'].append( {
'video_src': video_src,
'display_width': camera_width,
'display_height': camera_height
} )
options['printer'] = []
options['printer'].append( {
'address': self.printerURL,
'name': 'Default printer'
} )
with open('settings.json','w') as outputfile:
json.dump(options, outputfile)
except Exception as e1:
print('Error saving user settings file.')
print(e1)
        if cameraSrc > -2 and int(previous_src) != cameraSrc:
            self.video_thread.changeVideoSrc(newSrc=cameraSrc)
self.updateStatusbar('Current profile saved to settings.json')
def _createMenuBar(self):
menuBar = self.menuBar()
# Creating menus using a QMenu object
fileMenu = QMenu('&File', self)
menuBar.addMenu(fileMenu)
fileMenu.addAction(self.debugAction)
fileMenu.addAction(self.cameraAction)
fileMenu.addSeparator()
fileMenu.addAction(self.saveAction)
fileMenu.addSeparator()
fileMenu.addAction(self.quitAction)
self.analysisMenu = QMenu('&Analyze',self)
menuBar.addMenu(self.analysisMenu)
self.analysisMenu.addAction(self.graphAction)
self.analysisMenu.addAction(self.exportAction)
self.analysisMenu.setDisabled(True)
def _createActions(self):
# Creating action using the first constructor
self.debugAction = QAction(self)
self.debugAction.setText('&Debug info')
self.cameraAction = QAction(self)
self.cameraAction.setText('&Camera settings')
self.quitAction = QAction(self)
self.quitAction.setText('&Quit')
self.saveAction = QAction(self)
self.saveAction.setText('&Save current settings')
self.graphAction = QAction(self)
self.graphAction.setText('&Graph calibration data..')
self.exportAction = QAction(self)
self.exportAction.setText('&Export to output.json')
def _connectActions(self):
# Connect File actions
self.debugAction.triggered.connect(self.displayDebug)
self.cameraAction.triggered.connect(self.displayCameraSettings)
self.quitAction.triggered.connect(self.close)
        # use a lambda so the QAction's 'checked' argument is not misread as a camera index
        self.saveAction.triggered.connect(lambda: self.saveUserParameters())
self.graphAction.triggered.connect(lambda: self.analyzeResults(graph=True))
self.exportAction.triggered.connect(lambda: self.analyzeResults(export=True))
def displayCameraSettings(self):
self.camera_dialog = CameraSettingsDialog(parent=self)
self.camera_dialog.exec_()
def displayDebug(self):
dbg = DebugDialog(parent=self,message=self.debugString)
if dbg.exec_():
None
def displayJogPanel(self):
try:
local_status = self.printer.getStatus()
#if local_status == 'idle':
if local_status == 'ready':
jogPanel = CPDialog(parent=self,summary='Control printer movement using this panel.',title='Jog Control')
if jogPanel.exec_():
None
except Exception as e1: self.statusBar.showMessage('Printer is not available or is busy. ')
def startVideo(self):
# create the video capture thread
self.video_thread = CalibrateNozzles(parent=self,numTools=0, cycles=1,minArea=600, align=False)
# connect its signal to the update_image slot
self.video_thread.detection_error.connect(self.updateStatusbar)
self.video_thread.status_update.connect(self.updateStatusbar)
self.video_thread.message_update.connect(self.updateMessagebar)
self.video_thread.change_pixmap_signal.connect(self.update_image)
self.video_thread.calibration_complete.connect(self.applyCalibration)
self.video_thread.result_update.connect(self.addCalibrationResult)
# start the thread
self.video_thread.start()
def stopVideo(self):
try:
if self.video_thread.isRunning():
self.video_thread.stop()
except Exception as vs2:
self.updateStatusbar('ERROR: cannot stop video')
print('ERROR: cannot stop video')
print(vs2)
def closeEvent(self, event):
try:
if self.printer.isIdle():
self.changeTool(None)
# tempCoords = self.printer.getCoords()
# #self.printer.gCode('T-1')
# self.printer.gCode('TOOL_DROPOFF')
# self.printer.gCode('G1 X' + str(tempCoords['X']) + ' Y' + str(tempCoords['Y']))
        except Exception: pass # no printer connected usually.
print()
print('Thank you for using TAMV!')
print('Check out www.jubilee3d.com')
event.accept()
def connectToPrinter(self):
# temporarily suspend GUI and display status message
self.image_label.setText('Waiting to connect..')
self.updateStatusbar('Please enter Local Unix domain socket for Klipper')
self.connection_button.setDisabled(True)
self.disconnection_button.setDisabled(True)
self.calibration_button.setDisabled(True)
self.calibrate_button.setDisabled(True)
self.cp_button.setDisabled(True)
self.jogpanel_button.setDisabled(True)
self.offsets_box.setVisible(False)
self.connection_status.setText('Connecting..')
self.connection_status.setStyleSheet(style_orange)
self.cp_label.setText('<b>CP:</b> <i>undef</i>')
self.cp_label.setStyleSheet(style_orange)
self.repeatSpinBox.setDisabled(True)
self.xray_box.setDisabled(True)
self.xray_box.setChecked(False)
self.xray_box.setVisible(False)
self.loose_box.setDisabled(True)
self.loose_box.setChecked(False)
self.loose_box.setVisible(False)
self.repaint()
try:
# check if printerURL has already been defined (user reconnecting)
if len(self.printerURL) > 0:
None
except Exception:
# printerURL initalization to defaults
self.printerURL = '/tmp/klippy_uds'
# Prompt user for machine connection address
text, ok = QInputDialog.getText(self, 'Klipper UDS','Local Unix domain socket for Klipper: ', QLineEdit.Normal, self.printerURL)
# Handle clicking OK/Connect
if ok and text != '' and len(text) > 5:
( _errCode, _errMsg, tempURL ) = self.cleanPrinterURL(text)
while _errCode != 0:
# Invalid URL detected, pop-up window to correct this
text, ok = QInputDialog.getText(self, 'Klipper UDS', _errMsg + '\nLocal Unix domain socket for Klipper: ', QLineEdit.Normal, text)
if ok:
( _errCode, _errMsg, tempURL ) = self.cleanPrinterURL(text)
else:
self.updateStatusbar('Connection request cancelled.')
self.resetConnectInterface()
return
# input has been parsed and is clean, proceed
self.printerURL = tempURL
# Handle clicking cancel
elif not ok:
self.updateStatusbar('Connection request cancelled.')
self.resetConnectInterface()
return
# Handle invalid input
        elif len(text) < 6:
            self.updateStatusbar('Invalid UDS: \"' + text + '\"')
self.resetConnectInterface()
return
# Update user with new state
self.statusBar.showMessage('Attempting to connect to: ' + self.printerURL )
# Attempt connecting to the Duet controller
try:
#self.printer = DWA.DuetWebAPI(self.printerURL)
self.printer = KA.KlipperAPI(self.printerURL)
if not self.printer.printerType():
# connection failed for some reason
self.updateStatusbar('Device at '+self.printerURL+' might not be a Klipper UDS.')
self.resetConnectInterface()
return
else:
# connection succeeded, update objects accordingly
self._connected_flag = True
self.tools = self.printer.getTools()
self.num_tools = len(self.tools)
self.video_thread.numTools = self.num_tools
# UPDATE OFFSET INFORMATION
self.offsets_box.setVisible(True)
self.offsets_table.setRowCount(self.num_tools)
for i in range(self.num_tools):
current_tool = self.printer.getG10ToolOffset(self.tools[i])
offset_x = "{:.3f}".format(current_tool['X'])
offset_y = "{:.3f}".format(current_tool['Y'])
x_tableitem = QTableWidgetItem(offset_x)
y_tableitem = QTableWidgetItem(offset_y)
x_tableitem.setBackground(QColor(255,255,255,255))
y_tableitem.setBackground(QColor(255,255,255,255))
self.offsets_table.setVerticalHeaderItem(i,QTableWidgetItem('T'+str(self.tools[i])))
self.offsets_table.setItem(i,0,x_tableitem)
self.offsets_table.setItem(i,1,y_tableitem)
# add tool buttons
toolButton = QPushButton('T'+str(self.tools[i]))
toolButton.setToolTip('Fetch T' +str(self.tools[i]) + ' to current machine position.')
self.toolButtons.append(toolButton)
except Exception as conn1:
self.updateStatusbar('Cannot connect to: ' + self.printerURL )
            print('Klipper connection exception: ', conn1)
self.resetConnectInterface()
return
# Get active tool
_active = self.printer.getCurrentTool()
# Display toolbox
for i,button in enumerate(self.toolButtons):
button.setCheckable(True)
if int(_active) == i:
button.setChecked(True)
else:
button.setChecked(False)
button.clicked.connect(self.callTool)
self.toolBoxLayout.addWidget(button)
self.toolBox.setVisible(True)
# Connection succeeded, update GUI first
        self.updateStatusbar('Connected to Klipper V'+str(self.printer.printerType()))
self.connection_button.setText('Online: ' + self.printerURL[self.printerURL.rfind('/')+1:])
self.statusBar.showMessage('Connected to printer at ' + self.printerURL, 5000)
self.connection_status.setText('Connected.')
self.image_label.setText('Set your Controlled Point to continue.')
# enable/disable buttons
self.connection_button.setDisabled(True)
self.calibration_button.setDisabled(True)
self.calibrate_button.setDisabled(True)
self.disconnection_button.setDisabled(False)
self.cp_button.setDisabled(False)
self.jogpanel_button.setDisabled(False)
self.analysisMenu.setDisabled(True)
# update connection status indicator to green
self.connection_status.setStyleSheet(style_green)
self.cp_label.setStyleSheet(style_red)
def callTool(self):
# handle scenario where machine is busy and user tries to select a tool.
if not self.printer.isIdle():
self.updateStatusbar('Machine is not idle, cannot select tool.')
return
# get current active tool
_active = self.printer.getCurrentTool()
# get requested tool number
sender = self.sender()
# update buttons to new status
for button in self.toolButtons:
if button.text() == sender.text():
button.setChecked(True)
else:
button.setChecked(False)
# self.toolButtons[int(self.sender().text()[1:])].setChecked(True)
# handle tool already active on printer
if int(_active) == int(sender.text()[1:]):
msg = QMessageBox()
status = msg.question( self, 'Unload ' + sender.text(), 'Unload ' + sender.text() + ' and return carriage to the current position?',QMessageBox.Yes | QMessageBox.No )
if status == QMessageBox.Yes:
for button in self.toolButtons:
if button.text() == sender.text():
button.setChecked(False)
self.changeTool(None)
# End video threads and restart default thread
self.video_thread.alignment = False
# Update GUI for unloading carriage
self.calibration_button.setDisabled(False)
self.calibrate_button.setDisabled(False)
self.cp_button.setDisabled(False)
self.updateMessagebar('Ready.')
self.updateStatusbar('Ready.')
else:
# User cancelled, do nothing
return
else:
# Requested tool is different from active tool
msg = QMessageBox()
status = msg.question( self, 'Confirm loading ' + sender.text(), 'Load ' + sender.text() + ' and move to current position?',QMessageBox.Yes | QMessageBox.No )
if status == QMessageBox.Yes:
self.changeTool(sender.text())
# START DETECTION THREAD HANDLING
# close camera settings dialog so it doesn't crash
try:
if self.camera_dialog.isVisible():
self.camera_dialog.reject()
                except Exception: pass
# update GUI
self.cp_button.setDisabled(False)
self.jogpanel_button.setDisabled(False)
self.calibration_button.setDisabled(False)
self.calibrate_button.setDisabled(False)
self.repeatSpinBox.setDisabled(True)
else:
for button in self.toolButtons:
if button.text() == sender.text():
button.setChecked(False)
# self.toolButtons[int(self.sender().text()[1:])].setChecked(False)
def changeTool(self, tool=None):
# return carriage to controlled point position
if len(self.cp_coords) > 0:
tempCoords = self.cp_coords
else:
tempCoords = self.printer.getCoords()
if tool is not None:
self.printer.gCode(tool)
else:
self.printer.gCode('TOOL_DROPOFF')
        self.printer.gCode('G1 X' + str(tempCoords['X']) + ' F24000')
        self.printer.gCode('G1 Y' + str(tempCoords['Y']) + ' F24000')
        self.printer.gCode('G1 Z' + str(tempCoords['Z']) + ' F24000')
def resetConnectInterface(self):
self.connection_button.setDisabled(False)
self.disconnection_button.setDisabled(True)
self.calibration_button.setDisabled(True)
self.calibrate_button.setDisabled(True)
self.cp_button.setDisabled(True)
self.jogpanel_button.setDisabled(True)
self.offsets_box.setVisible(False)
self.connection_status.setText('Disconnected')
self.connection_status.setStyleSheet(style_red)
self.cp_label.setText('<b>CP:</b> <i>undef</i>')
self.cp_label.setStyleSheet(style_red)
self.repeatSpinBox.setDisabled(True)
self.analysisMenu.setDisabled(True)
self.detect_box.setChecked(False)
self.detect_box.setDisabled(False)
self.xray_box.setDisabled(True)
self.xray_box.setChecked(False)
self.xray_box.setVisible(False)
self.loose_box.setDisabled(True)
self.loose_box.setChecked(False)
self.loose_box.setVisible(False)
self.video_thread.detection_on = False
self.video_thread.loose = False
self.video_thread.xray = False
self.video_thread.alignment = False
index = self.toolBoxLayout.count()-1
while index >= 0:
curWidget = self.toolBoxLayout.itemAt(index).widget()
curWidget.setParent(None)
index -= 1
self.toolBox.setVisible(False)
self.toolButtons = []
self.repaint()
def controlledPoint(self):
# handle scenario where machine is busy and user tries to select a tool.
if not self.printer.isIdle():
self.updateStatusbar('Machine is not idle, cannot select tool.')
return
# display crosshair on video feed at center of image
self.crosshair = True
self.calibration_button.setDisabled(True)
self.calibrate_button.setDisabled(True)
if len(self.cp_coords) > 0:
#self.printer.gCode('T-1')
self.printer.gCode('TOOL_DROPOFF')
#self.printer.gCode('G90 G1 X'+ str(self.cp_coords['X']) + ' Y' + str(self.cp_coords['Y']) + ' Z' + str(self.cp_coords['Z']) )
self.printer.gCodeBatch(['G90','G1 X'+ str(self.cp_coords['X']) + ' Y' + str(self.cp_coords['Y']) + ' Z' + str(self.cp_coords['Z'])])
dlg = CPDialog(parent=self)
if dlg.exec_():
self.cp_coords = self.printer.getCoords()
self.cp_string = '(' + str(self.cp_coords['X']) + ', ' + str(self.cp_coords['Y']) + ')'
self.readyToCalibrate()
else:
self.statusBar.showMessage('CP Setup cancelled.')
self.crosshair = False
def readyToCalibrate(self):
self.statusBar.showMessage('Controlled Point coordinates saved.',3000)
        self.image_label.setText('Controlled Point set. Click \"Auto Tool Alignment\" to calibrate..')
self.cp_button.setText('Reset CP ')
self.cp_label.setText('<b>CP:</b> ' + self.cp_string)
self.cp_label.setStyleSheet(style_green)
self.detect_box.setChecked(False)
self.detect_box.setDisabled(False)
self.detect_box.setVisible(True)
self.xray_box.setDisabled(True)
self.xray_box.setChecked(False)
self.xray_box.setVisible(False)
self.loose_box.setDisabled(True)
self.loose_box.setChecked(False)
self.loose_box.setVisible(False)
self.video_thread.detection_on = False
self.video_thread.loose = False
self.video_thread.xray = False
self.video_thread.alignment = False
self.calibration_button.setDisabled(False)
self.calibrate_button.setDisabled(False)
self.cp_button.setDisabled(False)
self.toolBox.setVisible(True)
self.repeatSpinBox.setDisabled(False)
if len(self.calibrationResults) > 1:
self.analysisMenu.setDisabled(False)
else:
self.analysisMenu.setDisabled(True)
def applyCalibration(self):
# update GUI
self.readyToCalibrate()
# close camera settings dialog so it doesn't crash
try:
if self.camera_dialog.isVisible():
self.camera_dialog.reject()
        except Exception: pass
# prompt for user to apply results
# msgBox = QMessageBox(parent=self)
# msgBox.setIcon(QMessageBox.Information)
# msgBox.setText('Do you want to save the new offsets to your machine?')
# msgBox.setWindowTitle('Calibration Results')
# yes_button = msgBox.addButton('Apply offsets and save (M500)',QMessageBox.ApplyRole)
# yes_button.setStyleSheet(style_green)
# cancel_button = msgBox.addButton('Apply offsets',QMessageBox.NoRole)
# Update debug string
# self.debugString += '\nCalibration results:\n'
# for result in self.calibrationResults:
# calibrationCode = 'G10 P' + str(result['tool']) + ' X' + str(result['X']) + ' Y' + str(result['Y'])
# self.debugString += calibrationCode + '\n'
# Prompt user
# returnValue = msgBox.exec()
# if msgBox.clickedButton() == yes_button:
# for result in self.calibrationResults:
# calibrationCode = 'G10 P' + str(result['tool']) + ' X' + str(result['X']) + ' Y' + str(result['Y'])
# self.printer.gCode(calibrationCode)
# self.printer.gCode('M500 P10') # because of Rene.
# self.statusBar.showMessage('Offsets applied and stored using M500.')
# print('Offsets applied and stored using M500.')
# else:
# self.statusBar.showMessage('Temporary offsets applied. You must manually save these offsets.')
# Clean up threads and detection
self.video_thread.alignment = False
        self.video_thread.detection_on = False
self.video_thread.display_crosshair = False
# run stats
self.analyzeResults()
def analyzeResults(self, graph=False, export=False):
if len(self.calibrationResults) < 1:
self.updateStatusbar('No calibration data found.')
return
if graph or export:
# get data as 3 dimensional array [tool][axis][datapoints] normalized around mean of each axis
(numTools, totalRuns, toolData) = self.parseData(self.calibrationResults)
else:
# display stats to terminal
self.stats()
if graph:
matplotlib.use('Qt5Agg',force=True)
# set up color and colormap arrays
colorMap = ["Greens","Oranges","Blues", "Reds"] #["Blues", "Reds","Greens","Oranges"]
colors = ['blue','red','green','orange']
# initiate graph data - 1 tool per column
# Row 0: scatter plot with standard deviation box
# Row 1: histogram of X axis data
# Row 2: histogram of Y axis data
# Set backend (if needed)
#plt.switch_backend('Qt4Agg')
fig, axes = plt.subplots(ncols=3,nrows=numTools,constrained_layout=False)
for i, data in enumerate(toolData):
# create a color array the length of the number of tools in the data
color = np.arange(len(data[0]))
# Axis formatting
# Major ticks
axes[i][0].xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
axes[i][0].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
# Minor ticks
axes[i][0].xaxis.set_minor_formatter(FormatStrFormatter('%.3f'))
axes[i][0].yaxis.set_minor_formatter(FormatStrFormatter('%.3f'))
# Draw 0,0 lines
axes[i][0].axhline()
axes[i][0].axvline()
# x&y std deviation box
x_sigma = np.around(np.std(data[0]),3)
y_sigma = np.around(np.std(data[1]),3)
axes[i][0].add_patch(patches.Rectangle((-1*x_sigma,-1*y_sigma), 2*x_sigma, 2*y_sigma, color="green",fill=False, linestyle='dotted'))
axes[i][0].add_patch(patches.Rectangle((-2*x_sigma,-2*y_sigma), 4*x_sigma, 4*y_sigma, color="red",fill=False, linestyle='-.'))
# scatter plot for tool data
axes[i][0].scatter(data[0], data[1], c=color, cmap=colorMap[i])
axes[i][0].autoscale = True
# Histogram data setup
# Calculate number of bins per axis
x_intervals = int(np.around(math.sqrt(len(data[0])),0)+1)
y_intervals = int(np.around(math.sqrt(len(data[1])),0)+1)
# plot histograms
x_kwargs = dict(alpha=0.5, bins=x_intervals,rwidth=.92, density=True)
n, bins, hist_patches = axes[i][1].hist([data[0],data[1]],**x_kwargs, color=[colors[0],colors[1]], label=['X','Y'])
axes[i][2].hist2d(data[0], data[1], bins=x_intervals, cmap='Blues')
axes[i][1].legend()
# add a 'best fit' line
# calculate mean and std deviation per axis
x_mean = np.mean(data[0])
y_mean = np.mean(data[1])
x_sigma = np.around(np.std(data[0]),3)
y_sigma = np.around(np.std(data[1]),3)
# calculate function lines for best fit
x_best = ((1 / (np.sqrt(2 * np.pi) * x_sigma)) *
np.exp(-0.5 * (1 / x_sigma * (bins - x_mean))**2))
y_best = ((1 / (np.sqrt(2 * np.pi) * y_sigma)) *
np.exp(-0.5 * (1 / y_sigma * (bins - y_mean))**2))
# add best fit line to plots
axes[i][1].plot(bins, x_best, '-.',color=colors[0])
axes[i][1].plot(bins, y_best, '--',color=colors[1])
x_count = int(sum( p == True for p in ((data[0] >= (x_mean - x_sigma)) & (data[0] <= (x_mean + x_sigma))) )/len(data[0])*100)
y_count = int(sum( p == True for p in ((data[1] >= (y_mean - y_sigma)) & (data[1] <= (y_mean + y_sigma))) )/len(data[1])*100)
# annotate std dev values
annotation_text = "Xσ: " + str(x_sigma) + " ("+str(x_count) + "%)"
if x_count < 68:
x_count = int(sum( p == True for p in ((data[0] >= (x_mean - 2*x_sigma)) & (data[0] <= (x_mean + 2*x_sigma))) )/len(data[0])*100)
annotation_text += " --> 2σ: " + str(x_count) + "%"
if x_count < 95 and x_sigma*2 > 0.1:
annotation_text += " -- check axis!"
else: annotation_text += " -- OK"
annotation_text += "\nYσ: " + str(y_sigma) + " ("+str(y_count) + "%)"
if y_count < 68:
y_count = int(sum( p == True for p in ((data[1] >= (y_mean - 2*y_sigma)) & (data[1] <= (y_mean + 2*y_sigma))) )/len(data[1])*100)
annotation_text += " --> 2σ: " + str(y_count) + "%"
if y_count < 95 and y_sigma*2 > 0.1:
annotation_text += " -- check axis!"
else: annotation_text += " -- OK"
axes[i][0].annotate(annotation_text, (10,10),xycoords='axes pixels')
axes[i][0].annotate('σ',(1.1*x_sigma,-1.1*y_sigma),xycoords='data',color='green')
axes[i][0].annotate('2σ',(1.1*2*x_sigma,-1.1*2*y_sigma),xycoords='data',color='red')
# # place title for graph
axes[i][0].set_ylabel("Tool " + str(i) + "\nY")
axes[i][0].set_xlabel("X")
axes[i][2].set_ylabel("Y")
axes[i][2].set_xlabel("X")
if i == 0:
axes[i][0].set_title('Scatter Plot')
axes[i][1].set_title('Histogram')
axes[i][2].set_title('2D Histogram')
plt.tight_layout()
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.ion()
plt.show()
if export:
# export JSON data to file
try:
with open('output.json','w') as outputfile:
json.dump(self.calibrationResults, outputfile)
except Exception as e1:
print('Error exporting data:')
print(e1)
self.updateStatusbar('Error exporting data, please check terminal for details.')
def stats(self):
###################################################################################
# Report on repeated executions
###################################################################################
print('')
print('Repeatability statistics for '+str(self.cycles)+' repeats:')
print('+-------------------------------------------------------------------------------------------------------+')
print('| | X | Y |')
print('| T | Avg | Max | Min | StdDev | Range | Avg | Max | Min | StdDev | Range |')
for index in range(self.num_tools):
# create array of results for current tool
_rawCalibrationData = [line for line in self.calibrationResults if line['tool'] == str(index)]
# If we have not run this particular tool
if len(_rawCalibrationData) < 1:
continue
x_array = [float(line['X']) for line in _rawCalibrationData]
y_array = [float(line['Y']) for line in _rawCalibrationData]
mpp_value = np.average([float(line['mpp']) for line in _rawCalibrationData])
cycles = np.max(
[float(line['cycle']) for line in _rawCalibrationData]
)
x_avg = np.average(x_array)
y_avg = np.average(y_array)
x_min = np.min(x_array)
y_min = np.min(y_array)
x_max = np.max(x_array)
y_max = np.max(y_array)
x_std = np.std(x_array)
y_std = np.std(y_array)
x_ran = x_max - x_min
y_ran = y_max - y_min
print('| {0:1.0f} '.format(index)
+ '| {0:7.3f} '.format(x_avg)
+ '| {0:7.3f} '.format(x_max)
+ '| {0:7.3f} '.format(x_min)
+ '| {0:7.3f} '.format(x_std)
+ '| {0:7.3f} '.format(x_ran)
+ '| {0:7.3f} '.format(y_avg)
+ '| {0:7.3f} '.format(y_max)
+ '| {0:7.3f} '.format(y_min)
+ '| {0:7.3f} '.format(y_std)
+ '| {0:7.3f} '.format(y_ran)
+ '|'
)
print('+-------------------------------------------------------------------------------------------------------+')
print('Note: Repeatability cannot be better than one pixel (MPP=' + str(mpp_value) + ').')
def parseData( self, rawData ):
# create empty output array
toolDataResult = []
# get number of tools
_numTools = np.max([ int(line['tool']) for line in rawData ]) + 1
_cycles = np.max([ int(line['cycle']) for line in rawData ])
for i in range(_numTools):
x = [float(line['X']) for line in rawData if int(line['tool']) == i]
y = [float(line['Y']) for line in rawData if int(line['tool']) == i]
# variable to hold return data coordinates per tool formatted as a 2D array [x_value, y_value]
tempPairs = []
# calculate stats
# mean values
x_mean = np.around(np.mean(x),3)
            y_mean = np.around(np.mean(y),3)
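            # Hedged completion (not part of the original source): normalize each
            # axis around its mean as the docstring in analyzeResults() describes,
            # pair the axes back up per tool, and return the (numTools, totalRuns,
            # toolData) triple that analyzeResults() unpacks. Names are assumptions.
            x_norm = np.around(np.array(x) - x_mean, 3)
            y_norm = np.around(np.array(y) - y_mean, 3)
            tempPairs.append(x_norm)
            tempPairs.append(y_norm)
            toolDataResult.append(tempPairs)
        return (_numTools, _cycles, toolDataResult)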
#! /usr/bin/env python
"""
Tutorial to demonstrate running parameter estimation on a reduced parameter
space for an injected signal.
This example estimates the masses using a uniform prior in both component masses
and distance using a uniform in comoving volume prior on luminosity distance
between luminosity distances of 100Mpc and 5Gpc, the cosmology is Planck15.
"""
from __future__ import division, print_function
import numpy as np
import bilby
from sys import exit
import os
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy import integrate, interpolate
import scipy
import lalsimulation
import lal
import time
import h5py
from scipy.ndimage.interpolation import shift
#from pylal import antenna, cosmography
import argparse
# fixed parameter values
condor_fixed_vals = {'mass_1':50.0,
'mass_2':50.0,
'mc':None,
'geocent_time':0.0,
'phase':0.0,
'ra':1.375,
'dec':-1.2108,
'psi':0.0,
'theta_jn':0.0,
'luminosity_distance':2000.0,
'a_1':0.0,
'a_2':0.0,
'tilt_1':0.0,
'tilt_2':0.0,
'phi_12':0.0,
'phi_jl':0.0,
'det':['H1','L1','V1']}
# prior bounds
condor_bounds = {'mass_1_min':35.0, 'mass_1_max':80.0,
'mass_2_min':35.0, 'mass_2_max':80.0,
'M_min':70.0, 'M_max':160.0,
'geocent_time_min':0.15,'geocent_time_max':0.35,
'phase_min':0.0, 'phase_max':2.0*np.pi,
'ra_min':0.0, 'ra_max':2.0*np.pi,
'dec_min':-0.5*np.pi, 'dec_max':0.5*np.pi,
'psi_min':0.0, 'psi_max':2.0*np.pi,
'theta_jn_min':0.0, 'theta_jn_max':np.pi,
'a_1_min':0.0, 'a_1_max':0.0,
'a_2_min':0.0, 'a_2_max':0.0,
'tilt_1_min':0.0, 'tilt_1_max':0.0,
'tilt_2_min':0.0, 'tilt_2_max':0.0,
'phi_12_min':0.0, 'phi_12_max':0.0,
'phi_jl_min':0.0, 'phi_jl_max':0.0,
'luminosity_distance_min':1000.0, 'luminosity_distance_max':3000.0}
def parser():
"""
Parses command line arguments
:return: arguments
"""
#TODO: complete help sections
parser = argparse.ArgumentParser(prog='bilby_pe.py', description='script for generating bilby samples/posterior')
# arguments for data
parser.add_argument('-samplingfrequency', type=float, help='sampling frequency of signal')
parser.add_argument('-samplers', nargs='+', type=str, help='list of samplers to use to generate')
parser.add_argument('-duration', type=float, help='duration of signal in seconds')
parser.add_argument('-Ngen', type=int, help='number of samples to generate')
parser.add_argument('-refgeocenttime', type=float, help='reference geocenter time')
parser.add_argument('-bounds', type=str, help='dictionary of the bounds')
parser.add_argument('-fixedvals', type=str, help='dictionary of the fixed values')
parser.add_argument('-randpars', nargs='+', type=str, help='list of pars to randomize')
parser.add_argument('-infpars', nargs='+', type=str, help='list of pars to infer')
parser.add_argument('-label', type=str, help='label of run')
parser.add_argument('-outdir', type=str, help='output directory')
parser.add_argument('-training', type=str, help='boolean for train/test config')
parser.add_argument('-seed', type=int, help='random seed')
parser.add_argument('-dope', type=str, help='boolean for whether or not to do PE')
return parser.parse_args()
def tukey(M,alpha=0.5):
""" Tukey window code copied from scipy.
Parameters
----------
M:
Number of points in the output window.
alpha:
The fraction of the window inside the cosine tapered region.
Returns
-------
w:
The window
"""
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
return np.array(w[:M])
def make_bbh(hp,hc,fs,ra,dec,psi,det,ifos,event_time):
""" Turns hplus and hcross into a detector output
applies antenna response and
and applies correct time delays to each detector
Parameters
----------
hp:
h-plus version of GW waveform
hc:
h-cross version of GW waveform
fs:
sampling frequency
ra:
right ascension
dec:
declination
psi:
polarization angle
det:
detector
Returns
-------
ht:
combined h-plus and h-cross version of waveform
hp:
h-plus version of GW waveform
hc:
h-cross version of GW waveform
"""
# compute antenna response and apply
#Fp=ifos.antenna_response(ra,dec,float(event_time),psi,'plus')
#Fc=ifos.antenna_response(ra,dec,float(event_time),psi,'cross')
#Fp,Fc,_,_ = antenna.response(float(event_time), ra, dec, 0, psi, 'radians', det )
ht = hp + hc # overwrite the timeseries vector to reuse it
return ht, hp, hc
def gen_template(duration,
sampling_frequency,
pars,
ref_geocent_time
):
"""
Generates a whitened waveform
"""
if sampling_frequency>4096:
print('EXITING: bilby doesn\'t seem to generate noise above 2048Hz so lower the sampling frequency')
exit(0)
# compute the number of time domain samples
Nt = int(sampling_frequency*duration)
# define the start time of the timeseries
start_time = ref_geocent_time-duration/2.0
# fix parameters here
injection_parameters = dict(
mass_1=pars['mass_1'],mass_2=pars['mass_2'], a_1=pars['a_1'], a_2=pars['a_2'], tilt_1=pars['tilt_1'], tilt_2=pars['tilt_2'],
phi_12=pars['phi_12'], phi_jl=pars['phi_jl'], luminosity_distance=pars['luminosity_distance'], theta_jn=pars['theta_jn'], psi=pars['psi'],
phase=pars['phase'], geocent_time=pars['geocent_time'], ra=pars['ra'], dec=pars['dec'])
# Fixed arguments passed into the source model
waveform_arguments = dict(waveform_approximant='IMRPhenomPv2',
reference_frequency=20., minimum_frequency=20.)
# Create the waveform_generator using a LAL BinaryBlackHole source function
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration, sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters,
waveform_arguments=waveform_arguments,
start_time=start_time)
# create waveform
wfg = waveform_generator
# extract waveform from bilby
wfg.parameters = injection_parameters
freq_signal = wfg.frequency_domain_strain()
time_signal = wfg.time_domain_strain()
# Set up interferometers. These default to their design
# sensitivity
ifos = bilby.gw.detector.InterferometerList(pars['det'])
# set noise to be colored Gaussian noise
ifos.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency, duration=duration,
start_time=start_time)
# inject signal
ifos.inject_signal(waveform_generator=waveform_generator,
parameters=injection_parameters)
whitened_signal_td_all = []
whitened_h_td_all = []
# iterate over ifos
for i in range(len(pars['det'])):
# get frequency domain noise-free signal at detector
signal_fd = ifos[i].get_detector_response(freq_signal, injection_parameters)
# whiten frequency domain noise-free signal (and reshape/flatten)
whitened_signal_fd = signal_fd/ifos[i].amplitude_spectral_density_array
#whitened_signal_fd = whitened_signal_fd.reshape(whitened_signal_fd.shape[0])
# get frequency domain signal + noise at detector
h_fd = ifos[i].strain_data.frequency_domain_strain
# inverse FFT noise-free signal back to time domain and normalise
whitened_signal_td = np.sqrt(2.0*Nt)*np.fft.irfft(whitened_signal_fd)
# whiten noisy frequency domain signal
whitened_h_fd = h_fd/ifos[i].amplitude_spectral_density_array
# inverse FFT noisy signal back to time domain and normalise
whitened_h_td = np.sqrt(2.0*Nt)*np.fft.irfft(whitened_h_fd)
whitened_h_td_all.append([whitened_h_td])
whitened_signal_td_all.append([whitened_signal_td])
    return np.squeeze(np.array(whitened_signal_td_all),axis=1),np.squeeze(np.array(whitened_h_td_all),axis=1)
"""
Various trail maps and adventures:
author: <NAME>
"""
# <codecell>
from typing import List, Tuple
import numpy as np
import matplotlib.pyplot as plt
class TrailMap:
def __init__(self, start=None, end=None):
        # use "is not None" so ndarray arguments are not compared elementwise
        self.start = start if start is not None else np.array([0, 0])
        self.end = end if end is not None else np.array([0, 0])
self.tol = 3
def sample(self, x, y):
""" Returns odor on scale from 0 to 1 """
raise NotImplementedError('sample not implemented!')
def plot(self):
raise NotImplementedError('plot not implemented!')
def reset(self):
raise NotImplementedError('reset not implemented!')
def is_done(self, x, y):
is_done = np.all(np.isclose(self.end, (x, y), atol=self.tol))
return bool(is_done)
def is_at_checkpoint(self, x, y):
return False
class StraightTrail(TrailMap):
def __init__(self, end=None, narrow_factor=1):
super().__init__()
        self.end = end if end is not None else np.array([10, 15])
self.narrow_factor = narrow_factor
def sample(self, x, y):
eps = 1e-8
total_dist = np.sqrt((x - self.end[0]) ** 2 + (y - self.end[1]) ** 2)
perp_dist = np.abs((self.end[0] - self.start[0]) * (self.start[1] - y) - (self.start[0] - x) *
(self.end[1] - self.start[1])) / np.sqrt((self.start[0] - self.end[0]) ** 2 + (self.start[1] - self.end[1])**2 + eps)
max_odor = np.sqrt(np.sum((self.end - self.start) ** 2)) + 1
odor = max_odor - total_dist
odor *= 1 / (perp_dist + 1) ** self.narrow_factor
# odor = 1 / (perp_dist + 1) ** self.narrow_factor
# max_dist = np.sqrt(np.sum((self.end - self.start) ** 2))
# if np.isscalar(total_dist):
# if total_dist > max_dist:
# odor *= 1 / (total_dist - max_dist + 1) ** self.narrow_factor
# else:
# adjust = 1 / (np.clip(total_dist - max_dist, 0, np.inf) + 1) ** self.narrow_factor
# odor *= adjust
odor = np.clip(odor, 0, np.inf)
return odor / max_odor
return odor
def plot(self, ax=None):
x = np.linspace(-20, 20, 100)
y = np.linspace(-20, 20, 100)
        xx, yy = np.meshgrid(x, y)
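        # Hedged completion (assumption): evaluate the odor field on the grid and
        # draw it, falling back to the current axes when none is supplied.
        odors = self.sample(xx, yy)
        if ax is None:
            ax = plt.gca()
        ax.contourf(xx, yy, odors)
        ax.plot(self.end[0], self.end[1], 'r*', markersize=10)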
"""
"""
import numpy as np
from astropy.tests.helper import pytest
from astropy.utils.misc import NumpyRNGContext
from ..model_helpers import custom_spline, create_composite_dtype
from ..model_helpers import bounds_enforcing_decorator_factory, enforce_periodicity_of_box
from ..model_helpers import call_func_table, bind_default_kwarg_mixin_safe
from ...custom_exceptions import HalotoolsError
__all__ = ('test_enforce_periodicity_of_box', 'test_custom_spline1',
'test_check_multiple_box_lengths', 'test_velocity_flip')
fixed_seed = 43
def _func_maker(i):
def f(x):
return x + i
return f
def test_custom_spline1():
table_abscissa = (0, 1)
table_ordinates = (0, 1, 2)
with pytest.raises(HalotoolsError) as err:
__ = custom_spline(table_abscissa, table_ordinates)
substr = "table_abscissa and table_ordinates must have the same length"
assert substr in err.value.args[0]
def test_custom_spline2():
table_abscissa = (0, 1, 2)
table_ordinates = (0, 1, 2)
with pytest.raises(HalotoolsError) as err:
__ = custom_spline(table_abscissa, table_ordinates, k=-2)
substr = "Spline degree must be non-negative"
assert substr in err.value.args[0]
def test_custom_spline3():
table_abscissa = (0, 1, 2)
table_ordinates = (0, 1, 2)
with pytest.raises(HalotoolsError) as err:
__ = custom_spline(table_abscissa, table_ordinates, k=0)
substr = "In spline_degree=0 edge case,"
assert substr in err.value.args[0]
def test_create_composite_dtype():
dt1 = np.dtype([('x', 'f4')])
dt2 = np.dtype([('x', 'i4')])
with pytest.raises(HalotoolsError) as err:
result = create_composite_dtype([dt1, dt2])
substr = "Inconsistent dtypes for name"
assert substr in err.value.args[0]
def test_bind_default_kwarg_mixin_safe():
class DummyClass(object):
def __init__(self, d):
self.abc = 4
constructor_kwargs = {'abc': 10}
obj = DummyClass(constructor_kwargs)
keyword_argument = 'abc'
default_value = 0
with pytest.raises(HalotoolsError) as err:
__ = bind_default_kwarg_mixin_safe(
obj, keyword_argument, constructor_kwargs, default_value)
substr = "Do not pass the ``abc`` keyword argument "
assert substr in err.value.args[0]
def test_bounds_enforcing_decorator_factory():
"""
"""
def f(x):
return x
decorator = bounds_enforcing_decorator_factory(0, 1, warning=True)
decorated_f = decorator(f)
result = decorated_f(-1)
assert result == 0
def test_enforce_periodicity_of_box():
""" Verify that enforce_periodicity_of_box results in all points located
inside [0, Lbox]
"""
box_length = 250
Npts = int(1e5)
with NumpyRNGContext(fixed_seed):
coords = np.random.uniform(0, box_length, Npts*3).reshape(Npts, 3)
perturbation_size = box_length/10.
with NumpyRNGContext(fixed_seed):
coord_perturbations = np.random.uniform(
-perturbation_size, perturbation_size, Npts*3).reshape(Npts, 3)
coords += coord_perturbations
newcoords = enforce_periodicity_of_box(coords, box_length)
assert np.all(newcoords >= 0)
assert np.all(newcoords <= box_length)
def test_check_multiple_box_lengths():
""" Verify that enforce_periodicity_of_box function notices when the
some points lie many box lengths beyond +/- Lbox
"""
box_length = 250
Npts = int(1e4)
x = np.linspace(-2*box_length, box_length, Npts)
with pytest.raises(HalotoolsError) as err:
newcoords = enforce_periodicity_of_box(x, box_length,
check_multiple_box_lengths=True)
substr = "There is at least one input point with a coordinate less than -Lbox"
assert substr in err.value.args[0]
x = np.linspace(-box_length, 2.1*box_length, Npts)
with pytest.raises(HalotoolsError) as err:
newcoords = enforce_periodicity_of_box(x, box_length,
check_multiple_box_lengths=True)
substr = "There is at least one input point with a coordinate greater than 2*Lbox"
assert substr in err.value.args[0]
x = np.linspace(-box_length, 2*box_length, Npts)
newcoords = enforce_periodicity_of_box(x, box_length,
check_multiple_box_lengths=True)
def test_velocity_flip():
""" Verify that enforce_periodicity_of_box function flips the sign of
the velocity for points where PBCs needed to be enforced
"""
box_length = 250
Npts = int(1e4)
x = np.linspace(-0.5*box_length, 1.5*box_length, Npts)
vx = np.ones(Npts)
newcoords, newvel = enforce_periodicity_of_box(
x, box_length, velocity=vx)
inbox = ((x >= 0) & (x <= box_length))
assert np.all(newvel[inbox] == 1.0)
assert np.all(newvel[~inbox] == -1.0)
def test_call_func_table1():
num_conc_bins = 5
f_table = list(_func_maker(i) for i in range(num_conc_bins))
num_abscissa = 7
cum_prob = np.array(list(0.1*i for i in range(num_abscissa)))
func_idx = np.zeros(num_abscissa)
correct_result = cum_prob
result = call_func_table(f_table, cum_prob, func_idx)
assert np.all(result == correct_result)
def test_call_func_table2():
num_conc_bins = 5
f_table = list(_func_maker(i) for i in range(num_conc_bins))
num_abscissa = 7
cum_prob = np.array(list(0.1*i for i in range(num_abscissa)))
func_idx = np.zeros(num_abscissa) + 3
func_idx[2:] = 0
    correct_result = np.zeros(num_abscissa)
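    # Hedged completion (assumption): the first two abscissa values use the
    # func_idx=3 table entry (x + 3), the remainder use entry 0 (x + 0).
    correct_result[0:2] = cum_prob[0:2] + 3
    correct_result[2:] = cum_prob[2:]
    result = call_func_table(f_table, cum_prob, func_idx)
    assert np.all(result == correct_result)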
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
from io import BytesIO
from queue import Queue
from threading import Thread
import h5py
import numpy as np
import requests
import time
from PIL import Image
from cytomine.models import UploadedFile
DEBUG = False
def get_image_dimension(image):
if image.channels > 1:
return 'channel'
elif image.depth > 1:
return 'zStack'
elif image.duration > 1:
return 'time'
else:
return None
def create_hdf5(uploaded_file, image, slices, cf, n_workers=0, tile_size=512, n_written_tiles_to_update=50):
image_name = image.originalFilename
dimension = get_image_dimension(image)
if not dimension:
log("{} | ERROR: Cannot make profile for 2D image".format(image_name))
uploaded_file.status = uploaded_file.ERROR_CONVERSION
retry_update(uploaded_file)
return
path = os.path.dirname(uploaded_file.path)
os.makedirs(path, exist_ok=True)
hdf5 = h5py.File(uploaded_file.path, 'w')
hdf5.create_dataset("width", data=image.width, shape=())
hdf5.create_dataset("height", data=image.height, shape=())
hdf5.create_dataset("nSlices", data=len(slices), shape=())
bpc = image.bitPerSample if image.bitPerSample else 8
hdf5.create_dataset("bpc", data=bpc, shape=())
uploaded_file.status = UploadedFile.CONVERTING
uploaded_file = retry_update(uploaded_file)
cf = retry_update(cf)
dtype = np.uint16 if bpc > 8 else np.uint8
dataset = hdf5.create_dataset("data", shape=(image.height, image.width, len(slices)), dtype=dtype)
    x_tiles = int(np.ceil(image.width / tile_size))
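    # Hedged completion (assumption): tile the other axis the same way so a
    # writer loop can iterate over a regular (x, y) tile grid.
    y_tiles = int(np.ceil(image.height / tile_size))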
"""Tests for the normal distribution."""
import unittest
import itertools
from tests.testing import NumpyAssertions
import numpy as np
import scipy.sparse
from probnum import prob
from probnum.linalg import linops
class NormalTestCase(unittest.TestCase, NumpyAssertions):
"""General test case for the normal distribution."""
def setUp(self):
"""Resources for tests."""
# Seed
np.random.seed(seed=42)
# Parameters
m = 7
n = 3
self.constants = [-1, -2.4, 0, 200, np.pi]
sparsemat = scipy.sparse.rand(m=m, n=n, density=0.1, random_state=1)
self.normal_params = [
(-1, 3),
(np.random.uniform(size=10), np.eye(10)),
(np.array([1, -5]), linops.MatrixMult(A=np.array([[2, 1], [1, -0.1]]))),
(linops.MatrixMult(A=np.array([[0, -5]])), linops.Identity(shape=(2, 2))),
(
np.array([[1, 2], [-3, -0.4], [4, 1]]),
linops.Kronecker(A=np.eye(3), B=5 * np.eye(2)),
),
(
linops.MatrixMult(A=sparsemat.todense()),
linops.Kronecker(0.1 * linops.Identity(m), linops.Identity(n)),
),
(
linops.MatrixMult(A=np.random.uniform(size=(2, 2))),
linops.SymmetricKronecker(
A=np.array([[1, 2], [2, 1]]), B=np.array([[5, -1], [-1, 10]])
),
),
(
linops.Identity(shape=25),
linops.SymmetricKronecker(A=linops.Identity(25)),
),
]
def test_correct_instantiation(self):
"""Test whether different variants of the normal distribution are instances of Normal."""
for mean, cov in self.normal_params:
with self.subTest():
dist = prob.Normal(mean=mean, cov=cov)
self.assertIsInstance(dist, prob.Normal)
def test_scalarmult(self):
"""Multiply a rv with a normal distribution with a scalar."""
for (mean, cov), const in list(
itertools.product(self.normal_params, self.constants)
):
with self.subTest():
normrv = const * prob.RandomVariable(
distribution=prob.Normal(mean=mean, cov=cov)
)
self.assertIsInstance(normrv, prob.RandomVariable)
if const != 0:
self.assertIsInstance(normrv.distribution, prob.Normal)
else:
self.assertIsInstance(normrv.distribution, prob.Dirac)
def test_addition_normal(self):
"""Add two random variables with a normal distribution"""
for (mean0, cov0), (mean1, cov1) in list(
itertools.product(self.normal_params, self.normal_params)
):
with self.subTest():
normrv0 = prob.RandomVariable(
distribution=prob.Normal(mean=mean0, cov=cov0)
)
normrv1 = prob.RandomVariable(
distribution=prob.Normal(mean=mean1, cov=cov1)
)
if normrv0.shape == normrv1.shape:
self.assertIsInstance((normrv0 + normrv1).distribution, prob.Normal)
else:
with self.assertRaises(TypeError):
normrv_added = normrv0 + normrv1
def test_rv_linop_kroneckercov(self):
"""Create a rv with a normal distribution with linear operator mean and Kronecker product kernels."""
def mv(v):
return np.array([2 * v[0], 3 * v[1]])
A = linops.LinearOperator(shape=(2, 2), matvec=mv)
V = linops.Kronecker(A, A)
prob.RandomVariable(distribution=prob.Normal(mean=A, cov=V))
def test_normal_dimension_mismatch(self):
"""Instantiating a normal distribution with mismatched mean and kernels should result in a ValueError."""
for mean, cov in [
(0, [1, 2]),
            (np.array([1, 2]),
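             # Hedged completion (assumption): a deliberately mismatched 3x3
             # covariance paired with the length-2 mean above.
             np.eye(3)),
        ]:
            with self.subTest():
                with self.assertRaises(ValueError):
                    prob.RandomVariable(distribution=prob.Normal(mean=mean, cov=cov))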
"""Interfaces to modified Helmholtz operators."""
from bempp.api.operators.boundary import common as _common
import numpy as _np
def single_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the Helmholtz single-layer boundary operator."""
if _np.imag(omega) != 0:
raise ValueError("'omega' must be real.")
return _common.create_operator(
"modified_helmholtz_single_layer_boundary",
domain,
range_,
dual_to_range,
parameters,
assembler,
[omega],
"modified_helmholtz_single_layer",
"default_scalar",
device_interface,
precision,
False,
)
def double_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the mod. Helmholtz double-layer boundary operator."""
    if _np.imag(omega) != 0:
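        raise ValueError("'omega' must be real.")
    # Hedged completion: the body is assumed to mirror single_layer above, with
    # the double-layer kernel identifier strings below being assumptions.
    return _common.create_operator(
        "modified_helmholtz_double_layer_boundary",
        domain,
        range_,
        dual_to_range,
        parameters,
        assembler,
        [omega],
        "modified_helmholtz_double_layer",
        "default_scalar",
        device_interface,
        precision,
        False,
    )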
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
import numpy as np
__all__ = [
'multi_view_learner',
]
class multi_view_model(object):
def __init__(self, models):
self.models = models
def apply(self, features):
if len(features) != len(self.models):
raise ValueError('milk.supervised.two_view: Nr of features does not match training data (got %s, expected %s)' % (len(features) ,len(self.models)))
Ps = np.array([model.apply(f) for model,f in zip(self.models, features)])
        if np.any(Ps <= 0.):
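            # Hedged completion (assumption, not the original milk code): clip
            # the per-view outputs so the log-odds combination below stays finite.
            Ps = np.clip(Ps, 1e-8, 1. - 1e-8)
        # Combine the views by summing log-odds (assumed combination rule).
        return np.sum(np.log(Ps / (1. - Ps))) > 0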
#xyz Dec 2017
from __future__ import print_function
import pdb, traceback
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
from block_data_prep_util import Raw_H5f, Sort_RawH5f,Sorted_H5f,Normed_H5f,show_h5f_summary_info,MergeNormed_H5f
import numpy as np
import h5py
import glob
import time
import multiprocessing as mp
import itertools
import zipfile,gzip
from plyfile import PlyData, PlyElement
TMPDEBUG=False
ROOT_DIR = os.path.dirname(BASE_DIR)
DATA_DIR = os.path.join(ROOT_DIR,'data')
DATA_SOURCE= 'scannet_data'
SCANNET_DATA_DIR = os.path.join(DATA_DIR,DATA_SOURCE)
def get_stride_step_name(block_stride,block_step):
assert block_step[0] == block_step[1]
assert block_stride[0] == block_stride[1]
assert (block_step[0] == block_step[2] and block_stride[0] == block_stride[2]) or (block_step[2]==-1 and block_stride[2]==-1)
def get_str(v):
assert (v*10) % 1 == 0
if v%1!=0: return '%dd%d'%(int(v),(v%1)*10)
else: return str(int(v))
if block_stride[2] == -1:
return 'stride-%s-step-%s'%(get_str(block_stride[0]),get_str(block_step[0]))
else:
return 'stride_%s_step_%s'%(get_str(block_stride[0]),get_str(block_step[0]))
def zip_extract(groupe_name,house_name,file_name,file_format,zipf,house_dir_extracted):
'''
extract file if not already
'''
zipfile_name = '%s/%s/%s.%s'%(house_name,groupe_name,file_name,file_format)
file_path = house_dir_extracted + '/' + zipfile_name
if not os.path.exists(file_path):
print('extracting %s...'%(file_name))
file_path_extracted = zipf.extract(zipfile_name,house_dir_extracted)
print('file extracting finished: %s'%(file_path_extracted) )
assert file_path == file_path_extracted
else:
print('file file already extracted: %s'%(file_path))
return file_path
def parse_ply_file(ply_fo,IsDelVexMultiSem):
'''
element vertex 1522546
property float x
property float y
property float z
property float nx
property float ny
property float nz
property float tx
property float ty
property uchar red
property uchar green
property uchar blue
element face 3016249
property list uchar int vertex_indices
property int material_id
property int segment_id
property int category_id
'''
plydata = PlyData.read(ply_fo)
num_ele = len(plydata.elements)
num_vertex = plydata['vertex'].count
num_face = plydata['face'].count
data_vertex = plydata['vertex'].data
data_face = plydata['face'].data
## face
face_vertex_indices = data_face['vertex_indices']
face_vertex_indices = np.concatenate(face_vertex_indices,axis=0)
face_vertex_indices = np.reshape(face_vertex_indices,[-1,3])
face_eles = ['vertex_indices','material_id','segment_id','category_id']
datas_face = {}
for e in face_eles:
datas_face[e] = np.expand_dims(data_face[e],axis=-1)
face_semantic = np.concatenate([datas_face['category_id'],datas_face['segment_id'],datas_face['material_id']],axis=1)
## vertex
vertex_eles = ['x','y','z','nx','ny','nz','tx','ty','red','green','blue']
datas_vertex = {}
for e in vertex_eles:
datas_vertex[e] = np.expand_dims(data_vertex[e],axis=-1)
vertex_xyz = np.concatenate([datas_vertex['x'],datas_vertex['y'],datas_vertex['z']],axis=1)
vertex_nxnynz = np.concatenate([datas_vertex['nx'],datas_vertex['ny'],datas_vertex['nz']],axis=1)
vertex_rgb = np.concatenate([datas_vertex['red'],datas_vertex['green'],datas_vertex['blue']],axis=1)
vertex_semantic,vertex_indices_multi_semantic,face_indices_multi_semantic = get_vertex_label_from_face(face_vertex_indices,face_semantic,num_vertex)
if IsDelVexMultiSem:
vertex_xyz = np.delete(vertex_xyz,vertex_indices_multi_semantic,axis=0)
vertex_nxnynz = np.delete(vertex_nxnynz,vertex_indices_multi_semantic,axis=0)
vertex_rgb = np.delete(vertex_rgb,vertex_indices_multi_semantic,axis=0)
vertex_semantic = np.delete(vertex_semantic,vertex_indices_multi_semantic,axis=0)
        face_vertex_indices = np.delete(face_vertex_indices,face_indices_multi_semantic,axis=0)
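    # Hedged completion (assumption): hand the cleaned per-vertex and per-face
    # arrays back so the caller can write them into the raw/sorted h5 containers.
    return vertex_xyz, vertex_nxnynz, vertex_rgb, vertex_semantic, face_vertex_indices, face_semantic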
from torch.utils.data import Dataset
import random
import numpy as np
import nibabel as nib
import torch
import os
from os import listdir,path
from os.path import join
from collections import defaultdict
from sklearn.feature_extraction.image import extract_patches
import imgaug as ia
from imgaug import augmenters as iaa
class MakeTrainData(object):
def __init__(self,patch_size,num_patches,num_classes,norm_type,is_sup):
self.patch_size = patch_size
self.num_patches = num_patches
self.num_classes = num_classes
self.is_sup = is_sup
self.norm_type = norm_type
def __call__(self,folder1_path,folder2_path):
if self.is_sup:
TrainData_path = folder1_path
print(TrainData_path)
TrainData_dir = listdir(TrainData_path)
TrainData_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
if self.norm_type == 'group':
_,self.mean,self.std = normalization(TrainData_path)
else:
self.mean, self.std = 0,1 #std = 1 to avoid 'divide-by-0' error
LabelData_path = folder2_path
LabelData_dir = listdir(LabelData_path)
LabelData_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
img_patches, gt_patches = [],[]
for image,label in zip(TrainData_dir, LabelData_dir):
image = nib.load(join(TrainData_path,image)).get_data()
label = nib.load(join(LabelData_path,label)).get_data()
if self.norm_type == 'self':
self.mean, self.std = np.mean(image), np.std(image)
image = (image - np.mean(image))/np.std(image)
sample = {'images': image, 'labels':label, 'targets': None}
transform = CropPatches(self.patch_size,self.num_patches,self.num_classes)
imgs,gts = transform(sample) # patches cropped from a single subject
imgs_aug, gts_aug = Simple_Aug(imgs, gts)
img_patches.append(imgs)
gt_patches.append(gts)
img_patches = np.asarray(img_patches).reshape(-1,59,59,59)
gt_patches = np.asarray(gt_patches).reshape(-1,59,59,59)
return np.asarray(img_patches),np.asarray(gt_patches)
else:
TargetData_orig_path = folder1_path
print(TargetData_orig_path)
TargetData_trans_path = folder2_path
TargetData_orig_dir = listdir(TargetData_orig_path)
TargetData_orig_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
TargetData_trans_dir = listdir(TargetData_trans_path)
TargetData_trans_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
target_orig_patches,target_trans_patches = [],[]
for origname,transname in zip(TargetData_orig_dir,TargetData_trans_dir):
target_orig = nib.load(join(TargetData_orig_path,origname)).get_data()
target_trans = nib.load(join(TargetData_trans_path,transname)).get_data()
if self.norm_type == 'self':
self.orig_mean, self.orig_std = np.mean(target_orig), np.std(target_orig)
self.trans_mean, self.trans_std = np.mean(target_trans), np.std(target_trans)
target_orig = (target_orig-self.orig_mean)/self.orig_std
target_trans = (target_trans-self.trans_mean)/self.trans_std
target_orig = CropTargetPatches(target_orig,self.patch_size,extraction_step=[27,27,27])
target_trans = CropTargetPatches(target_trans,self.patch_size,extraction_step=[27,27,27])
target_orig_patches.append(target_orig)
target_trans_patches.append(target_trans)
target_orig_patches = np.asarray(target_orig_patches).reshape(-1,59,59,59)
target_trans_patches = np.asarray(target_trans_patches).reshape(-1,59,59,59)
print(np.array(target_orig_patches).shape)
print(np.array(target_trans_patches).shape)
return np.asarray(target_orig_patches), np.asarray(target_trans_patches)
class TrainDataset(Dataset):
def __init__(self,args):
self.data_path = args.data_path
self.source_path = join(args.data_path,args.sourcefolder)
self.label_path = join(args.data_path,args.labelfolder)
self.patch_size = args.patch_size
self.num_patches = args.num_patches
self.batch_size = args.batch_size
self.num_classes = args.num_classes
self.norm_type = args.norm_type
prepare_traindata = MakeTrainData(self.patch_size,self.num_patches,self.num_classes,self.norm_type,is_sup=True)
self.img_patches, self.gt_patches = prepare_traindata(self.source_path,self.label_path)
print('%%%%%%%%%%%%%%%% ', self.img_patches.shape)
print('%%%%%%%%%%%%%%%% ', self.gt_patches.shape)
self.length = len(self.img_patches)
def __len__(self):
return self.length
def __getitem__(self,idx):
return {'images':self.img_patches[idx], 'labels':self.gt_patches[idx]}
class Target_TrainDataset(Dataset):
def __init__(self,args):
self.data_path = args.data_path
self.target_trans_path = join(args.data_path,args.target_trans_folder)
self.target_orig_path = join(args.data_path,args.target_orig_folder)
self.patch_size = args.patch_size
self.num_patches = args.num_patches
self.num_classes = args.num_classes
self.norm_type = args.norm_type
prepare_targetdata = MakeTrainData(self.patch_size,self.num_patches,self.num_classes,self.norm_type,is_sup=False)
self.target_orig_patches, self.target_trans_patches = prepare_targetdata(self.target_orig_path,self.target_trans_path)
self.length = len(self.target_orig_patches)
c = list(zip(self.target_orig_patches, self.target_trans_patches))
random.shuffle(c)
self.target_orig_patches, self.target_trans_patches = zip(*c)
def __len__(self):
return self.length
def __getitem__(self,idx):
return self.target_orig_patches[idx], self.target_trans_patches[idx]
class ValDataset(Dataset):
def __init__(self,args):
self.Val_path = args.val_path
self.Val_data_path = join(self.Val_path,args.valimagefolder)
self.Val_labels_path = join(self.Val_path,args.vallabelfolder)
self.Val_data_dir = listdir(self.Val_data_path)
self.Val_labels_dir = listdir(self.Val_labels_path)
self.Val_data_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
self.Val_labels_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
self.length = len(self.Val_data_dir)
self.norm_type = args.norm_type
if self.norm_type == 'group':
_,self.mean,self.std = normalization(args.train_path)
else:
self.mean, self.std = 0,1
def __len__(self):
return self.length
def __getitem__(self,idx):
image = nib.load(join(self.Val_data_path,self.Val_data_dir[idx])).get_data()
label = nib.load(join(self.Val_labels_path,self.Val_labels_dir[idx])).get_data()
name = self.Val_data_dir[idx]
label = np.asarray(label)
if self.norm_type == 'self':
self.mean, self.std = np.mean(image), np.std(image)
# volume-wise intensity normalization
image = (image - self.mean) / self.std
sample = {'images': image, 'labels': label.astype(int), 'names': name}
return sample
class TestDataset(Dataset):
def __init__(self,args):
self.TestData_path = args.test_path
self.TestData_dir = listdir(self.TestData_path)
self.length = len(listdir(self.TestData_path))
self.norm_type = args.norm_type
self.TestData_dir.sort(key=lambda f: int(filter(str.isdigit, f)))
if self.norm_type == 'group':
_,self.mean,self.std = normalization(args.train_path)
else:
self.mean, self.std = 0,1
def __len__(self):
return self.length
def __getitem__(self,idx):
image = nib.load(join(self.TestData_path,self.TestData_dir[idx])).get_data()
if self.norm_type == 'self':
self.mean,self.std = np.mean(image), np.std(image)
# volume-wise intensity normalization
image = (image - self.mean) / self.std
name = self.TestData_dir[idx]
sample = {'images':image,'name':name, 'pop_mean':self.mean, 'pop_std':self.std}
return sample
class CropPatches(object):
def __init__(self,patch_size,num_patches,num_classes):
self.patch_size = patch_size
self.num_classes = num_classes
self.num_patches = num_patches
def __call__(self,sample):
image,label = sample['images'], sample['labels']
if sample['targets'] is not None:
target = sample['targets']
targets = []
h,w,d = self.patch_size
# generate the training batch with equal probability for each class
fb = np.random.choice(2)
if fb:
index = np.argwhere(label > 0)
else:
index = np.argwhere(label == 0)
# randomly choose N center position
choose = random.sample(range(0,len(index)),self.num_patches)
centers = index[choose].astype(int)
images, gts = [],[]
# check whether the left and right index overflow
for center in centers:
left = []
for i in range(3):
margin_left = int(self.patch_size[i]/2)
margin_right = self.patch_size[i] - margin_left
left_index = center[i] - margin_left
right_index = center[i] + margin_right
if left_index < 0:
left_index = 0
if right_index > label.shape[i]:
left_index = left_index - (right_index - label.shape[i])
left.append(left_index)
img = np.zeros([h,w,d])
            gt = np.zeros([h,w,d])
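            # Hedged completion (assumption): copy the crop window selected by
            # `left` into the pre-allocated patch buffers and collect them.
            img[:, :, :] = image[left[0]:left[0]+h, left[1]:left[1]+w, left[2]:left[2]+d]
            gt[:, :, :] = label[left[0]:left[0]+h, left[1]:left[1]+w, left[2]:left[2]+d]
            images.append(img)
            gts.append(gt)
        return np.asarray(images), np.asarray(gts)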
from os import read
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import datetime
import sys
from tqdm import tqdm
import cppsolver as cs
from ..solver import Solver, Solver_jac
from ..preprocess import Reading_Data, LM_data, LM_data_2mag
from ..filter import lowpass_filter, mean_filter, median_filter, Magnet_KF, Magnet_UKF, Magnet_KF_cpp
from ..preprocess import read_data
def ang_convert(x):
a = x//(2*np.pi)
result = x-a*(2*np.pi)
if result > np.pi:
result -= np.pi * 2
return result
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def show_track_1mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-10, 15])
ax.set_ylim([-10, 15])
ax.set_zlim([0, 25])
# ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
# model = Solver(1)
# model = Finexus_Solver(-5e-2, -5e-2, 8e-2)
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
directions = []
SNR = []
cut = 5
starting_point = lm_data.get_gt(reading_data.tstamps[cut])[0]
if use_kalman:
kf_params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
model = Magnet_KF_cpp(
1, pSensor, [0.8, 0.8, 1.5]*pSensor.shape[0], kf_params, dt=1/17, ord=3)
else:
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * starting_point[0], 1e-2 * starting_point[1], 1e-2 * starting_point[2], 0, 0])
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(
My_M), 1e-2 * (-2), 1e-2 * (2), 1e-2 * (20), 0, 0])
for i in tqdm(range(cut, data.shape[0] - cut)):
# fix m value and gx gy gz
datai = data[i].reshape(-1, 3)
if use_kalman:
model.predict()
result = model.update(datai)
else:
result = cs.solve_1mag(
datai.reshape(-1), pSensor.reshape(-1), params)
params = result.copy()
[x0, y0, z0, Gx, Gy, Gz] = [
result[4] * 1e2, result[5] * 1e2,
result[6] * 1e2, result[0],
result[1], result[2]
]
# [m, theta, phy] = [np.exp(result['m0'].value), np.pi * sigmoid(
# result['theta0'].value), np.pi * np.tanh(result['phy0'].value)]
[m, theta, phy, direction] = [
np.exp(result[3]),
ang_convert(result[7]),
ang_convert(result[8]),
np.array([np.sin(ang_convert(result[7]))*np.cos(ang_convert(result[8])),
np.sin(ang_convert(result[7]))*np.sin(ang_convert(result[8])), np.cos(ang_convert(result[7]))]),
]
# [x, y, z, m] = [result['X'].value*1e2, result['Y'].value*1e2,
# result['Z'].value*1e2, result['m'].value]
G = np.array([Gx, Gy, Gz])
noise = np.linalg.norm(G, 2)
signal = np.linalg.norm(datai - G, 2)
pred_position.append(x0)
pred_position.append(y0)
pred_position.append(z0)
changingM.append(m)
changingTheta.append(theta)
changingPhy.append(phy)
changingG.append([Gx, Gy, Gz])
directions.append(direction)
changingG = np.array(changingG)
changingM = np.array(changingM)
changingTheta = np.array(changingTheta)
changingPhy = np.array(changingPhy)
changingAng = np.stack([changingTheta, changingPhy], axis=0).T
directions = np.stack(directions, axis=0)
pred_position = np.array(pred_position).reshape(-1, 3)
compare_label = [' ', '(fixing G)']
ax.plot(pred_position[:, 0],
pred_position[:, 1],
pred_position[:, 2],
c=color[index % len(color)],
label='Magnet')
print(np.mean(pred_position, axis=0))
# sensor position
ax.scatter(1e2 * pSensor[:, 0],
1e2 * pSensor[:, 1],
1e2 * pSensor[:, 2],
c='r',
s=1,
alpha=0.5)
# calculate loss
gt_route = []
losses = {}
losses_count = {}
gt_directions = []
losses_angle = {}
losses_count_angle = {}
for i in range(pred_position.shape[0]):
# Get gt
gt = lm_data.get_gt(reading_data.tstamps[i + cut])
gt_pos = gt[0]
gt_route.append(gt_pos)
gt_direction = gt[1]
gt_directions.append(gt_direction)
# calculate loss
dis = np.linalg.norm(gt_pos - np.mean(pSensor, axis=0), 2)
loss1 = np.linalg.norm(gt_pos - pred_position[i], 2)
loss2 = np.arccos(np.dot(gt_direction, directions[i]))
# store route loss
if not dis in losses.keys():
losses[dis] = loss1
losses_count[dis] = 1
else:
losses[dis] += loss1
losses_count[dis] += 1
# store ang loss
if not dis in losses_angle.keys():
losses_angle[dis] = loss2
losses_count_angle[dis] = 1
else:
losses_angle[dis] += loss2
losses_count_angle[dis] += 1
gt_route = np.stack(gt_route, axis=0)
gt_directions = np.stack(gt_directions, axis=0)
ax.plot(gt_route[:, 0],
gt_route[:, 1],
gt_route[:, 2],
c='b',
alpha=0.5,
linewidth=2,
label='Ground Truth')
plt.legend()
# store the gt route and the reconstructed route
tmp = reading_path.split('/')
file_name = tmp[-1].split('.')[0] + '.npz'
tmp.pop(0)
tmp.pop(-1)
result_path = os.path.join('result', 'reconstruction_result', *tmp)
if not os.path.exists(result_path):
os.makedirs(result_path)
np.savez(os.path.join(result_path, file_name),
gt=gt_route,
result=pred_position, gt_ang=gt_directions, result_ang=directions, G=changingG)
fig5 = plt.figure()
plt.title("Reconstuct Loss")
plot_loss_data = []
for dis in sorted(losses.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses[dis] / losses_count[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0],
plot_loss_data[:, 1], label='Position loss')
plt.legend()
fig6 = plt.figure()
plt.title("Reconstuct angle Loss")
plot_loss_data = []
for dis in sorted(losses_angle.keys()):
plot_loss_data.append(dis)
plot_loss_data.append(losses_angle[dis] / losses_count_angle[dis])
plot_loss_data = np.array(plot_loss_data).reshape(-1, 2)
plt.plot(plot_loss_data[:, 0], plot_loss_data[:, 1], label='Ang loss')
plt.legend()
fig2 = plt.figure()
plt.title("Magnet Moment")
# plt.ylim(0, 10)
plt.plot(changingM, label='M')
plt.legend()
fig3 = plt.figure()
plt.title("G")
plt.plot(changingG[:, 0], label='Gx')
plt.plot(changingG[:, 1], label='Gy')
plt.plot(changingG[:, 2], label='Gz')
plt.legend()
fig4 = plt.figure()
plt.title("orientation")
plt.ylim(-5, 5)
plt.plot(changingTheta, label='theta')
plt.plot(changingPhy, label='phy')
plt.legend()
plt.show()
# plt.savefig("result/result.jpg", dpi=900)
def show_track_2mag_csv_cpp(reading_path, cali_path, gt_path, pSensor, My_M, use_kalman=False):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
# ax.set_zlim([-2, 30])
ax.set_title("Reconstructed Magnet Position")
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
# M_choice = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
# M_choice = [0.8, 1, 1.2, 1.4]
M_choice = [2]
reading_data = Reading_Data(data_path=reading_path, cali_path=cali_path)
data = reading_data.readings
lm_data = LM_data_2mag(gt_path)
# set the origin of the gt
lm_data.offset = np.array([-1.5614192, -0.31039926, 0.90800506])
result_parameter = []
color = ['r', 'b', 'g', 'y', 'm']
for index, M in enumerate(M_choice):
pred_position = []
changingM = []
changingG = []
changingTheta = []
changingPhy = []
changingTheta2 = []
changingPhy2 = []
changingDir = []
changingDir2 = []
SNR = []
cut = 0
starting_point = lm_data.get_gt(reading_data.tstamps[cut])
params = {
'X0': 1e-2 * starting_point[0][0],
'Y0': 1e-2 * starting_point[0][1],
'Z0': 1e-2 * starting_point[0][2],
'm0': np.log(My_M),
'theta0': 0.1,
'phy0': 0.1,
'X1': 1e-2 * starting_point[2][0],
'Y1': 1e-2 * starting_point[2][1],
'Z1': 1e-2 * starting_point[2][2],
'm1': np.log(My_M),
'theta1': 0.1,
'phy1': 0.1,
'gx': 0,
'gy': 0,
'gz': 0,
}
params = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(My_M),
1e-2 * starting_point[0][0], 1e-2 *
starting_point[0][1], 1e-2 * starting_point[0][2], 0, 0,
1e-2 * starting_point[2][0], 1e-2 *
starting_point[2][1], 1e-2 * starting_point[2][2], 0, 0,
])
params = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(3),
1e-2 * 11, 1e-2 * 1, 1e-2 * (-2), np.pi*0.5, np.pi*0.5,
1e-2 * 5, 1e-2 * (7), 1e-2 * (-4), np.pi*0.5, np.pi*0.25,
])
for i in tqdm(range(cut, data.shape[0] - cut)):
# if i > 5:
# gt = lm_data.get_gt(reading_data.tstamps[i])
# params[4:7] = gt[0]*1e-2
# params[9:12] = gt[2]*1e-2
datai = data[i].reshape(-1, 3)
result = cs.solve_2mag(
datai.reshape(-1), pSensor.reshape(-1), params)
params = result.copy()
result_parameter.append(result)
# print('the m is ', result['m0'])
[x0, y0, z0, x1, y1, z1, Gx, Gy, Gz] = [
result[4] * 1e2, result[5] * 1e2, result[6] * 1e2, result[9] *
1e2, result[10] * 1e2, result[11] *
1e2, result[0],
result[1], result[2]
]
# [m, theta, phy] = [np.exp(result['m0'].value), np.pi * sigmoid(
# result['theta0'].value), np.pi * np.tanh(result['phy0'].value)]
[m, theta1, phy1, theta2, phy2] = [
np.exp(result[3]),
ang_convert(result[7]),
ang_convert(result[8]),
ang_convert(result[12]),
ang_convert(result[13]),
]
# [x, y, z, m] = [result['X'].value*1e2, result['Y'].value*1e2,
# result['Z'].value*1e2, result['m'].value]
G = np.array([Gx, Gy, Gz])
noise = np.linalg.norm(G, 2)
signal = np.linalg.norm(datai - G, 2)
pred_position.append(x0)
pred_position.append(y0)
pred_position.append(z0)
pred_position.append(x1)
pred_position.append(y1)
pred_position.append(z1)
changingM.append(m)
changingTheta.append(theta1)
changingPhy.append(phy1)
changingDir.append(np.array([np.sin(theta1)*np.cos(phy1), np.sin(
theta1)*np.sin(phy1), np.cos(theta1), np.sin(theta2)*np.cos(phy2), np.sin(
theta2)*np.sin(phy2), np.cos(theta2)]))
changingTheta2.append(theta2)
changingPhy2.append(phy2)
changingG.append([Gx, Gy, Gz])
changingG = np.array(changingG)
changingM = np.array(changingM)
changingTheta = np.array(changingTheta)
changingPhy = np.array(changingPhy)
changingAng = np.stack([changingTheta, changingPhy], axis=0).T
changingTheta2 = np.array(changingTheta2)
changingPhy2 = np.array(changingPhy2)
changingAng2 = np.stack([changingTheta2, changingPhy2], axis=0).T
        changingDir = np.stack(changingDir, axis=0)
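        # Hedged completion (assumption): mirror the single-magnet routine above
        # and reshape the flat prediction list into (N, 6) rows of two positions.
        pred_position = np.array(pred_position).reshape(-1, 6)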
"""
Testcases for encoders:
- Exclude
- Log10
- Normalization
Separate linear-reduction tests:
- PCA
- Karuhnen-Loeve
"""
import numpy as np
from profit.sur.encoders import Encoder
def test_exclude():
CONFIG = ['Exclude', [2], False, {}]
COLUMNS= [2]
SIZE = (10, 4)
n = SIZE[0] * SIZE[1]
X = np.linspace(0, n-1, n).reshape(SIZE)
enc = Encoder['Exclude'](COLUMNS)
assert enc.repr == CONFIG
X_enc = enc.encode(X)
assert np.all(X_enc == X[:, [0, 1, 3]])
X_dec = enc.decode(X_enc)
assert np.all(X_dec == X)
def test_log10():
from profit.sur.encoders import Log10Encoder
CONFIG = ['Log10', [2, 3], False, {}]
COLUMNS= [2, 3]
SIZE = (10, 4)
n = SIZE[0] * SIZE[1]
X = np.linspace(0, n-1, n).reshape(SIZE)
X_log = X.copy()
X_log[:, COLUMNS] = np.log10(X_log[:, COLUMNS])
enc = Log10Encoder(COLUMNS)
assert enc.repr == CONFIG
X_enc = enc.encode(X)
assert np.all(X_enc == X_log)
X_dec = enc.decode(X_enc)
    assert np.allclose(X_dec, X, atol=1e-7)
import numpy as np
import os
if "DEBUG" in os.environ and os.environ["DEBUG"] == 'PLOT':
import plotly.offline as py
import plotly.graph_objs as go
import math
class Face:
def __init__(self, vertices, triangle_index, collision_type):
self.vertices = np.array(vertices)
self.index = triangle_index
self.vertices_transposed = np.transpose(self.vertices)
self.normal = np.array([0.0, 0.0, 0.0])
self.center = np.array([0.0, 0.0, 0.0])
self.type = None
self.collision_type = collision_type
self.bounding_box = None
self.calc_props()
def calc_props(self):
[p1, p2, p3] = self.vertices
u = p2 - p1
v = p3 - p1
# calc normal
cross = np.cross(u, v)
#print(cross)
biggest_var = np.max(np.abs(cross))
if biggest_var != 0:
self.normal = cross / np.max(np.abs(cross))
is_floor = self.normal[1] > 0.01
is_ceiling = self.normal[1] < -0.01
if is_floor:
self.type = 'FLOOR'
elif is_ceiling:
self.type = 'CEILING'
else:
self.type = 'WALL'
self.center = np.mean(self.vertices, axis=0, dtype=float)
self.bounding_box = (
np.min(self.vertices_transposed[0]), # -X
np.max(self.vertices_transposed[0]), # +X
np.min(self.vertices_transposed[1]), # -Y
np.max(self.vertices_transposed[1]), # +Y
np.min(self.vertices_transposed[2]), # -Z
            np.max(self.vertices_transposed[2])  # +Z
        )
#! /usr/bin/env python
import unittest
import numpy as np
import openravepy as orpy
# Tested package
import raveutils as ru
class Test_body(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.set_printoptions(precision=6, suppress=True)
env = orpy.Environment()
makita = env.ReadKinBodyXMLFile('objects/makita.kinbody.xml')
env.AddKinBody(makita)
cls.env = env
print('') # dummy line
@classmethod
def tearDownClass(cls):
cls.env.Reset()
cls.env.Destroy()
def test_enable_body(self):
env = self.env
makita = env.GetBodies()[0]
# Enable
ru.body.enable_body(makita, True)
link_status = [l.IsEnabled() for l in makita.GetLinks()]
self.assertTrue(all(link_status))
# Disable
ru.body.enable_body(makita, False)
link_status = [l.IsEnabled() for l in makita.GetLinks()]
self.assertFalse(any(link_status))
def test_get_bounding_box_corners(self):
# TODO: Write a proper test for this function
env = self.env
makita = env.GetBodies()[0]
corners = ru.body.get_bounding_box_corners(makita)
self.assertEqual(len(corners), 8)
# Transform given
makita = env.GetBodies()[0]
transform = makita.GetTransform()
corners = ru.body.get_bounding_box_corners(makita, transform)
self.assertEqual(len(corners), 8)
def test_set_body_color(self):
env = self.env
makita = env.GetBodies()[0]
for _ in range(10):
diffuse = np.random.sample(3)
      ambient = np.random.sample(3)
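      # Hedged completion (the exact raveutils signature is an assumption):
      # apply the sampled colors; the call not raising is the implicit check.
      ru.body.set_body_color(makita, diffuse, ambient)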
import os
import numpy as np
from PIL import Image
import multiprocessing
categories = ['background','aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',
'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
def do_python_eval(predict_folder, gt_folder, name_list, num_cls=21, input_type='png', threshold=1.0, printlog=False):
TP = []
P = []
T = []
for i in range(num_cls):
TP.append(multiprocessing.Value('L', 0, lock=True))
P.append(multiprocessing.Value('L', 0, lock=True))
T.append(multiprocessing.Value('L', 0, lock=True))
def compare(start,step,TP,P,T,input_type,threshold):
for idx in range(start,len(name_list),step):
name = name_list[idx]
if input_type == 'png':
predict_file = os.path.join(predict_folder,'%s.png'%name)
predict = np.array(Image.open(predict_file)) #cv2.imread(predict_file)
predict = predict[:,:]
elif 'npy' in input_type:
predict_file = os.path.join(predict_folder,'%s.npy'%name)
predict_dict = np.load(predict_file, allow_pickle=True).item()
h, w = list(predict_dict.values())[0].shape
tensor = np.zeros((num_cls,h,w),np.float32)
for key in predict_dict.keys():
v = predict_dict[key]
tensor[key+1] = predict_dict[key]
tensor[0,:,:] = threshold
predict = np.argmax(tensor, axis=0).astype(np.uint8)
gt_file = os.path.join(gt_folder,'%s.png'%name)
gt = np.array(Image.open(gt_file))
cal = gt<255
mask = (predict==gt) * cal
for i in range(num_cls):
P[i].acquire()
P[i].value += np.sum((predict==i)*cal)
# -*- coding: utf-8 -*-
''' Data Handler Module
This module contains a class for managing a data processing pipeline
'''
from time import time
from datetime import timedelta
import numpy as np
import pandas as pd
from scipy.stats import mode, skew
from scipy.interpolate import interp1d
from sklearn.cluster import DBSCAN
import cvxpy as cvx
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from solardatatools.time_axis_manipulation import make_time_series,\
standardize_time_axis
from solardatatools.matrix_embedding import make_2d
from solardatatools.data_quality import daily_missing_data_advanced
from solardatatools.data_filling import zero_nighttime, interp_missing
from solardatatools.clear_day_detection import find_clear_days
from solardatatools.plotting import plot_2d
from solardatatools.clear_time_labeling import find_clear_times
from solardatatools.solar_noon import avg_sunrise_sunset
from solardatatools.algorithms import CapacityChange, TimeShift, SunriseSunset
class DataHandler():
def __init__(self, data_frame=None, raw_data_matrix=None, datetime_col=None,
convert_to_ts=False, aggregate=None, how=lambda x: x.mean()):
if data_frame is not None:
if convert_to_ts:
data_frame, keys = make_time_series(data_frame)
self.keys = keys
else:
self.keys = list(data_frame.columns)
self.data_frame_raw = data_frame.copy()
if not isinstance(self.data_frame_raw.index, pd.DatetimeIndex):
if datetime_col is not None:
df = self.data_frame_raw
df[datetime_col] = pd.to_datetime(df[datetime_col])
df.set_index(datetime_col, inplace=True)
else:
e = "Data frame must have a DatetimeIndex or"
e += "the user must set the datetime_col kwarg."
raise Exception(e)
df_index = self.data_frame_raw.index
if df_index.tz is not None:
df_index = df_index.tz_localize(None)
self.data_frame = None
if aggregate is not None:
new_data = how(self.data_frame_raw.resample(aggregate))
self.data_frame_raw = new_data
else:
self.data_frame_raw = None
self.data_frame = None
self.keys = None
self.raw_data_matrix = raw_data_matrix
if self.raw_data_matrix is not None:
self.num_days = self.raw_data_matrix.shape[1]
if self.raw_data_matrix.shape[0] <= 1400:
self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
else:
self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
else:
self.num_days = None
self.data_sampling = None
self.filled_data_matrix = None
self.use_column = None
self.capacity_estimate = None
self.start_doy = None
self.day_index = None
self.power_units = None
# "Extra" data, i.e. additional columns to process from the table
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {} # Relative quality: fraction of non-NaN values in column during daylight time periods, as defined by the main power columns
# Scores for the entire data set
self.data_quality_score = None # Fraction of days without data acquisition errors
self.data_clearness_score = None # Fraction of days that are approximately clear/sunny
# Flags for the entire data set
self.inverter_clipping = None # True if there is inverter clipping, false otherwise
self.num_clip_points = None # If clipping, the number of clipping set points
self.capacity_changes = None # True if the apparent capacity seems to change over the data set
self.normal_quality_scores = None # True if clustering of data quality scores are within decision boundaries
self.time_shifts = None # True if time shifts detected and corrected in data set
self.tz_correction = 0 # TZ correction factor (determined during pipeline run)
# Daily scores (floats), flags (booleans), and boolean masks
self.daily_scores = DailyScores() # 1D arrays of floats
self.daily_flags = DailyFlags() # 1D arrays of Booleans
self.boolean_masks = BooleanMasks() # 2D arrays of Booleans
# Useful daily signals defined by the data set
self.daily_signals = DailySignals()
# Algorithm objects
self.scsf = None
self.capacity_analysis = None
self.time_shift_analysis = None
self.daytime_analysis = None
# Private attributes
self._ran_pipeline = False
self._error_msg = ''
self.__density_lower_threshold = None
self.__density_upper_threshold = None
self.__linearity_threshold = None
self.__recursion_depth = 0
self.__initial_time = None
self.__fix_dst_ran = False
def run_pipeline(self, power_col=None, min_val=-5, max_val=None,
zero_night=True, interp_day=True, fix_shifts=True,
density_lower_threshold=0.6, density_upper_threshold=1.05,
linearity_threshold=0.1, clear_day_smoothness_param=0.9,
clear_day_energy_param=0.8, verbose=True,
start_day_ix=None, end_day_ix=None, c1=None, c2=500.,
solar_noon_estimator='com', correct_tz=True, extra_cols=None,
daytime_threshold=0.1, units='W'):
self.daily_scores = DailyScores()
self.daily_flags = DailyFlags()
self.capacity_analysis = None
self.time_shift_analysis = None
self.extra_matrices = {} # Matrix views of extra columns
self.extra_quality_scores = {}
self.power_units = units
if self.__recursion_depth == 0:
self.tz_correction = 0
t = np.zeros(6)
######################################################################
# Preprocessing
######################################################################
t[0] = time()
if self.data_frame_raw is not None:
self.data_frame = standardize_time_axis(self.data_frame_raw,
timeindex=True,
verbose=verbose)
if self.data_frame is not None:
self.make_data_matrix(power_col, start_day_ix=start_day_ix,
end_day_ix=end_day_ix)
if max_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = -9999
slct = mat_copy > max_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
if min_val is not None:
mat_copy = np.copy(self.raw_data_matrix)
mat_copy[np.isnan(mat_copy)] = 9999
slct = mat_copy < min_val
if np.sum(slct) > 0:
self.raw_data_matrix[slct] = np.nan
self.capacity_estimate = np.nanquantile(self.raw_data_matrix, 0.95)
if self.capacity_estimate <= 500 and self.power_units == 'W':
self.power_units = 'kW'
self.boolean_masks.missing_values = np.isnan(self.raw_data_matrix)
ss = SunriseSunset()
ss.run_optimizer(self.raw_data_matrix, plot=False)
self.boolean_masks.daytime = ss.sunup_mask_estimated
self.daytime_analysis = ss
### TZ offset detection and correction ###
# (1) Determine if there exists a "large" timezone offset error
if power_col is None:
power_col = self.data_frame.columns[0]
if correct_tz:
average_day = np.zeros(self.raw_data_matrix.shape[0])
all_nans = np.all(np.isnan(self.raw_data_matrix), axis=1)
average_day[~all_nans] = np.nanmean(
self.raw_data_matrix[~all_nans, :], axis=1
)
average_day -= np.min(average_day)
average_day /= np.max(average_day)
### Troubleshooting code
# plt.plot(average_day)
# plt.axhline(0.02, color='red', ls='--', linewidth=1)
# plt.show()
meas_per_hour = int(60 / self.data_sampling)
cond1 = np.any(average_day[:meas_per_hour] > 0.02)
cond2 = np.any(average_day[-meas_per_hour:] > 0.02)
cond3 = self.__recursion_depth <= 2
if (cond1 or cond2) and cond3:
if verbose:
print(
'Warning: power generation at midnight. Attempting to correct...')
# Catch values that are more than 4 hours from noon and make a
# correction to the time axis (rough correction to avoid days
# rolling over)
rough_noon_est = np.nanmean(
self.data_frame.groupby(pd.Grouper(freq='D')) \
.idxmax()[power_col].dt.time \
.apply(lambda x: 60 * x.hour + x.minute)
) / 60
self.tz_correction = 12 - np.round(rough_noon_est)
self.data_frame.index = self.data_frame.index.shift(
self.tz_correction, freq='H'
)
if verbose:
print('Done.\nRestarting the pipeline...')
self.__recursion_depth += 1
if self.__initial_time is not None:
self.__initial_time = t[0]
self.run_pipeline(
power_col=power_col, min_val=min_val,
max_val=max_val, zero_night=zero_night,
interp_day=interp_day, fix_shifts=fix_shifts,
density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold,
clear_day_smoothness_param=clear_day_smoothness_param,
clear_day_energy_param=clear_day_energy_param,
verbose=verbose, start_day_ix=start_day_ix,
end_day_ix=end_day_ix, c1=c1, c2=c2,
solar_noon_estimator=solar_noon_estimator,
correct_tz=correct_tz, extra_cols=extra_cols,
daytime_threshold=daytime_threshold, units=units
)
return
######################################################################
# Cleaning
######################################################################
t[1] = time()
self.make_filled_data_matrix(zero_night=zero_night, interp_day=interp_day)
num_raw_measurements = np.count_nonzero(
np.nan_to_num(self.raw_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
num_filled_measurements = np.count_nonzero(
np.nan_to_num(self.filled_data_matrix,
copy=True,
nan=0.)[self.boolean_masks.daytime]
)
if num_raw_measurements > 0:
ratio = num_filled_measurements / num_raw_measurements
else:
msg = 'Error: data set contains no non-zero values!'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = 0.0
self.data_clearness_score = 0.0
self._ran_pipeline = True
return
if ratio < 0.9:
msg = 'Error: data was lost during NaN filling procedure. '
msg += 'This typically occurs when\nthe time stamps are in the '
msg += 'wrong timezone. Please double check your data table.\n'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
self.daily_flags = None
self.data_quality_score = None
self.data_clearness_score = None
self._ran_pipeline = True
return
### TZ offset detection and correction ###
# (2) Determine if there is a "small" timezone offset error
if correct_tz:
average_noon = np.nanmean(
avg_sunrise_sunset(self.filled_data_matrix, threshold=0.01)
)
tz_offset = int(np.round(12 - average_noon))
if tz_offset != 0:
self.tz_correction += tz_offset
# Related to this bug fix:
# https://github.com/slacgismo/solar-data-tools/commit/ae0037771c09ace08bff5a4904475da606e934da
old_index = self.data_frame.index.copy()
self.data_frame.index = self.data_frame.index.shift(
tz_offset, freq='H'
)
self.data_frame = self.data_frame.reindex(index=old_index,
method='nearest',
limit=1).fillna(0)
meas_per_hour = self.filled_data_matrix.shape[0] / 24
roll_by = int(meas_per_hour * tz_offset)
self.filled_data_matrix = np.nan_to_num(
np.roll(self.filled_data_matrix, roll_by, axis=0),
0
)
self.raw_data_matrix = np.roll(
self.raw_data_matrix, roll_by, axis=0
)
self.boolean_masks.daytime = np.roll(
self.boolean_masks.daytime, roll_by, axis=0
)
######################################################################
# Scoring
######################################################################
t[2] = time()
t_clean = np.zeros(6)
t_clean[0] = time()
try:
self.get_daily_scores(threshold=0.2)
except:
msg = 'Daily quality scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_scores = None
try:
self.get_daily_flags(density_lower_threshold=density_lower_threshold,
density_upper_threshold=density_upper_threshold,
linearity_threshold=linearity_threshold)
except:
msg = 'Daily quality flagging failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.daily_flags = None
t_clean[1] = time()
try:
self.detect_clear_days(smoothness_threshold=clear_day_smoothness_param,
energy_threshold=clear_day_energy_param)
except:
msg = 'Clear day detection failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
t_clean[2] = time()
try:
self.clipping_check()
except:
msg = 'Clipping check failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.inverter_clipping = None
t_clean[3] = time()
try:
self.score_data_set()
except:
msg = 'Data set summary scoring failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
self.data_quality_score = None
self.data_clearness_score = None
t_clean[4] = time()
try:
self.capacity_clustering()
except TypeError:
self.capacity_changes = None
t_clean[5] = time()
######################################################################
# Fix Time Shifts
######################################################################
t[3] = time()
if fix_shifts:
try:
self.auto_fix_time_shifts(c1=c1, c2=c2,
estimator=solar_noon_estimator,
threshold=daytime_threshold,
periodic_detector=False)
except Exception as e:
msg = 'Fix time shift algorithm failed.'
self._error_msg += '\n' + msg
if verbose:
print(msg)
print('Error message:', e)
print('\n')
self.time_shifts = None
######################################################################
# Update daytime detection based on cleaned up data
######################################################################
# self.daytime_analysis.run_optimizer(self.filled_data_matrix, plot=False)
self.daytime_analysis.calculate_times(self.filled_data_matrix)
self.boolean_masks.daytime = self.daytime_analysis.sunup_mask_estimated
######################################################################
# Process Extra columns
######################################################################
t[4] = time()
if extra_cols is not None:
freq = int(self.data_sampling * 60)
new_index = pd.date_range(start=self.day_index[0].date(),
end=self.day_index[-1].date() + timedelta(
days=1),
freq='{}s'.format(freq))[:-1]
if isinstance(extra_cols, str):
extra_cols = np.atleast_1d(extra_cols)
elif isinstance(extra_cols, tuple):
extra_cols = [extra_cols]
for col in extra_cols:
self.generate_extra_matrix(col, new_index=new_index)
t[5] = time()
times = np.diff(t, n=1)
cleaning_times = np.diff(t_clean, n=1)
total_time = t[-1] - t[0]
# Cleanup
self.__recursion_depth = 0
if verbose:
if self.__initial_time is not None:
restart_msg = '{:.2f} seconds spent automatically localizing the time zone\n'
restart_msg += 'Info for last pipeline run below:\n'
restart_msg = restart_msg.format(t[0] - self.__initial_time)
print(restart_msg)
out = 'total time: {:.2f} seconds\n'
out += '--------------------------------\n'
out += 'Breakdown\n'
out += '--------------------------------\n'
out += 'Preprocessing {:.2f}s\n'
out += 'Cleaning {:.2f}s\n'
out += 'Filtering/Summarizing {:.2f}s\n'
out += ' Data quality {:.2f}s\n'
out += ' Clear day detect {:.2f}s\n'
out += ' Clipping detect {:.2f}s\n'
out += ' Capacity change detect {:.2f}s\n'
if extra_cols is not None:
out += 'Extra Column Processing {:.2f}s'
print(out.format(
total_time,
times[0],
times[1] + times[3],
times[2],
cleaning_times[0],
cleaning_times[1],
cleaning_times[2],
cleaning_times[4],
times[4]
))
self._ran_pipeline = True
return
def report(self):
try:
if self.num_days >= 365:
l1 = 'Length: {:.2f} years\n'.format(self.num_days / 365)
else:
l1 = 'Length: {} days\n'.format(self.num_days)
if self.power_units == 'W':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
elif self.power_units == 'kW':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
else:
l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
l1_a += self.power_units + '\n'
if self.raw_data_matrix.shape[0] <= 1440:
l2 = 'Data sampling: {} minute\n'.format(self.data_sampling)
else:
l2 = 'Data sampling: {} second\n'.format(int(self.data_sampling * 60))
l3 = 'Data quality score: {:.1f}%\n'.format(self.data_quality_score * 100)
l4 = 'Data clearness score: {:.1f}%\n'.format(self.data_clearness_score * 100)
l5 = 'Inverter clipping: {}\n'.format(self.inverter_clipping)
l6 = 'Time shifts corrected: {}\n'.format(self.time_shifts)
if self.tz_correction != 0:
l7 = 'Time zone correction: {} hours'.format(int(self.tz_correction))
else:
l7 = 'Time zone correction: None'
p_out = l1 + l1_a + l2 + l3 + l4 + l5 + l6 + l7
if self.capacity_changes:
p_out += '\nWARNING: Changes in system capacity detected!'
if self.num_clip_points > 1:
p_out += '\nWARNING: {} clipping set points detected!'.format(
self.num_clip_points
)
if not self.normal_quality_scores:
p_out += '\nWARNING: Abnormal clustering of data quality scores!'
print(p_out)
return
except TypeError:
if self._ran_pipeline:
m1 = 'Pipeline failed, please check data set.\n'
m2 = "Try running: self.plot_heatmap(matrix='raw')\n\n"
if self.num_days >= 365:
l1 = 'Length: {:.2f} years\n'.format(
self.num_days / 365)
else:
l1 = 'Length: {} days\n'.format(
self.num_days)
if self.power_units == 'W':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate / 1000)
elif self.power_units == 'kW':
l1_a = 'Capacity estimate: {:.2f} kW\n'.format(self.capacity_estimate)
else:
l1_a = 'Capacity estimate: {:.2f} '.format(self.capacity_estimate)
l1_a += self.power_units + '\n'
if self.raw_data_matrix.shape[0] <= 1440:
l2 = 'Data sampling: {} minute\n'.format(
self.data_sampling)
else:
l2 = 'Data sampling: {} second\n'.format(
int(self.data_sampling * 60))
p_out = m1 + m2 + l1 + l1_a + l2
print(p_out)
print('\nError messages captured from pipeline:' + self._error_msg)
else:
print('Please run the pipeline first!')
return
def augment_data_frame(self, boolean_index, column_name):
"""
Add a column to the data frame (tabular) representation of the data,
containing True/False values at each time stamp.
Boolean index is a 1-D or 2-D numpy array of True/False values. If 1-D,
array should be of length N, where N is the number of days in the data
set. If 2-D, the array should be of size M X N where M is the number
of measurements each day and N is the number of days.
:param boolean_index: Length N or size M X N numpy arrays of booleans
:param column_name: Name for column
:return:
"""
if self.data_frame is None:
print('This DataHandler object does not contain a data frame.')
return
if boolean_index is None:
print('No mask available for ' + column_name)
return
m, n = self.raw_data_matrix.shape
index_shape = boolean_index.shape
cond1 = index_shape == (m, n)
cond2 = index_shape == (n ,)
if not cond1 and not cond2:
print('Boolean index shape does not match the data.')
elif cond1:
if self.time_shifts:
ts = self.time_shift_analysis
boolean_index = ts.invert_corrections(boolean_index)
start = self.day_index[0]
freq = '{}min'.format(self.data_sampling)
periods = self.filled_data_matrix.size
tindex = pd.date_range(start=start, freq=freq, periods=periods)
series = pd.Series(data=boolean_index.ravel(order='F'), index=tindex)
series.name = column_name
if column_name in self.data_frame.columns:
del self.data_frame[column_name]
self.data_frame = self.data_frame.join(series)
self.data_frame[column_name] = self.data_frame[column_name].fillna(False)
elif cond2:
slct_dates = self.day_index[boolean_index].date
bix = np.isin(self.data_frame.index.date, slct_dates)
self.data_frame[column_name] = False
self.data_frame.loc[bix, column_name] = True
if column_name in self.data_frame_raw.columns:
del self.data_frame_raw[column_name]
self.data_frame_raw = self.data_frame_raw.join(self.data_frame[column_name])
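    # Illustrative usage sketch for augment_data_frame (added here for clarity,
    # not part of the original module). It assumes a DataHandler instance `dh`
    # whose pipeline has already run, so `dh.boolean_masks.daytime` is an M x N
    # boolean matrix:
    #
    #     dh.augment_data_frame(dh.boolean_masks.daytime, 'daytime')
    #     dh.data_frame['daytime']   # True at time stamps flagged as daytime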
def fix_dst(self):
"""
Helper function for fixing data sets with known DST shift. This function
works for data recorded anywhere in the United States. The choice of
timezone (e.g. 'US/Pacific') does not matter, as long as the dates
of the clock changes are the same.
:return:
"""
if not self.__fix_dst_ran:
df = self.data_frame_raw
df_localized = df.tz_localize('US/Pacific', ambiguous='NaT',
nonexistent='NaT')
df_localized = df_localized[df_localized.index == df_localized.index]
df_localized = df_localized.tz_convert('Etc/GMT+8')
df_localized = df_localized.tz_localize(None)
self.data_frame_raw = df_localized
self.__fix_dst_ran = True
return
else:
print('DST correction already performed on this data set.')
return
def make_data_matrix(self, use_col=None, start_day_ix=None, end_day_ix=None):
df = self.data_frame
if use_col is None:
use_col = df.columns[0]
self.raw_data_matrix, day_index = make_2d(df, key=use_col, return_day_axis=True)
self.raw_data_matrix = self.raw_data_matrix[:, start_day_ix:end_day_ix]
self.num_days = self.raw_data_matrix.shape[1]
if self.raw_data_matrix.shape[0] <= 1400:
self.data_sampling = int(24 * 60 / self.raw_data_matrix.shape[0])
else:
self.data_sampling = 24 * 60 / self.raw_data_matrix.shape[0]
self.use_column = use_col
self.day_index = day_index[start_day_ix:end_day_ix]
self.start_doy = self.day_index.dayofyear[0]
return
def make_filled_data_matrix(self, zero_night=True, interp_day=True):
self.filled_data_matrix = np.copy(self.raw_data_matrix)
if zero_night:
self.filled_data_matrix = zero_nighttime(self.raw_data_matrix,
night_mask=~self.boolean_masks.daytime)
if interp_day:
self.filled_data_matrix = interp_missing(self.filled_data_matrix)
else:
msk = np.isnan(self.filled_data_matrix)
self.filled_data_matrix[msk] = 0
self.daily_signals.energy = np.sum(self.filled_data_matrix, axis=0)
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
        assert copy(SQR) is SQR
        assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
        assert copy(SIND) is SIND
        assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
            SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
        )
import numpy as np
from harmonic_equation import harmonic_equation
from equation import equation
import low_level_tools as llt
################################################################################
def eq_11_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_11_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
return np.array([u_exact])
def eq_11_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = ((1 + np.exp(x * y)) * x ** 2 + (2 + np.cos(np.pi * x)) * y ** 2 + \
(1 + x * y) * np.exp(-x * y) + y * np.exp(x) + x * np.exp(y) + np.sin(np.pi * x * y)) * np.exp(x * y)
u_rhs[0, :] = 0
u_rhs[N - 1, :] = 0
u_rhs[:, N - 1] = 0
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_11_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 1 + np.exp(x * y)
b11 = 2 + np.cos(np.pi * x)
c11 = np.exp(-x * y)
d11 = np.exp(x)
e11 = np.exp(y)
f11 = np.sin(np.pi * x * y)
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_red_fox_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_red_fox_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
return np.array([u_exact])
def eq_red_fox_rhs(current, a=1):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = (x**2 + y**2 + a*y)*np.exp(x*y)
u_rhs[0, :] = 0
u_rhs[N - 1, :] = 0
u_rhs[:, N - 1] = 0
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_red_fox_coeff(current, a=1):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = np.ones((N, N))
b11 = np.ones((N, N))
c11 = np.zeros((N, N))
d11 = a*np.ones((N, N))
e11 = np.zeros((N, N))
f11 = np.zeros((N, N))
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_00_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.sin(np.pi * x) * np.sin(np.pi * y) / 2
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_00_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.sin(np.pi * x) * np.sin(np.pi * y) / 2
return np.array([u_exact])
def eq_00_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = -np.pi ** 2 * np.sin(np.pi * x) * np.sin(np.pi * y) * (
4 + y * np.cos(x * np.pi) + 4 + x * np.exp(-x * y)) / 2 + \
np.pi ** 2 * np.cos(np.pi * x)
#######################################
# load mnist #
import mnist
import numpy as np
def normalize(img):
fac = 0.99 / 255
return img * fac + 0.01
def digit_to_layer(digit):
    return (np.arange(10) == digit).astype(float)
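# Minimal sanity-check sketch (added for illustration, not part of the original
# script): `normalize` maps pixel values into [0.01, 1.0] and `digit_to_layer`
# produces a one-hot float vector for the target digit.
def _example_encoding():
    img = np.array([[0, 255]])
    assert np.allclose(normalize(img), np.array([[0.01, 1.0]]))
    one_hot = digit_to_layer(3)
    assert one_hot.shape == (10,) and one_hot[3] == 1.0 and one_hot.sum() == 1.0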
train_images = np.array([normalize(img) for img in mnist.train_images()])
train_labels = np.array([digit_to_layer(digit) for digit in mnist.train_labels()])
test_images = np.array([normalize(img) for img in mnist.test_images()])
test_labels = np.array([digit_to_layer(digit) for digit in mnist.test_labels()])
###
import math
from functools import reduce
padding = 'valid'
padding = 'same'
padding = 'full'
# I x I x C
# O x O x K
def init_tuple_counter(count_to: tuple) -> tuple:
    return tuple(np.zeros(len(count_to), dtype=int))
def adder(counter: tuple, max: tuple) -> tuple:
    if counter == max:
        return counter
    counter_array = np.array(counter)
    length = len(counter_array)
    carry = True
    for i in range(length - 1, -1, -1):
        counter_array[i] = counter_array[i] + 1
        carry = False
        if counter_array[i] >= max[i]:
            counter_array[i] = 0
            carry = True
        if not carry:
            break
    # a carry out of the most significant digit means every index combination
    # has been visited; pin the counter to `max` as the completion sentinel
    if carry:
        counter_array = np.array(max)
    return tuple(counter_array)
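# Hedged usage sketch for `adder` (added for illustration): it walks a
# multi-dimensional index like an odometer and returns `max` itself once every
# index combination has been visited, which is the loop-termination sentinel
# used by conv2d/avg_pool below.
def _example_adder():
    assert adder((0, 0), (2, 3)) == (0, 1)
    assert adder((0, 2), (2, 3)) == (1, 0)
    assert adder((1, 2), (2, 3)) == (2, 3)   # rolled over -> sentinel
    assert adder((2, 3), (2, 3)) == (2, 3)   # stays at the sentinel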
def conv2d(input: np.array, output: np.array, filters: np.array, stride: tuple([int, int]) = (1, 1)) \
-> np.array:
## padding needs to be implemented
## proper strides
kernel_y = len(filters)
kernel_x = len(filters[0])
kernel_channels = len(filters[0][0])
num_filters = len(filters[0][0][0])
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
## assert padding is valid I x I x K
conv_out_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) \
+ 1
conv_out_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) \
+ 1
conv_shape = batch_shape + (conv_out_height, conv_out_width, num_filters)
# conv_out = np.ndarray(shape=conv_shape)
    batch_idx = tuple(np.zeros(len(batch_shape), dtype=int))
    while batch_idx != batch_shape:
layer = input[tuple(batch_idx)]
for y_idx in range(0, conv_out_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, conv_out_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
kernel = layer[y_start:y_end, x_start:x_end]
for filter_idx in range(num_filters):
filter = filters[:, :, :, filter_idx]
multi = np.multiply(kernel, filter)
product_idx = (y_idx, x_idx, filter_idx)
output[tuple(batch_idx) + product_idx] = np.sum(multi)
batch_idx = adder(batch_idx, batch_shape)
return output
def conv_output_size(layer_dimensions: tuple, kernel_dimensions: tuple,
stride_dimensionsensions: tuple, padding: int):
return (int(((layer_dimensions[0] - kernel_dimensions[0] + 2 * padding) \
/ stride_dimensionsensions[0])) + 1,
int(((layer_dimensions[1] - kernel_dimensions[1] + 2 * padding) \
/ stride_dimensionsensions[1])) + 1,
kernel_dimensions[3])
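# Worked example of the output-size formula above (illustrative only): a 28x28
# single-channel input convolved with a 5x5x1x32 kernel at stride 1 and no
# padding yields ((28 - 5 + 0) / 1) + 1 = 24 along each spatial axis.
def _example_conv_output_size():
    assert conv_output_size((28, 28, 1), (5, 5, 1, 32), (1, 1), 0) == (24, 24, 32)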
def generate_conv2d_filters(kernel_dimensions: tuple, k: float = 2.0) -> np.array:
kernel_y = kernel_dimensions[0]
kernel_x = kernel_dimensions[1]
kernel_channels = kernel_dimensions[2]
num_filters = kernel_dimensions[3]
filters = np.ndarray(shape=kernel_dimensions)
filter_shape = tuple([kernel_y, kernel_x, kernel_channels])
nl = kernel_x * kernel_y * kernel_channels
std = math.sqrt(k / nl)
for filter_idx in range(num_filters):
filter = np.random.normal(scale=std, size=nl)
filter = filter.reshape(filter_shape)
filters[:, :, :, filter_idx] = filter
return filters
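# The filters above follow He/Kaiming-style initialisation: with k = 2 the
# weights are drawn from N(0, sqrt(2 / n_l)) where n_l = kernel_y * kernel_x *
# channels. Quick illustrative check of the expected spread (a sketch added
# here, not part of the original script):
def _example_filter_std():
    filters = generate_conv2d_filters((5, 5, 1, 64))
    expected_std = math.sqrt(2.0 / (5 * 5 * 1))   # ~0.283
    assert abs(np.std(filters) - expected_std) < 0.05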
def lif_neuron(Vm: float, V_reset: float, V_th: float, tau_m: float, fire=True,
leaky=True) -> np.array:
if Vm >= V_th and fire:
spike = 1
Vm = V_reset
else:
spike = 0
if leaky:
Vm = Vm * math.exp(-1 / tau_m)
return [Vm, spike]
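# Illustrative behaviour of the LIF neuron above (added sketch, not part of the
# original script): below threshold the membrane voltage simply decays by
# exp(-1/tau_m); at or above threshold the neuron spikes and resets to V_reset.
def _example_lif_neuron():
    vm, spike = lif_neuron(Vm=0.5, V_reset=0.0, V_th=0.75, tau_m=100)
    assert spike == 0 and abs(vm - 0.5 * math.exp(-1 / 100)) < 1e-12
    vm, spike = lif_neuron(Vm=0.8, V_reset=0.0, V_th=0.75, tau_m=100)
    assert spike == 1 and vm == 0.0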
def flatten(input: np.array, output: np.array, flatten_dim: int):
batch_dimensions = input.shape[:flatten_dim]
flattened_dimension = tuple([math.prod(input.shape[flatten_dim:])])
output = np.reshape(input, batch_dimensions + flattened_dimension)
return output
def lif_neuron_pool(Vin: np.array,
Vout: np.array,
spike_out: np.array,
Vreset: float = 0,
Vth: float = 0.75,
tau_m: int = 100,
fire: bool = True,
leaky: bool = True,
time_index: int = 0) -> np.array:
# [batch][time][spike_train]
# [batch][ Vin ]
# adequate dimensions to process
# a dimensions to
# assert (len(Vin.shape[-4]) > 2)
#if (Vin != NULL):
# s = 1 # TODO: implement smth here
# generate output arrays
# Vout = np.zero(shape=(Vin.shape))
# spike_out = np.zero(shape=(Vin.shape))
assert(Vin.shape == Vout.shape)
# process batches
batch_dimensions = Vin.shape[:max(time_index-1,0)]
spike_train_length = Vin.shape[time_index]
membrane_dimensions = Vin.shape[time_index+1:]
for batch_idx in np.ndindex(batch_dimensions):
for neuron_idx in np.ndindex(membrane_dimensions):
for t_idx in range(1, spike_train_length):
# membrane voltage for this step
t_current = batch_idx + tuple([t_idx]) + neuron_idx
t_previous = batch_idx + tuple([t_idx - 1]) + neuron_idx
Vm = Vin[t_current] + Vout[t_previous]
# simulate lif-neuron
[Vout[t_current], spike_out[t_current]] = lif_neuron(Vm, Vreset, Vth, tau_m, fire, leaky)
return [Vout, spike_out]
def generate_spike_train(p: float, t: int) -> np.array:
dist = np.random.uniform(1, 0, t)
return np.array([int(item < p) for item in dist])
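# Rate-coding sketch (added for illustration): each pixel intensity p becomes a
# Bernoulli spike train in which roughly a fraction p of the t time steps carry
# a spike.
def _example_spike_train():
    train = generate_spike_train(p=0.25, t=1000)
    assert train.shape == (1000,)
    assert set(np.unique(train)).issubset({0, 1})
    assert 0.1 < train.mean() < 0.4   # loose bound around the firing rate 0.25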
def generate_layer_spike_train(layer: np.array, train_length: int):
layer_height = len(layer)
layer_width = len(layer[0])
spike_layer = np.ndarray(shape=(train_length, layer_height, layer_width, 1))
for y in range(0, layer_height):
for x in range(0, layer_width):
train = np.array(generate_spike_train(layer[y][x], train_length))
for t in range(0, train_length):
spike_layer[t, y, x, 0] = train[t]
return spike_layer
def avg_pool(input: np.array, output:np.array, kernel_size: tuple([int, int]) = (2, 2), stride: tuple([int, int]) = (1, 1)) -> np.array:
pool = output
## padding needs to be implemented
## proper strides
kernel_y = kernel_size[1]
kernel_x = kernel_size[0]
batch_shape = input.shape[:-3]
layer_shape = input.shape[-3:]
layer_height = layer_shape[0]
layer_width = layer_shape[1]
layer_channel = layer_shape[2]
stride_x = stride[0]
stride_y = stride[1]
padding = 0
pool_height = int(((layer_height - kernel_y + 2 * padding) / stride_y)) + 1
pool_width = int(((layer_width - kernel_x + 2 * padding) / stride_x)) + 1
pool_shape = batch_shape + (pool_height, pool_width, layer_channel)
# pool = np.ndarray(shape=pool_shape)
# TODO: Update this code
    batch_idx = tuple(np.zeros(len(batch_shape), dtype=int))
    while batch_idx != batch_shape:
layer = input[tuple(batch_idx)]
for y_idx in range(0, pool_height):
y_start = y_idx * stride_y
y_end = (y_idx * stride_y + kernel_y)
for x_idx in range(0, pool_width):
x_start = x_idx * stride_x
x_end = (x_idx * stride_x + kernel_x)
for channel_idx in range(0, layer_channel):
kernel = layer[y_start:y_end, x_start:x_end, channel_idx]
product = np.sum(kernel) / kernel.size
product_idx = (y_idx, x_idx, channel_idx)
pool[tuple(batch_idx) + product_idx] = product
batch_idx = adder(batch_idx, batch_shape)
return pool
def generate_dense_layer_weights(input_dimensions: tuple, num_neuron_output: int, k: float = 2.0) -> np.array:
axons_per_neuron = math.prod(input_dimensions)
synapses = np.ndarray(shape=(num_neuron_output, axons_per_neuron))
nl = axons_per_neuron
std = math.sqrt(k / nl)
for i in range(num_neuron_output):
synapses[i] = np.random.normal(scale=std, size=nl)
return synapses
def dense_forward(input_neurons: np.array, output_neurons: np.array, weights: np.array) -> np.array:
ins = input_neurons.shape
ons = output_neurons.shape
ws = weights.shape
# [batch][spike time]
batch_dimensions = input_neurons.shape[:-1]
# [][]
num_input_neurons = weights.shape[1]
num_output_neurons = weights.shape[0]
#[neuron y][neuron x][channel]
for batch_idx in np.ndindex(batch_dimensions):
for output_neuron_idx in range(num_output_neurons):
# action_potential = 0
# dot product
# for input_neuron_idx in range(num_input_neurons):
# ax = input_neurons[batch_idx][input_neuron_idx]
# wx = weights[output_neuron_idx][input_neuron_idx]
# action_potential = action_potential + ax*wx
output_neurons[batch_idx][output_neuron_idx] = np.dot(input_neurons[batch_idx], weights[output_neuron_idx])
return output_neurons
def generate_membrane(membrane_dimensions: tuple, value: float = 0.0):
membrane = np.ndarray(shape=membrane_dimensions)
membrane.fill(value)
return membrane
# This computes the surrogate-gradient term da_lif / d_net
def differentiate_spike_train(spike_train, Vth = 1):
# sum of decay over time
gamma = sum(spike_train)
if gamma == 0:
return 0
tau_m = len(spike_train)
total_decay = 0
t = tk = 1
for activation in spike_train:
if activation:
if t != tk:
decay = math.exp(-(t - tk) / tau_m)
total_decay = total_decay - (1 / tau_m) * decay
tk = t + 1
t = t + 1
return (1/Vth) * (1 + (1/gamma) * total_decay)
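# Worked example of the surrogate gradient above (illustrative sketch): for the
# train [1, 0, 0, 1] there are gamma = 2 spikes over tau_m = 4 steps; only the
# second spike (t = 4, tk = 2) contributes a decay term, so the result is
# 1 + (1/2) * (-(1/4) * exp(-2/4)) = 1 - exp(-0.5) / 8.
def _example_differentiate_spike_train():
    value = differentiate_spike_train([1, 0, 0, 1])
    assert math.isclose(value, 1 - math.exp(-0.5) / 8)
    assert differentiate_spike_train([0, 0, 0, 0]) == 0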
class Layer:
def __init__(self):
self.trainable = True
self.input_shape = None
self.output_shape = None
def count_parameters(self):
raise NotImplementedError()
def compute_output_shape(self, input_shape):
raise NotImplementedError()
def forward_propagate(self, A):
raise NotImplementedError()
def backward_propagate(self, dZ, cache):
raise NotImplementedError()
def get_weights(self):
raise NotImplementedError()
def set_weights(self, weights):
raise NotImplementedError()
def build(self, input_shape):
self.input_shape = input_shape
class Dropout(Layer):
def __init__(self, probability):
super().__init__()
self.probability = probability
self.mask = None
def build(self, input_shape):
self.input_shape = input_shape
self.output_shape = input_shape
self.reset()
def reset(self):
self.mask = np.random.binomial(1, 1-self.probability, size=self.output_shape)
def forward_propagate(self, A):
        masked = np.multiply(self.mask, A)
        return masked
from __future__ import print_function
import numpy as np
from scipy.ndimage import filters,interpolation
from .toplevel import *
from . import sl,morph
def B(a):
if a.dtype==np.dtype('B'): return a
return np.array(a,'B')
class record:
def __init__(self,**kw): self.__dict__.update(kw)
def binary_objects(binary):
labels,n = morph.label(binary)
objects = morph.find_objects(labels)
return objects
@checks(ABINARY2)
def estimate_scale(binary, zoom=1.0):
objects = binary_objects(binary)
bysize = sorted(objects,key=sl.area)
scalemap = np.zeros(binary.shape)
for o in bysize:
if np.amax(scalemap[o])>0: continue
scalemap[o] = sl.area(o)**0.5
scalemap = scalemap[(scalemap>3/zoom)&(scalemap<100/zoom)]
if np.any(scalemap):
"""
Author: <NAME>
"""
from statsmodels.compat.platform import PLATFORM_LINUX32, PLATFORM_WIN
from itertools import product
import json
import pathlib
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pandas as pd
import pytest
import scipy.stats
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
import statsmodels.tsa.holtwinters as holtwinters
import statsmodels.tsa.statespace.exponential_smoothing as statespace
# This contains tests for the exponential smoothing implementation in
# tsa/exponential_smoothing/ets.py.
#
# Tests are mostly done by comparing results with the R implementation in the
# package forecast for the datasets `oildata` (non-seasonal) and `austourists`
# (seasonal).
#
# Therefore, a parametrized pytest fixture ``setup_model`` is provided, which
# returns a constructed model, model parameters from R in the format expected
# by ETSModel, and a dictionary of reference results. Use like this:
#
# def test_<testname>(setup_model):
# model, params, results_R = setup_model
# # perform some tests
# ...
###############################################################################
# UTILS
###############################################################################
# Below I define parameter lists for all possible model and data combinations
# (for data, see below). These are used for parametrizing the pytest fixture
# ``setup_model``, which should be used for all tests comparing to R output.
def remove_invalid_models_from_list(modellist):
# remove invalid models (no trend but damped)
for i, model in enumerate(modellist):
if model[1] is None and model[3]:
del modellist[i]
ERRORS = ("add", "mul")
TRENDS = ("add", "mul", None)
SEASONALS = ("add", "mul", None)
DAMPED = (True, False)
MODELS_DATA_SEASONAL = list(
product(ERRORS, TRENDS, ("add", "mul"), DAMPED, ("austourists",),)
)
MODELS_DATA_NONSEASONAL = list(
product(ERRORS, TRENDS, (None,), DAMPED, ("oildata",),)
)
remove_invalid_models_from_list(MODELS_DATA_SEASONAL)
remove_invalid_models_from_list(MODELS_DATA_NONSEASONAL)
def short_model_name(error, trend, seasonal, damped=False):
short_name = {"add": "A", "mul": "M", None: "N", True: "d", False: ""}
return (
short_name[error]
+ short_name[trend]
+ short_name[damped]
+ short_name[seasonal]
)
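# e.g. (illustrative): short_model_name("add", "add", None, damped=True) -> "AAdN"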
ALL_MODELS_AND_DATA = MODELS_DATA_NONSEASONAL + MODELS_DATA_SEASONAL
ALL_MODEL_IDS = [
short_model_name(*mod[:3], mod[3]) for mod in ALL_MODELS_AND_DATA
]
@pytest.fixture(params=ALL_MODELS_AND_DATA, ids=ALL_MODEL_IDS)
def setup_model(
request,
austourists,
oildata,
ets_austourists_fit_results_R,
ets_oildata_fit_results_R,
):
params = request.param
error, trend, seasonal, damped = params[0:4]
data = params[4]
if data == "austourists":
data = austourists
seasonal_periods = 4
results = ets_austourists_fit_results_R[damped]
else:
data = oildata
seasonal_periods = None
results = ets_oildata_fit_results_R[damped]
name = short_model_name(error, trend, seasonal)
if name not in results:
pytest.skip(f"model {name} not implemented or not converging in R")
results_R = results[name]
params = get_params_from_R(results_R)
model = ETSModel(
data,
seasonal_periods=seasonal_periods,
error=error,
trend=trend,
seasonal=seasonal,
damped_trend=damped,
)
return model, params, results_R
@pytest.fixture
def austourists_model(austourists):
return ETSModel(
austourists,
seasonal_periods=4,
error="add",
trend="add",
seasonal="add",
damped_trend=True,
)
@pytest.fixture
def austourists_model_fit(austourists_model):
return austourists_model.fit(disp=False)
@pytest.fixture
def oildata_model(oildata):
return ETSModel(oildata, error="add", trend="add", damped_trend=True,)
#############################################################################
# DATA
#############################################################################
@pytest.fixture
def austourists():
# austourists dataset from fpp2 package
# https://cran.r-project.org/web/packages/fpp2/index.html
data = [
30.05251300,
19.14849600,
25.31769200,
27.59143700,
32.07645600,
23.48796100,
28.47594000,
35.12375300,
36.83848500,
25.00701700,
30.72223000,
28.69375900,
36.64098600,
23.82460900,
29.31168300,
31.77030900,
35.17787700,
19.77524400,
29.60175000,
34.53884200,
41.27359900,
26.65586200,
28.27985900,
35.19115300,
42.20566386,
24.64917133,
32.66733514,
37.25735401,
45.24246027,
29.35048127,
36.34420728,
41.78208136,
49.27659843,
31.27540139,
37.85062549,
38.83704413,
51.23690034,
31.83855162,
41.32342126,
42.79900337,
55.70835836,
33.40714492,
42.31663797,
45.15712257,
59.57607996,
34.83733016,
44.84168072,
46.97124960,
60.01903094,
38.37117851,
46.97586413,
50.73379646,
61.64687319,
39.29956937,
52.67120908,
54.33231689,
66.83435838,
40.87118847,
51.82853579,
57.49190993,
65.25146985,
43.06120822,
54.76075713,
59.83447494,
73.25702747,
47.69662373,
61.09776802,
66.05576122,
]
index = pd.date_range("1999-01-01", "2015-12-31", freq="Q")
return pd.Series(data, index)
@pytest.fixture
def oildata():
# oildata dataset from fpp2 package
# https://cran.r-project.org/web/packages/fpp2/index.html
data = [
111.0091346,
130.8284341,
141.2870879,
154.2277747,
162.7408654,
192.1664835,
240.7997253,
304.2173901,
384.0045673,
429.6621566,
359.3169299,
437.2518544,
468.4007898,
424.4353365,
487.9794299,
509.8284478,
506.3472527,
340.1842374,
240.2589210,
219.0327876,
172.0746632,
252.5900922,
221.0710774,
276.5187735,
271.1479517,
342.6186005,
428.3558357,
442.3945534,
432.7851482,
437.2497186,
437.2091599,
445.3640981,
453.1950104,
454.4096410,
422.3789058,
456.0371217,
440.3866047,
425.1943725,
486.2051735,
500.4290861,
521.2759092,
508.9476170,
488.8888577,
509.8705750,
456.7229123,
473.8166029,
525.9508706,
549.8338076,
542.3404698,
]
return pd.Series(data, index=pd.date_range("1965", "2013", freq="AS"))
#############################################################################
# REFERENCE RESULTS
#############################################################################
def obtain_R_results(path):
with path.open("r") as f:
R_results = json.load(f)
# remove invalid models
results = {}
for damped in R_results:
new_key = damped == "TRUE"
results[new_key] = {}
for model in R_results[damped]:
if len(R_results[damped][model]):
results[new_key][model] = R_results[damped][model]
# get correct types
for damped in results:
for model in results[damped]:
for key in ["alpha", "beta", "gamma", "phi", "sigma2"]:
results[damped][model][key] = float(
results[damped][model][key][0]
)
for key in [
"states",
"initstate",
"residuals",
"fitted",
"forecast",
"simulation",
]:
results[damped][model][key] = np.asarray(
results[damped][model][key]
)
return results
@pytest.fixture
def ets_austourists_fit_results_R():
"""
Dictionary of ets fit results obtained with script ``results/fit_ets.R``.
"""
path = (
pathlib.Path(__file__).parent
/ "results"
/ "fit_ets_results_seasonal.json"
)
return obtain_R_results(path)
@pytest.fixture
def ets_oildata_fit_results_R():
"""
Dictionary of ets fit results obtained with script ``results/fit_ets.R``.
"""
path = (
pathlib.Path(__file__).parent
/ "results"
/ "fit_ets_results_nonseasonal.json"
)
return obtain_R_results(path)
def fit_austourists_with_R_params(model, results_R, set_state=False):
"""
Fit the model with params as found by R's forecast package
"""
params = get_params_from_R(results_R)
with model.fix_params(dict(zip(model.param_names, params))):
fit = model.fit(disp=False)
if set_state:
states_R = get_states_from_R(results_R, model._k_states)
fit.states = states_R
return fit
def get_params_from_R(results_R):
# get params from R
params = [results_R[name] for name in ["alpha", "beta", "gamma", "phi"]]
# in R, initial states are order l[-1], b[-1], s[-1], s[-2], ..., s[-m]
params += list(results_R["initstate"])
params = list(filter(np.isfinite, params))
return params
def get_states_from_R(results_R, k_states):
if k_states > 1:
xhat_R = results_R["states"][1:, 0:k_states]
else:
xhat_R = results_R["states"][1:]
xhat_R = np.reshape(xhat_R, (len(xhat_R), 1))
return xhat_R
#############################################################################
# BASIC TEST CASES
#############################################################################
def test_fit_model_austouritsts(setup_model):
model, params, results_R = setup_model
model.fit(disp=False)
#############################################################################
# TEST OF MODEL EQUATIONS VS R
#############################################################################
def test_smooth_vs_R(setup_model):
model, params, results_R = setup_model
yhat, xhat = model.smooth(params, return_raw=True)
yhat_R = results_R["fitted"]
xhat_R = get_states_from_R(results_R, model._k_states)
assert_allclose(xhat, xhat_R, rtol=1e-5, atol=1e-5)
assert_allclose(yhat, yhat_R, rtol=1e-5, atol=1e-5)
def test_residuals_vs_R(setup_model):
model, params, results_R = setup_model
yhat = model.smooth(params, return_raw=True)[0]
residuals = model._residuals(yhat)
assert_allclose(residuals, results_R["residuals"], rtol=1e-5, atol=1e-5)
def test_loglike_vs_R(setup_model):
model, params, results_R = setup_model
loglike = model.loglike(params)
# the calculation of log likelihood in R is only up to a constant:
const = -model.nobs / 2 * (np.log(2 * np.pi / model.nobs) + 1)
loglike_R = results_R["loglik"][0] + const
assert_allclose(loglike, loglike_R, rtol=1e-5, atol=1e-5)
def test_forecast_vs_R(setup_model):
model, params, results_R = setup_model
fit = fit_austourists_with_R_params(model, results_R, set_state=True)
fcast = fit.forecast(4)
expected = np.asarray(results_R["forecast"])
assert_allclose(expected, fcast.values, rtol=1e-3, atol=1e-4)
def test_simulate_vs_R(setup_model):
model, params, results_R = setup_model
fit = fit_austourists_with_R_params(model, results_R, set_state=True)
innov = np.asarray([[1.76405235, 0.40015721, 0.97873798, 2.2408932]]).T
sim = fit.simulate(4, anchor="end", repetitions=1, random_errors=innov)
expected = np.asarray(results_R["simulation"])
assert_allclose(expected, sim.values, rtol=1e-5, atol=1e-5)
def test_fit_vs_R(setup_model, reset_randomstate):
model, params, results_R = setup_model
if PLATFORM_WIN and model.short_name == "AAdA":
start = params
else:
start = None
fit = model.fit(disp=True, pgtol=1e-8, start_params=start)
# check log likelihood: we want to have a fit that is better, i.e. a fit
# that has a **higher** log-likelihood
const = -model.nobs / 2 * (np.log(2 * np.pi / model.nobs) + 1)
loglike_R = results_R["loglik"][0] + const
loglike = fit.llf
try:
assert loglike >= loglike_R - 1e-4
except AssertionError:
fit = model.fit(disp=True, tol=1e-8, start_params=params)
loglike = fit.llf
try:
assert loglike >= loglike_R - 1e-4
except AssertionError:
if PLATFORM_LINUX32:
# Linux32 often fails to produce the correct solution.
# Fixing this is low priority given the rareness of
# its application
pytest.xfail("Known to fail on 32-bit Linux")
else:
raise
def test_predict_vs_R(setup_model):
model, params, results_R = setup_model
fit = fit_austourists_with_R_params(model, results_R, set_state=True)
n = fit.nobs
prediction = fit.predict(end=n + 3, dynamic=n)
yhat_R = results_R["fitted"]
assert_allclose(prediction[:n], yhat_R, rtol=1e-5, atol=1e-5)
forecast_R = results_R["forecast"]
assert_allclose(prediction[n:], forecast_R, rtol=1e-3, atol=1e-4)
#############################################################################
# OTHER TESTS
#############################################################################
def test_initialization_known(austourists):
initial_level, initial_trend = [36.46466837, 34.72584983]
model = ETSModel(
austourists,
error="add",
trend="add",
damped_trend=True,
initialization_method="known",
initial_level=initial_level,
initial_trend=initial_trend,
)
internal_params = model._internal_params(model._start_params)
assert initial_level == internal_params[4]
assert initial_trend == internal_params[5]
assert internal_params[6] == 0
def test_initialization_heuristic(oildata):
model_estimated = ETSModel(
oildata,
error="add",
trend="add",
damped_trend=True,
initialization_method="estimated",
)
model_heuristic = ETSModel(
oildata,
error="add",
trend="add",
damped_trend=True,
initialization_method="heuristic",
)
fit_estimated = model_estimated.fit(disp=False)
fit_heuristic = model_heuristic.fit(disp=False)
yhat_estimated = fit_estimated.fittedvalues.values
yhat_heuristic = fit_heuristic.fittedvalues.values
# this test is mostly just to see if it works, so we only test whether the
# result is not totally off
assert_allclose(yhat_estimated[10:], yhat_heuristic[10:], rtol=0.5)
def test_bounded_fit(oildata):
beta = [0.99, 0.99]
model1 = ETSModel(
oildata,
error="add",
trend="add",
damped_trend=True,
bounds={"smoothing_trend": beta},
)
fit1 = model1.fit(disp=False)
assert fit1.smoothing_trend == 0.99
    # the same, using the fix_params context manager
model2 = ETSModel(oildata, error="add", trend="add", damped_trend=True,)
with model2.fix_params({"smoothing_trend": 0.99}):
fit2 = model2.fit(disp=False)
assert fit2.smoothing_trend == 0.99
assert_allclose(fit1.params, fit2.params)
fit2.summary() # check if summary runs without failing
# using fit_constrained
fit3 = model2.fit_constrained({"smoothing_trend": 0.99})
assert fit3.smoothing_trend == 0.99
assert_allclose(fit1.params, fit3.params)
fit3.summary()
def test_seasonal_periods(austourists):
# test auto-deduction of period
model = ETSModel(austourists, error="add", trend="add", seasonal="add")
assert model.seasonal_periods == 4
    # test that an invalid seasonal period raises an error
    with pytest.raises(ValueError):
        ETSModel(austourists, seasonal="add", seasonal_periods=0)
def test_simulate_keywords(austourists_model_fit):
"""
check whether all keywords are accepted and work without throwing errors.
"""
fit = austourists_model_fit
# test anchor
assert_almost_equal(
fit.simulate(4, anchor=-1, random_state=0).values,
fit.simulate(4, anchor="2015-12-31", random_state=0).values,
)
assert_almost_equal(
fit.simulate(4, anchor="end", random_state=0).values,
fit.simulate(4, anchor="2015-12-31", random_state=0).values,
)
# test different random error options
fit.simulate(4, repetitions=10)
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm)
fit.simulate(4, repetitions=10, random_errors=scipy.stats.norm())
fit.simulate(4, repetitions=10, random_errors=np.random.randn(4, 10))
fit.simulate(4, repetitions=10, random_errors="bootstrap")
# test seeding
res = fit.simulate(4, repetitions=10, random_state=10).values
res2 = fit.simulate(
4, repetitions=10, random_state=np.random.RandomState(10)
).values
assert np.all(res == res2)
def test_predict_ranges(austourists_model_fit):
# in total 68 observations
fit = austourists_model_fit
# first prediction is 0, last is 10 -> 11 predictions
pred = fit.predict(start=0, end=10)
assert len(pred) == 11
pred = fit.predict(start=10, end=20)
assert len(pred) == 11
pred = fit.predict(start=10, dynamic=10, end=30)
assert len(pred) == 21
# try boolean dynamic
pred = fit.predict(start=0, dynamic=True, end=70)
assert len(pred) == 71
pred = fit.predict(start=0, dynamic=True, end=70)
assert len(pred) == 71
    # try only out of sample prediction
pred = fit.predict(start=80, end=84)
assert len(pred) == 5
def test_summary(austourists_model):
# just try to run summary to see if it works
fit = austourists_model.fit(disp=False)
fit.summary()
# now without estimated initial states
austourists_model.set_initialization_method("heuristic")
fit = austourists_model.fit(disp=False)
fit.summary()
# and with fixed params
fit = austourists_model.fit_constrained({"smoothing_trend": 0.9})
fit.summary()
def test_score(austourists_model_fit):
score_cs = austourists_model_fit.model.score(austourists_model_fit.params)
score_fd = austourists_model_fit.model.score(
austourists_model_fit.params,
approx_complex_step=False,
approx_centered=True,
)
assert_almost_equal(score_cs, score_fd, 4)
def test_hessian(austourists_model_fit):
# The hessian approximations are not very consistent, but the test makes
# sure they run
austourists_model_fit.model.hessian(austourists_model_fit.params)
austourists_model_fit.model.hessian(
austourists_model_fit.params,
approx_complex_step=False,
approx_centered=True,
)
def test_prediction_results(austourists_model_fit):
# simple test case starting at 0
pred = austourists_model_fit.get_prediction(start=0, dynamic=30, end=40,)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 41
assert np.all(~np.isnan(summary["mean"]))
# simple test case starting at not 0
pred = austourists_model_fit.get_prediction(start=10, dynamic=30, end=40)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 31
assert np.all(~np.isnan(summary["mean"]))
# long out of sample prediction
pred = austourists_model_fit.get_prediction(start=0, dynamic=30, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 81
assert np.all(~np.isnan(summary["mean"]))
# long out of sample, starting in-sample
pred = austourists_model_fit.get_prediction(start=67, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 14
assert np.all(~np.isnan(summary["mean"]))
# long out of sample, starting at end of sample
pred = austourists_model_fit.get_prediction(start=68, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 13
assert np.all(~np.isnan(summary["mean"]))
# long out of sample, starting just out of sample
pred = austourists_model_fit.get_prediction(start=69, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 12
assert np.all(~np.isnan(summary["mean"]))
# long out of sample, starting long out of sample
pred = austourists_model_fit.get_prediction(start=79, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 2
assert np.all(~np.isnan(summary["mean"]))
# long out of sample, `start`== `end`
pred = austourists_model_fit.get_prediction(start=80, end=80)
summary = pred.summary_frame()
assert len(summary["mean"].values) == 1
    assert np.all(~np.isnan(summary["mean"]))
import os
import sys
import math
import datetime
import time
import pytz
from collections import Counter
import csv
# from PyQt5.QtWidgets import QApplication, QDesktopWidget, QWidget, QPushButton, QMessageBox
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
import numpy.matlib  # required for np.matlib.repmat used in specFilter below
import scipy.interpolate
from scipy.interpolate import splev, splrep
import pandas as pd
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from ConfigFile import ConfigFile
from MainConfig import MainConfig
# This gets reset later in Controller.processSingleLevel to reflect the file being processed.
if "LOGFILE" not in os.environ:
os.environ["LOGFILE"] = "temp.log"
class Utilities:
@staticmethod
def mostFrequent(List):
occurence_count = Counter(List)
return occurence_count.most_common(1)[0][0]
@staticmethod
def find_nearest(array,value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
# ''' ONLY FOR SORTED ARRAYS'''
# idx = np.searchsorted(array, value, side="left")
# if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):
# return array[idx-1]
# else:
# return array[idx]
@staticmethod
def errorWindow(winText,errorText):
msgBox = QMessageBox()
# msgBox.setIcon(QMessageBox.Information)
msgBox.setIcon(QMessageBox.Critical)
msgBox.setText(errorText)
msgBox.setWindowTitle(winText)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.exec_()
@staticmethod
def waitWindow(winText,waitText):
msgBox = QMessageBox()
# msgBox.setIcon(QMessageBox.Information)
msgBox.setIcon(QMessageBox.Critical)
msgBox.setText(waitText)
msgBox.setWindowTitle(winText)
# msgBox.setStandardButtons(QMessageBox.Ok)
# msgBox.exec_()
return msgBox
@staticmethod
def YNWindow(winText,infoText):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setText(infoText)
msgBox.setWindowTitle(winText)
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
returnValue = msgBox.exec_()
return returnValue
# Print iterations progress
@staticmethod
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
@staticmethod
def writeLogFile(logText, mode='a'):
with open('Logs/' + os.environ["LOGFILE"], mode) as logFile:
logFile.write(logText + "\n")
# Converts degrees minutes to decimal degrees format
@staticmethod # for some reason, these were not set to static method, but didn't refer to self
def dmToDd(dm, direction):
d = int(dm/100)
m = dm-d*100
dd = d + m/60
if direction == b'W' or direction == b'S':
dd *= -1
return dd
# Converts decimal degrees to degrees minutes format
@staticmethod
def ddToDm(dd):
d = int(dd)
m = abs(dd - d)*60
dm = (d*100) + m
return dm
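    # Worked examples (illustrative, not from the original source):
    #   dmToDd(4530.50, b'N') -> 45 + 30.50/60 =  45.5083 decimal degrees
    #   dmToDd(4530.50, b'S') -> -45.5083
    #   ddToDm(45.5083)       -> 4530.498  (degrees*100 + minutes)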
# Converts GPS UTC time (HHMMSS.ds; i.e. 99 ds after midnight is 000000.99)to seconds
# Note: Does not support multiple days
@staticmethod
def utcToSec(utc):
# Use zfill to ensure correct width, fixes bug when hour is 0 (12 am)
t = str(int(utc)).zfill(6)
# print(t)
#print(t[:2], t[2:4], t[4:])
h = int(t[:2])
m = int(t[2:4])
s = float(t[4:])
return ((h*60)+m)*60+s
# Converts datetime date and UTC (HHMMSS.ds) to datetime (uses microseconds)
@staticmethod
def utcToDateTime(dt, utc):
# Use zfill to ensure correct width, fixes bug when hour is 0 (12 am)
num, dec = str(float(utc)).split('.')
t = num.zfill(6)
h = int(t[:2])
m = int(t[2:4])
s = int(t[4:6])
us = 10000*int(dec) # i.e. 0.55 s = 550,000 us
return datetime.datetime(dt.year,dt.month,dt.day,h,m,s,us,tzinfo=datetime.timezone.utc)
# Converts datetag (YYYYDDD) to date string
@staticmethod
def dateTagToDate(dateTag):
dt = datetime.datetime.strptime(str(int(dateTag)), '%Y%j')
timezone = pytz.utc
dt = timezone.localize(dt)
return dt.strftime('%Y%m%d')
# Converts datetag (YYYYDDD) to datetime
@staticmethod
def dateTagToDateTime(dateTag):
dt = datetime.datetime.strptime(str(int(dateTag)), '%Y%j')
timezone = pytz.utc
dt = timezone.localize(dt)
return dt
# Converts seconds of the day (NOT GPS UTCPOS) to GPS UTC (HHMMSS.SS)
@staticmethod
def secToUtc(sec):
m, s = divmod(sec, 60)
h, m = divmod(m, 60)
return float("%d%02d%02d" % (h, m, s))
# Converts seconds of the day to TimeTag2 (HHMMSSmmm; i.e. 0.999 sec after midnight = 000000999)
@staticmethod
def secToTimeTag2(sec):
#return float(time.strftime("%H%M%S", time.gmtime(sec)))
t = sec * 1000
s, ms = divmod(t, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
return int("%d%02d%02d%03d" % (h, m, s, ms))
# Converts TimeTag2 (HHMMSSmmm) to seconds
@staticmethod
def timeTag2ToSec(tt2):
t = str(int(tt2)).zfill(9)
h = int(t[:2])
m = int(t[2:4])
s = int(t[4:6])
ms = int(t[6:])
# print(h, m, s, ms)
return ((h*60)+m)*60+s+(float(ms)/1000.0)
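    # Worked example (illustrative): timeTag2ToSec(123456789) parses 12:34:56.789
    # and returns ((12*60)+34)*60 + 56 + 0.789 = 45296.789 seconds of the day.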
# Converts datetime.date and TimeTag2 (HHMMSSmmm) to datetime
@staticmethod
def timeTag2ToDateTime(dt,tt2):
t = str(int(tt2)).zfill(9)
h = int(t[:2])
m = int(t[2:4])
s = int(t[4:6])
us = 1000*int(t[6:])
# print(h, m, s, us)
# print(tt2)
return datetime.datetime(dt.year,dt.month,dt.day,h,m,s,us,tzinfo=datetime.timezone.utc)
# Converts datetime to Timetag2 (HHMMSSmmm)
@staticmethod
def datetime2TimeTag2(dt):
h = dt.hour
m = dt.minute
s = dt.second
ms = dt.microsecond/1000
return int("%d%02d%02d%03d" % (h, m, s, ms))
# Converts datetime to Datetag
@staticmethod
def datetime2DateTag(dt):
y = dt.year
# mon = dt.month
day = dt.timetuple().tm_yday
return int("%d%03d" % (y, day))
# Converts HDFRoot timestamp attribute to seconds
@staticmethod
def timestampToSec(timestamp):
timei = timestamp.split(" ")[3]
t = timei.split(":")
h = int(t[0])
m = int(t[1])
s = int(t[2])
return ((h*60)+m)*60+s
# Convert GPRMC Date to Datetag
@staticmethod
def gpsDateToDatetime(year, gpsDate):
date = str(gpsDate).zfill(6)
day = int(date[:2])
mon = int(date[2:4])
return datetime.datetime(year,mon,day,0,0,0,0,tzinfo=datetime.timezone.utc)
# Add a dataset to each group for DATETIME, as defined by TIMETAG2 and DATETAG
# Also screens for nonsense timetags like 0.0 or NaN, and datetags that are not
# in the 20th or 21st centuries
@staticmethod
def rootAddDateTime(node):
for gp in node.groups:
# print(gp.id)
if gp.id != "SOLARTRACKER_STATUS": # No valid timestamps in STATUS
timeData = gp.getDataset("TIMETAG2").data["NONE"].tolist()
dateTag = gp.getDataset("DATETAG").data["NONE"].tolist()
timeStamp = []
for i, timei in enumerate(timeData):
# Converts from TT2 (hhmmssmss. UTC) and Datetag (YYYYDOY UTC) to datetime
# Filter for aberrant Datetags
t = str(int(timei)).zfill(9)
h = int(t[:2])
m = int(t[2:4])
s = int(t[4:6])
if (str(dateTag[i]).startswith("19") or str(dateTag[i]).startswith("20")) \
and timei != 0.0 and not np.isnan(timei) \
and h < 60 and m < 60 and s < 60:
dt = Utilities.dateTagToDateTime(dateTag[i])
timeStamp.append(Utilities.timeTag2ToDateTime(dt, timei))
else:
msg = f"Bad Datetag or Timetag2 found. Eliminating record. {i} : {dateTag[i]} : {timei}"
print(msg)
Utilities.writeLogFile(msg)
gp.datasetDeleteRow(i)
dateTime = gp.addDataset("DATETIME")
dateTime.data = timeStamp
return node
# Add a data column to each group dataset for DATETIME, as defined by TIMETAG2 and DATETAG
# Also screens for nonsense timetags like 0.0 or NaN, and datetags that are not
# in the 20th or 21st centuries
@staticmethod
def rootAddDateTimeL2(node):
for gp in node.groups:
if gp.id != "SOLARTRACKER_STATUS": # No valid timestamps in STATUS
for ds in gp.datasets:
# Make sure all datasets have been transcribed to columns
gp.datasets[ds].datasetToColumns()
if not 'Datetime' in gp.datasets[ds].columns:
timeData = gp.datasets[ds].columns["Timetag2"]
dateTag = gp.datasets[ds].columns["Datetag"]
timeStamp = []
for i, timei in enumerate(timeData):
# Converts from TT2 (hhmmssmss. UTC) and Datetag (YYYYDOY UTC) to datetime
# Filter for aberrant Datetags
if (str(dateTag[i]).startswith("19") or str(dateTag[i]).startswith("20")) \
and timei != 0.0 and not np.isnan(timei):
dt = Utilities.dateTagToDateTime(dateTag[i])
timeStamp.append(Utilities.timeTag2ToDateTime(dt, timei))
else:
gp.datasetDeleteRow(i)
msg = f"Bad Datetag or Timetag2 found. Eliminating record. {dateTag[i]} : {timei}"
print(msg)
Utilities.writeLogFile(msg)
gp.datasets[ds].columns["Datetime"] = timeStamp
gp.datasets[ds].columns.move_to_end('Datetime', last=False)
gp.datasets[ds].columnsToDataset()
return node
# Remove records if values of DATETIME are not strictly increasing
# (strictly increasing values required for interpolation)
@staticmethod
def fixDateTime(gp):
dateTime = gp.getDataset("DATETIME").data
# Test for strictly ascending values
# Not sensitive to UTC midnight (i.e. in datetime format)
total = len(dateTime)
globalTotal = total
if total >= 2:
# Check the first element prior to looping over rest
i = 0
if dateTime[i+1] <= dateTime[i]:
gp.datasetDeleteRow(i)
del dateTime[i] # I'm fuzzy on why this is necessary; not a pointer?
total = total - 1
msg = f'Out of order timestamp deleted at {i}'
print(msg)
Utilities.writeLogFile(msg)
#In case we went from 2 to 1 element on the first element,
if total == 1:
msg = f'************Too few records ({total}) to test for ascending timestamps. Exiting.'
print(msg)
Utilities.writeLogFile(msg)
return False
i = 1
while i < total:
if dateTime[i] <= dateTime[i-1]:
''' BUG?:Same values of consecutive TT2s are shockingly common. Confirmed
that 1) they exist from L1A, and 2) sensor data changes while TT2 stays the same '''
gp.datasetDeleteRow(i)
del dateTime[i] # I'm fuzzy on why this is necessary; not a pointer?
total = total - 1
msg = f'Out of order TIMETAG2 row deleted at {i}'
print(msg)
Utilities.writeLogFile(msg)
continue # goto while test skipping i incrementation. dateTime[i] is now the next value.
i += 1
else:
msg = f'************Too few records ({total}) to test for ascending timestamps. Exiting.'
print(msg)
Utilities.writeLogFile(msg)
return False
if (globalTotal - total) > 0:
msg = f'Data eliminated for non-increasing timestamps: {100*(globalTotal - total)/globalTotal:3.1f}%'
print(msg)
Utilities.writeLogFile(msg)
return True
# @staticmethod
# def epochSecToDateTagTimeTag2(eSec):
# dateTime = datetime.datetime.utcfromtimestamp(eSec)
# year = dateTime.timetuple()[0]
# return
# Checks if a string is a floating point number
@staticmethod
def isFloat(text):
try:
float(text)
return True
except ValueError:
return False
# Check if dataset contains NANs
@staticmethod
def hasNan(ds):
for k in ds.data.dtype.fields.keys():
for x in range(ds.data.shape[0]):
if k != 'Datetime':
if np.isnan(ds.data[k][x]):
return True
# else:
# if np.isnan(ds.data[k][x]):
# return True
return False
# Check if the list contains strictly increasing values
@staticmethod
def isIncreasing(l):
return all(x<y for x, y in zip(l, l[1:]))
@staticmethod
def windowAverage(data,window_size):
min_periods = round(window_size/2)
df=pd.DataFrame(data)
out=df.rolling(window_size,min_periods,center=True,win_type='boxcar')
# out = [item for items in out for item in items] #flattening doesn't work
return out
@staticmethod
def movingAverage(data, window_size):
# Window size will be determined experimentally by examining the dark and light data from each instrument.
""" Noise detection using a low-pass filter.
https://www.datascience.com/blog/python-anomaly-detection
Computes moving average using discrete linear convolution of two one dimensional sequences.
Args:
-----
data (pandas.Series): independent variable
window_size (int): rolling window size
Returns:
--------
ndarray of linear convolution
References:
------------
[1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
[2] API Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html
[3] <NAME>., <NAME>., and <NAME>. 2006. Outlier detection by active learning.
In Proceedings of the 12th ACM SIGKDD International Conference on Knowledge Discovery and
Data Mining. ACM Press, New York, 504–509
[4] <NAME>, <NAME> and <NAME> 2009. Anomaly Detection: A Survey Article No. 15 in ACM
Computing Surveys"""
# window = np.ones(int(window_size))/float(window_size)
# Convolve is not nan-tolerant, so use a mask
data = np.array(data)
mask = np.isnan(data)
K = np.ones(window_size, dtype=int)
denom = np.convolve(~mask,K)
denom = np.where(denom != 0, denom, 1) # replace the 0s with 1s to block div0 error; the numerator will be zero anyway
out = np.convolve(np.where(mask,0,data), K)/denom
# return np.convolve(data, window, 'same')
# Slice out one half window on either side; this requires an odd-sized window
return out[int(np.floor(window_size/2)):-int(np.floor(window_size/2))]
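    # Minimal usage sketch (illustrative values): the NaN-masked convolution above
    # yields a centered moving average the same length as the input, filling gaps
    # from the valid neighbours inside each window, e.g.
    #   Utilities.movingAverage([1.0, 2.0, np.nan, 4.0, 5.0], 3)
    #   -> array([1.5, 1.5, 3. , 4.5, 4.5])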
@staticmethod
def darkConvolution(data,avg,std,sigma):
badIndex = []
for i in range(len(data)):
if i < 1 or i > len(data)-2:
# First and last avg values from convolution are not to be trusted
badIndex.append(True)
elif np.isnan(data[i]):
badIndex.append(False)
else:
# Use stationary standard deviation anomaly (from rolling average) detection for dark data
if (data[i] > avg[i] + (sigma*std)) or (data[i] < avg[i] - (sigma*std)):
badIndex.append(True)
else:
badIndex.append(False)
return badIndex
@staticmethod
def lightConvolution(data,avg,rolling_std,sigma):
badIndex = []
for i in range(len(data)):
if i < 1 or i > len(data)-2:
# First and last avg values from convolution are not to be trusted
badIndex.append(True)
elif np.isnan(data[i]):
badIndex.append(False)
else:
# Use rolling standard deviation anomaly (from rolling average) detection for dark data
if (data[i] > avg[i] + (sigma*rolling_std[i])) or (data[i] < avg[i] - (sigma*rolling_std[i])):
badIndex.append(True)
else:
badIndex.append(False)
return badIndex
@staticmethod
def l1dThresholds(band,data,minRad,maxRad,minMaxBand):
badIndex = []
for i in range(len(data)):
badIndex.append(False)
# ConfigFile setting updated directly from the checkbox in AnomDetection.
# This insures values of badIndex are false if unthresholded or Min or Max are None
if ConfigFile.settings["bL1dThreshold"]:
# Only run on the pre-selected waveband
if band == minMaxBand:
if minRad or minRad==0: # beware falsy zeros...
if data[i] < minRad:
badIndex[-1] = True
if maxRad or maxRad==0:
if data[i] > maxRad:
badIndex[-1] = True
return badIndex
# @staticmethod
# def rejectOutliers(data, m):
# d = np.abs(data - np.nanmedian(data))
# mdev = np.nanmedian(d)
# s = d/mdev if mdev else 0.
# badIndex = np.zeros((len(s),1),dtype=bool)
# badIndex = [s>=m]
# return badIndex
@staticmethod
def interp(x, y, new_x, kind='linear', fill_value=0.0):
''' Wrapper for scipy interp1d that works even if
values in new_x are outside the range of values in x'''
''' NOTE: This will fill missing values at the beginning and end of data record with
the nearest actual record. This is fine for integrated datasets, but may be dramatic
for some gappy ancillary records of lower temporal resolution.'''
# If the last value to interp to is larger than the last value interp'ed from,
# then append that higher value onto the values to interp from
n0 = len(x)-1
n1 = len(new_x)-1
if new_x[n1] > x[n0]:
#print(new_x[n], x[n])
# msg = '********** Warning: extrapolating to beyond end of data record ********'
# print(msg)
# Utilities.writeLogFile(msg)
x.append(new_x[n1])
y.append(y[n0])
# If the first value to interp to is less than the first value interp'd from,
# then add that lesser value to the beginning of values to interp from
if new_x[0] < x[0]:
#print(new_x[0], x[0])
# msg = '********** Warning: extrapolating to before beginning of data record ******'
# print(msg)
# Utilities.writeLogFile(msg)
x.insert(0, new_x[0])
y.insert(0, y[0])
new_y = scipy.interpolate.interp1d(x, y, kind=kind, bounds_error=False, fill_value=fill_value)(new_x)
return new_y
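    # Minimal usage sketch (illustrative): new_x values beyond either end of x are
    # padded with the nearest end point before calling interp1d, so the record is
    # held constant at the ends rather than extrapolated, e.g.
    #   Utilities.interp([1.0, 2.0, 3.0], [10.0, 20.0, 30.0], [0.5, 2.5, 3.5])
    #   -> array([10., 25., 30.])
    # Note that the x and y lists passed in are extended in place.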
@staticmethod
def interpAngular(x, y, new_x, fill_value="extrapolate"):
''' Wrapper for scipy interp1d that works even if
values in new_x are outside the range of values in x'''
''' NOTE: Except for SOLAR_AZ and SZA, which are extrapolated, this will fill missing values at the
beginning and end of data record with the nearest actual record. This is fine for integrated
datasets, but may be dramatic for some gappy ancillary records of lower temporal resolution.'''
if fill_value != "extrapolate": # Only extrapolate SOLAR_AZ and SZA, otherwise keep fill values constant
# Some angular measurements (like SAS pointing) are + and -. Convert to all +
# eliminate NaNs
for i, value in enumerate(y):
if value < 0:
y[i] = 360 + value
if np.isnan(value):
x.pop(i)
y.pop(i)
# If the last value to interp to is larger than the last value interp'ed from,
# then append that higher value onto the values to interp from
n0 = len(x)-1
n1 = len(new_x)-1
if new_x[n1] > x[n0]:
#print(new_x[n], x[n])
# msg = '********** Warning: extrapolating to beyond end of data record ********'
# print(msg)
# Utilities.writeLogFile(msg)
x.append(new_x[n1])
y.append(y[n0])
# If the first value to interp to is less than the first value interp'd from,
# then add that lesser value to the beginning of values to interp from
if new_x[0] < x[0]:
#print(new_x[0], x[0])
# msg = '********** Warning: extrapolating to before beginning of data record ******'
# print(msg)
# Utilities.writeLogFile(msg)
x.insert(0, new_x[0])
y.insert(0, y[0])
y_rad = np.deg2rad(y)
# f = scipy.interpolate.interp1d(x,y_rad,kind='linear', bounds_error=False, fill_value=None)
f = scipy.interpolate.interp1d(x,y_rad,kind='linear', bounds_error=False, fill_value=fill_value)
new_y_rad = f(new_x)%(2*np.pi)
new_y = np.rad2deg(new_y_rad)
return new_y
# Cubic spline interpolation intended to get around the all NaN output from InterpolateUnivariateSpline
# x is original time to be splined, y is the data to be interpolated, new_x is the time to interpolate/spline to
# interpolate.splrep is intolerant of duplicate or non-ascending inputs, and inputs with fewer than 3 points
@staticmethod
def interpSpline(x, y, new_x):
spl = splrep(x, y)
new_y = splev(new_x, spl)
for i in range(len(new_y)):
if np.isnan(new_y[i]):
print("NaN")
return new_y
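    # Worked example (illustrative): four points drawn from y = x**2 determine a
    # single interpolating cubic, so
    #   Utilities.interpSpline([0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 4.0, 9.0], [1.5])
    #   returns approximately array([2.25]).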
@staticmethod
def plotRadiometry(root, filename, rType, plotDelta = False):
dirPath = os.getcwd()
outDir = MainConfig.settings["outDir"]
# If default output path (HyperInSPACE/Data) is used, choose the root HyperInSPACE path,
# and build on that (HyperInSPACE/Plots/etc...)
if os.path.abspath(outDir) == os.path.join(dirPath,'Data'):
outDir = dirPath
# Otherwise, put Plots in the chosen output directory from Main
plotDir = os.path.join(outDir,'Plots','L2')
if not os.path.exists(plotDir):
os.makedirs(plotDir)
dataDelta = None
''' Note: If only one spectrum is left in a given ensemble, deltas will
be zero for Es, Li, and Lt.'''
if rType=='Rrs':
print('Plotting Rrs')
group = root.getGroup("REFLECTANCE")
Data = group.getDataset(f'{rType}_HYPER')
if plotDelta:
dataDelta = group.getDataset(f'{rType}_HYPER_unc')
plotRange = [340, 800]
if ConfigFile.settings['bL2WeightMODISA']:
Data_MODISA = group.getDataset(f'{rType}_MODISA')
if plotDelta:
dataDelta_MODISA = group.getDataset(f'{rType}_MODISA_unc')
if ConfigFile.settings['bL2WeightMODIST']:
Data_MODIST = group.getDataset(f'{rType}_MODIST')
if plotDelta:
dataDelta_MODIST = group.getDataset(f'{rType}_MODIST_unc')
if ConfigFile.settings['bL2WeightVIIRSN']:
Data_VIIRSN = group.getDataset(f'{rType}_VIIRSN')
if plotDelta:
dataDelta_VIIRSN = group.getDataset(f'{rType}_VIIRSN_unc')
if ConfigFile.settings['bL2WeightVIIRSJ']:
Data_VIIRSJ = group.getDataset(f'{rType}_VIIRSJ')
if plotDelta:
dataDelta_VIIRSJ = group.getDataset(f'{rType}_VIIRSJ_unc')
if ConfigFile.settings['bL2WeightSentinel3A']:
Data_Sentinel3A = group.getDataset(f'{rType}_Sentinel3A')
if plotDelta:
dataDelta_Sentinel3A = group.getDataset(f'{rType}_Sentinel3A_unc')
if ConfigFile.settings['bL2WeightSentinel3B']:
Data_Sentinel3B = group.getDataset(f'{rType}_Sentinel3B')
if plotDelta:
dataDelta_Sentinel3B = group.getDataset(f'{rType}_Sentinel3B_unc')
if rType=='nLw':
print('Plotting nLw')
group = root.getGroup("REFLECTANCE")
Data = group.getDataset(f'{rType}_HYPER')
if plotDelta:
dataDelta = group.getDataset(f'{rType}_HYPER_unc')
plotRange = [340, 800]
if ConfigFile.settings['bL2WeightMODISA']:
Data_MODISA = group.getDataset(f'{rType}_MODISA')
if plotDelta:
dataDelta_MODISA = group.getDataset(f'{rType}_MODISA_unc')
if ConfigFile.settings['bL2WeightMODIST']:
Data_MODIST = group.getDataset(f'{rType}_MODIST')
if plotDelta:
dataDelta_MODIST = group.getDataset(f'{rType}_MODIST_unc')
if ConfigFile.settings['bL2WeightVIIRSN']:
Data_VIIRSN = group.getDataset(f'{rType}_VIIRSN')
if plotDelta:
dataDelta_VIIRSN = group.getDataset(f'{rType}_VIIRSN_unc')
if ConfigFile.settings['bL2WeightVIIRSJ']:
Data_VIIRSJ = group.getDataset(f'{rType}_VIIRSJ')
if plotDelta:
dataDelta_VIIRSJ = group.getDataset(f'{rType}_VIIRSJ_unc')
if ConfigFile.settings['bL2WeightSentinel3A']:
Data_Sentinel3A = group.getDataset(f'{rType}_Sentinel3A')
if plotDelta:
dataDelta_Sentinel3A = group.getDataset(f'{rType}_Sentinel3A_unc')
if ConfigFile.settings['bL2WeightSentinel3B']:
Data_Sentinel3B = group.getDataset(f'{rType}_Sentinel3B')
if plotDelta:
dataDelta_Sentinel3B = group.getDataset(f'{rType}_Sentinel3B_unc')
''' Could include satellite convolved (ir)radiances in the future '''
if rType=='ES':
print('Plotting Es')
group = root.getGroup("IRRADIANCE")
Data = group.getDataset(f'{rType}_HYPER')
if plotDelta:
dataDelta = group.getDataset(f'{rType}_HYPER_sd')
plotRange = [305, 1140]
if rType=='LI':
print('Plotting Li')
group = root.getGroup("RADIANCE")
Data = group.getDataset(f'{rType}_HYPER')
if plotDelta:
dataDelta = group.getDataset(f'{rType}_HYPER_sd')
plotRange = [305, 1140]
if rType=='LT':
print('Plotting Lt')
group = root.getGroup("RADIANCE")
Data = group.getDataset(f'{rType}_HYPER')
lwData = group.getDataset(f'LW_HYPER')
if plotDelta:
dataDelta = group.getDataset(f'{rType}_HYPER_sd')
# lwDataDelta = group.getDataset(f'LW_HYPER_sd')
plotRange = [305, 1140]
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# Hyperspectral
x = []
xLw = []
wave = []
        subwave = [] # accommodates Zhang, which deletes out-of-bounds wavebands
# For each waveband
for k in Data.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x.append(k)
wave.append(float(k))
# Add Lw to Lt plots
if rType=='LT':
for k in lwData.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
xLw.append(k)
subwave.append(float(k))
# Satellite Bands
x_MODISA = []
wave_MODISA = []
if ConfigFile.settings['bL2WeightMODISA'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_MODISA.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_MODISA.append(k)
wave_MODISA.append(float(k))
x_MODIST = []
wave_MODIST = []
if ConfigFile.settings['bL2WeightMODIST'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_MODIST.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_MODIST.append(k)
wave_MODIST.append(float(k))
x_VIIRSN = []
wave_VIIRSN = []
if ConfigFile.settings['bL2WeightVIIRSN'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_VIIRSN.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_VIIRSN.append(k)
wave_VIIRSN.append(float(k))
x_VIIRSJ = []
wave_VIIRSJ = []
if ConfigFile.settings['bL2WeightVIIRSJ'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_VIIRSJ.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_VIIRSJ.append(k)
wave_VIIRSJ.append(float(k))
x_Sentinel3A = []
wave_Sentinel3A = []
if ConfigFile.settings['bL2WeightSentinel3A'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_Sentinel3A.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_Sentinel3A.append(k)
wave_Sentinel3A.append(float(k))
x_Sentinel3B = []
wave_Sentinel3B = []
if ConfigFile.settings['bL2WeightSentinel3B'] and (rType == 'Rrs' or rType == 'nLw'):
for k in Data_Sentinel3B.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x_Sentinel3B.append(k)
wave_Sentinel3B.append(float(k))
total = Data.data.shape[0]
maxRad = 0
minRad = 0
cmap = cm.get_cmap("jet")
color=iter(cmap(np.linspace(0,1,total)))
plt.figure(1, figsize=(8,6))
for i in range(total):
# Hyperspectral
y = []
dy = []
for k in x:
y.append(Data.data[k][i])
if plotDelta:
dy.append(dataDelta.data[k][i])
# Add Lw to Lt plots
if rType=='LT':
yLw = []
# dyLw = []
for k in xLw:
yLw.append(lwData.data[k][i])
# if plotDelta:
# dy.append(dataDelta.data[k][i])
# Satellite Bands
y_MODISA = []
dy_MODISA = []
if ConfigFile.settings['bL2WeightMODISA'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_MODISA:
y_MODISA.append(Data_MODISA.data[k][i])
if plotDelta:
dy_MODISA.append(dataDelta_MODISA.data[k][i])
y_MODIST = []
dy_MODIST = []
if ConfigFile.settings['bL2WeightMODIST'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_MODIST:
y_MODIST.append(Data_MODIST.data[k][i])
if plotDelta:
dy_MODIST.append(dataDelta_MODIST.data[k][i])
y_VIIRSN = []
dy_VIIRSN = []
if ConfigFile.settings['bL2WeightVIIRSN'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_VIIRSN:
y_VIIRSN.append(Data_VIIRSN.data[k][i])
if plotDelta:
dy_VIIRSN.append(dataDelta_VIIRSN.data[k][i])
y_VIIRSJ = []
dy_VIIRSJ = []
if ConfigFile.settings['bL2WeightVIIRSJ'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_VIIRSJ:
y_VIIRSJ.append(Data_VIIRSJ.data[k][i])
if plotDelta:
dy_VIIRSJ.append(dataDelta_VIIRSJ.data[k][i])
y_Sentinel3A = []
dy_Sentinel3A = []
if ConfigFile.settings['bL2WeightSentinel3A'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_Sentinel3A:
y_Sentinel3A.append(Data_Sentinel3A.data[k][i])
if plotDelta:
dy_Sentinel3A.append(dataDelta_Sentinel3A.data[k][i])
y_Sentinel3B = []
dy_Sentinel3B = []
if ConfigFile.settings['bL2WeightSentinel3B'] and (rType == 'Rrs' or rType == 'nLw'):
for k in x_Sentinel3B:
y_Sentinel3B.append(Data_Sentinel3B.data[k][i])
if plotDelta:
dy_Sentinel3B.append(dataDelta_Sentinel3B.data[k][i])
c=next(color)
if max(y) > maxRad:
maxRad = max(y)+0.1*max(y)
if rType == 'LI' and maxRad > 20:
maxRad = 20
if rType == 'LT' and maxRad > 2:
maxRad = 2
if min(y) < minRad:
minRad = min(y)-0.1*min(y)
if rType == 'LI':
minRad = 0
if rType == 'LT':
minRad = 0
if rType == 'ES':
minRad = 0
# Plot the Hyperspectral spectrum
plt.plot(wave, y, c=c, zorder=-1)
# Add the Wei QA score to the Rrs plot, if calculated
if rType == 'Rrs':
if ConfigFile.products['bL2ProdweiQA']:
groupProd = root.getGroup("DERIVED_PRODUCTS")
score = groupProd.getDataset('wei_QA')
QA_note = f"Score: {score.columns['QA_score'][i]}"
axes = plt.gca()
axes.text(1.0,1.1 - (i+1)/len(score.columns['QA_score']), QA_note,
verticalalignment='top', horizontalalignment='right',
transform=axes.transAxes,
color=c, fontdict=font)
# Add Lw to Lt plots
if rType=='LT':
plt.plot(subwave, yLw, c=c, zorder=-1, linestyle='dashed')
if plotDelta:
# Generate the polygon for uncertainty bounds
deltaPolyx = wave + list(reversed(wave))
dPolyyPlus = [(y[i]+dy[i]) for i in range(len(y))]
dPolyyMinus = [(y[i]-dy[i]) for i in range(len(y))]
deltaPolyyPlus = y + list(reversed(dPolyyPlus))
deltaPolyyMinus = y + list(reversed(dPolyyMinus))
plt.fill(deltaPolyx, deltaPolyyPlus, alpha=0.2, c=c, zorder=-1)
plt.fill(deltaPolyx, deltaPolyyMinus, alpha=0.2, c=c, zorder=-1)
# Satellite Bands
if ConfigFile.settings['bL2WeightMODISA']:
# Plot the MODISA spectrum
if plotDelta:
plt.errorbar(wave_MODISA, y_MODISA, yerr=dy_MODISA, fmt='.',
elinewidth=0.1, color=c, ecolor='black', zorder=3) # ecolor is broken
else:
plt.plot(wave_MODISA, y_MODISA, 'o', c=c)
if ConfigFile.settings['bL2WeightMODIST']:
# Plot the MODIST spectrum
if plotDelta:
plt.errorbar(wave_MODIST, y_MODIST, yerr=dy_MODIST, fmt='.',
elinewidth=0.1, color=c, ecolor='black')
else:
plt.plot(wave_MODIST, y_MODIST, 'o', c=c)
if ConfigFile.settings['bL2WeightVIIRSN']:
# Plot the VIIRSN spectrum
if plotDelta:
plt.errorbar(wave_VIIRSN, y_VIIRSN, yerr=dy_VIIRSN, fmt='.',
elinewidth=0.1, color=c, ecolor='black')
else:
plt.plot(wave_VIIRSN, y_VIIRSN, 'o', c=c)
if ConfigFile.settings['bL2WeightVIIRSJ']:
# Plot the VIIRSJ spectrum
if plotDelta:
plt.errorbar(wave_VIIRSJ, y_VIIRSJ, yerr=dy_VIIRSJ, fmt='.',
elinewidth=0.1, color=c, ecolor='black')
else:
plt.plot(wave_VIIRSJ, y_VIIRSJ, 'o', c=c)
if ConfigFile.settings['bL2WeightSentinel3A']:
# Plot the Sentinel3A spectrum
if plotDelta:
plt.errorbar(wave_Sentinel3A, y_Sentinel3A, yerr=dy_Sentinel3A, fmt='.',
elinewidth=0.1, color=c, ecolor='black')
else:
plt.plot(wave_Sentinel3A, y_Sentinel3A, 'o', c=c)
if ConfigFile.settings['bL2WeightSentinel3B']:
# Plot the Sentinel3B spectrum
if plotDelta:
plt.errorbar(wave_Sentinel3B, y_Sentinel3B, yerr=dy_Sentinel3B, fmt='.',
elinewidth=0.1, color=c, ecolor='black')
else:
plt.plot(wave_Sentinel3B, y_Sentinel3B, 'o', c=c)
axes = plt.gca()
axes.set_title(filename, fontdict=font)
# axes.set_xlim([390, 800])
axes.set_ylim([minRad, maxRad])
plt.xlabel('wavelength (nm)', fontdict=font)
if rType=='LT':
plt.ylabel('LT (LW dashed)', fontdict=font)
else:
plt.ylabel(rType, fontdict=font)
# Tweak spacing to prevent clipping of labels
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
note = f'Interval: {ConfigFile.settings["fL2TimeInterval"]} s'
axes.text(0.95, 0.95, note,
verticalalignment='top', horizontalalignment='right',
transform=axes.transAxes,
color='black', fontdict=font)
axes.grid()
# plt.show() # --> QCoreApplication::exec: The event loop is already running
# Save the plot
filebasename = filename.split('_')
fp = os.path.join(plotDir, '_'.join(filebasename[0:-1]) + '_' + rType + '.png')
plt.savefig(fp)
plt.close() # This prevents displaying the plot on screen with certain IDEs
@staticmethod
def plotRadiometryL1D(root, filename, rType):
dirPath = os.getcwd()
outDir = MainConfig.settings["outDir"]
# If default output path (HyperInSPACE/Data) is used, choose the root HyperInSPACE path,
# and build on that (HyperInSPACE/Plots/etc...)
if os.path.abspath(outDir) == os.path.join(dirPath,'Data'):
outDir = dirPath
# Otherwise, put Plots in the chosen output directory from Main
plotDir = os.path.join(outDir,'Plots','L1D')
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plotRange = [305, 1140]
if rType=='ES':
print('Plotting Es')
group = root.getGroup(rType)
Data = group.getDataset(rType)
if rType=='LI':
print('Plotting Li')
group = root.getGroup(rType)
Data = group.getDataset(rType)
if rType=='LT':
print('Plotting Lt')
group = root.getGroup(rType)
Data = group.getDataset(rType)
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# Hyperspectral
x = []
wave = []
# For each waveband
for k in Data.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
x.append(k)
wave.append(float(k))
total = Data.data.shape[0]
maxRad = 0
minRad = 0
cmap = cm.get_cmap("jet")
color=iter(cmap(np.linspace(0,1,total)))
plt.figure(1, figsize=(8,6))
for i in range(total):
# Hyperspectral
y = []
for k in x:
y.append(Data.data[k][i])
c=next(color)
if max(y) > maxRad:
maxRad = max(y)+0.1*max(y)
if rType == 'LI' and maxRad > 20:
maxRad = 20
if rType == 'LT' and maxRad > 10:
maxRad = 10
if min(y) < minRad:
minRad = min(y)-0.1*min(y)
if rType == 'LI':
minRad = 0
if rType == 'LT':
minRad = 0
if rType == 'ES':
minRad = 0
# Plot the Hyperspectral spectrum
plt.plot(wave, y, c=c, zorder=-1)
axes = plt.gca()
axes.set_title(filename, fontdict=font)
# axes.set_xlim([390, 800])
axes.set_ylim([minRad, maxRad])
plt.xlabel('wavelength (nm)', fontdict=font)
plt.ylabel(rType, fontdict=font)
# Tweak spacing to prevent clipping of labels
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
axes.grid()
# plt.show() # --> QCoreApplication::exec: The event loop is already running
# Save the plot
filebasename = filename.split('_')
fp = os.path.join(plotDir, '_'.join(filebasename[0:-1]) + '_' + rType + '.png')
plt.savefig(fp)
plt.close() # This prevents displaying the plot on screen with certain IDEs
@staticmethod
def plotTimeInterp(xData, xTimer, newXData, yTimer, instr, fileName):
''' Plot results of L1E time interpolation '''
dirPath = os.getcwd()
outDir = MainConfig.settings["outDir"]
# If default output path (HyperInSPACE/Data) is used, choose the root HyperInSPACE path,
# and build on that (HyperInSPACE/Plots/etc...)
if os.path.abspath(outDir) == os.path.join(dirPath,'Data'):
outDir = dirPath
# Otherwise, put Plots in the chosen output directory from Main
plotDir = os.path.join(outDir,'Plots','L1E')
if not os.path.exists(plotDir):
os.makedirs(plotDir)
        # For the sake of MacOS, we need to hack the datetimes into pandas dataframes for plotting
dfx = pd.DataFrame(data=xTimer, index=list(range(0,len(xTimer))), columns=['x'])
# *** HACK: CONVERT datetime column to string and back again - who knows why this works? ***
dfx['x'] = pd.to_datetime(dfx['x'].astype(str))
dfy = pd.DataFrame(data=yTimer, index=list(range(0,len(yTimer))), columns=['x'])
dfy['x'] = pd.to_datetime(dfy['x'].astype(str))
fileBaseName,_ = fileName.split('.')
register_matplotlib_converters()
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# Steps in wavebands used for plots
# step = float(ConfigFile.settings["fL3InterpInterval"]) # this is in nm
# This happens prior to waveband interpolation, so each interval is ~3.3 nm
''' To Do: THIS COULD BE SET IN THE CONFIG WINDOW '''
step = 20 # this is in band intervals
if instr == 'ES' or instr == 'LI' or instr == 'LT':
l = round((len(xData.data.dtype.names)-3)/step) # skip date and time and datetime
index = l
else:
l = len(xData.data.dtype.names)-3 # skip date and time and datetime
index = None
Utilities.printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
ticker = 0
if index is not None:
for k in xData.data.dtype.names:
if index % step == 0:
if k == "Datetag" or k == "Timetag2" or k == "Datetime":
continue
ticker += 1
Utilities.printProgressBar(ticker, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
x = np.copy(xData.data[k]).tolist()
new_x = np.copy(newXData.columns[k]).tolist()
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
# ax.plot(xTimer, x, 'bo', label='Raw')
ax.plot(dfx['x'], x, 'bo', label='Raw')
# ax.plot(yTimer, new_x, 'k.', label='Interpolated')
ax.plot(dfy['x'], new_x, 'k.', label='Interpolated')
ax.legend()
plt.xlabel('Date/Time (UTC)', fontdict=font)
plt.ylabel(f'{instr}_{k}', fontdict=font)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
# plt.savefig(os.path.join('Plots','L1E',f'{fileBaseName}_{instr}_{k}.png'))
plt.savefig(os.path.join(plotDir,f'{fileBaseName}_{instr}_{k}.png'))
plt.close()
index +=1
else:
for k in xData.data.dtype.names:
if k == "Datetag" or k == "Timetag2" or k == "Datetime":
continue
ticker += 1
Utilities.printProgressBar(ticker, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
x = np.copy(xData.data[k]).tolist()
new_x = np.copy(newXData.columns[k]).tolist()
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 1, 1)
# ax.plot(xTimer, x, 'bo', label='Raw')
ax.plot(dfx['x'], x, 'bo', label='Raw')
# ax.plot(yTimer, new_x, 'k.', label='Interpolated')
ax.plot(dfy['x'], new_x, 'k.', label='Interpolated')
ax.legend()
plt.xlabel('Date/Time (UTC)', fontdict=font)
plt.ylabel(f'{instr}', fontdict=font)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
plt.savefig(os.path.join(plotDir,f'{fileBaseName}_{instr}_{k}.png'))
plt.close()
print('\n')
@staticmethod
def specFilter(inFilePath, Dataset, timeStamp, station=None, filterRange=[400, 700],\
filterFactor=3, rType='None'):
dirPath = os.getcwd()
outDir = MainConfig.settings["outDir"]
# If default output path (HyperInSPACE/Data) is used, choose the root HyperInSPACE path,
# and build on that (HyperInSPACE/Plots/etc...)
if os.path.abspath(outDir) == os.path.join(dirPath,'Data'):
outDir = dirPath
# Otherwise, put Plots in the chosen output directory from Main
plotDir = os.path.join(outDir,'Plots','L2_Spectral_Filter')
if not os.path.exists(plotDir):
os.makedirs(plotDir)
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# Collect each column name ignoring Datetag and Timetag2 (i.e. each wavelength) in the desired range
x = []
wave = []
for k in Dataset.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=filterRange[0] and float(k)<=filterRange[1]:
x.append(k)
wave.append(float(k))
# Read in each spectrum
total = Dataset.data.shape[0]
specArray = []
normSpec = []
# cmap = cm.get_cmap("jet")
# color=iter(cmap(np.linspace(0,1,total)))
print('Creating plots...')
plt.figure(1, figsize=(10,8))
for timei in range(total):
y = []
for waveband in x:
y.append(Dataset.data[waveband][timei])
specArray.append(y)
peakIndx = y.index(max(y))
            normSpec.append(np.array(y) / y[peakIndx])  # cast the list so the division broadcasts
# plt.plot(wave, y / y[peakIndx], color='grey')
normSpec = np.array(normSpec)
aveSpec = np.median(normSpec, axis = 0)
stdSpec = np.std(normSpec, axis = 0)
badTimes = []
badIndx = []
# For each spectral band...
for i in range(0, len(normSpec[0])-1):
# For each timeseries radiometric measurement...
for j, rad in enumerate(normSpec[:,i]):
# Identify outliers and negative values for elimination
if rad > (aveSpec[i] + filterFactor*stdSpec[i]) or \
rad < (aveSpec[i] - filterFactor*stdSpec[i]) or \
rad < 0:
badIndx.append(j)
badTimes.append(timeStamp[j])
badIndx = np.unique(badIndx)
badTimes = np.unique(badTimes)
        # Duplicate each bad time into a [start, stop] pair, giving an N x 2 array:
badTimes = np.rot90(np.matlib.repmat(badTimes,2,1), 3)
# t0 = time.time()
for timei in range(total):
# for i in badIndx:
if timei in badIndx:
# plt.plot( wave, normSpec[i,:], color='red', linewidth=0.5, linestyle=(0, (1, 10)) ) # long-dot
plt.plot( wave, normSpec[timei,:], color='red', linewidth=0.5, linestyle=(0, (5, 5)) ) # dashed
else:
plt.plot(wave, normSpec[timei,:], color='grey')
# t1 = time.time()
# print(f'Time elapsed: {str(round((t1-t0)))} Seconds')
plt.plot(wave, aveSpec, color='black', linewidth=0.5)
plt.plot(wave, aveSpec + filterFactor*stdSpec, color='black', linewidth=2, linestyle='dashed')
plt.plot(wave, aveSpec - filterFactor*stdSpec, color='black', linewidth=2, linestyle='dashed')
plt.title(f'Sigma = {filterFactor}', fontdict=font)
plt.xlabel('Wavelength [nm]', fontdict=font)
plt.ylabel(f'{rType} [Normalized to peak value]', fontdict=font)
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
axes = plt.gca()
axes.grid()
# Save the plot
_,filename = os.path.split(inFilePath)
filebasename,_ = filename.rsplit('_',1)
if station:
fp = os.path.join(plotDir, f'STATION_{station}_{filebasename}_{rType}.png')
else:
fp = os.path.join(plotDir, f'{filebasename}_{rType}.png')
plt.savefig(fp)
plt.close()
return badTimes
@staticmethod
def plotIOPs(root, filename, algorithm, iopType, plotDelta = False):
dirPath = os.getcwd()
outDir = MainConfig.settings["outDir"]
# If default output path (HyperInSPACE/Data) is used, choose the root HyperInSPACE path,
# and build on that (HyperInSPACE/Plots/etc...)
if os.path.abspath(outDir) == os.path.join(dirPath,'Data'):
outDir = dirPath
# Otherwise, put Plots in the chosen output directory from Main
plotDir = os.path.join(outDir,'Plots','L2_Products')
if not os.path.exists(plotDir):
os.makedirs(plotDir)
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
cmap = cm.get_cmap("jet")
# dataDelta = None
group = root.getGroup("DERIVED_PRODUCTS")
# if iopType=='a':
# print('Plotting absorption')
if algorithm == "qaa" or algorithm == "giop":
plotRange = [340, 700]
qaaName = f'bL2Prod{iopType}Qaa'
giopName = f'bL2Prod{iopType}Giop'
if ConfigFile.products["bL2Prodqaa"] and ConfigFile.products[qaaName]:
label = f'qaa_{iopType}'
DataQAA = group.getDataset(label)
# if plotDelta:
# dataDelta = group.getDataset(f'{iopType}_HYPER_delta')
xQAA = []
waveQAA = []
# For each waveband
for k in DataQAA.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
xQAA.append(k)
waveQAA.append(float(k))
totalQAA = DataQAA.data.shape[0]
colorQAA = iter(cmap(np.linspace(0,1,totalQAA)))
if ConfigFile.products["bL2Prodgiop"] and ConfigFile.products[giopName]:
label = f'giop_{iopType}'
DataGIOP = group.getDataset(label)
# if plotDelta:
# dataDelta = group.getDataset(f'{iopType}_HYPER_delta')
xGIOP = []
waveGIOP = []
# For each waveband
for k in DataGIOP.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
xGIOP.append(k)
waveGIOP.append(float(k))
totalGIOP = DataGIOP.data.shape[0]
colorGIOP = iter(cmap(np.linspace(0,1,totalGIOP)))
if algorithm == "gocad":
plotRange = [270, 700]
gocadName = f'bL2Prod{iopType}'
if ConfigFile.products["bL2Prodgocad"] and ConfigFile.products[gocadName]:
# ag
label = f'gocad_{iopType}'
agDataGOCAD = group.getDataset(label)
# if plotDelta:
# dataDelta = group.getDataset(f'{iopType}_HYPER_delta')
agGOCAD = []
waveGOCAD = []
# For each waveband
for k in agDataGOCAD.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
agGOCAD.append(k)
waveGOCAD.append(float(k))
totalGOCAD = agDataGOCAD.data.shape[0]
colorGOCAD = iter(cmap(np.linspace(0,1,totalGOCAD)))
# Sg
sgDataGOCAD = group.getDataset(f'gocad_Sg')
sgGOCAD = []
waveSgGOCAD = []
# For each waveband
for k in sgDataGOCAD.data.dtype.names:
if Utilities.isFloat(k):
if float(k)>=plotRange[0] and float(k)<=plotRange[1]: # also crops off date and time
sgGOCAD.append(k)
waveSgGOCAD.append(float(k))
# DOC
docDataGOCAD = group.getDataset(f'gocad_doc')
maxIOP = 0
minIOP = 0
# Plot
plt.figure(1, figsize=(8,6))
if algorithm == "qaa" or algorithm == "giop":
if ConfigFile.products["bL2Prodqaa"] and ConfigFile.products[qaaName]:
for i in range(totalQAA):
y = []
# dy = []
for k in xQAA:
y.append(DataQAA.data[k][i])
# if plotDelta:
# dy.append(dataDelta.data[k][i])
c=next(colorQAA)
if max(y) > maxIOP:
maxIOP = max(y)+0.1*max(y)
# if iopType == 'LI' and maxIOP > 20:
# maxIOP = 20
# Plot the Hyperspectral spectrum
plt.plot(waveQAA, y, c=c, zorder=-1)
# if plotDelta:
# # Generate the polygon for uncertainty bounds
# deltaPolyx = wave + list(reversed(wave))
# dPolyyPlus = [(y[i]+dy[i]) for i in range(len(y))]
# dPolyyMinus = [(y[i]-dy[i]) for i in range(len(y))]
# deltaPolyyPlus = y + list(reversed(dPolyyPlus))
# deltaPolyyMinus = y + list(reversed(dPolyyMinus))
# plt.fill(deltaPolyx, deltaPolyyPlus, alpha=0.2, c=c, zorder=-1)
# plt.fill(deltaPolyx, deltaPolyyMinus, alpha=0.2, c=c, zorder=-1)
if ConfigFile.products["bL2Prodgiop"] and ConfigFile.products[giopName]:
for i in range(totalGIOP):
y = []
for k in xGIOP:
y.append(DataGIOP.data[k][i])
c=next(colorGIOP)
if max(y) > maxIOP:
maxIOP = max(y)+0.1*max(y)
# Plot the Hyperspectral spectrum
plt.plot(waveGIOP, y, c=c, ls='--', zorder=-1)
if algorithm == "gocad":
if ConfigFile.products["bL2Prodgocad"] and ConfigFile.products[gocadName]:
for i in range(totalGOCAD):
y = []
for k in agGOCAD:
y.append(agDataGOCAD.data[k][i])
c=next(colorGOCAD)
if max(y) > maxIOP:
maxIOP = max(y)+0.1*max(y)
# Plot the point spectrum
# plt.scatter(waveGOCAD, y, s=100, c=c, marker='*', zorder=-1)
plt.plot(waveGOCAD, y, c=c, marker='*', markersize=13, linestyle = '', zorder=-1)
# Now extrapolate using the slopes
Sg = []
for k in sgGOCAD:
Sg.append(sgDataGOCAD.data[k][i])
yScaler = maxIOP*i/totalGOCAD
if k == '275':
wave = np.array(list(range(275, 300)))
ag_extrap = agDataGOCAD.data['275'][i] * np.exp(-1*sgDataGOCAD.data[k][i] * (wave - 275))
plt.plot(wave, ag_extrap, c=[0.9, 0.9, 0.9], ls='--', zorder=-1)
plt.text(285, 0.9*maxIOP - 0.12*yScaler, '{} {:.4f}'.format('S275 = ', sgDataGOCAD.data[k][i]), color=c)
if k == '300':
wave = np.array(list(range(300, 355)))
# uses the trailing end of the last extrapolation.
ag_extrap = ag_extrap[-1] * np.exp(-1*sgDataGOCAD.data[k][i] * (wave - 300))
plt.plot(wave, ag_extrap, c=[0.9, 0.9, 0.9], ls='--', zorder=-1)
plt.text(300, 0.7*maxIOP - 0.12*yScaler, '{} {:.4f}'.format('S300 = ', sgDataGOCAD.data[k][i]), color=c)
if k == '350':
# Use the 350 slope starting at 355 (where we have ag)
wave = np.array(list(range(355, 380)))
ag_extrap = agDataGOCAD.data['355'][i] * np.exp(-1*sgDataGOCAD.data[k][i] * (wave - 355))
plt.plot(wave, ag_extrap, c=[0.9, 0.9, 0.9], ls='--', zorder=-1)
plt.text(350, 0.5*maxIOP - 0.12*yScaler, '{} {:.4f}'.format('S350 = ', sgDataGOCAD.data[k][i]), color=c)
if k == '380':
wave = np.array(list(range(380, 412)))
ag_extrap = agDataGOCAD.data['380'][i] * np.exp(-1*sgDataGOCAD.data[k][i] * (wave - 380))
plt.plot(wave, ag_extrap, c=[0.9, 0.9, 0.9], ls='--', zorder=-1)
plt.text(380, 0.3*maxIOP - 0.12*yScaler, '{} {:.4f}'.format('S380 = ', sgDataGOCAD.data[k][i]), color=c)
if k == '412':
wave = np.array(list(range(412, 700)))
ag_extrap = agDataGOCAD.data['412'][i] * np.exp(-1*sgDataGOCAD.data[k][i] * (wave - 412))
plt.plot(wave, ag_extrap, c=[0.9, 0.9, 0.9], ls='--', zorder=-1)
plt.text(440, 0.15*maxIOP- 0.12*yScaler, '{} {:.4f}'.format('S412 = ', sgDataGOCAD.data[k][i]), color=c)
# Now tack on DOC
plt.text(600, 0.5 - 0.12*yScaler, '{} {:3.2f}'.format('DOC = ', docDataGOCAD.data['doc'][i]) , color=c)
axes = plt.gca()
axes.set_title(filename, fontdict=font)
axes.set_ylim([minIOP, maxIOP])
plt.xlabel('wavelength (nm)', fontdict=font)
plt.ylabel(f'{label} [1/m]', fontdict=font)
# Tweak spacing to prevent clipping of labels
plt.subplots_adjust(left=0.15)
plt.subplots_adjust(bottom=0.15)
note = f'Interval: {ConfigFile.settings["fL2TimeInterval"]} s'
axes.text(0.95, 0.95, note,
verticalalignment='top', horizontalalignment='right',
transform=axes.transAxes,
color='black', fontdict=font)
axes.grid()
# plt.show() # --> QCoreApplication::exec: The event loop is already running
# Save the plot
filebasename = filename.split('_')
fp = os.path.join(plotDir, '_'.join(filebasename[0:-1]) + '_' + label + '.png')
plt.savefig(fp)
plt.close() # This prevents displaying the plot on screen with certain IDEs
@staticmethod
def readAnomAnalFile(filePath):
paramDict = {}
with open(filePath, newline='') as csvfile:
paramreader = csv.DictReader(csvfile)
for row in paramreader:
paramDict[row['filename']] = [int(row['ESWindowDark']), int(row['ESWindowLight']), \
float(row['ESSigmaDark']), float(row['ESSigmaLight']),
float(row['ESMinDark']), float(row['ESMaxDark']),
float(row['ESMinMaxBandDark']),float(row['ESMinLight']),
float(row['ESMaxLight']),float(row['ESMinMaxBandLight']),
int(row['LIWindowDark']), int(row['LIWindowLight']),
float(row['LISigmaDark']), float(row['LISigmaLight']),
float(row['LIMinDark']), float(row['LIMaxDark']),\
float(row['LIMinMaxBandDark']),float(row['LIMinLight']),\
float(row['LIMaxLight']),float(row['LIMinMaxBandLight']),\
int(row['LTWindowDark']), int(row['LTWindowLight']),
float(row['LTSigmaDark']), float(row['LTSigmaLight']),
float(row['LTMinDark']), float(row['LTMaxDark']),\
float(row['LTMinMaxBandDark']),float(row['LTMinLight']),\
float(row['LTMaxLight']),float(row['LTMinMaxBandLight']),int(row['Threshold']) ]
paramDict[row['filename']] = [None if v==-999 else v for v in paramDict[row['filename']]]
return paramDict
@staticmethod
def deglitchBand(band, radiometry1D, windowSize, sigma, lightDark, minRad, maxRad, minMaxBand):
''' For a given sensor in a given band (1D), run first- and second-pass outlier detection
on the light or dark data using moving-average filters, then apply the min/max thresholds.
This may benefit in the future from eliminating the thresholded values from the moving
average filter analysis.
'''
if lightDark == 'Dark':
# For Darks, calculate the moving average and residual vectors
# and the OVERALL standard deviation of the residual over the entire file
# First pass
avg = Utilities.movingAverage(radiometry1D, windowSize).tolist()
residual = np.array(radiometry1D) - np.array(avg)
stdData = np.std(residual)
badIndex = Utilities.darkConvolution(radiometry1D,avg,stdData,sigma)
# Second pass
radiometry1D2 = np.array(radiometry1D[:])
radiometry1D2[badIndex] = np.nan
radiometry1D2 = radiometry1D2.tolist()
avg2 = Utilities.movingAverage(radiometry1D2, windowSize).tolist()
residual = np.array(radiometry1D2) - np.array(avg2)
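# NOTE: this sample is truncated here. A plausible continuation, sketched as
# comments only (it assumes a second darkConvolution pass and a lightConvolution
# counterpart, as used elsewhere in this module, and is not necessarily the
# author's exact code):
#   - Dark, second pass: stdData = np.nanstd(residual), then
#     badIndex2 = Utilities.darkConvolution(radiometry1D2, avg2, stdData, sigma)
#   - 'Light' branch: the same two passes, but using a rolling standard
#     deviation and Utilities.lightConvolution instead of darkConvolution
#   - Thresholding: flag samples of the band given by minMaxBand that fall
#     outside [minRad, maxRad] when those limits are set
#   - Finally, combine the per-pass bad-index arrays and return them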
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Image visual clutter utility functions.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# Standard library modules
from typing import Dict, List, Optional, Tuple, Union
# Third-party modules
import cv2
import numpy as np
from scipy import signal
from skimage import transform
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "<NAME>, <NAME>"
__date__ = "2021-08-28"
__email__ = "<EMAIL>"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Image visual clutter utility functions
# ----------------------------------------------------------------------------
def rgb2lab(im: np.ndarray) -> np.ndarray:
"""
Converts the RGB color space to the CIELab color space.
Args:
im: Input RGB image
Returns:
Output Lab image
"""
im = im / 255.0 # get r,g,b value in the range of [0,1]
# Based on the figure from graybar.m and the information from
# http://www.cinenet.net/~spitzak/conversion/whysrgb.html, we can conclude
# that our RGB system is sRGB.
# If the RGB system is sRGB:
mask = im >= 0.04045
im[mask] = ((im[mask] + 0.055) / 1.055) ** 2.4
im[~mask] = im[~mask] / 12.92
# Observer. = 2°, Illuminant = D65
matrix = np.array(
[
[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227],
]
)
c_im = np.dot(im, matrix.T)
c_im[:, :, 0] = c_im[:, :, 0] / 95.047
c_im[:, :, 1] = c_im[:, :, 1] / 100.000
c_im[:, :, 2] = c_im[:, :, 2] / 108.833
mask = c_im >= 0.008856
c_im[mask] = c_im[mask] ** (1 / 3)
c_im[~mask] = 7.787 * c_im[~mask] + 16 / 116
im_Lab = np.zeros_like(c_im)
im_Lab[:, :, 0] = (116 * c_im[:, :, 1]) - 16
im_Lab[:, :, 1] = 500 * (c_im[:, :, 0] - c_im[:, :, 1])
im_Lab[:, :, 2] = 200 * (c_im[:, :, 1] - c_im[:, :, 2])
return im_Lab
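# Example usage (sketch; the file name is hypothetical): OpenCV loads images in
# BGR order, so reverse the channel axis before converting to Lab.
# im_bgr = cv2.imread("example.png")
# im_lab = rgb2lab(im_bgr[:, :, ::-1].astype(float))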
def normlize(arr: np.ndarray) -> np.ndarray:
"""
Normalizes the array input between (min, max) -> (0, 255).
Args:
arr: Ndarray image
Returns:
Normlized ndarray
"""
min_min = arr.min()
max_max = arr.max()
if min_min == max_max:
return np.full_like(arr, 255 / 2).astype("uint8")
else:
return (
(arr - arr.min()) * (1 / (arr.max() - arr.min()) * 255)
).astype("uint8")
def conv2(
x: np.ndarray, y: np.ndarray, mode: Optional[str] = None
) -> np.ndarray:
"""
Computes the two-dimensional convolution of matrices x and y.
Args:
x: First ndarray
y: Second ndarray
mode: Convolution mode, default=None. If set to "same", only the central
part of the convolution (the same size as x) is returned
Returns:
Convolution ndarray of input matrices
"""
if mode == "same":
return np.rot90(
signal.convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2
)
else:
return signal.convolve2d(x, y)
def RRoverlapconv(kernel: np.ndarray, in_: np.ndarray) -> np.ndarray:
"""
Filters the image in_ with filter kernel, where it only "counts" the
part of the filter that overlaps the image. Rescales the filter so its
weights which overlap the image sum to the same as the full filter
kernel.
Args:
in_: Input ndarray image
kernel: Input ndarray filter kernel
Returns:
Filtered ndarray of input image with the filter kernel
"""
# Convolve with the original kernel
out = conv2(in_, kernel, mode="same")
# Convolve kernel with an image of 1's, of the same size as the input image
rect = np.ones_like(in_)
overlapsum = conv2(rect, kernel, "same")
# Now scale the output image at each pixel by the relative overlap of the filter with the image
out = np.sum(kernel) * out / overlapsum
return out
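# Quick check of the rescaling (sketch): with a 3x3 box kernel on a constant
# image of ones, a plain "same" convolution drops to 4 at the corners and 6 at
# the edges, whereas RRoverlapconv stays at ~9 everywhere because it rescales
# by the fraction of the kernel that overlaps the image.
# RRoverlapconv(np.ones((3, 3)), np.ones((5, 5)))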
import math
import numpy as np
class Atom:
def __init__(self, atom_string):
self.id = int(atom_string[6:11])
self.name = atom_string[11:16].strip()
self.alt = atom_string[16]
self.resn = atom_string[17:20].strip()
self.chain = atom_string[21]
self.resi = int(atom_string[22:26])
self.x = float(atom_string[30:38])
self.y = float(atom_string[38:46])
self.z = float(atom_string[46:54])
self.pos = np.array([self.x,self.y,self.z])
self.occ = float(atom_string[54:60])
self.temp_factor = float(atom_string[60:66])
if len(atom_string)>=78:
self.elem = atom_string[76:78].strip()
def __str__(self):
return self.name+"_"+str(self.resi)
def __repr__(self):
return 'Atom("ATOM %5d%5s %3s %c%4d %8.3f%8.3f%8.3f")' % (self.id, self.name, self.resn, self.chain, self.resi, self.x, self.y, self.z)
def pdbString(self):
return 'ATOM %5d%5s %3s %c%4d %8.3f%8.3f%8.3f' % (self.id, self.name, self.resn, self.chain, self.resi, self.x, self.y, self.z)
def __sub__(self, other):
return np.array([self.x-other.x, self.y-other.y, self.z-other.z])
def __add__(self, other):
return np.array([self.x+other.x, self.y+other.y, self.z+other.z])
def distance(self, a):
return math.sqrt((self.x-a.x)**2 + (self.y-a.y)**2 + (self.z-a.z)**2)
def distanceSquared(self, a):
return (self.x-a.x)**2 + (self.y-a.y)**2 + (self.z-a.z)**2
class PDBFile:
""" A representation of a PDB-file """
def pdbDownload(self,pdb_id):
hostname="ftp.wwpdb.org"
directory="/pub/pdb/data/structures/all/pdb/"
prefix="pdb"
suffix=".ent.gz"
import os, sys, ftplib, shutil, gzip
# Log into server
#print "Downloading %s from %s ..." % (pdb_id, hostname)
ftp = ftplib.FTP()
ftp.connect(hostname)
ftp.login()
# Download all files in file_list
to_get = "%s/%s%s%s" % (directory,prefix,pdb_id.lower(),suffix)
to_write = "%s%s" % (pdb_id,suffix)
final_name = "%s.pdb" % to_write[:to_write.index(".")]
try:
ftp.retrbinary("RETR %s" % to_get,open(to_write,"wb").write)
f = gzip.open(to_write,'r')
g = open(final_name,'w')
g.writelines(f.readlines())
f.close()
g.close()
os.remove(to_write)
except ftplib.error_perm:
os.remove(to_write)
print("ERROR! %s could not be retrieved from PDB!" % to_get)
ftp.quit()
return None
# Log out
ftp.quit()
return final_name
def __init__(self, pdb):
""" Initialize a PDBFile object using a PDB-file or PDB-id. If pdb is 4 characters long
it's assumed to be a PDB-id and the corresponding PDB-file will be downloaded and used. """
self.file_name = pdb
self.models = []
cur_model = None
if len(pdb)==4:
self.file_name = self.pdbDownload(pdb)
if self.file_name.endswith(".gz"):
import gzip
f = gzip.open(self.file_name, "r")
lines = map(lambda l: l.decode('ascii'), f.readlines())
else:
f = open(self.file_name,'r')
lines = f.readlines()
for line in lines:
if line[0:4] == "ATOM":
if cur_model==None: cur_model = []
cur_model.append(Atom(line))
if (line[0:6] == "ENDMDL" or line[0:5] == "MODEL") and cur_model!=None:
self.models.append(cur_model)
cur_model = None
if cur_model!=None:
self.models.append(cur_model)
f.close()
def removeResidues(self, residues):
for model in range(len(self.models)):
self.models[model] = [ a for a in self.models[model] if not a.resi in residues ]
def getAtom(self, res_number, atom_name, model_number = 0):
for atom in self.models[model_number]:
if atom.resi==res_number and atom.name==atom_name:
return atom
def getAtomById(self, atom_id):
for model in self.models:
for atom in model:
if atom.id==atom_id:
return atom
def getAtoms(self, model_number = 0):
return self.models[model_number]
def getAtomsInResi(self, resi, model_number = 0):
ret = []
for atom in self.models[model_number]:
if atom.resi==resi:
ret.append(atom)
return ret
def getResidues(self, model_number = 0):
'''
Return a sorted list of all residue numbers in this structure
'''
ret = set()
for atom in self.models[model_number]:
ret.add(atom.resi)
return sorted(list(ret))
def getResidueIDsandNames(self, model_number = 0):
'''
Return a sorted list of all residue numbers and names in this structure
'''
ret = set()
for atom in self.models[model_number]:
ret.add((atom.resi,atom.resn))
return sorted(list(ret))
def getChains(self, model_number = 0):
'''
Return a set of unique chain identifiers
'''
return set(map(lambda a: a.chain, self.models[model_number]))
def getSequence(self, model_number = 0):
'''
Get the sequence of this structure. Currently only works for RNA (single-char resn)
'''
protresnmap={'ALA':'A','ARG':'R','ASN':'N','ASP':'D','ASX':'B','CYS':'C','GLU':'E','GLN':'Q','GLX':'Z','GLY':'G','HIS':'H','ILE':'I','LEU':'L','LYS':'K','MET':'M','PHE':'F','PRO':'P','SER':'S','THR':'T','TRP':'W','TYR':'Y','VAL':'V'}
ret = ''
prevResi = -1000
for atom in self.models[model_number]:
if prevResi==-1000 or prevResi+1==atom.resi:
if len(atom.resn)==1: #Its RNA/DNA: Just use the resn
ret+=atom.resn
else: #Its probably protein. Use the resn map
if atom.resn in protresnmap:
ret+=protresnmap[atom.resn]
else:
ret+='?'
prevResi=atom.resi
else:
while prevResi<atom.resi:
ret+='_'
prevResi+=1
return ret
def bFactorList(self, model_number = 0, names=["C4'","CA"],resis=None):
"""
Get a list of B-factors for atoms with one of the specified names. An optional list of residues
can limit the B-factors to specified residues only, which is useful for
comparison across non-identical sequences or structures with missing loops.
"""
ret =[]
for atom in self.models[model_number]:
if names and atom.name not in names: continue
if resis and atom.resi not in resis: continue
ret.append(atom.temp_factor)
return ret
def coordMatrix(self, model_number = 0, names=["C4'","CA"],resis=None):
"""
Get a coordinate-matrix of shape (a, 3) where a is the number of atoms with one of the specified names.
New: an optional list of residues can limit the coordinate Matrix to specified residues only, useful for
comparison across non-identical sequences or with missing loops.
"""
ret = np.zeros( shape=( len(self.models[model_number]) , 3 ) )
a = 0
for atom in self.models[model_number]:
if names and atom.name not in names: continue
if resis and atom.resi not in resis: continue
ret[a][0] = atom.x
ret[a][1] = atom.y
ret[a][2] = atom.z
a+=1
ret.resize(a,3)
return ret
def rmsd(self, pdbFile, model1=0, model2=0, names=None):
"""
Return the smallest root-mean-square-deviation between coordinates in self and pdbFile.
If names is None, then all atoms are used.
"""
crds1 = self.coordMatrix(model1, names=names)
crds2 = pdbFile.coordMatrix(model2, names=names)
assert(crds1.shape[1] == 3)
if crds1.shape[0] != crds2.shape[0]:
print("Structure 1 size does not match structure 2 (", crds1.shape[0], "vs", crds2.shape[0], ")")
assert(crds1.shape == crds2.shape)
n = np.shape(crds1)[0]
# Move crds1 to origo
avg1 = np.zeros(3)
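# NOTE: this sample is truncated here. The remainder of a standard Kabsch-style
# RMSD computation (a sketch, not necessarily the author's exact code) would:
# subtract each structure's centroid from its coordinates, form the 3x3
# correlation matrix C = crds2.T @ crds1, take its SVD, flip the sign of the
# smallest singular value if the rotation would otherwise be a reflection
# (negative determinant), and return
# sqrt((sum(crds1**2) + sum(crds2**2) - 2*sum(singular_values)) / n).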
import warnings
import numpy as np
import matplotlib.pyplot as plt
def Watt2dBm(x):
'''
converts from units of watts to dBm
'''
return 10.*np.log10(x*1000.)
def dBm2Watt(x):
'''
converts from units of dBm to watts
'''
return 10**(x/10.) /1000.
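# Quick sanity check of the two conversions above: 1 mW corresponds to 0 dBm,
# so Watt2dBm(0.001) == 0.0 and dBm2Watt(0.0) == 0.001, and chaining the two
# functions in either order returns the input value.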
class plotting(object):
'''
some helper functions for plotting
'''
def plotall(self):
real = self.z_data_raw.real
imag = self.z_data_raw.imag
real2 = self.z_data_sim.real
imag2 = self.z_data_sim.imag
plt.subplot(221)
plt.plot(real,imag,label='rawdata')
plt.plot(real2,imag2,label='fit')
plt.xlabel('Re(S21)')
plt.ylabel('Im(S21)')
plt.legend()
plt.subplot(222)
plt.plot(self.f_data*1e-9,np.absolute(self.z_data_raw),label='rawdata')
plt.plot(self.f_data*1e-9,np.absolute(self.z_data_sim),label='fit')
plt.axvline(x=self.fitresults['fr']*1e-9, ymin=0, ymax=1,color='red')
plt.xlabel('f (GHz)')
plt.ylabel('|S21|')
plt.legend()
plt.subplot(223)
plt.plot(self.f_data*1e-9,np.angle(self.z_data_raw),label='rawdata')
plt.plot(self.f_data*1e-9,np.angle(self.z_data_sim),label='fit')
plt.xlabel('f (GHz)')
plt.ylabel('arg(|S21|)')
plt.legend()
plt.subplot(224)
text = "fr= %f GHz"%self.fitresults['fr']
plt.text(5,5,text,fontsize=20,color="red",verticalalignment ='top', horizontalalignment ='center',bbox ={'facecolor':'grey', 'pad':10})
plt.xlim([0,10])
plt.ylim([0,10])
plt.show()
def plotcalibrateddata(self):
real = self.z_data.real
imag = self.z_data.imag
plt.subplot(221)
plt.plot(real,imag,label='rawdata')
plt.xlabel('Re(S21)')
plt.ylabel('Im(S21)')
plt.legend()
plt.subplot(222)
plt.plot(self.f_data*1e-9,np.absolute(self.z_data),label='rawdata')
plt.xlabel('f (GHz)')
plt.ylabel('|S21|')
plt.legend()
plt.subplot(223)
plt.plot(self.f_data*1e-9,np.angle(self.z_data),label='rawdata')
plt.xlabel('f (GHz)')
plt.ylabel('arg(|S21|)')
plt.legend()
plt.show()
def plotrawdata(self):
real = self.z_data_raw.real
imag = self.z_data_raw.imag
plt.subplot(221)
plt.plot(real,imag,label='rawdata')
plt.xlabel('Re(S21)')
plt.ylabel('Im(S21)')
plt.legend()
plt.subplot(222)
plt.plot(self.f_data*1e-9,np.absolute(self.z_data_raw),label='rawdata')
plt.xlabel('f (GHz)')
plt.ylabel('|S21|')
plt.legend()
plt.subplot(223)
plt.plot(self.f_data*1e-9,np.angle(self.z_data_raw),label='rawdata')
plt.xlabel('f (GHz)')
plt.ylabel('arg(|S21|)')
plt.legend()
plt.show()
class save_load(object):
'''
procedures for loading and saving data used by other classes
'''
def _ConvToCompl(self,x,y,dtype):
'''
dtype = 'realimag', 'dBmagphaserad', 'linmagphaserad', 'dBmagphasedeg', 'linmagphasedeg'
'''
if dtype=='realimag':
return x+1j*y
elif dtype=='linmagphaserad':
return x*np.exp(1j*y)
elif dtype=='dBmagphaserad':
return 10**(x/20.)*np.exp(1j*y)
elif dtype=='linmagphasedeg':
return x*np.exp(1j*y/180.*np.pi)
elif dtype=='dBmagphasedeg':
return 10**(x/20.)*np.exp(1j*y/180.*np.pi)
else: warnings.warn("Undefined input type! Use 'realimag', 'dBmagphaserad', 'linmagphaserad', 'dBmagphasedeg' or 'linmagphasedeg'.", SyntaxWarning)
def add_data(self,f_data,z_data):
self.f_data = np.array(f_data)
self.z_data_raw = np.array(z_data)
def cut_data(self,f1,f2):
def findpos(f_data,val):
pos = 0
for i in range(len(f_data)):
if f_data[i]<val: pos=i
return pos
pos1 = findpos(self.f_data,f1)
pos2 = findpos(self.f_data,f2)
self.f_data = self.f_data[pos1:pos2]
self.z_data_raw = self.z_data_raw[pos1:pos2]
def add_fromtxt(self,fname,dtype,header_rows,usecols=(0,1,2),fdata_unit=1.,delimiter=None):
'''
dtype = 'realimag', 'dBmagphaserad', 'linmagphaserad', 'dBmagphasedeg', 'linmagphasedeg'
'''
data = np.loadtxt(fname,usecols=usecols,skiprows=header_rows,delimiter=delimiter)
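# NOTE: this sample is truncated here. A plausible continuation (sketch, based
# on the class interface used elsewhere in this file) would be:
#   self.f_data = data[:, 0] * fdata_unit
#   self.z_data_raw = self._ConvToCompl(data[:, 1], data[:, 2], dtype)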
from .ldft_model import LdftModel
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
from functools import reduce
class LG2dAOHighl(LdftModel):
"""This class describes a single component lattice gas in 2d with
sticky next neighbour attractions on a simple cubic lattice. The
description is done within the framework of lattice density
functional theory (ldft). The free energy functional was constructed
by translating the model to the Asakura-Oosawa (AO) model and then
setting up the functional of the resulting colloid-polymer
dispersion by the Highlander version of dft. Therefore this class
works with three species instead of one, namely the species of the
extended AO-model (the colloid, a polymer-cluster species accounting for
the attraction in x-direction, and another polymer-cluster species for the
attraction in y-direction). The free energy functional is the one for the three
species. It differs from the free energy functional of the AO-model
just by a correction term accounting for the zero- and one-body
interaction of the polymers. If one wants the free energy of the
lattice gas, one would have to calculate the semi-grand potential of
the previous free energy, where the polymer clusters are treated
grand-canonically and the colloids canonically. In this class extra
functions are supported for this. The colloids correspond to the
species in the lattice gas.
Parameters
----------
size : `tuple` of `int`
Shape of the systems simulation box. Expects a `Tuple` of two
integers, each for one dimensional axis.
epsi : `float`
Attraction strength of the lattice gas particles (multiplied
with the inverse temperature to make it's dimension 1). From
this the value of the chemical potential of the polymer clusters
is calculated.
mu_fix_c : `bool`, optional: default = False
Determines whether or not the system is treated canonical or
grand canonical. Meant is the lattice gas system. This parameter
therefore only steers the colloid-species. The others are set
`True` by default. `False` for canonical.
mu_c : `float`, optional: default = `None`
The chemical potential for the colloid species (multiplied with
the inverse temperature to make its dimension 1). Just required
when ``mu_fix==True``. The chemical potential of the polymer
clusters is determined by the value of ``epsi``.
dens_c : `float`, optional: default = `None`
The average density of the colloids. Just required when
``mu_fix``==`False`. The average density of the polymer clusters
is not required, as for those ``mu_fix`` is set `True`.
v_ext_c : `numpy.ndarray`, optional: default=`None`
An external potential for the colloids. Shape must be of the
same shape as chosen in ``size``. This class does not consider
the possibility of sticky walls. Therefore the external
potential of polymers is set zero by default.
bound_cond : `string`, optional: default='periodic'
The boundary condition. Supports 'periodic' for periodic
boundary conditions and '11_if' for a 45° tilted system with
respect to the lattice. The latter is for creating slab
interface with (11) orientation. If '11_if' is chosen then one
dimension has to be chosen twice as the other dimension in the
``size`` parameter e.g. (64, 128). Default value is 'periodic'.
r : `List` of `np.array`; Optional: default = `None`
Density profile for all three species arranged in a `List`. Choose
`None` in case you hand over the ``r_hist``-parameter or in case
you do not want to set the variable yet.
r_hist : `List` of `List` of `np.array`; Optional: default = `None`
Picard-history of a density profile. It contains the density
profiles for certain picard-steps of a system which has already
been evolved through picard iteration. Caution! Every entry is
of the format of the ``_r``-instance variable, which is a list
itself containing the profile for each species. Therefore in our
case the list is of length one. Use `None` if the system has no
history yet.
err_hist : `List` of `Tuple` of `Float`; Optional: default = `None`
Contains the error at the picard-steps corresponding to the
entries of `r_hist`. The entries are tuples containing an error
for every species. Use `None` if no history available.
it_hist : `List`; Optional: default = `None`
List of the picard-steps corresponding to the density profiles at
the ``r_hist``-parameter. Use `None` if no history available.
Note: if ``r_hist`` is given then also this argument should be
assigned with an appropriate list.
"""
def __init__(self, size, epsi, mu_fix_c=False, mu_c=None,\
dens_c=None, v_ext_c=None, bound_cond='periodic', r=None,\
r_hist=None, err_hist=None, it_hist=None):
mu_pc=self.translate_epsi_to_mu_pc(epsi)
v_ext_pc = np.zeros(size)
v_ext_c = v_ext_pc if type(v_ext_c)==type(None) else v_ext_c
super().__init__(size=size, mu_fix=[mu_fix_c, True, True],
mu=[mu_c, mu_pc, mu_pc], dens=[dens_c, None, None],
v_ext=[v_ext_c, v_ext_pc, v_ext_pc], r=r, r_hist=r_hist,
err_hist=err_hist, it_hist=it_hist,
bound_cond=bound_cond)
def __str__(self):
descrLG2dHighl = 'This is a Lattice gas described with lattice'\
+' DFT. It was translated to the AO-model and the'\
+' functional was constructed by the Highlander method'\
+'. It is an object of the Type \'LG2dAOHighl\' and has'\
+' the following properties:'
epsiStr='{0:<40s}: {1}\n'.format('Attr. strength \'epsi\'',\
self.epsi)
motherClass = 'It inherits from \'LdftModel\', with the'\
+' following properties:'
descrLdftModel=super().__str__()
return descrLG2dHighl+'\n\n'+epsiStr+'\n'+motherClass+\
'\n\n'+descrLdftModel
####################################################################
#Protected descriptors for internal use. These are for a more
#convenient addressing of the species specific instance variables.
#Important to notice: do not override the protected variables of the
#super class LdftModel. Otherwise the functionality of the instance
#methods in LdftModel can not be secured.
####################################################################
@property
def _mu_c(self):
"""The chemical potential of the colloid species (times the
inverse temperature to make its dimension 1)
(`float`, read-only).
"""
return self._mu[0]
@property
def _mu_pc1(self):
"""The chemical potential of the polymer species in x-direction
(times the inverse temperature to make its dimension 1).
(`float`, read-only)
"""
return self._mu[1]
@property
def _mu_pc2(self):
"""The chemical potential of the polymer species in y-direction
(times the inverse temperature to make its dimension 1).
(`float`, read-only)
"""
return self._mu[2]
@property
def _dens_c(self):
"""The average density of the colloid species (`float`,
read-only).
"""
return self._dens[0]
@property
def _dens_pc1(self):
"""The average density of the polymer species in x-direction
(`float`, read-only).
"""
return self._dens[1]
@property
def _dens_pc2(self):
"""The average density of the polymer species in x-direction
(`float`, read-only).
"""
return self._dens[2]
@property
def _v_ext_c(self):
"""The external potential acting on the colloids (`np.array`,
read-only)
"""
return self._v_ext[0]
@property
def _v_ext_pc1(self):
"""The external potential acting on the polymer clusters in
x-direction. (`np.array`, read-only)
"""
return self._v_ext[1]
@property
def _v_ext_pc2(self):
"""The external potential acting on the polymer clusters in
y-direction. (`np.array`, read-only)
"""
return self._v_ext[2]
@property
def _r_c(self):
"""The density profile of the colloid species. (`numpy.ndarray`,
read-only)
"""
return self._r[0]
@property
def _r_pc1(self):
"""The density profile of the polymer species in x-direction.
(`numpy.ndarray`, read-only)
"""
return self._r[1]
@property
def _r_pc2(self):
"""The density profile of the polymer species in y-direction.
(`numpy.ndarray`, read-only)
"""
return self._r[2]
####################################################################
#Public descriptors. These are for the user to access the variables
#of interest. Some are already defined in the super class. Some of
#them are reused, but others are overwritten.
####################################################################
@property
def epsi(self):
"""The attraction strength between the lattice-particles of the
lattice gas. (`Float`, read-only)
"""
return self.translate_mu_pc_to_epsi(self._mu_pc1)
@property
def mu_c(self):
"""The chemical potential of the colloids (times the inverse
temperature to make its dimension 1). It is equals the chemical
potential of the particles of the lattice gas. (`float`)
"""
return self._mu[0]
@mu_c.setter
def mu_c(self, mu_c):
self._mu[0]=mu_c
mu_pc1=_mu_pc1
"""The chemical potential of the polymer-cluster in x-direction
(times the inverse temperature to make its dimension 1).
(`float`, read-only)
"""
mu_pc2=_mu_pc2
"""The chemical potential of the polymer-cluster in y-direction
(times the inverse temperature to make its dimension 1).
(`float`, read-only)
"""
@LdftModel.mu.setter
def mu(self, mu):
print('This setter has been deactivated in favour for \`mu_c\`')
@property
def dens_c(self):
"""The average density of the colloids. It is equals the average
density in the lattice gas. (`float`)
"""
return self._dens[0]
dens_pc1=_dens_pc1
"""The average density of the polymer clusters in x-direction.
(`float`, read-only)
"""
dens_pc2=_dens_pc2
"""The average density of the polymer clusters in x-direction.
(`float`, read-only)
"""
@LdftModel.dens.setter
def dens(self, dens):
print('This setter has been deactivated in favour for \
\`dens_c\`')
@property
def mu_fix_c(self):
"""Flag which determines Wether the colloids (a.k. the particles
of the lattice gas) are treated canonical (`False`) or grand
canonical (`True`). (`Bool`)
"""
return self._mu_fix[0]
@mu_fix_c.setter
def mu_fix_c(self, mu_fix_c):
self._mu_fix[0]=mu_fix_c
@LdftModel.mu_fix.setter
def mu_fix(self, mu_fix):
print('This setter has been deactivated in favour for \
\`mu_fix_c\`')
@property
def v_ext_c(self):
"""External potential acting on the colloids (a.k. the particles
of the lattice gas). (`np.array`)
"""
return self._v_ext[0]
@v_ext_c.setter
def v_ext_c(self, v_ext_c):
self._v_ext[0]=v_ext_c
@LdftModel.v_ext.setter
def v_ext(self, v_ext):
print('This setter has been deactivated in favour for \
\`v_ext_c\`')
@property
def r_c(self):
"""The density profile of the colloids (a.k. the particles of
the lattice gas). (`np.array`, read-only)
"""
return self._r[0]
r_pc1=_r_pc1
"""The density profile of the polymer clusters in x-direction.
(`np.array`, read-only)
"""
r_pc2=_r_pc2
"""The density profile of the polymer clusters in y-direction.
(`np.array`, read-only)
"""
@property
def r_c_hist(self):
"""Iteration history of the density profile of the colloids
(i.e. the particles of the lattice gas). (`List`, read-only)
"""
r_c_hist = [r[0] for r in self._r_hist]
return r_c_hist
@property
def err_c_hist(self):
"""Iteration history of the picard-error at the colloidal
density profile. (`List`, read-only)
"""
err_hist =[err[0] for err in self._err_hist]
return err_hist
####################################################################
#Map the lattice gas to the AO-model:
####################################################################
@staticmethod
def translate_epsi_to_mu_pc(epsi):
"""Maps the attraction strength of the lattice gas ``epsi`` to
the corresponding polymer cluster chemical potential.
Parameters
----------
epsi : `float`
The attraction strength (multiplied with the inverse
temperature to make the quantity dimensionless).
Returns
-------
mu_pc : The chemical potential (multiplied with the inverse
temperature to make the quantity dimensionless). (`float`)
"""
mu_pc=np.log(np.exp(epsi)-1)
return mu_pc
@staticmethod
def translate_mu_pc_to_epsi(mu_pc):
"""Maps the polymer cluster chemical potential to the attraction
strength of the lattice gas ``epsi``.
Parameters
----------
mu_pc : `float`
The polymer chemical potential (multiplied with the inverse
temperature to make the quantity dimensionless).
Returns
-------
epsi : The attraction strength (multiplied with the inverse
temperature to make the quantity dimensionless). (`float`)
"""
epsi=np.log(np.exp(mu_pc)+1)
return epsi
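# Worked example of the two mappings above: for epsi = ln(2) ~ 0.693 the
# polymer cluster chemical potential is mu_pc = ln(exp(epsi) - 1) = ln(1) = 0,
# and the inverse map gives back epsi = ln(exp(0) + 1) = ln(2), i.e. the two
# static methods are exact inverses of each other.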
####################################################################
#The inhomogeneous functional:
#In this section all the functions concerning the model specific
#free energy functional are defined.
####################################################################
def _cal_n(self):
"""Calculates the weighted densities necessary for the
calculation of the free energy and the excess chemical
potential.
Returns
-------
Result : `tuple` of `numpy.ndaray`
"""
n1 = self._r_c + self._r_pc1
n2 = self._boundary_roll(self._r_c, -1, axis=1) + self._r_pc1
n3 = self._r_c + self._r_pc2
n4 = self._boundary_roll(self._r_c, -1, axis=0) + self._r_pc2
n5 = self._r_pc1
n6 = self._r_pc2
n7 = self._r_c
return n1, n2, n3, n4, n5, n6, n7
def _cal_Phi_ex_AO(self):
"""Calculates the excess free energy of the AO-model.
Returns
-------
Result : `np.array`
Free energy density of the AO-model.
"""
n=self._cal_n()
n1=n[0]
n2=n[1]
n3=n[2]
n4=n[3]
n5=n[4]
n6=n[5]
n7=n[6]
Phi0=self._cal_Phi_0
Phi_ex = Phi0(n1)+Phi0(n2)+Phi0(n3)+Phi0(n4)-Phi0(n5)-Phi0(n6)\
-3*Phi0(n7)
return Phi_ex
def cal_F(self):
"""Calculates the free energy of the three component system. It
differs from the free energy functional of the AO-model just by
a correction term accounting for the zero- and one-body
interaction of the polymers (see description of the class). For
getting the free energy of the lattice gas use ``cal_F_lg``,
which is the semi-grand potential, where the polymer clusters are
treated grand canonically and the colloids canonically.
Returns
-------
Result : `float`
Free energy of the three component system (times the inverse
temperature to make the results dimension 1).
"""
z_pc1 = np.exp(self._mu_pc1)
z_pc2 = np.exp(self._mu_pc2)
r_c = self._r_c
r_pc1 = self._r_pc1
r_pc2 = self._r_pc2
Phi_id = self._cal_Phi_id()
Phi_ex = self._cal_Phi_ex_AO()
F_id = np.sum(Phi_id)
F_ex_AO = np.sum(Phi_ex)
F = (F_id + F_ex_AO
- np.log(z_pc1+1)
*np.sum(-1+r_c+self._boundary_roll(r_c, -1, axis=1))
- np.log(z_pc2+1)
*np.sum(-1+r_c+self._boundary_roll(r_c, -1, axis=0)))
return F
def cal_F_lg(self):
"""Calculates the free energy of the lattice gas. If
``self.mu_fix==False`` this should give the same result as the
``cal_semi_Om``-function.
Returns
-------
Result : `float`
Free energy of the lattice gas.
"""
F_lg = self.cal_F()
mu_pc1 = self._mu_pc1
mu_pc2 = self._mu_pc2
r_pc1 = self._r_pc1
r_pc2 = self._r_pc2
F_lg -= (mu_pc1*np.sum(r_pc1)+mu_pc2*np.sum(r_pc2))
return F_lg
@LdftModel._RespectBoundaryCondition()
def cal_mu_ex(self):
n = self._cal_n()
n1=n[0]
n2=n[1]
n3=n[2]
n4=n[3]
n5=n[4]
n6=n[5]
n7=n[6]
z_pc = np.exp(self._mu_pc1)
mu_c_ex = np.log((1-n1)*(1-self._boundary_roll(n2, 1, axis=1))\
*(1-n3)*(1-self._boundary_roll(n4, 1, axis=0))\
/(1-n7)**3) + 4*np.log(z_pc+1)
mu_pc1_ex = np.log((1-n1)*(1-n2)/(1-n5))
mu_pc2_ex = np.log((1-n3)*(1-n4)/(1-n6))
return mu_c_ex, mu_pc1_ex, mu_pc2_ex
####################################################################
#The homogeneous methods:
#The following section contains all the methods concerning the bulk
#properties of the system.
####################################################################
@classmethod
def _cal_bulk_r_pc(cls, r_c, epsi):
"""Calculates the bulk polymer cluster density in dependence of
the colloid density and the chosen attraction strength
Parameters
----------
r_c : `float` or `np.ndarray`
The colloid density.
epsi : `float`
Attraction strength (times inverse temperature).
Returns
-------
r_pc : `float` or `np.ndarray`
The polymer cluster density.
"""
mu_pc = cls.translate_epsi_to_mu_pc(epsi)
z_pc = np.exp(mu_pc)
r_pc = ((1+2*z_pc*(1-r_c))/(2*(z_pc+1))
- 1/(2*(z_pc+1))*np.sqrt((1+2*z_pc*(1-r_c))**2 -
4*z_pc*(z_pc+1)*(1-r_c)**2))
return r_pc
@classmethod
def _cal_bulk_dr_pc(cls, r_c, epsi):
"""Calculates the derivative of the bulk polymer cluster density
with respect to the colloidal density, as a function of
the colloid density and the chosen attraction strength.
Parameters
----------
r_c : `float` or `np.ndarray`
The colloid density.
epsi : `float`
Attraction strength (times inverse temperature).
Returns
-------
dr_pc : `float` or `np.ndarray`
The derivative of the polymer cluster density.
"""
mu_pc = cls.translate_epsi_to_mu_pc(epsi)
z_pc = np.exp(mu_pc)
dr_pc = -z_pc/(z_pc+1)\
*(1+(1-2*r_c)/np.sqrt(4*z_pc*(1-r_c)*r_c+1))
return dr_pc
@classmethod
def cal_bulk_mu_lg(cls, r_c, epsi):
"""Calculates the chemical potential for a bulk lattice gas.
Parameters
----------
r_c : `Float` or `np.ndarray`
The colloidal density.
epsi : `Float`
Attraction strength
Returns
-------
mu : `Float` or `np.ndarray`
The chemical potential for the lattice gas.
"""
r_pc = cls._cal_bulk_r_pc(r_c, epsi)
mu_pc = cls.translate_epsi_to_mu_pc(epsi)
z_pc = np.exp(mu_pc)
mu_c = (np.log(r_c) +4*cls._cal_dPhi_0(r_c+r_pc)
-3*cls._cal_dPhi_0(r_c)-4*np.log(z_pc+1))
return mu_c
@classmethod
def cal_bulk_dmu_lg(cls, r_c, epsi):
"""Calculates the derivative of the chemical potential from the
bulk lattice gas with respect to the colloidal density.
Parameters
----------
r_c : `Float` or `np.ndarray`
The colloidal density.
epsi : `Float`
Attraction strength
Returns
-------
dmu : `Float` or `np.ndarray`
The derivative of the chemical potential from the lattice
gas.
"""
r_pc = cls._cal_bulk_r_pc(r_c, epsi)
dr_pc = cls._cal_bulk_dr_pc(r_c, epsi)
mu_pc = cls.translate_epsi_to_mu_pc(epsi)
z_pc = np.exp(mu_pc)
dmu = 1/r_c + 4*cls._cal_d2Phi_0(r_c+r_pc)*(1+dr_pc)\
-3*cls._cal_d2Phi_0(r_c)
return dmu
@classmethod
def _cal_bulk_f_AO_id(cls, r_c, r_pc):
"""Calculates the ideal gas part of the free energy density of
a bulk AO-system under given colloid and polymer cluster
density.
Parameters
----------
r_c : `float`
Colloid density
r_pc : `float`
Polymer cluster density
Returns
-------
f_id : `float`
The ideal gas part of the free energy density.
"""
f_id = r_c*(np.log(r_c)-1) +2*r_pc*(np.log(r_pc)-1)
return f_id
@classmethod
def _cal_bulk_f_AO_ex(cls, r_c, r_pc):
"""Calculates the excess part of the free energy density of a
bulk AO-system under given colloid and polymer cluster density.
Parameters
----------
r_c : `float`
Colloid density
r_pc : `float`
Polymer cluster density
Returns
-------
f_ex : `float`
The excess part of the free energy density.
"""
n1 = n2 = n3 = n4= r_c+r_pc
n5 = n6 = r_pc
n7 = r_c
f_ex = (cls._cal_Phi_0(n1)+cls._cal_Phi_0(n2)+cls._cal_Phi_0(n3)
+cls._cal_Phi_0(n4)-3*cls._cal_Phi_0(n7)
-cls._cal_Phi_0(n5)-cls._cal_Phi_0(n6))
return f_ex
@classmethod
def cal_bulk_f_lg(cls, r_c, epsi):
"""Calculates the free energy density of the bulk lattice gas
under given density. (The function is the same as in
``cal_F_lg`` but simplified for bulk systems.)
Parameters
----------
r_c: `float` or `np.ndarray`
Density
epsi: `float`
Attraction strength (times inverse temperature)
Returns
-------
f : `float` or `np.ndarray`
The free energy density of a bulk lattice gas.
"""
r_pc = cls._cal_bulk_r_pc(r_c, epsi)
f_AO_id = cls._cal_bulk_f_AO_id(r_c, r_pc)
f_AO_ex = cls._cal_bulk_f_AO_ex(r_c, r_pc)
mu_pc = cls.translate_epsi_to_mu_pc(epsi)
z_pc = np.exp(mu_pc)
f_tilde = f_AO_id+f_AO_ex-2*np.log(z_pc+1)*(2*r_c-1)
f_eff = f_tilde-2*r_pc*np.log(z_pc)
return f_eff
@classmethod
def cal_bulk_om_lg(cls, r, epsi):
"""Calculates the grand potential density for a bulk lattice gas
under given densities.
Parameters
----------
r : `float` or `np.ndarray`
The density.
epsi : `float`
The attraction strength (times inverse temperature).
Returns
-------
om : `Float`
The grand potential density
"""
f = cls.cal_bulk_f_lg(r, epsi)
mu = cls.cal_bulk_mu_lg(r, epsi)
om = f-mu*r
return om
@classmethod
def cal_bulk_p(cls, r, epsi):
"""Calculates the pressure of a bulk lattice gas under given
density.
Parameters
----------
r : `float` or `np.ndarray`
The density.
epsi : `float`
The attraction strength (times inverse temperature).
Returns
-------
The pressure : `Float`
"""
p = -cls.cal_bulk_om_lg(r, epsi)
return p
@classmethod
def _cal_difMu(cls, r_c, *args):
"""Calculates the difference between a certain chemical
potential of the lattice gas and the chemical potential
belonging to a certain density. This is a helper function for the
function ``cal_bulk_coex_dens``.
Parameters
----------
r_c : `float`
The colloid density of the system
*args:
First argument: Attraction strength (times inverse
temperature). (`float`)
Second argument: The reference chemical potential which the
chemical potential for at density r_c should be compared to.
(`float`)
Returns
-------
difMu : `float`
The difference between the two colloidal chem. pot.
"""
epsi = args[0]
mu_c = args[1]
mu = cls.cal_bulk_mu_lg(r_c, epsi)
return mu-mu_c
@classmethod
def cal_bulk_coex_dens(cls, mu, epsi, init_min=0.01, init_max=0.99):
"""Calculates the coexisting densities of a bulk system lattice
gas system under given chemical potential.
Parameters
----------
mu : `Float`
The chemical potential of the lattice gas.
epsi : `Float`
The attraction strength (times inverse temperature).
Returns
-------
r_coex : `Tuple`
The coexisting densities arranged in a tuple of the shape
(vapour_dens, liquid_dens)
"""
def dmu(rc, *args):
epsi = args[0]
mu_c = args[1]
return np.diag(cls.cal_bulk_dmu_lg(rc, epsi))
if (init_max-init_min < 0.5 or init_min<=0 or init_max>=1 or
abs(init_max+init_min-1)>0.01):
init_min=0.01
init_max=0.99
r_coex = op.fsolve(cls._cal_difMu,
np.array([init_min, init_max]),
args=(epsi, mu), fprime=dmu)
r_coex = tuple(r_coex)
return r_coex
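# Example usage (sketch; the numerical values are purely illustrative and not
# taken from the original source): query bulk coexistence for a given
# attraction strength and set up a small periodic system at that state point.
# epsi = 2.5
# mu_coex = LG2dAOHighl.cal_bulk_mu_lg(0.5, epsi)  # mu at half filling, which
# # is the coexistence value by the particle-hole symmetry of the lattice gas
# r_vap, r_liq = LG2dAOHighl.cal_bulk_coex_dens(mu_coex, epsi)
# model = LG2dAOHighl(size=(32, 32), epsi=epsi, mu_fix_c=True, mu_c=mu_coex)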
from numba import njit
import numpy as np
def det1(A):
"""Compute the determinants of a series of 1x1 matrices.
Parameters
----------
A : nx1x1 array_like
Arrays to compute determinants of
Returns
-------
dets : array of length n
Matrix determinants
"""
return A.flatten()
@njit(cache=True)
def det2(A):
"""Compute the determinants of a series of 2x2 matrices.
Parameters
----------
A : nx2x2 array_like
Arrays to compute determinants of
Returns
-------
dets : array of length n
Matrix determinants
"""
n = np.shape(A)[0]
dets = np.zeros(n)
for i in range(n):
dets[i] = A[i, 0, 0] * A[i, 1, 1] - A[i, 0, 1] * A[i, 1, 0]
return dets
@njit(cache=True)
def det3(A):
"""Compute the determinants of a series of 3x3 matrices.
Parameters
----------
A : nx3x3 array_like
Arrays to compute determinants of
Returns
-------
dets : array of length n
Matrix determinants
"""
n = np.shape(A)[0]
dets = np.zeros(n)
for i in range(n):
dets[i] = (
A[i, 0, 0] * (A[i, 1, 1] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 1])
- A[i, 0, 1] * (A[i, 1, 0] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 0])
+ A[i, 0, 2] * (A[i, 1, 0] * A[i, 2, 1] - A[i, 1, 1] * A[i, 2, 0])
)
return dets
def inv1(A):
"""Compute the inverses of a series of 1x1 matrices.
Parameters
----------
A : nx1x1 array_like
Arrays to compute inverses of
Returns
-------
invs : nx1x1 array
Matrix inverses
"""
return 1.0 / A
@njit(cache=True)
def inv2(A):
"""Compute the inverses of a series of 2x2 matrices.
Parameters
----------
A : nx2x2 array_like
Arrays to compute inverses of
Returns
-------
invs : nx2x2 array
Matrix inverses
"""
invdets = 1.0 / det2(A)
n = len(invdets)
invs = np.zeros((n, 2, 2))
for i in range(n):
invs[i, 0, 0] = invdets[i] * A[i, 1, 1]
invs[i, 1, 1] = invdets[i] * A[i, 0, 0]
invs[i, 0, 1] = -invdets[i] * A[i, 0, 1]
invs[i, 1, 0] = -invdets[i] * A[i, 1, 0]
return invs
@njit(cache=True)
def inv3(A):
"""Compute the inverses of a series of 3x3 matrices.
Parameters
----------
A : nx3x3 array_like
Arrays to compute inverses of
Returns
-------
invs : nx3x3 array
Matrix inverses
"""
invdets = 1.0 / det3(A)
n = len(invdets)
invs = np.zeros((n, 3, 3))
for i in range(n):
invs[i, 0, 0] = invdets[i] * (A[i, 1, 1] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 1])
invs[i, 0, 1] = -invdets[i] * (A[i, 0, 1] * A[i, 2, 2] - A[i, 0, 2] * A[i, 2, 1])
invs[i, 0, 2] = invdets[i] * (A[i, 0, 1] * A[i, 1, 2] - A[i, 0, 2] * A[i, 1, 1])
invs[i, 1, 0] = -invdets[i] * (A[i, 1, 0] * A[i, 2, 2] - A[i, 1, 2] * A[i, 2, 0])
invs[i, 1, 1] = invdets[i] * (A[i, 0, 0] * A[i, 2, 2] - A[i, 0, 2] * A[i, 2, 0])
invs[i, 1, 2] = -invdets[i] * (A[i, 0, 0] * A[i, 1, 2] - A[i, 0, 2] * A[i, 1, 0])
invs[i, 2, 0] = invdets[i] * (A[i, 1, 0] * A[i, 2, 1] - A[i, 1, 1] * A[i, 2, 0])
invs[i, 2, 1] = -invdets[i] * (A[i, 0, 0] * A[i, 2, 1] - A[i, 0, 1] * A[i, 2, 0])
invs[i, 2, 2] = invdets[i] * (A[i, 0, 0] * A[i, 1, 1] - A[i, 0, 1] * A[i, 1, 0])
return invs
if __name__ == "__main__":
np.random.seed(0)
A2 = np.random.rand(1000, 2, 2)
A3 = np.random.rand(1000, 3, 3)
dets2 = det2(A2)
dets3 = det3(A3)
invs2 = inv2(A2)
invs3 = inv3(A3)
# Check error between this implementation and numpy, use hybrid error measure (f_ref - f_test)/(f_ref + 1) which
# measures the relative error for large numbers and absolute error for small numbers
errors = {}
errors["det2"] = np.linalg.norm((dets2 - np.linalg.det(A2)) / (dets2 + 1.0))
errors["det3"] = np.linalg.norm((dets3 - np.linalg.det(A3)) / (dets3 + 1.0))
errors["inv2"] = np.linalg.norm((invs2 - np.linalg.inv(A2)) / (invs2 + 1.0))
errors["inv3"] = np.linalg.norm((invs3 - | np.linalg.inv(A3) | numpy.linalg.inv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# imports
import numpy as np
import numpy.linalg as npla
import scipy as sp
import matplotlib.pyplot as plt
def identity_vf(M, N, RM=None, RN=None):
"""Get vector field for the identity transformation.
This returns the vector field (tau_u, tau_v) corresponding to the identity
transformation, which maps the image plane to itself.
For more details on these vector fields, see the doc for affine_to_vf
inputs:
--------
M : int
vertical (number of rows) size of image plane being worked with
N : int
horizontal (number of cols) size of image plane being worked with
RM : int (optional)
number of points in the M direction desired. by default, this is M,
giving the identity transformation. when a number other than M is
provided, this corresponds to a resampling in the vertical direction.
(we put this operation in this function because it is so naturally
related)
RN : int (optional)
number of points in the N direction desired. by default, this is N,
giving the identity transformation. when a number other than N is
provided, this corresponds to a resampling in the horizontal direction.
(we put this operation in this function because it is so naturally
related)
outputs:
-------
eu : numpy.ndarray (size (M, N))
horizontal component of vector field corresponding to (I, 0)
ev : numpy.ndarray (size (M, N))
vertical component of vector field corresponding to (I, 0)
"""
if RM is None:
RM = M
if RN is None:
RN = N
m_vec = np.linspace(0, M-1, RM)
n_vec = np.linspace(0, N-1, RN)
eu = np.dot(m_vec[:,np.newaxis], np.ones(RN)[:,np.newaxis].T)
ev = np.dot(np.ones(RM)[:,np.newaxis], n_vec[:,np.newaxis].T)
return (eu, ev)
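# Example (sketch): on a 3x4 plane the identity field is just the index grids,
# so eu[i, j] == i and ev[i, j] == j; warping an image with this field leaves
# it unchanged. Passing RM or RN different from M or N instead yields an evenly
# spaced resampling grid over the same plane.
# eu, ev = identity_vf(3, 4)   # eu[:, 0] == [0, 1, 2], ev[0, :] == [0, 1, 2, 3]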
def get_default_pgd_dict(**kwargs):
"""Get default parameter dictionary for proximal gradient descent solvers
Valid key-value pairs are:
init_pt = function with two arguments (m, n), two return values (numpy
arrays of size (m, n))
initial iterate to start GD at, represented as a function: must be a
function with two arguments (m, n), the first of which represents image
height and the second of which represents image width; and must return
a tuple of two numpy arrays, each of size m, n, corresponding to the
initial deformation field
center : numpy array of shape (2,)
Denotes an optional (set to np.array([0,0]) by default) center
coordinate to use when solving the parametric version of the problem
(parametric = True below). All affine transformations computed then
have the form A * ( [i,j] - center ) + center + b, where A may have
more structure if certain values of motion_model is
set. This kind of reparameterization does not make a difference in the
nonparametric version of the problem, so nothing is implemented for
this case.
sigma : float (positive)
Bandwidth parameter in the gaussian filter used for the cost smoothing.
(larger -> smaller cutoff frequency, i.e. more aggressive filtering)
See gaussian_filter_2d
sigma0 : float (positive)
Bandwidth parameter in the gaussian filter used for complementary
smoothing in registration_l2_spike.
(larger -> smaller cutoff frequency, i.e. more aggressive filtering)
See gaussian_filter_2d
sigma_scene : float (positive)
Bandwidth parameter in the gaussian filter used in scene smoothing in
registration_l2_bbg. (larger -> smaller cutoff frequency, i.e. more
aggressive filtering) See gaussian_filter_2d
window : NoneType or numpy array of size (m, n)
Either None, if no window is to be used, or an array of size (m, n)
(same as image size), denoting the cost window function to be applied
(l2 error on residual is filtered, then windowed, before computing).
NOTE: current implementation makes window independent of any setting of
the parameter center specified above
max_iter : int
Maximum number of iterations to run PGD for
tol : float (positive)
Minimum relative tolerance before exiting optimization: optimization
stops if the absolute difference between the loss at successive
iterations is below this threshold.
step : float (positive)
Step size. Currently using constant-step gradient descent
lam : float (positive)
Regularization weight (multiplicative constant on the regularization
term in the loss)
use_nesterov : bool
Whether or not to use Nesterov accelerated gradient descent
use_restarting : bool
Whether or not to use adaptive restarted Nesterov accelerated gradient
descent. Speeds things up significantly, but maybe does not work well
out of the box with proximal iteration
motion_model : string (default 'nonparametric')
Sets the motion model that the registration algorithm will use (i.e.
what constraints are enforced on the transformation vector field).
Values that are implemented are:
'translation'
transformation vector field is constrained to be translational (a
pixel shift of the input). 2-dimensional.
'rigid'
transformation vector field is constrained to be a rigid motion / a
euclidean transformation (i.e. a combination of a
positively-oriented rotation and a translation). 3-dimensional.
'similarity'
transformation vector field is constrained to be a similarity
transformation (i.e. a combination of a global dilation and a
translation). 4-dimensional.
'affine'
transformation vector field is constrained to be an affine
translation (i.e. a combination of a linear map and a translation).
6-dimensional.
'nonparametric'
transformation vector field is allowed to be completely general,
but regularization is added to the gradient descent solver via a
complexity penalty, and the solver runs proximal gradient descent
instead. (see e.g. entry for lambda for more info on associated
parameters).
gamma : float (min 0, max 1)
Nesterov accelerated GD momentum parameter. 0 corresponds to the
"usual" Nesterov AGD. 1 corresponds to "vanilla" GD. The optimal value
for a given problem is the reciprocal condition number. Setting this to
1 is implemented differently from setting use_nesterov to False (the
algorithm is the same; but the former is slower)
theta : float
initial momentum term weight; typically 1
precondition : bool
Whether or not to use a preconditioner (divide by some scalars on each
component of the gradient) for the A and b gradients in parametric
motion models (see motion_model)..
epoch_len : int (positive)
Length of an epoch; used for printing status messages
quiet : bool
If True, nothing will be printed while optimizing.
record_movie : bool
If True, a "movie" gets created from the optimization trajectory and
logged to disk (see movie_fn param). Requires moviepy to be installed
(easy with conda-forge). Potentially requires a ton of memory to store
all the frames (all iterates)
movie_fn : string
If record_movie is True, this gives the location on disk where the
movie will be saved
movie_fps : int
If record_movie is True, this gives the fps of the output movie.
window_pad_size : int
If record_movie is true, denotes the thickness of the border
designating the window to be output in the movie
frame_printing_stride : int
If record_movie is true, denotes the interval at which log information
will be written to the movie (every frame_printing_stride frames, log
info is written; the actual movie fps is set by movie_fps above)
font_size : int
If record_movie is true, denotes the font size used for printing
logging information to the output window. Set smaller for smaller-size
images.
NOTE: No value checking is implemented right now.
Inputs:
--------
kwargs :
any provided key-value pairs will be added to the parameter dictionary,
replacing any defaults they overlap with
Outputs:
--------
param_dict : dict
dict of parameters to be used for a proximal gd solver. Pass these to
e.g. nonparametric_registration or similar solvers.
"""
param_dict = {}
# Problem parameters: filter bandwidths, etc
param_dict['sigma'] = 3
param_dict['sigma_scene'] = 1.5
param_dict['sigma0'] = 1
param_dict['init_pt'] = lambda m, n: identity_vf(m, n)
param_dict['motion_model'] = 'nonparametric'
param_dict['window'] = None
param_dict['center'] = np.zeros((2,))
# Solver parameters: tolerances, stopping conditions, step size, etc
param_dict['max_iter'] = int(1e4)
param_dict['tol'] = 1e-4
param_dict['step'] = 1
param_dict['lam'] = 1
param_dict['use_nesterov'] = False
param_dict['use_restarting'] = False
param_dict['gamma'] = 0
param_dict['theta'] = 1
param_dict['precondition'] = True
# Logging parameters
param_dict['epoch_len'] = 50
param_dict['quiet'] = False
param_dict['record_movie'] = False
param_dict['movie_fn'] = ''
param_dict['movie_fps'] = 30
param_dict['window_pad_size'] = 5
param_dict['frame_printing_stride'] = 10 # 3 times per second
param_dict['font_size'] = 30
param_dict['movie_gt'] = None
param_dict['movie_proc_func'] = None
# Legacy/compatibility stuff
param_dict['parametric'] = False
param_dict['translation_mode'] = False
param_dict['rigid_motion_mode'] = False
param_dict['similarity_transform_mode'] = False
# Add user-provided params
for arg in kwargs.keys():
param_dict[arg] = kwargs[arg]
return param_dict
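# Example usage (sketch): any keyword argument overrides the corresponding
# default, so a quieter, shorter parametric run could be configured as
# params = get_default_pgd_dict(motion_model='affine', max_iter=500, quiet=True)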
def affine_to_vf(A, b, M, N):
"""Given (A, b), return associated vector field on M x N image plane
An affine transformation is parameterized by an invertible matrix A and a
vector b, and sends a 2D vector x to the 2D vector A*x + b. In the image
context, x lies in the M by N image plane. This function takes the pair (A,
b), and returns the associated vector field (tau_u, tau_v): here tau_u and
tau_v are M by N matrices such that (tau_u)_{ij} = (1st row of A) * [i, j]
+ b_1, and (tau_v)_{ij} = (2nd row of A) * [i, j] + b_2. The matrices thus
represent how the affine transformation (A, b) deforms the sampled image
plane.
Thus in general tau_u and tau_v have entries that may not be contained in
the M by N image plane and may not be integers. These issues of boundary
effects and interpolation effects are to be handled by other functions.
inputs:
--------
A : numpy.ndarray (size (2, 2))
GL(2) part of affine transformation to apply
b : numpy.ndarray (size (2,))
translation part of affine transformation to apply
M : int
vertical (number of rows) size of image plane being worked with
N : int
horizontal (number of cols) size of image plane being worked with
outputs:
-------
tau_u : numpy.ndarray (size (M, N))
horizontal component of vector field corresponding to (A, b)
tau_v : numpy.ndarray (size (M, N))
vertical component of vector field corresponding to (A, b)
"""
# Do it with broadcasting tricks (dunno if it's faster)
A0 = A[:,0]
A1 = A[:,1]
eu = np.dot(np.arange(M)[:,np.newaxis], np.ones(N)[:,np.newaxis].T)
ev = np.dot(np.ones(M)[:,np.newaxis], np.arange(N)[:,np.newaxis].T)
tau = A0[np.newaxis, np.newaxis, :] * eu[..., np.newaxis] + \
A1[np.newaxis, np.newaxis, :] * ev[..., np.newaxis] + \
b[np.newaxis, np.newaxis, :] * np.ones((M, N, 1))
return (tau[:,:,0], tau[:,:,1])
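# --- Illustrative sketch (added): sanity check of affine_to_vf --------------
# For A = I and b = 0 the returned field should reproduce the sampling grid
# itself, i.e. tau_u[i, j] = i and tau_v[i, j] = j; only functions defined in
# this module are used.
def _check_affine_to_vf_identity(M=4, N=5):
    tau_u, tau_v = affine_to_vf(np.eye(2), np.zeros((2,)), M, N)
    rows, cols = np.meshgrid(np.arange(M), np.arange(N), indexing='ij')
    return np.allclose(tau_u, rows) and np.allclose(tau_v, cols)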
def vf_to_affine(tau_u, tau_v, ctr):
"""Get affine transformation corresponding to a vector field.
General vector fields need not correspond to a particular affine
transformation. In our formulation, we parameterize affine transforms as
tau_u = a * (m-ctr[0] * \One)\One\\adj
+ b * \One (n - ctr[1]*\One)\\adj
+ (c + ctr[0]) * \One\One\\adj,
and similarly for tau_v.
We use the fact that this parameterization is used here to recover the
parameters of the affine transform using simple summing/differencing.
We need ctr as an input because the translation parameter is ambiguous
without knowing the center. However, we can always recover the parameters
of the transformation with respect to any fixed center (say, ctr = zero).
In general, if one provides ctr=np.zeros((2,)) to this function, it is a
left inverse of affine_to_vf called with the correct M, N parameters.
inputs:
--------
tau_u, tau_v : M by N numpy arrays
u and v (resp.) components of the transformation field.
ctr : (2,) shape numpy array
center parameter that the transform was computed with. see center
option in registration_l2. translation parameter is ambiguous without
knowing the center.
outputs:
--------
A : (2,2) numpy array
The A matrix corresponding to the affine transform. Follows our
conventions for how we compute with vector fields in determining how
the entries of A are determined
b : (2,) shape numpy array
The translation parameter corresponding to the affine transform.
Follows standard coordinates on the image plane (as elsewhere).
"""
M, N = tau_u.shape
a00 = tau_u[1, 0] - tau_u[0, 0]
a01 = tau_u[0, 1] - tau_u[0, 0]
a10 = tau_v[1, 0] - tau_v[0, 0]
a11 = tau_v[0, 1] - tau_v[0, 0]
A = np.array([[a00, a01], [a10, a11]])
u_sum = np.sum(tau_u)
v_sum = np.sum(tau_v)
m_sum = np.sum(np.arange(M) - ctr[0] * np.ones((M,)))
n_sum = np.sum(np.arange(N) - ctr[1] * np.ones((N,)))
b0 = (u_sum - a00 * m_sum * N - a01 * M * n_sum) / M / N - ctr[0]
b1 = (v_sum - a10 * m_sum * N - a11 * M * n_sum) / M / N - ctr[1]
b = np.array([b0, b1])
return A, b
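# --- Illustrative sketch (added): round-trip consistency check --------------
# With ctr = 0, vf_to_affine is a left inverse of affine_to_vf (as stated in
# the docstring above); this helper verifies that on a random transform.
def _check_affine_roundtrip(M=32, N=48, seed=0):
    rng = np.random.default_rng(seed)
    A = np.eye(2) + 0.1 * rng.standard_normal((2, 2))
    b = rng.standard_normal(2)
    tau_u, tau_v = affine_to_vf(A, b, M, N)
    A_rec, b_rec = vf_to_affine(tau_u, tau_v, np.zeros((2,)))
    return np.allclose(A, A_rec) and np.allclose(b, b_rec)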
def registration_l2_exp(Y, X, W, Om, center, transform_mode, optim_vars, param_dict=get_default_pgd_dict(), visualize=False):
"""
This is yet another version of the cost-smoothed motif detection, in which we also infer
a (constant) background around the motif
Inputs:
Y -- input image (m x n x c array)
X -- motif, embedded into an image of the same size as the target image
W -- weighting mask applied to the smoothed residual
Om -- support of the motif
center -- center point used when parameterizing the transform
transform_mode -- 'affine', 'similarity', 'euclidean', 'translation'
optim_vars -- initial transform parameters; the expected form depends on
    transform_mode ([A, b], [dil, phi, b], [phi, b] or [b] respectively)
param_dict -- solver parameters (see get_default_pgd_dict)
visualize -- if True, periodically display the current registration
Outputs:
tau_u, tau_v -- the fitted vector field
optim_vars_new -- updated transform parameters (same form as optim_vars)
error, Rvals -- per-iteration objective values and masked correlations
"""
from time import perf_counter
vecnorm_2 = lambda A: np.linalg.norm( A.ravel(), 2 )
m, n, c = Y.shape
# Gradient descent parameters
MAX_ITER = param_dict['max_iter']
TOL = param_dict['tol']
step = param_dict['step']
if transform_mode == 'affine':
[A, b] = optim_vars
elif transform_mode == 'similarity':
[dil, phi, b] = optim_vars
A = dil * np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'euclidean':
[phi, b] = optim_vars
A = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'translation':
[b] = optim_vars
A = np.eye(2)
else:
raise ValueError('Wrong transform mode.')
# initialization (here, affine motion mode)
corr = np.dot(np.eye(2) - A, center)
tau_u, tau_v = affine_to_vf(A, b + corr, m, n)
# External smoothing: calculate gaussian weights
g = gaussian_filter_2d(m,n,sigma_u=param_dict['sigma'])
g = g / np.sum(g)
h = gaussian_filter_2d(m,n,sigma_u=5*param_dict['sigma'])
h = h / np.sum(h)
# Calculate initial error
error = np.inf * np.ones( (MAX_ITER,) )
Rvals = np.zeros( (MAX_ITER,) )
# initial interpolated image and error
cur_Y = image_interpolation_bicubic(Y, tau_u, tau_v )
# initialize the background
beta0 = cconv_fourier(h[...,np.newaxis], cur_Y - X)
beta = cconv_fourier(h[...,np.newaxis], beta0)
cur_X = np.zeros((m,n,c))
cur_X = (1-Om)*beta + Om*X
FWres = W * cconv_fourier(g[...,np.newaxis], cur_Y-cur_X)
grad_A = np.zeros( (2,2) )
grad_b = np.zeros( (2,) )
m_vec = np.arange(m) - center[0]
n_vec = np.arange(n) - center[1]
if param_dict['use_nesterov'] is False:
for idx in range(MAX_ITER):
# Get the basic gradient ingredients
Y_dot_u = dimage_interpolation_bicubic_dtau1(Y, tau_u, tau_v)
Y_dot_v = dimage_interpolation_bicubic_dtau2(Y, tau_u, tau_v)
# Get the "tau gradient" part.
# All the translation-dependent parts of the cost can be handled
# here, so that the parametric parts are just the same as always.
dphi_dY = cconv_fourier(dsp_flip(g)[...,np.newaxis], FWres)
tau_u_dot = np.sum(dphi_dY * Y_dot_u, -1)
tau_v_dot = np.sum(dphi_dY * Y_dot_v, -1)
# Get parametric part gradients
# Convert to parametric gradients
# Get row and col sums
tau_u_dot_rowsum = np.sum(tau_u_dot, 1)
tau_u_dot_colsum = np.sum(tau_u_dot, 0)
tau_v_dot_rowsum = np.sum(tau_v_dot, 1)
tau_v_dot_colsum = np.sum(tau_v_dot, 0)
# Put derivs
# These need to be correctly localized to the region of interest
grad_A[0, 0] = np.dot(tau_u_dot_rowsum, m_vec)
grad_A[1, 0] = np.dot(tau_v_dot_rowsum, m_vec)
grad_A[0, 1] = np.dot(tau_u_dot_colsum, n_vec)
grad_A[1, 1] = np.dot(tau_v_dot_colsum, n_vec)
grad_b[0] = np.sum(tau_u_dot_rowsum)
grad_b[1] = np.sum(tau_v_dot_rowsum)
# Precondition for crab body motif
grad_A /= 100
dphi_dbeta0 = -cconv_fourier( dsp_flip(h)[...,np.newaxis], (1-Om) * dphi_dY )
# Now update parameters
grad_norm = np.sqrt(npla.norm(grad_A.ravel(),2)**2 + npla.norm(grad_b,ord=2)**2)
#phi = phi - step * grad_phi / 86
if idx > 5:
if transform_mode == 'affine':
A = A - step * grad_A
b = b - step * grad_b
elif transform_mode == 'similarity':
grad_dil, grad_phi, grad_b = l2err_sim_grad(dil, phi, grad_A, grad_b)
dil = dil - step * grad_dil * 0.1
phi = phi - step * grad_phi
b = b - step * grad_b
A = dil * np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'euclidean':
grad_phi, grad_b = l2err_se_grad(phi, grad_A, grad_b)
phi = phi - step * grad_phi
b = b - step * grad_b
A = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
elif transform_mode == 'translation':
b = b - step * grad_b
A = np.eye(2)
beta0 = beta0 - 25 * step * dphi_dbeta0
corr = np.dot(np.eye(2) - A, center)
tau_u, tau_v = affine_to_vf(A, b + corr, m, n)
# Bookkeeping (losses and exit check)
cur_Y = image_interpolation_bicubic(Y, tau_u, tau_v )
beta = cconv_fourier(h[...,np.newaxis], beta0)
cur_X = np.zeros((m,n,c))
cur_X = (1-Om)*beta + Om*X
FWres = W * cconv_fourier(g[...,np.newaxis], cur_Y-cur_X)
error[idx] = .5 * np.sum(FWres ** 2)
cur_X_wd = cur_X * Om
for ic in range(3):
cur_X_wd[:,:,ic] -= np.mean(cur_X_wd[:,:,ic][cur_X_wd[:,:,ic] > 0])
cur_Y_wd = cur_Y * Om
for ic in range(3):
cur_Y_wd[:,:,ic] -= np.mean(cur_Y_wd[:,:,ic][cur_Y_wd[:,:,ic] > 0])
Rvals[idx] = np.sum(Om * cur_X_wd * cur_Y_wd) / ( vecnorm_2(Om * cur_X_wd) * vecnorm_2(Om * cur_Y_wd) )
if idx > 0 and error[idx] > error[idx-1]:
# print('Nonmonotone, cutting step')
step = step / 2
else:
step = step * 1.01
cur_Y_disp = cur_Y.copy()
cur_Y_disp[:,:,1] = Om[:,:,1]
cur_Y_disp[:,:,2] = Om[:,:,2]
loopStop = perf_counter()
if grad_norm < TOL:
if param_dict['quiet'] is False:
print(f'Met objective at iteration {idx}, '
'exiting...')
break
if (idx % param_dict['epoch_len']) == 0:
if param_dict['quiet'] is False:
print('iter {:d} objective {:.4e} correlation {:.4f}'.format(idx, error[idx], Rvals[idx]))
if visualize is True:
if (idx % 10) == 0:
if param_dict['quiet'] is False:
plt.imshow(cur_Y_disp)
plt.show()
# The else branch below is reserved for Nesterov-accelerated GD, which this test function does not implement.
else:
raise NotImplementedError('Test function only implements vanilla GD')
if transform_mode == 'affine':
optim_vars_new = [A, b]
elif transform_mode == 'similarity':
optim_vars_new = [dil, phi, b]
elif transform_mode == 'euclidean':
optim_vars_new = [phi, b]
elif transform_mode == 'translation':
optim_vars_new = [b]
return tau_u, tau_v, optim_vars_new, error, Rvals
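# --- Illustrative sketch (added; not part of the original module) -----------
# Minimal synthetic call of registration_l2_exp under the 'translation' model.
# It assumes the module-level helpers used in the function body above
# (gaussian_filter_2d, cconv_fourier, dsp_flip, image_interpolation_bicubic,
# and the npla alias for numpy.linalg) are defined or imported earlier in this
# file; the shapes below are assumptions, not requirements from the original.
def _example_registration_translation(m=64, n=64, seed=0):
    rng = np.random.default_rng(seed)
    Y = rng.random((m, n, 3))
    Om = np.zeros((m, n, 3))
    Om[20:44, 20:44, :] = 1.0
    X = Y * Om                      # motif already in place, so b should stay near 0
    W = np.ones((m, n, 3))
    params = get_default_pgd_dict(max_iter=20, quiet=True)
    return registration_l2_exp(Y, X, W, Om, np.array([m / 2, n / 2]),
                               'translation', [np.zeros(2)], param_dict=params)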
def dilate_support(Om,sigma):
    M = Om.shape[0]
    N = Om.shape[1]
    psi = gaussian_filter_2d(M,N,sigma_u=sigma)
    delta = np.exp(-2) * ((2.0*np.pi*sigma) ** -.5)
    Om_tilde = cconv_fourier(psi[...,np.newaxis],Om)
    # Vectorized thresholding (same behavior as the original pixel loop):
    # wherever the smoothed support (first channel) drops below delta, zero out
    # the three channels; otherwise set them to 1.
    mask = (Om_tilde[:, :, 0] >= delta).astype(Om_tilde.dtype)
    Om_tilde[:, :, 0] = mask
    Om_tilde[:, :, 1] = mask
    Om_tilde[:, :, 2] = mask
    return Om_tilde
def rotation_mat(theta):
sin = np.sin(theta)
cos = np.cos(theta)
mat = np.array([[cos, -sin], [sin, cos]])
return mat
def l2err_se_grad(phi, grad_A, grad_b):
""" Calculate loss gradient in SE registration prob using aff gradient
This gradient is for the parametric version of the problem, with the
parameterization in terms of the special euclidean group (oriented rigid
motions of the plane).
It wraps l2err_aff_grad, since chain rule lets us easily calculate this
problem's gradient using the affine problem's gradient.
Implementation ideas:
- for ease of implementation, require the current angle phi as an input,
although it could probably be determined from tau_u and tau_v in general.
Inputs:
phi : angle parameter of matrix part of current rigid motion iterate.
grad_A : gradient of the cost with respect to A (matrix parameter of
affine transform) (output from l2err_aff_grad)
grad_b : gradient of the cost with respect to b (translation parameter
of affine transform) (output from l2err_aff_grad)
Outputs:
grad_phi : gradient of the cost with respect to phi (angular parameter of
rotational part of special euclidean transform)
grad_b : gradient of the cost with respect to b (translation parameter
of rigid motion)
"""
# rigid motion derivative matrix
G = np.array([[-np.sin(phi), -np.cos(phi)], [np.cos(phi), -np.sin(phi)]])
# Put derivatives
grad_phi = np.sum(G * grad_A)
return grad_phi, grad_b
def l2err_sim_grad(dil, phi, grad_A, grad_b):
""" Calculate loss gradient in similarity xform registration prob
This gradient is for the parametric version of the problem, with the
parameterization in terms of the similarity transformations (rigid motions
with the rotation multiplied by a scale parameter).
It wraps l2err_aff_grad, since chain rule lets us easily calculate this
problem's gradient using the affine problem's gradient.
Implementation ideas:
- for ease of implementation, require the current angle phi as an input,
although it could probably be determined from tau_u and tau_v in general.
Inputs:
dil : dilation (scale) parameter of matrix part of current similarity
transform iterate.
phi : angle parameter of matrix part of current rigid motion iterate.
grad_A : gradient of the cost with respect to A (matrix parameter of
affine transform) (output from l2err_aff_grad)
grad_b : gradient of the cost with respect to b (translation parameter
of affine transform) (output from l2err_aff_grad)
Outputs:
grad_dil : gradient of the cost with respect to dil (dilation/scale
parameter of similarity transform)
grad_phi : gradient of the cost with respect to phi (angular parameter of
rotational part of the similarity transform)
grad_b : gradient of the cost with respect to b (translation parameter
of rigid motion)
"""
# rigid motion matrix
G = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
# rigid motion derivative matrix
Gdot = np.array([[-np.sin(phi), -np.cos(phi)], [np.cos(phi), -np.sin(phi)]])
# Put derivatives
grad_dil = np.sum(G * grad_A)
grad_phi = dil * np.sum(Gdot * grad_A)
return grad_dil, grad_phi, grad_b
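# --- Derivation note (added for clarity) -------------------------------------
# With A(dil, phi) = dil * G(phi), where G is the rotation matrix, the chain
# rule gives dA/d(dil) = G and dA/d(phi) = dil * dG/d(phi), so the scalar
# gradients are Frobenius inner products with grad_A:
#   grad_dil = <G, grad_A>,    grad_phi = dil * <dG/dphi, grad_A>,
# which is what the two np.sum(... * grad_A) lines above compute.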
def apply_random_transform( X0, Om0, c, mode, s_dist, phi_dist, theta_dist, b_dist, return_params=True ):
N0 = X0.shape[0]
N1 = X0.shape[1]
C = X0.shape[2]
tf_params = sample_random_transform( mode, s_dist, phi_dist, theta_dist, b_dist )
A = tf_params[0]
b = tf_params[1]
# apply the transformation
corr = np.dot(np.eye(2) - A, c)
(tau_u, tau_v) = affine_to_vf(A, b + corr, N0, N1)
X = image_interpolation_bicubic(X0, tau_u, tau_v)
Om = image_interpolation_bicubic(Om0, tau_u, tau_v)
if return_params is False:
return X, Om
else:
return X, Om, tf_params
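# --- Illustrative sketch (added): draw and apply one random affine warp ------
# X0 is an (M, N, C) image and Om0 a matching support mask; the distribution
# arguments are (min, max) pairs. This relies on image_interpolation_bicubic
# being available in this module, as apply_random_transform above does; the
# particular ranges below are just example values.
def _example_random_affine(X0, Om0):
    c = np.array([X0.shape[0] / 2, X0.shape[1] / 2])
    return apply_random_transform(X0, Om0, c, 'affine',
                                  s_dist=(0.9, 1.1), phi_dist=(-0.2, 0.2),
                                  theta_dist=(-0.2, 0.2), b_dist=(-3.0, 3.0))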
def sample_random_transform( mode, s_dist, phi_dist, theta_dist, b_dist ):
s_min = s_dist[0]
s_max = s_dist[1]
phi_min = phi_dist[0]
phi_max = phi_dist[1]
theta_min = theta_dist[0]
theta_max = theta_dist[1]
b_min = b_dist[0]
b_max = b_dist[1]
b = np.zeros((2,))
b[0] = np.random.uniform(b_min,b_max)
b[1] = np.random.uniform(b_min,b_max)
if mode == 'affine':
s1 = np.random.uniform(s_min,s_max)
s2 = np.random.uniform(s_min,s_max)
phi = np.random.uniform(phi_min,phi_max)
theta = np.random.uniform(theta_min,theta_max)
U = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
V = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
S = np.diag([s1, s2])
A = np.matmul( U, np.matmul(S,V.transpose() ) )
return [A, b, None, None]
elif mode == 'similarity':
dil = np.random.uniform(s_min,s_max)
phi = np.random.uniform(phi_min,phi_max)
A = dil * np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
import tensorflow as tf
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
from IPython import display
import pdb
from tensorflow.contrib.layers import flatten
import random
class LeNet:
def __init__(self, x, y_, doDecom = [True, True, True, True, False]):
self.build(x, y_)
self.doDecom = doDecom
def build(self, x, y_):
in_dim = int(x.get_shape()[1])
out_dim = int(y_.get_shape()[1])
self.x = x
# Hyperparameters
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
self.conv1_w = tf.Variable(tf.truncated_normal(shape = [5,5,1,6],mean = mu, stddev = sigma),name = 'conv1_w')
self.conv1_b = tf.Variable(tf.zeros(6),name = 'conv1_b')
self.conv1 = tf.nn.conv2d(self.x,self.conv1_w, strides = [1,1,1,1], padding = 'VALID') + self.conv1_b
self.relu1 = tf.nn.relu(self.conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
self.pool_1 = tf.nn.max_pool(self.relu1,ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
# Layer 2: Convolutional. Output = 10x10x16.
self.conv2_w = tf.Variable(tf.truncated_normal(shape = [5,5,6,16], mean = mu, stddev = sigma),name = 'conv2_w')
self.conv2_b = tf.Variable(tf.zeros(16),name = 'conv2_b')
self.conv2 = tf.nn.conv2d(self.pool_1, self.conv2_w, strides = [1,1,1,1], padding = 'VALID') + self.conv2_b
self.relu2 = tf.nn.relu(self.conv2)
# Pooling. Input = 10x10x16. Output = 5x5x16.
self.pool_2 = tf.nn.max_pool(self.relu2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
# Flatten. Input = 5x5x16. Output = 400.
self.fla = flatten(self.pool_2)
# Layer 3: Fully Connected. Input = 400. Output = 120.
self.fc1_w = tf.Variable(tf.truncated_normal(shape = (400,120), mean = mu, stddev = sigma),name = 'fc1_w')
self.fc1_b = tf.Variable(tf.zeros(120),name = 'fc1_b')
self.fc1 = tf.matmul(self.fla,self.fc1_w) + self.fc1_b
self.relu3 = tf.nn.relu(self.fc1)
# Layer 4: Fully Connected. Input = 120. Output = 84.
self.fc2_w = tf.Variable(tf.truncated_normal(shape = (120,84), mean = mu, stddev = sigma),name = 'fc2_w')
self.fc2_b = tf.Variable(tf.zeros(84),name = 'fc2_b')
self.fc2 = tf.matmul(self.relu3,self.fc2_w) + self.fc2_b
self.relu4 = tf.nn.relu(self.fc2)
# Layer 5: Fully Connected. Input = 84. Output = number of features.
self.fc3_w = tf.Variable(tf.truncated_normal(shape = (84,out_dim), mean = mu , stddev = sigma),name = 'fc3_w')
self.fc3_b = tf.Variable(tf.zeros(out_dim),name = 'fc3_b')
self.y = tf.matmul(self.relu4, self.fc3_w) + self.fc3_b
# lists
self.var_list = [self.conv1_w, self.conv1_b, self.conv2_w, self.conv2_b,
self.fc1_w, self.fc1_b, self.fc2_w, self.fc2_b,
self.fc3_w, self.fc3_b]
self.hidden_list = [self.conv1, self.conv2, self.fc1, self.fc2, self.y]
self.input_list = [self.x, self.pool_1, self.fla, self.relu3, self.relu4]
# vanilla single-task loss
one_hot_targets = y_
self.cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=one_hot_targets , logits=self.y))
# performance metrics
correct_prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(y_,1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
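# --- Illustrative usage sketch (added; comment only, TF1-style) --------------
# The placeholder shapes below are assumptions taken from the layer comments
# above (32x32x1 inputs, one-hot labels):
#   x  = tf.placeholder(tf.float32, [None, 32, 32, 1])
#   y_ = tf.placeholder(tf.float32, [None, 10])
#   net = LeNet(x, y_)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       acc = sess.run(net.accuracy, feed_dict={x: batch_images, y_: batch_labels})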
def rebuild_decom(self, x, y_):
in_dim = int(x.get_shape()[1])
out_dim = int(y_.get_shape()[1])
self.x = x
self.var_list = []
pos = 0
# Hyperparameters
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
if self.doDecom[0]:
self.conv1_w1 = tf.Variable(tf.convert_to_tensor(np.float32(np.expand_dims(np.expand_dims(self.weights_svd[pos],axis=0),axis=0))), trainable = False)
self.conv1_w2 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+1])))
self.conv1_w3 = tf.Variable(tf.convert_to_tensor(np.float32(np.expand_dims(np.expand_dims(self.weights_svd[pos+2],axis=0),axis=0))), trainable = False)
self.conv1_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[1])))
self.conv1 = tf.nn.conv2d(self.x,self.conv1_w1, strides = [1,1,1,1], padding = 'VALID')
self.conv1 = tf.nn.conv2d(self.conv1,self.conv1_w2, strides = [1,1,1,1], padding = 'VALID')
self.conv1 = tf.nn.conv2d(self.conv1,self.conv1_w3, strides = [1,1,1,1], padding = 'VALID') + self.conv1_b
self.relu1 = tf.nn.relu(self.conv1)
self.pool_1 = tf.nn.max_pool(self.relu1,ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
self.var_list.append(self.conv1_w1)
self.var_list.append(self.conv1_w2)
self.var_list.append(self.conv1_w3)
self.var_list.append(self.conv1_b)
pos = pos + 3
else:
self.conv1_w = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])))
self.conv1_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[1])))
self.conv1 = tf.nn.conv2d(self.x,self.conv1_w, strides = [1,1,1,1], padding = 'VALID') + self.conv1_b
self.relu1 = tf.nn.relu(self.conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
self.pool_1 = tf.nn.max_pool(self.relu1,ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
self.var_list.append(self.conv1_w)
self.var_list.append(self.conv1_b)
pos = pos + 1
# Layer 2: Convolutional. Output = 10x10x16.
if self.doDecom[1]:
self.conv2_w1 = tf.Variable(tf.convert_to_tensor(np.float32(np.expand_dims(np.expand_dims(self.weights_svd[pos],axis=0),axis=0))), trainable = False)
self.conv2_w2 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+1])))
self.conv2_w3 = tf.Variable(tf.convert_to_tensor(np.float32(np.expand_dims(np.expand_dims(self.weights_svd[pos+2],axis=0),axis=0))), trainable = False)
self.conv2_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[3])))
self.conv2 = tf.nn.conv2d(self.pool_1,self.conv2_w1, strides = [1,1,1,1], padding = 'VALID')
self.conv2 = tf.nn.conv2d(self.conv2,self.conv2_w2, strides = [1,1,1,1], padding = 'VALID')
self.conv2 = tf.nn.conv2d(self.conv2,self.conv2_w3, strides = [1,1,1,1], padding = 'VALID') + self.conv2_b
self.relu2 = tf.nn.relu(self.conv2)
self.pool_2 = tf.nn.max_pool(self.relu2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
self.fla = flatten(self.pool_2)
self.var_list.append(self.conv2_w1)
self.var_list.append(self.conv2_w2)
self.var_list.append(self.conv2_w3)
self.var_list.append(self.conv2_b)
pos = pos + 3
else:
self.conv2_w = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])))
self.conv2_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[3])))
self.conv2 = tf.nn.conv2d(self.pool_1, self.conv2_w, strides = [1,1,1,1], padding = 'VALID') + self.conv2_b
self.relu2 = tf.nn.relu(self.conv2)
# Pooling. Input = 10x10x16. Output = 5x5x16.
self.pool_2 = tf.nn.max_pool(self.relu2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'VALID')
# Flatten. Input = 5x5x16. Output = 400.
self.fla = flatten(self.pool_2)
self.var_list.append(self.conv2_w)
self.var_list.append(self.conv2_b)
pos = pos + 1
# Layer 3: Fully Connected. Input = 400. Output = 120.
if self.doDecom[2]:
self.fc1_w1 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])), trainable = False)
self.fc1_w2 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+1])))
self.fc1_w3 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+2])), trainable = False)
self.fc1_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[5])))
self.fc1 = tf.matmul(tf.matmul(tf.matmul(self.fla,self.fc1_w1),self.fc1_w2),self.fc1_w3) + self.fc1_b
self.relu3 = tf.nn.relu(self.fc1)
self.var_list.append(self.fc1_w1)
self.var_list.append(self.fc1_w2)
self.var_list.append(self.fc1_w3)
self.var_list.append(self.fc1_b)
pos = pos + 3
else:
self.fc1_w = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])))
self.fc1_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[5])))
self.fc1 = tf.matmul(self.fla,self.fc1_w) + self.fc1_b
self.relu3 = tf.nn.relu(self.fc1)
self.var_list.append(self.fc1_w)
self.var_list.append(self.fc1_b)
pos = pos + 1
# Layer 4: Fully Connected. Input = 120. Output = 84.
if self.doDecom[3]:
self.fc2_w1 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])), trainable = False)
self.fc2_w2 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+1])))
self.fc2_w3 = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos+2])), trainable = False)
self.fc2_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[7])))
self.fc2 = tf.matmul(tf.matmul(tf.matmul(self.relu3,self.fc2_w1),self.fc2_w2),self.fc2_w3) + self.fc2_b
self.relu4 = tf.nn.relu(self.fc2)
self.var_list.append(self.fc2_w1)
self.var_list.append(self.fc2_w2)
self.var_list.append(self.fc2_w3)
self.var_list.append(self.fc2_b)
pos = pos + 3
else:
self.fc2_w = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_svd[pos])))
self.fc2_b = tf.Variable(tf.convert_to_tensor(np.float32(self.weights_w[7])))
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 6 01:21:42 2021
@author: dv516
"""
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from case_studies.Controller_tuning.Control_system import reactor_phi_2st, reactor_phi_2stNS
import matplotlib.pyplot as plt
def scaled_to_absolute(x, bounds):
absolute = bounds[:,0] + np.array(x)*(bounds[:,1] - bounds[:,0])
return absolute
def absolute_to_scaled(x, bounds):
scaled = (np.array(x) - bounds[:,0])/(bounds[:,1] - bounds[:,0])
return scaled
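# --- Illustrative sketch (added): scaled/absolute round trip -----------------
# bounds is a (d, 2) array of [lower, upper] rows, as used throughout this
# script; the numbers below are arbitrary example values.
def _check_scaling_roundtrip():
    bounds = np.array([[0.0, 2.0], [300.0, 400.0]])
    x_abs = np.array([1.0, 350.0])
    x_scaled = absolute_to_scaled(x_abs, bounds)        # -> [0.5, 0.5]
    return np.allclose(scaled_to_absolute(x_scaled, bounds), x_abs)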
def average_from_list(solutions_list):
N = len(solutions_list)
f_best_all = np.zeros((N, 100))
for i in range(N):
f_best = np.array(solutions_list[i]['f_best_so_far'])
x_ind = np.array(solutions_list[i]['samples_at_iteration'])
for j in range(100):
ind = np.where(x_ind <= j+1)
if len(ind[0]) == 0:
f_best_all[i, j] = f_best[0]
else:
f_best_all[i, j] = f_best[ind][-1]
f_median = np.median(f_best_all, axis = 0)
# f_av = np.average(f_best_all, axis = 0)
# f_std = np.std(f_best_all, axis = 0)
f_min = np.min(f_best_all, axis = 0)
f_max = np.max(f_best_all, axis = 0)
return f_best_all, f_median, f_min, f_max
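# --- Illustrative sketch (added): average_from_list on a tiny synthetic run --
# Shows how the best-so-far trace of each run is resampled onto a common grid
# of 100 function evaluations; the dictionary keys match those produced by the
# solver wrappers used below.
def _example_average_from_list():
    run = {'f_best_so_far': [5.0, 2.0, 1.0],
           'samples_at_iteration': [1, 40, 90]}
    f_all, f_med, f_min, f_max = average_from_list([run, run])
    return f_med[0], f_med[50], f_med[-1]   # 5.0, 2.0, 1.0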
def plot_reactor_respRand(pi, plot, method, bounds, noise, c, x0 = [.6, 310],
xref = [.666, 308.489], N=200, T=8, NS = False):
ax1, ax2, ax3, ax4 = plot
if not NS:
_, sys_resp, control_resp = reactor_phi_2st(pi, bounds, noise, x0 = x0, N = N, \
T = T, return_sys_resp = True)
else:
_, sys_resp, control_resp = reactor_phi_2stNS(pi, noise, x0 = x0, N = N, \
T = T, return_sys_resp = True)
x1 = np.array(sys_resp)[:,0] ; x2 = np.array(sys_resp)[:,1]
ax1.plot(np.arange(len(x1))/len(x1)*T, x1, c = c, label = method )
ax1.plot([0, T], [xref[0], xref[0]], '--k')
ax2.plot([0, T], [xref[1], xref[1]], '--k')
ax2.plot(np.arange(len(x2))/len(x2)*T, x2, c = c)
u1 = np.array(control_resp)[:,0] ; u2 = np.array(control_resp)[:,1]
ax3.plot(np.arange(len(u1))/len(u1)*T, u1, c = c, label = method )
ax4.plot(np.arange(len(u2))/len(u2)*T, u2, c=c)
return ax1, ax2, ax3, ax4
def fix_starting_points(complete_list, x0, init_out, only_starting_point = False):
if only_starting_point:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
complete_list[i] = dict_out
else:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
for j in range(1, N_eval):
if (g_arr[j] > 1e-3).any() or (init_out[0] < f_arr[j]):
dict_out['x_best_so_far'][j] = np.array(x0)
dict_out['f_best_so_far'][j] = init_out[0]
dict_out['g_best_so_far'][j] = np.array(init_out[1])
complete_list[i] = dict_out
return complete_list
def cost_control_noise(x, bounds_abs, noise, N_SAA, x0 = [.116, 368.489], \
N = 200, T = 20, NS = False):
f_SAA = 0 ; g_SAA = -np.inf
if not NS:
f = lambda x: reactor_phi_2st(x, bounds_abs, noise, x0 = x0, N = N, \
T = T, return_sys_resp = False)
else:
f = lambda x: reactor_phi_2stNS(x, noise, x0 = x0, N = N, \
T = T, return_sys_resp = False)
for i in range(N_SAA):
f_sample = f(x)
f_SAA += f_sample[0]/N_SAA
g_SAA = np.maximum(g_SAA, float(f_sample[1][0]))
return f_SAA, [g_SAA]
pi = [.8746, .0257, -1.43388, -0.00131, 0.00016, 55.8692, 0.7159, .0188, .00017]
pi_init = [.8746, .0257, -1.43388, -0.00131, 0.00016, 0, 0, 0, 0, 0]
bounds_abs = np.zeros((10, 2))
for i in range(5):
if pi[i] > 0:
bounds_abs[i] = [pi[i]/2, pi[i]*2]
bounds_abs[i+5] = [-pi[i]*10, pi[i]*10]
else:
bounds_abs[i] = [pi[i]*2, pi[i]/2]
bounds_abs[i+5] = [pi[i]*10, -pi[i]*10]
x0 = (np.array(pi_init) - bounds_abs[:,0]) / (bounds_abs[:,1]-bounds_abs[:,0])
N_SAA = 1
noise = np.array([.001, 1])/3
cost_rand = lambda x: cost_control_noise(x, bounds_abs, noise, N_SAA, \
x0 = [.116, 368.489], N = 200, T = 20)
ContrSynRand_DIRECT_cost = lambda x, grad: cost_rand(x)
bounds = np.array([[0, 1]]*10)
x0 = (np.array(pi_init) - bounds_abs[:,0]) / (bounds_abs[:,1]-bounds_abs[:,0])
initial_outputRand = cost_rand(x0)
x0_abs = np.array(pi_init)
cost_randNS = lambda x: cost_control_noise(x, bounds_abs, noise, N_SAA, \
x0 = [.116, 368.489], N = 200, T = 20, NS = True)
max_f_eval = 100
N = 10
ContrSynRand_pybobyqa_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_pybobyqa = PyBobyqaWrapper().solve(cost_rand, x0, bounds=bounds.T, \
maxfun= max_f_eval, constraints=1, \
seek_global_minimum = True, \
objfun_has_noise = True)
ContrSynRand_pybobyqa_list.append(ContrSynRand_pybobyqa)
print('10 Py-BOBYQA iterations completed')
N = 10
ContrSynRand_simplex_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_simplex = simplex_method(cost_rand, x0, bounds, max_iter = 50, \
constraints = 1, rnd_seed = i, mu_con = 1e6)
ContrSynRand_simplex_list.append(ContrSynRand_simplex)
print('10 simplex iterations completed')
N = 10
ContrSynRand_FiniteDiff_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_FiniteDiff = finite_Diff_Newton(cost_randNS, x0_abs, bounds = bounds_abs, \
con_weight = 1e6, check_bounds = True)
ContrSynRand_FiniteDiff_list.append(ContrSynRand_FiniteDiff)
print('10 Approx Newton iterations completed')
N = 10
ContrSynRand_BFGS_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_BFGS = BFGS_optimizer(cost_randNS, x0_abs, bounds = bounds_abs, \
con_weight = 1e6, check_bounds = True)
ContrSynRand_BFGS_list.append(ContrSynRand_BFGS)
print('10 BFGS iterations completed')
N = 10
ContrSynRand_Adam_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_Adam = Adam_optimizer(cost_rand, x0, method = 'forward', \
bounds = bounds, alpha = 0.4, \
beta1 = 0.2, beta2 = 0.1, \
max_f_eval = 100, con_weight = 1e6, \
check_bounds = True)
ContrSynRand_Adam_list.append(ContrSynRand_Adam)
print('10 Adam iterations completed')
N_min_s = 15
init_radius = 0.5
method = 'Discrimination'
N = 10
ContrSynRand_CUATRO_global_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_CUATRO_global = CUATRO(cost_rand, x0, init_radius, bounds = bounds, \
N_min_samples = N_min_s, tolerance = 1e-10,\
beta_red = 0.9, rnd = rnd_seed, method = 'global', \
constr_handling = method)
ContrSynRand_CUATRO_global_list.append(ContrSynRand_CUATRO_global)
print('10 CUATRO global iterations completed')
N_min_s = 6
init_radius = 0.5
# method = 'Fitting'
method = 'Discrimination'
N = 10
ContrSynRand_CUATRO_local_list = []
for i in range(N):
rnd_seed = i
ContrSynRand_CUATRO_local = CUATRO(cost_rand, x0, init_radius, bounds = bounds, \
N_min_samples = N_min_s, tolerance = 1e-10,\
beta_red = 0.9, rnd = rnd_seed, method = 'local', \
constr_handling = method)
ContrSynRand_CUATRO_local_list.append(ContrSynRand_CUATRO_local)
print('10 CUATRO local iterations completed')
N = 10
ContrSynRand_SQSnobFit_list = []
for i in range(N):
ContrSynRand_SQSnobFit = SQSnobFitWrapper().solve(cost_randNS, x0_abs, bounds_abs, mu_con = 1e6, \
maxfun = max_f_eval, constraints=1)
ContrSynRand_SQSnobFit_list.append(ContrSynRand_SQSnobFit)
print('10 SnobFit iterations completed')
### SQSnobfit tends to fail, so manually add failure:
# dict_fail = {}
# dict_fail['x_best_so_far'] =
N = 10
ContrSynRand_DIRECT_list = []
for i in range(N):
ContrSynRand_DIRECT = DIRECTWrapper().solve(ContrSynRand_DIRECT_cost, x0, bounds, mu_con = 1e6, \
maxfun = max_f_eval, constraints=1)
ContrSynRand_DIRECT_list.append(ContrSynRand_DIRECT)
print('10 DIRECT iterations completed')
with open('BayesContrSynRand_list.pickle', 'rb') as handle:
ContrSynRand_Bayes_list = pickle.load(handle)
ContrSynRand_Bayes_list = fix_starting_points(ContrSynRand_Bayes_list, x0, initial_outputRand)
ContrSynRand_DIRECT_list = fix_starting_points(ContrSynRand_DIRECT_list, x0, initial_outputRand)
ContrSynRand_simplex_list = fix_starting_points(ContrSynRand_simplex_list, x0, initial_outputRand)
ContrSynRand_pybobyqa_list = fix_starting_points(ContrSynRand_pybobyqa_list, x0, initial_outputRand)
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
'legend.handlelength': 2}
plt.rcParams.update(params)
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_pybobyqa_list)):
x_best = np.array(ContrSynRand_pybobyqa_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_pybobyqa_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_pybobyqa_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'Py-BOBYQA'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_g'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_PyBOBYQA_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_FiniteDiff_list)):
x_best = np.array(ContrSynRand_FiniteDiff_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_FiniteDiff_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_FiniteDiff_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'Fin. Diff.'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_g'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_FiniteDiff_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_BFGS_list)):
x_best = np.array(ContrSynRand_BFGS_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_BFGS_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_BFGS_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'BFGS'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_g'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_BFGS_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_Adam_list)):
x_best = np.array(ContrSynRand_Adam_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_Adam_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_Adam_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'Adam'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_g'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_Adam_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_CUATRO_global_list)):
x_best = np.array(ContrSynRand_CUATRO_global_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_CUATRO_global_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_CUATRO_global_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'CUATRO_g'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_g'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_CUATROg_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_CUATRO_local_list)):
x_best = np.array(ContrSynRand_CUATRO_local_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_CUATRO_local_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_CUATRO_local_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'CUATRO_l'+str(i))
# ax1.plot(x_ind, f_best, label = 'CUATRO_l'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_CUATROl_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_Bayes_list)):
x_best = np.array(ContrSynRand_Bayes_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_Bayes_list[i]['f_best_so_far'])
nbr_feval = len(ContrSynRand_Bayes_list[i]['f_store'])
ax1.step(np.arange(len(f_best)), f_best, where = 'post', \
label = 'BO'+str(i)+'; #f_eval: ' + str(nbr_feval))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_BO_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_simplex_list)):
x_best = np.array(ContrSynRand_simplex_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_simplex_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_simplex_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'Simplex'+str(i))
ax1.legend()
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.set_yscale('log')
fig1.savefig('ContrSyn_random_plots/ContrSyn_Simplex_Convergence_plot.svg', format = "svg")
# ## Change to x_best_So_far
# fig1 = plt.figure()
# ax1 = fig1.add_subplot()
# for i in range(len(ContrSynRand_Nest_list)):
# x_best = np.array(ContrSynRand_Nest_list[i]['x_best_so_far'])
# f_best = np.array(ContrSynRand_Nest_list[i]['f_best_so_far'])
# x_ind = np.array(ContrSynRand_Nest_list[i]['samples_at_iteration'])
# ax1.step(x_ind, f_best, where = 'post', label = 'Nest.'+str(i))
# ax1.legend()
# ax1.set_yscale('log')
# ax1.set_xlabel('Nbr. of function evaluations')
# ax1.set_ylabel('Best function evaluation')
# fig1.savefig('ContrSyn_random_plots/ContrSyn_Nesterov_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_SQSnobFit_list)):
x_best = np.array(ContrSynRand_SQSnobFit_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_SQSnobFit_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_SQSnobFit_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'SQSnobfit.'+str(i))
ax1.legend()
ax1.set_yscale('log')
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
fig1.savefig('ContrSyn_random_plots/ContrSyn_SQSnobFit_Convergence_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
for i in range(len(ContrSynRand_DIRECT_list)):
x_best = np.array(ContrSynRand_DIRECT_list[i]['x_best_so_far'])
f_best = np.array(ContrSynRand_DIRECT_list[i]['f_best_so_far'])
x_ind = np.array(ContrSynRand_DIRECT_list[i]['samples_at_iteration'])
ax1.step(x_ind, f_best, where = 'post', label = 'DIRECT'+str(i))
ax1.legend()
ax1.set_yscale('log')
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
fig1.savefig('ContrSyn_random_plots/ContrSyn_DIRECT_Convergence_plot.svg', format = "svg")
sol_Cg = average_from_list(ContrSynRand_CUATRO_global_list)
test_CUATROg, test_av_CUATROg, test_min_CUATROg, test_max_CUATROg = sol_Cg
sol_Cl = average_from_list(ContrSynRand_CUATRO_local_list)
test_CUATROl, test_av_CUATROl, test_min_CUATROl, test_max_CUATROl = sol_Cl
# sol_Nest = average_from_list(ContrSynRand_Nest_list)
# test_Nest, test_av_Nest, test_min_Nest, test_max_Nest = sol_Nest
sol_Splx = average_from_list(ContrSynRand_simplex_list)
test_Splx, test_av_Splx, test_min_Splx, test_max_Splx = sol_Splx
sol_SQSF = average_from_list(ContrSynRand_SQSnobFit_list)
test_SQSF, test_av_SQSF, test_min_SQSF, test_max_SQSF = sol_SQSF
sol_DIR = average_from_list(ContrSynRand_DIRECT_list)
test_DIR, test_av_DIR, test_min_DIR, test_max_DIR = sol_DIR
sol_pybbyqa = average_from_list(ContrSynRand_pybobyqa_list)
test_pybbqa, test_av_pybbqa, test_min_pybbqa, test_max_pybbqa = sol_pybbyqa
sol_findiff = average_from_list(ContrSynRand_FiniteDiff_list)
test_findiff, test_av_findiff, test_min_findiff, test_max_findiff = sol_findiff
sol_BFGS = average_from_list(ContrSynRand_BFGS_list)
test_BFGS, test_av_BFGS, test_min_BFGS, test_max_BFGS = sol_BFGS
sol_Adam = average_from_list(ContrSynRand_Adam_list)
test_Adam, test_av_Adam, test_min_Adam, test_max_Adam = sol_Adam
sol_BO = average_from_list(ContrSynRand_Bayes_list)
test_BO, test_av_BO, test_min_BO, test_max_BO = sol_BO
fig = plt.figure()
ax = fig.add_subplot()
ax.step(np.arange(1, 101), test_av_CUATROg, where = 'post', label = 'CUATRO_g', c = 'b')
ax.fill_between(np.arange(1, 101), test_min_CUATROg, \
test_max_CUATROg, color = 'b', alpha = .5)
ax.step(np.arange(1, 101), test_av_CUATROl, where = 'post', label = 'CUATRO_l', c = 'c')
ax.fill_between(np.arange(1, 101), test_min_CUATROl, \
test_max_CUATROl, color = 'c', alpha = .5)
ax.step(np.arange(1, 101), test_av_pybbqa, where = 'post', label = 'Py-BOBYQA ', c = 'green')
ax.fill_between(np.arange(1, 101), test_min_pybbqa, \
test_max_pybbqa, color = 'green', alpha = .5)
ax.step(np.arange(1, 101), test_av_SQSF, where = 'post', label = 'Snobfit*', c = 'orange')
ax.fill_between(np.arange(1, 101), test_min_SQSF, \
test_max_SQSF, color = 'orange', alpha = .5)
ax.step(np.arange(1, 101), test_av_BO, where = 'post', label = 'Bayes. Opt.', c = 'red')
ax.fill_between(np.arange(1, 101), test_min_BO, \
test_max_BO, color = 'red', alpha = .5)
ax.legend()
ax.set_yscale('log')
ax.set_xlabel('Number of function evaluations')
ax.set_ylabel('Best function evaluation')
ax.set_xlim([1, 100])
ax.set_ylim([1, 150])
ax.legend(loc = 'upper right')
fig.savefig('ContrSyn_publication_plots/ContrSyn_Model.svg', format = "svg")
fig = plt.figure()
ax = fig.add_subplot()
# ax.step(np.arange(1, 101), test_av_Nest, where = 'post', label = 'Nesterov', c = 'brown')
# ax.fill_between(np.arange(1, 101), test_min_Nest, \
# test_max_Nest, color = 'brown', alpha = .5)
ax.step(np.arange(1, 101), test_av_Splx, where = 'post', label = 'Simplex', c = 'green')
ax.fill_between(np.arange(1, 101), test_min_Splx, test_max_Splx, color = 'green', alpha = .5)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 10:33:41 2020
@author: <NAME>
"""
# =============================================================================
# Import Libraries
# =============================================================================
import numpy as np
from numba import jit
import matplotlib.pyplot as plt
# =============================================================================
# Define Functions
# =============================================================================
def ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = mu + a*x_{t-1} + e_t
"""
x_path = np.zeros(T)
x_path[0] = x_0
shocks = np.random.normal(0,sigma,T) # The first term isn't used and will be ignored for sake of code readability
# iteratively construct the AR1 according to x_t = mu + a*x_{t-1} + e_t
for t in range(1,T):
x_path[t] = mu + a*x_path[t-1] + shocks[t]
return x_path # Return the path of the AR1
def censored_ar_1(mu, a, sigma, T, x_0):
"""
This function computes a simulated ar1 process assuming x_t = max(mu + a*x_{t-1} + e_t,0)
"""
x_path = np.zeros(T)
x_path[0] = x_0
shocks = np.random.normal(0,sigma,T) # The first term isn't used and will be ignored for sake of code readability
# iteratively construct the AR1 according to x_t = mu + a*x_{t-1} + e_t
for t in range(1,T):
x_path[t] = max(mu + a*x_path[t-1] + shocks[t], 0)
return x_path # Return the path of the AR1
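# --- Illustrative sketch (added): simulate both processes on the same shocks --
# Reseeding between the two calls makes the paths directly comparable; note the
# censored process applies max(., 0) inside the recursion, so the two paths can
# diverge once the floor binds.
def _example_ar_paths(T=200, seed=0):
    np.random.seed(seed)
    x = ar_1(mu=0.0, a=0.9, sigma=1.0, T=T, x_0=0.0)
    np.random.seed(seed)
    x_cens = censored_ar_1(mu=0.0, a=0.9, sigma=1.0, T=T, x_0=0.0)
    return x, x_cens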
def compound_interest_rates(interest_path):
'''
This function takes in a path of portfolio returns (a Tx1 numpy array) and it returns a T+1xT+1
numpy array. The returned array can be seen as a lower triangular matrix with ones on the diagonal and
whose lower diagonal entries correspond to the product of the returns up to that index.
'''
T = len(interest_path) # The number of periods - 1 (because we exclude the initial period)
CI = np.zeros([T+1,T+1]) # Initialize the matrix of compund interest paths in each period.
# Loop over rows and columns and sub in the corresponding compound interest rates for the matrix multiplication
for i in range(T+1):
for j in range(T+1):
if j < i:
CI[i, j] = np.prod(interest_path[j:i])
elif j == i:
CI[i, j] = 1
elif j > i:
continue
return CI
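# --- Illustrative sketch (added): compound_interest_rates on a 2-period path --
# For interest_path = [r1, r2] the returned 3x3 matrix is lower triangular,
#   [[1,     0,  0],
#    [r1,    1,  0],
#    [r1*r2, r2, 1]],
# so row t holds the factors that carry each period's savings forward to time t.
def _example_compound_interest():
    CI = compound_interest_rates(np.array([1.05, 1.02]))
    expected = np.array([[1.0, 0.0, 0.0],
                         [1.05, 1.0, 0.0],
                         [1.05 * 1.02, 1.02, 1.0]])
    return np.allclose(CI, expected)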
def asset_path(income_path, consumption_path, initial_savings, interest_path):
"""
This function computes the total amount you would have saved given a time series of interest rates
given by interest_path and a time series of savings amounts given by savings path with the first index
corresponding to the first time period. It computes the value of the asset at time T-1, the final index.
Inputs:
All inputs need to be Tx1 Numpy Arrays
"""
T = len(income_path) # How many time periods?
S = np.subtract(income_path, consumption_path) # Compute per period savings as per period income minus consumption
S = np.insert(arr = S, obj = 0, values = initial_savings) # Technical trick, from a mathemtical perspective we can consider initial assets to simply be the savings in period 0.
CI = compound_interest_rates(interest_path) # Convert the per period interest path to a compounded interest matrix per period
A = np.dot(CI,S) #Final asset time series is just this dot product
return A
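# --- Illustrative sketch (added): asset_path on a 2-period toy problem -------
# Per-period savings are income minus consumption, with initial savings treated
# as period-0 savings; the expected values follow directly from the compound
# interest matrix sketched above.
def _example_asset_path():
    income = np.array([100.0, 100.0])
    consumption = np.array([80.0, 90.0])
    interest = np.array([1.05, 1.02])
    A = asset_path(income, consumption, initial_savings=50.0, interest_path=interest)
    # A[0] = 50, A[1] = 50*1.05 + 20, A[2] = 50*1.05*1.02 + 20*1.02 + 10
    return A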
#@jit(nopython = True) # This skips the python interpreter in favor of a more low level interpreter, helps speed up large scale simulations
def asset_monte_carlo(N, T, percentiles, initial_savings, inc_fn, cons_fn, int_fn):
'''
This function runs a monte-carlo simulation on the intrest rate, income and consumption stochastic processes to obtain quantiles on asset values (Q_t) for each time period and the expected return
'''
sim_A = np.empty([N,T+1]) #Simulated assets form an NXT+1 matrix which will be collapsed into T+1x1 vectors corresponding to the percentiles
#Randomly simulate asset paths according to the inc_fun, cons_fn and int_fn functions,
# then compile the simulated paths into a matrix
for n in range(N):
income_path = inc_fn()
consumption_path = cons_fn()
interest_path = int_fn()
A_n = asset_path(income_path, consumption_path, initial_savings, interest_path)
sim_A[n, :] = np.transpose(A_n)
"""@package sqp_linsearch
Constraints on the original Voce-Chaboche model for limited information optimization.
"""
from numdifftools import nd_algopy as nda
import numpy as np
def g3_vco_upper(x, constants, variables):
""" Constraint on the maximum ratio of stress at saturation to initial yield stress for the original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
max_hardening_to_yield = constants['rho_yield_sup']
n_backstresses = int((len(x) - 4) / 2)
sy0 = x[1]
q_inf = x[2]
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 4 + 2 * i
gamma_ind = 5 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return (sy0 + q_inf + sum_ck_gammak) / sy0 - max_hardening_to_yield
def g3_vco_lower(x, constants, variables):
""" Constraint on the minimum ratio of stress at saturation to initial yield stress for the original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
min_hardening_to_yield = constants['rho_yield_inf']
n_backstresses = int((len(x) - 4) / 2)
sy0 = x[1]
q_inf = x[2]
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 4 + 2 * i
gamma_ind = 5 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return -(sy0 + q_inf + sum_ck_gammak) / sy0 + min_hardening_to_yield
def g4_vco_upper(x, constants, variables):
""" Constraint on the maximum ratio of isotropic to combined isotropic/kinematic hardening at saturation for the
original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
iso_kin_ratio_max = constants['rho_iso_sup']
q_inf = x[2]
n_backstresses = int((len(x) - 4) / 2)
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 4 + 2 * i
gamma_ind = 5 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return q_inf / (q_inf + sum_ck_gammak) - iso_kin_ratio_max
def g4_vco_lower(x, constants, variables):
""" Constraint on the minimum ratio of isotropic to combined isotropic/kinematic hardening at saturation for the
original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
iso_kin_ratio_min = constants['rho_iso_inf']
q_inf = x[2]
n_backstresses = int((len(x) - 4) / 2)
sum_ck_gammak = 0.
for i in range(n_backstresses):
c_ind = 4 + 2 * i
gamma_ind = 5 + 2 * i
sum_ck_gammak += x[c_ind] / x[gamma_ind]
return -q_inf / (q_inf + sum_ck_gammak) + iso_kin_ratio_min
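# --- Illustrative sketch (added): evaluating the hardening-ratio constraints --
# The parameter layout is assumed from the indexing above: x = [E, sy0, Q_inf,
# b, C_1, gamma_1, C_2, gamma_2, ...]; constants carries the rho_* bounds, and
# the unused 'variables' argument can be None. A point is feasible for a
# constraint when its standard-form value is <= 0. The numbers below are
# arbitrary example values.
def _example_vco_constraints():
    x = np.array([200e3, 300.0, 100.0, 10.0, 20e3, 200.0, 5e3, 50.0])
    constants = {'rho_yield_sup': 3.0, 'rho_yield_inf': 1.2,
                 'rho_iso_sup': 0.6, 'rho_iso_inf': 0.1}
    return [g3_vco_upper(x, constants, None), g3_vco_lower(x, constants, None),
            g4_vco_upper(x, constants, None), g4_vco_lower(x, constants, None)]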
def g5_vco_lower(x, constants, variables):
""" Constraint on the lower bound ratio of gamma_1 to b for the original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
b = x[3]
gamma1 = x[5]
gamma_b_ratio_min = constants['rho_gamma_inf']
return -gamma1 / b + gamma_b_ratio_min
def g5_vco_upper(x, constants, variables):
""" Constraint on the upper bound ratio of gamma_1 to b for the original VC model.
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
b = x[3]
gamma1 = x[5]
gamma_b_ratio_max = constants['rho_gamma_sup']
return gamma1 / b - gamma_b_ratio_max
def g6_vco_lower(x, constants, variables):
""" Constraint on the lower bound ratio of gamma_1 to gamma_2 for the original VC model.
gamma_1 is always x[5] and gamma_2 is always x[7].
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
gamma1 = x[5]
gamma2 = x[7]
gamma_1_2_ratio_min = constants['rho_gamma_12_inf']
return -gamma1 / gamma2 + gamma_1_2_ratio_min
def g6_vco_upper(x, constants, variables):
""" Constraint on the upper bound ratio of gamma_1 to gamma_2 for the original VC model.
gamma_1 is always x[5] and gamma_2 is always x[7].
:param np.ndarray x: Parameters of original Voce-Chaboche model.
:param dict constants: Defines the constants for the constraint.
:param dict variables: Defines constraint values that depend on x.
:return float: Value of the constraint in standard form.
"""
gamma1 = x[5]
gamma2 = x[7]
gamma_1_2_ratio_max = constants['rho_gamma_12_sup']
return gamma1 / gamma2 - gamma_1_2_ratio_max
def g_kin_ratio_vco_lower(x, constants, variables):
c1 = x[4]
gamma1 = x[5]
c2 = x[6]
gamma2 = x[7]
gamma_kin_ratio_min = constants['rho_kin_ratio_inf']
return -(c1 / gamma1) / (c2 / gamma2) + gamma_kin_ratio_min
def g_kin_ratio_vco_upper(x, constants, variables):
c1 = x[4]
gamma1 = x[5]
c2 = x[6]
gamma2 = x[7]
gamma_kin_ratio_max = constants['rho_kin_ratio_sup']
return (c1 / gamma1) / (c2 / gamma2) - gamma_kin_ratio_max
# Gradients and Hessians of all the above constraints
def g3_vco_lower_gradient(x, constants, variables):
fun_wrapper = lambda x1: g3_vco_lower(x1, constants, variables)
grad_fun = nda.Gradient(fun_wrapper)
grad = grad_fun(x)
return np.reshape(grad, (-1, 1))
"""
Simulate GHOST/Veloce instrument observations.
This is a simple simulation code for GHOST or Veloce,
with a class :class:`GhostArm` that simulates
a single arm of the instrument. The key default parameters
are hardwired for each named
configuration in the :func:`__init__ <GhostArm.__init__>` function of ARM.
Note that in this simulation code, the 'x' and 'y' directions are the
along-slit and dispersion directions respectively
(similar to physical axes), but by convention, images are returned/displayed
with a vertical slit and a horizontal dispersion direction.
For a simple simulation, run, e.g.::
import pymfe
blue = pymfe.ghost.Arm('blue')
blue.simulate_frame()
TODO:
1) Add spectrograph aberrations (just focus and coma)
2) Add pupil illumination plus aberrations.
"""
from __future__ import division, print_function
import numpy as np
from .polyspect import Polyspect
GHOST_BLUE_SZX = 4112 # 4096 #
GHOST_BLUE_SZY = 4096 # 4112 #
GHOST_RED_SZX = 6160
GHOST_RED_SZY = 6144
class GhostArm(Polyspect):
"""
Class representing an arm of the spectrograph.
A class for each arm of the spectrograph. The initialisation
function takes a series of strings representing the configuration.
It can be ``"red"`` or ``"blue"`` for the arm (first string),
and ``"std"`` or ``"high"`` for the mode (second string).
This class initialises and inherits all attributes and
methods from :any:`Polyspect`, which is the module that
contains all spectrograph generic functions.
"""
def __init__(self, arm='blue', mode='std',
detector_x_bin=1, detector_y_bin=1):
"""
The class initialisation takes the arm, resolution mode and binning
modes as inputs and defines all needed attributes.
It starts by initialising the :any:`Polyspect` class with the correct
detector sizes ``szx`` and ``szy``, order numbers (``m_min``, ``m_max``)
and whether the CCD is transposed. Transposed in this case implies that
the spectral direction is in the x axis of the CCD image, which is the
case for the GHOST data.
Most of the parameters sent to the
:class:`PolySpect <polyfit.polyspect.PolySpect>` initialization function
are self-explanatory, but here is a list of those
that may not be:
+------------------------------+---------------------------------------+
| **Variable Name** | **Purpose/meaning** |
+------------------------------+---------------------------------------+
| ``m_ref``, ``m_min`` and | Reference, minimum and maximum order |
| ``m_max`` | indices for the camera. |
+------------------------------+---------------------------------------+
| ``szx`` and ``szy`` | Number of pixels in the x and y |
| | directions |
+------------------------------+---------------------------------------+
| ``nlenslets`` | Number of lenslets in the IFU |
+------------------------------+---------------------------------------+
| ``lenslet_high_size`` and | Unused |
| ``lenslet_std_size`` | |
+------------------------------+---------------------------------------+
Attributes
----------
arm: str
Which arm of the GHOST spectrograph is to be initialized. Can be
``'red'`` or ``'blue'``.
spect: str
Which spectrograph in usage. Defaults to ``'ghost'``.
lenslet_high_size: int
Lenslet flat-to-flat in microns for high mode. Defaults to
``118.0```.
lenslet_std_size: int
Lenslet flat-to-flat in microns for standard mode. Defaults to
``197.0``.
mode: str
Resolution mode. Can be either ``'std'`` or ``'high'``.
nlenslets: int
Number of lenslets of the IFU. This value is set depending on
whether ``mode`` is ``'std'`` (17) or ``'high'`` (28).
detector_x_bin: int, optional
The x binning of the detector. Defaults to 1.
detector_y_bin: int, optional
The y binning of the detector. Defaults to 1.
"""
# MCW 190822 - swapped szy and szx values for new data
if arm == 'red':
Polyspect.__init__(self, m_ref=50,
szx=GHOST_RED_SZX, szy=GHOST_RED_SZY,
m_min=34, m_max=64, transpose=True)
elif arm == 'blue':
Polyspect.__init__(self, m_ref=80,
szx=GHOST_BLUE_SZX, szy=GHOST_BLUE_SZY,
m_min=63, m_max=95, transpose=True) #was 63 and 95, or 62 and 92 for shifted NRC data.
else:
print("Unknown spectrograph arm!")
raise UserWarning
# A lot of these parameters are yet unused.
# These are a legacy of the original simulator and are left here
# because they may become useful in the future.
self.spect = 'ghost'
self.arm = arm
self.lenslet_high_size = 118.0 # Lenslet flat-to-flat in microns
self.lenslet_std_size = 197.0 # Lenslet flat-to-flat in microns
self.mode = mode
# x is in the spatial direction for this module, contrary to intuition
# and convention because Mike is weird like this....
# y is in the spectral direction
self.ybin, self.xbin = detector_x_bin, detector_y_bin
# Now we determine the number of fibers based on mode.
if mode == 'high':
self.nlenslets = 28
elif mode == 'std':
self.nlenslets = 17
else:
print("Unknown mode!")
raise UserWarning
def bin_data(self, data):
"""
Generic data binning function.
Generic function used to create a binned equivalent of a
spectrograph image array for the purposes of equivalent extraction.
Data are binned to the binning specified by the class attributes
``detector_x_bin`` and ``detector_y_bin``.
This function is mostly used to re-bin calibration images (flat,
arc, etc) to the binning of related science data prior to performing
extraction.
.. note::
This functionality is now implemented elsewhere, as the
:any:`ghost.primitives_ghost.GHOST._rebin_ghost_ad`
method of the :any:`ghost.primitives_ghost.GHOST` primitive class,
which takes care of all the binning.
Parameters
----------
data: :obj:`numpy.ndarray`
The (unbinned) data to be binned
Raises
------
UserWarning
If the data provided is not consistent with CCD size, i.e., not
unbinned
Returns
-------
binned_array: :obj:`numpy.ndarray`
Re-binned data.
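
Example
-------
A quick sketch of the underlying reshape-and-sum binning, using 2x2
binning factors purely for illustration::

    import numpy as np
    unbinned = np.arange(16.0).reshape(4, 4)
    rows, cols = 2, 2
    binned = unbinned.reshape(unbinned.shape[0] // rows, rows,
                              unbinned.shape[1] // cols, cols
                              ).sum(axis=1).sum(axis=2)
    # binned has shape (2, 2); each element is the sum of one 2x2 block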
"""
if data.shape != (self.szx, self.szy):
raise UserWarning('Input data for binning is not in the expected '
'format')
if self.xbin == 1 and self.ybin == 1:
return data
rows = self.xbin
cols = self.ybin
binned_array = data.reshape(int(data.shape[0] / rows),
rows,
int(data.shape[1] / cols),
cols).sum(axis=1).sum(axis=2)
return binned_array
def slit_flat_convolve(self, flat, slit_profile=None, spatpars=None,
microns_pix=None, xpars=None, num_conv=3):
"""
Correlate a flat field image with a slit profile image. Note that this is
not convolution, as we don't want to shift the image.
Function that takes a flat field image and a slit profile and
correlates them in two dimensions. Returns the result of the correlation,
which should be used for tramline fitting in findApertures.
This function is a first step in finding the centre of each order.
Given the potentially overlapping nature of fiber images in flat field
frames, a convolution method is employed with a sampled slit profile,
in which the center of the order will, ideally, match the profile best
and reveal the maximum of the convolution.
A convolution map is then fed into a fitting function where the location
of the maxima in the map are found, and a model is fit to determine a
continuous function describing the centre of the orders.
Unfortunately, the slit magnification changes across the detector. Rather
than writing a giant for loop, num_conv convolutions are performed with
a different slit magnification corresponding to each order stored in the
list orders.
For each of these orders, a convolution is done in 2D by interpolating
the magnified slit profile with the slit coordinates, normalising it and
inverse Fourier transforming the product between the flat transform and the
shifted slit profile::
# Create the slit model.
mod_slit = np.interp(profilex*spat_scale[i], slit_coord, slit_profile)
# Normalise the slit model and Fourier transform for convolution
mod_slit /= np.sum(mod_slit)
mod_slit_ft = np.fft.rfft(np.fft.fftshift(mod_slit))
flat_conv_cube[j, :, i] = np.fft.irfft((im_fft[:, i] * mod_slit_ft)/num_conv)
Now, we have the convolution at ``num_conv`` orders, and the final
result is an interpolation between these.
.. note::
The function currently contains code related to convoling with a
fixed synthetic profile, which is not used. This is legacy code and
sometimes used only for testing purposes. The normal usage is to
have the spatial scale model parameters as inputs which determine
the slit magnification as a function of order and pixel along the
orders. The flat convolution is done using only a smaller number of
orders (defaults to 3) and interpolated over the others but could in
principle be done with all orders considered.
Parameters
----------
flat: :obj:`numpy.ndarray`
A flat field image from the spectrograph
slit_profile: :obj:`numpy.ndarray`, optional
A slit profile as a 1D array with the slit profile fiber amplitudes.
If none is supplied this function will assume identical fibers and
create one to be used in the convolution based on default parameters
specified in the ghost class.
spatpars: :obj:`numpy.ndarray`, optional
The 2D polynomial parameters for the slit spatial scale.
Required if slit_profile is not None.
microns_pix: float, optional
The slit scale in microns per pixel.
Required if slit_profile is not None.
xpars: :obj:`numpy.ndarray`, optional
The 2D polynomial parameters for the x (along-slit) coordinate.
Required if slit_profile is not None.
num_conv: int, optional
The number of different convolution functions to use for different
orders.
The final convolved profile is an interpolation between these.
Returns
-------
flat_conv: :obj:`numpy.ndarray`
The convolved 2D array.
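
Example
-------
An illustrative call, assuming a full-frame flat, an extracted slit
profile and previously fitted spatial-scale and x-position models are
already in hand (all variable names below are placeholders)::

    conv = arm.slit_flat_convolve(flat_data,
                                  slit_profile=profile,
                                  spatpars=spatmod,
                                  microns_pix=slit_scale,
                                  xpars=xmod, num_conv=3)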
"""
# FIXME: Error checking of inputs is needed here
# TODO: Based on test of speed, the convolution code with an input
# slit_profile could go order-by-order.
if self.arm == 'red':
# Now put in the default fiber profile parameters for each mode.
# These are used by the convolution function on polyspect
# These were determined based on visual correspondence with
# simulated data and may need to be revised once we have real
# data. The same applies to the blue arm parameters.
if self.mode == 'std':
fiber_separation = 4.15
profile_sigma = 1.1
elif self.mode == 'high':
fiber_separation = 2.49
profile_sigma = 0.7
elif self.arm == 'blue':
# Additional slit rotation across an order needed to match Zemax.
# self.extra_rot = 2.0
# Now put in the default fiber profile parameters for each mode.
# These are used by the convolution function on polyspect
if self.mode == 'std':
fiber_separation = 3.97
profile_sigma = 1.1
elif self.mode == 'high':
fiber_separation = 2.53
profile_sigma = 0.7
else:
print("Unknown spectrograph arm!")
raise UserWarning
# Fourier transform the flat for convolution
im_fft = np.fft.rfft(flat, axis=0)
# Create an x baseline for convolution
xbase = flat.shape[0]
profilex = np.arange(xbase) - xbase // 2
# This is the original code which is based on the fixed fiber_separation
# defined above.
if slit_profile is None:
flat_conv = np.zeros_like(im_fft)
# At this point create a slit profile
# Now create a model of the slit profile
mod_slit = np.zeros(xbase)
if self.mode == 'high':
nfibers = 26
else:
nfibers = self.nlenslets
for i in range(-(nfibers // 2), -(nfibers // 2) + nfibers):
mod_slit += np.exp(-(profilex - i * fiber_separation) ** 2 /
2.0 / profile_sigma ** 2)
# Normalise the slit model and Fourier transform for convolution
mod_slit /= np.sum(mod_slit)
mod_slit_ft = np.fft.rfft(np.fft.fftshift(mod_slit))
# Now convolved in 2D
for i in range(im_fft.shape[1]):
flat_conv[:, i] = im_fft[:, i] * mod_slit_ft
# Now inverse transform.
flat_conv = np.fft.irfft(flat_conv, axis=0)
# If a profile is given, do this instead.
else:
flat_conv = np.zeros_like(flat)
flat_conv_cube = np.zeros((num_conv, flat.shape[0], flat.shape[1]))
# Our orders that we'll evaluate the spatial scale at:
orders = np.linspace(self.m_min, self.m_max, num_conv).astype(int)
mprimes = self.m_ref / orders - 1
y_values = np.arange(self.szy)
# The slit coordinate in microns
slit_coord = (np.arange(len(slit_profile)) -
len(slit_profile) // 2) * microns_pix
x_map = np.empty((len(mprimes), self.szy))
# Now convolved in 2D
for j, mprime in enumerate(mprimes):
# The spatial scales
spat_scale = self.evaluate_poly(spatpars)[
orders[j] - self.m_min]
# The x pixel values, just for this order
x_map[j] = self.evaluate_poly(xpars)[orders[j] - self.m_min]
for i in range(im_fft.shape[1]):
# Create the slit model.
mod_slit = np.interp(profilex * spat_scale[i], slit_coord,
slit_profile)
# Normalise the slit model and Fourier transform for
# convolution
mod_slit /= np.sum(mod_slit)
mod_slit_ft = np.fft.rfft(np.fft.fftshift(mod_slit))
# FIXME: Remove num_conv on next line and see if it makes
# a difference!
flat_conv_cube[j, :, i] = np.fft.irfft(
(im_fft[:, i] * mod_slit_ft.conj()) / num_conv
)
# Work through every y coordinate and interpolate between the
# convolutions with the different slit profiles.
x_ix = np.arange(flat.shape[0]) - flat.shape[0] // 2
# Create an m index, and reverse x_map if needed.
# FIXME: This assumes a minimum size of x_map which should be
# checked above, i.e. mprimes has 2 or more elements.
m_map_ix = np.arange(len(mprimes))
if x_map[1, 0] < x_map[0, 0]:
m_map_ix = m_map_ix[::-1]
x_map = x_map[::-1]
for i in range(im_fft.shape[1]):
m_ix_for_interp = np.interp(x_ix, x_map[:, i], m_map_ix)
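# NOTE: the routine continues beyond this excerpt. The lines below are a
# hedged sketch of the remaining step, not the verbatim original: for each
# column, blend the num_conv convolution planes using the fractional order
# index computed above, then return the blended image.
m_ix_lo = np.minimum(np.floor(m_ix_for_interp).astype(int), len(mprimes) - 1)
m_ix_hi = np.minimum(m_ix_lo + 1, len(mprimes) - 1)
m_frac = m_ix_for_interp - m_ix_lo
for j in range(len(mprimes)):
    weight = (m_ix_lo == j) * (1 - m_frac) + (m_ix_hi == j) * m_frac
    flat_conv[:, i] += weight * flat_conv_cube[j, :, i]
# (after the per-column loop finishes)
return flat_conv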
import numpy as np
import matplotlib.pyplot as plt
from gym_flock.envs.spatial.utils import _get_pos_diff
from scipy.spatial import Delaunay
from pathlib import Path
def in_obstacle(obstacles, px, py):
"""
Check if query point is within any of the rectangular obstacles
:param obstacles: list of rectangular obstacles [(xmin, xmax, ymin, ymax)]
:param px: query point x coordinate
:param py: query point y coordinate
:return: True if the point lies inside any obstacle, False otherwise
"""
for (xmin, xmax, ymin, ymax) in obstacles:
if xmin <= px <= xmax and ymin <= py <= ymax:
return True
return False
def gen_obstacle_grid(ranges):
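"""
Build a grid of rectangular obstacles from the Cartesian product of the
supplied coordinate ranges (the same ranges are reused for x and y).
:param ranges: list of (min, max) tuples
:return: list of rectangles as (xmin, xmax, ymin, ymax) tuples
"""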
obstacle_list = []
for (x1, x2) in ranges:
for (y1, y2) in ranges:
obstacle_list.append((x1, x2, y1, y2))
return obstacle_list
def generate_lattice(free_region, lattice_vectors):
"""
Generate hexagonal lattice
From https://stackoverflow.com/questions/6141955/efficiently-generate-a-lattice-of-points-in-python
:param free_region: bounding box of the free area as (xmin, xmax, ymin, ymax)
:param lattice_vectors: two 2-element basis vectors defining the lattice
:return: N x 2 array of lattice points; column 0 holds the y coordinates
and column 1 the x coordinates (for compatibility with the original version)
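
Example (illustrative basis vectors; the environment defines its own)::

    import numpy as np
    spacing = 6.0
    lattice_vectors = [spacing * np.array([-1.0, -1.0 / np.sqrt(3)]),
                       spacing * np.array([-1.0, 1.0 / np.sqrt(3)])]
    points = generate_lattice((0, 100, 0, 100), lattice_vectors)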
"""
(xmin, xmax, ymin, ymax) = free_region
image_shape = np.array([xmax - xmin, ymax - ymin])
center_pix = image_shape // 2
# Get the lower limit on the cell size.
dx_cell = max(abs(lattice_vectors[0][0]), abs(lattice_vectors[1][0]))
dy_cell = max(abs(lattice_vectors[0][1]), abs(lattice_vectors[1][1]))
# Get an over estimate of how many cells across and up.
nx = image_shape[0] // dx_cell
ny = image_shape[1] // dy_cell
# Generate a square lattice, with too many points.
x_sq = np.arange(-nx, nx, dtype=float)
y_sq = np.arange(-ny, ny, dtype=float)
x_sq.shape = x_sq.shape + (1,)
y_sq.shape = (1,) + y_sq.shape
# Now shear the whole thing using the lattice vectors
x_lattice = lattice_vectors[0][0] * x_sq + lattice_vectors[1][0] * y_sq
y_lattice = lattice_vectors[0][1] * x_sq + lattice_vectors[1][1] * y_sq
# Trim to fit in box.
mask = ((x_lattice < image_shape[0] / 2.0) & (x_lattice > -image_shape[0] / 2.0))
mask = mask & ((y_lattice < image_shape[1] / 2.0) & (y_lattice > -image_shape[1] / 2.0))
x_lattice = x_lattice[mask]
y_lattice = y_lattice[mask]
# Translate to the center pix.
x_lattice += (center_pix[0] + xmin)
y_lattice += (center_pix[1] + ymin)
# Make output compatible with original version.
out = np.empty((len(x_lattice), 2), dtype=float)
out[:, 0] = y_lattice
out[:, 1] = x_lattice
return out
def reject_collisions(points, obstacles=None):
"""
:param points:
:param obstacles:
:return:
"""
if obstacles is None or len(obstacles) == 0:
return points
# remove points within obstacle
n_points = np.shape(points)[0]
flag = np.ones((n_points,), dtype=bool)
for i in range(n_points):
if in_obstacle(obstacles, points[i, 0], points[i, 1]):
flag[i] = False
return points[flag, :]
def gen_square(env):
env.x_max = env.x_max_init * env.n_agents / 4
env.y_max = env.y_max_init * env.n_agents / 4
per_side = int(env.n_targets / 4)
targets = set()
# initialize fixed grid of targets
tempx = np.linspace(-env.x_max, -env.x_max, 1)
tempy = np.linspace(-env.y_max, env.y_max, per_side, endpoint=False)
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
class Flock(object):
def __init__(self, flock_params, boid_params):
"""Initialise a flock with boid and flock information"""
self.boid_count = flock_params['boid_count']
self.fly_middle_strength = flock_params['fly_middle_strength']
self.nearby_distance = flock_params['nearby_distance']
self.formation_distance = flock_params['formation_distance']
self.speed_formation_strength = flock_params['speed_formation_strength']
lower_pos_limit = np.array([boid_params['min_x_position'],boid_params['min_y_position']])
upper_pos_limit = np.array([boid_params['max_x_position'],boid_params['max_y_position']])
lower_vel_limit = np.array([boid_params['min_x_velocity'],boid_params['min_y_velocity']])
upper_vel_limit = np.array([boid_params['max_x_velocity'],boid_params['max_y_velocity']])
self.positions = lower_pos_limit[:,np.newaxis] + \
np.random.rand(2, int(self.boid_count))*(upper_pos_limit - lower_pos_limit)[:,np.newaxis]
self.velocities = lower_vel_limit[:,np.newaxis] + \
np.random.rand(2, int(self.boid_count))*(upper_vel_limit - lower_vel_limit)[:,np.newaxis]
"""I define helper functions so that Flock can call fly_middle or fly_away if need to"""
def fly_middle(self):
"""Fly towards the middle"""
Flock.fly_middle_helper(self.positions,self.velocities,self.fly_middle_strength)
self.positions += self.velocities
@staticmethod
def fly_middle_helper(positions, velocities, fly_middle_strength):
"""A helper method that does the maths for fly_middle"""
positions=np.asarray(positions)
velocities=np.asarray(velocities)
middle = np.mean(positions, 1)
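# Hedged completion (the original helper continues past this excerpt):
# nudge every boid's velocity towards the flock's centre of mass, scaled
# by the fly-to-middle strength; velocities is modified in place.
velocities += (middle[:, np.newaxis] - positions) * fly_middle_strength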
"""Causal Generative Neural Networks.
Author : <NAME> & <NAME>
Ref : Causal Generative Neural Networks (https://arxiv.org/abs/1711.08936)
Date : 09/5/17
.. MIT License
..
.. Copyright (c) 2018 <NAME>
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
import networkx as nx
import numpy as np
import itertools
import warnings
import torch as th
from copy import deepcopy
from joblib import Parallel, delayed
from sklearn.preprocessing import scale
from tqdm import trange
from .model import GraphModel
from ..pairwise.GNN import GNN
from ...utils.loss import MMDloss
from ...utils.Settings import SETTINGS
from ...utils.graph import dagify_min_edge
def message_warning(msg, *a, **kwargs):
"""Ignore everything except the message."""
return str(msg) + '\n'
warnings.formatwarning = message_warning
class CGNN_block(th.nn.Module):
"""CGNN 'block' which represents a FCM equation between a cause and its parents."""
def __init__(self, sizes):
"""Init the block with the network sizes."""
super(CGNN_block, self).__init__()
layers = []
for i, j in zip(sizes[:-2], sizes[1:-1]):
layers.append(th.nn.Linear(i, j))
layers.append(th.nn.ReLU())
layers.append(th.nn.Linear(sizes[-2], sizes[-1]))
self.layers = th.nn.Sequential(*layers)
def forward(self, x):
"""Forward through the network."""
return self.layers(x)
def reset_parameters(self):
for layer in self.layers:
if hasattr(layer, "reset_parameters"):
layer.reset_parameters()
class CGNN_model(th.nn.Module):
"""Class for one CGNN instance."""
def __init__(self, adj_matrix, batch_size, nh=20, gpu=None,
gpu_id=0, confounding=False, initial_graph=None, **kwargs):
"""Init the model by creating the blocks and extracting the topological order."""
super(CGNN_model, self).__init__()
gpu = SETTINGS.get_default(gpu=gpu)
device = 'cuda:{}'.format(gpu_id) if gpu else 'cpu'
self.topological_order = [i for i in nx.topological_sort(nx.DiGraph(adj_matrix))]
self.adjacency_matrix = adj_matrix
self.confounding = confounding
if initial_graph is None:
self.i_adj_matrix = self.adjacency_matrix
else:
self.i_adj_matrix = initial_graph
self.blocks = th.nn.ModuleList()
self.generated = [None for i in range(self.adjacency_matrix.shape[0])]
self.noise = th.zeros(batch_size, self.adjacency_matrix.shape[0]).to(device)
self.corr_noise = dict([[(i, j), th.zeros(batch_size, 1).to(device)] for i, j
in zip(*np.nonzero(self.i_adj_matrix)) if i < j])
self.criterion = MMDloss(batch_size, device=device)
self.score = th.FloatTensor([0]).to(device)
for i in range(self.adjacency_matrix.shape[0]):
if not confounding:
self.blocks.append(CGNN_block([int(self.adjacency_matrix[:, i].sum()) + 1, nh, 1]))
else:
self.blocks.append(CGNN_block([int(self.i_adj_matrix[:, i].sum()) +
int(self.adjacency_matrix[:, i].sum()) + 1, nh, 1]))
def forward(self):
"""Generate according to the topological order of the graph."""
self.noise.data.normal_()
if not self.confounding:
for i in self.topological_order:
self.generated[i] = self.blocks[i](th.cat([v for c in [
[self.generated[j] for j in np.nonzero(self.adjacency_matrix[:, i])[0]],
[self.noise[:, [i]]]] for v in c], 1))
else:
for i in self.topological_order:
self.generated[i] = self.blocks[i](th.cat([v for c in [
[self.generated[j] for j in np.nonzero(self.adjacency_matrix[:, i])[0]],
[self.corr_noise[min(i, j), max(i, j)] for j in np.nonzero(self.i_adj_matrix[:, i])[0]],
[self.noise[:, [i]]]] for v in c], 1))
# NOTE: the noise term and closing brackets above are a hedged
# reconstruction mirroring the non-confounding branch; the original
# excerpt was truncated at this point.
import pvl
from pysis import isis
from warnings import warn
from pysis.exceptions import ProcessError
from numbers import Number
import numpy as np
import tempfile
def point_info(cube_path, x, y, point_type, allow_outside=False):
"""
Use Isis's campt to get image/ground point info from an image
Parameters
----------
cube_path : str
path to the input cube
x : float
point in the x direction. Either a sample or a longitude value
depending on the point_type flag
y : float
point in the y direction. Either a line or a latitude value
depending on the point_type flag
point_type : str
Options: {"image", "ground"}
Pass "image" if x,y are in image space (sample, line) or
"ground" if in ground space (longitude, lattiude)
Returns
-------
: PvlObject
Pvl object containing campt returns
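
Examples
--------
Illustrative only; requires a local ISIS installation and an
initialised cube, and the file name below is a placeholder::

    res = point_info('image.cub', 100.0, 200.0, 'image')
    many = point_info('image.cub', [10.0, 20.0], [30.0, 40.0], 'image')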
"""
point_type = point_type.lower()
if point_type not in {"image", "ground"}:
raise Exception(f'{point_type} is not a valid point type, valid types are ["image", "ground"]')
if isinstance(x, Number) and isinstance(y, Number):
x, y = [x], [y]
if point_type == "image":
# convert to ISIS pixels
x = np.add(x, .5)
y = np.add(y, .5)
if pvl.load(cube_path).get("IsisCube").get("Mapping"):
pvlres = []
# We have a projected image
for x,y in zip(x,y):
try:
if point_type.lower() == "ground":
pvlres.append(isis.mappt(from_=cube_path, longitude=x, latitude=y, allowoutside=allow_outside, coordsys="UNIVERSAL", type_=point_type))
elif point_type.lower() == "image":
pvlres.append(isis.mappt(from_=cube_path, sample=x, line=y, allowoutside=allow_outside, type_=point_type))
except ProcessError as e:
print(f"CAMPT call failed, image: {cube_path}\n{e.stderr}")
return
dictres = [dict(pvl.loads(res)["Results"]) for res in pvlres]
if len(dictres) == 1:
dictres = dictres[0]
else:
with tempfile.NamedTemporaryFile("w+") as f:
# ISIS's campt wants points in a file, so write to a temp file
if point_type == "ground":
# campt uses lat, lon for ground but sample, line for image.
# So swap x,y for ground-to-image calls
x,y = y,x
f.write("\n".join(["{}, {}".format(xval,yval) for xval,yval in zip(x, y)]))
f.flush()
try:
pvlres = isis.campt(from_=cube_path, coordlist=f.name, allowoutside=allow_outside, usecoordlist=True, coordtype=point_type)
except ProcessError as e:
warn(f"CAMPT call failed, image: {cube_path}\n{e.stderr}")
return
pvlres = pvl.loads(pvlres)
dictres = []
if len(x) > 1 and len(y) > 1:
for r in pvlres:
if r['GroundPoint']['Error'] is not None:
raise ProcessError(returncode=1, cmd=['pysis.campt()'], stdout=r, stderr=r['GroundPoint']['Error'])
return
else:
# convert all pixels to PLIO pixels from ISIS
r[1]["Sample"] -= .5
r[1]["Line"] -= .5
dictres.append(dict(r[1]))
else:
if pvlres['GroundPoint']['Error'] is not None:
raise ProcessError(returncode=1, cmd=['pysis.campt()'], stdout=pvlres, stderr=pvlres['GroundPoint']['Error'])
return
else:
pvlres["GroundPoint"]["Sample"] -= .5
pvlres["GroundPoint"]["Line"] -= .5
dictres = dict(pvlres["GroundPoint"])
return dictres
def image_to_ground(cube_path, sample, line, lattype="PlanetocentricLatitude", lonttype="PositiveEast360Longitude"):
"""
Use Isis's campt to convert a line sample point on an image to lat lon
Returns
-------
lats : np.array, float
1-D array of latitudes or single floating point latitude
lons : np.array, float
1-D array of longitudes or single floating point longitude
"""
try:
res = point_info(cube_path, sample, line, "image")
except ProcessError as e:
raise ProcessError(returncode=e.returncode, cmd=e.cmd, stdout=e.stdout, stderr=e.stderr)
try:
if isinstance(res, list):
lats, lons = np.asarray([[r[lattype].value, r[lonttype].value] for r in res]).T
else:
lats, lons = res[lattype].value, res[lonttype].value
except Exception as e:
if isinstance(res, list):
lats, lons = np.asarray([[r[lattype], r[lonttype]] for r in res]).T
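# Hedged completion (the original continues past this excerpt): mirror
# the non-list branch above, but without the .value accessor, and return
# the coordinates as the docstring describes.
else:
    lats, lons = res[lattype], res[lonttype]
return lats, lons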
import numpy as np
import nanonet.tb as tb
def test_simple_atomic_chain():
""" """
site_energy = -1.0
coupling = -1.0
l_const = 1.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=-1, )
xyz_file = """1
H cell
A 0.0000000000 0.0000000000 0.0000000000
"""
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -1.0})
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / l_const, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
desired_value = site_energy + 2 * coupling * np.cos(l_const * kk)
np.testing.assert_allclose(band_structure, desired_value[:, np.newaxis], atol=1e-9)
def test_atomic_chain_two_kinds_of_atoms():
""" """
site_energy1 = -1.0
site_energy2 = -2.0
coupling = -1.0
l_const = 2.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=site_energy1, )
b = tb.Orbitals('B')
b.add_orbital(title='s', energy=site_energy2, )
xyz_file = """2
H cell
A 0.0000000000 0.0000000000 0.0000000000
B 0.0000000000 0.0000000000 1.0000000000
"""
tb.set_tb_params(PARAMS_A_B={'ss_sigma': coupling})
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / 2, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
desired_value = np.zeros(band_structure.shape)
b = site_energy1 + site_energy2
c = site_energy1 * site_energy2 - (2.0 * coupling * np.cos(0.5 * kk * l_const)) ** 2
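# Hedged completion (the original test continues past this excerpt; the
# squared-cosine closing of c above and the lines below follow the
# standard two-band result). The bands are the roots of
# E**2 - b*E + c = 0, i.e. E = 0.5 * (b -/+ sqrt(b**2 - 4*c)).
desired_value[:, 0] = 0.5 * (b - np.sqrt(b ** 2 - 4.0 * c))
desired_value[:, 1] = 0.5 * (b + np.sqrt(b ** 2 - 4.0 * c))
np.testing.assert_allclose(band_structure, desired_value, atol=1e-9)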