import numpy as np
#import pydart2 as pydart
from gym import utils
from gym.envs.dart import dart_env_2bot
from os import path
from followTraj import followTraj
#environment where robot/robot arm helps RL policy-driven human get up using force proposal from value function approximation provided by human policy baseline
class DartStandUp3dAssistEnv(dart_env_2bot.DartEnv2Bot, utils.EzPickle):
def __init__(self):
"""
This class will manage external/interacting desired force!! (not DartEnv2Bot)
put all relevant functionality in here, to keep it easily externally accessible
"""
#set numpy printout precision options
np.set_printoptions(precision=5)
np.set_printoptions(suppress=True)
np.set_printoptions(linewidth=220)
########################
## loading world/skels
#modelLocs = ['getUpWithHelperBot3D_damp.skel'] #for two biped skels
kr5Loc = path.join('KR5','KR5 sixx R650.urdf') #for kr5 arm as assistant
#modelLocs = [kr5Loc,'getUpWithHelperBot3D_arm.skel'] #regular biped as ana
#kima biped below with new kima skel - experienced ODE issues which killed rllab; might be fixable by updating dart to 6.4.0+ - wenhao's modified lcp.cpp seems to address the issue.
#modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima.skel']
#kima biped below with kima skel from 2/18 - different joint limits, mapping of dofs and euler angle layout of root - MUST KEEP OLD in file name - used for both new experiments and ak's policy with constraint
modelLocs = [kr5Loc,'kima/getUpWithHelperBot3D_armKima_old.skel']
#set to use old policy (trained with constraint) configurations, or not - eventually get rid of this when reasonable policy is built
self.useAK_Kima = False
#set false if wanting to consume policy, true to train - this disables viewer
#if True then using policy will yield error : AttributeError: 'NoneType' object has no attribute 'runSingleStep'
trainPolicy = False
print('\n!!!!!!!!!!!!!!!!! Viewer is disabled : {}\n'.format(trainPolicy))
self.setTrainGAE(modelLocs, trainPolicy, True)
#dart_env_2bot.DartEnv2Bot.__init__(self, modelLocs, 8, dt=.002, disableViewer=trainPolicy)
####################################
# external/interactive force initialization
#init and clear all policy, experiment, and vfrd-related quantities so that they exist but are false/clear
self.clearCurrPolExpDict()
#initialize all force and trajectory values
if (not self.noAssist):
self.initAssistFrcTrajVals()
#connect human to constraint
self.connectHuman = False
#connect bot to constraint
self.connectBot = False
#display debug information regarding force application - turn off if training
self.dbgAssistFrcData = True
#display ANA reward dbg data - slow, make false if training
self.dbgANAReward = True
#calc and display post-step ANA eef force - make false if training
self.dbgANAEefFrc = False
#display full ANA force results if dbgANAEefFrc is true
self.dbgANAFrcDet = False
############################
# set human/robot connection and motion trajectory object and params - if train==True then overrides _dynamicBot to be false (the optimization to solve for location is expensive)
#_solvingBot : whether or not helper bot's motion is solved
#_dynamicBot : true solves dynamics, false solves IK
#removed : #_helpingBot : whether or not helper bot is actually coupled to human(i.e. helping by directly applying force)
self.setTrainAndInitBotState(trainPolicy, _solvingBot=False, _dynamicBot=False, trajTyp='linear')
utils.EzPickle.__init__(self)
#setup to either train getup ala GAE paper or getup normally
def setTrainGAE(self, modelLocs, trainPolicy, trainGAE):
if trainGAE :
#!! move bot and box out of skel's way - back 2 m in skel file (in z)
#set pose to be prone
self.setProne = True
#if no assistance then set this to true - only use for GAE getup test - get rid of this once test complete
self.noAssist = True
#changed frameskip to 1 from 8 to match GAE
dart_env_2bot.DartEnv2Bot.__init__(self, modelLocs, 1, dt=.01, disableViewer=trainPolicy)
else :
#set pose to not be prone
self.setProne = False
#if no assistance then set this to true - only use for GAE getup test - get rid of this once test complete
self.noAssist = False
dart_env_2bot.DartEnv2Bot.__init__(self, modelLocs, 8, dt=.002, disableViewer=trainPolicy)
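#The two branches differ in control rate as well as pose/assist flags: the GAE-style setup
#steps the sim once per action at dt=.01 (100 Hz control), while the normal setup uses
#frameskip 8 at dt=.002, i.e. a 500 Hz sim stepped 8 times per ~62.5 Hz control action.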
#initialize all assist force and trajectory values
def initAssistFrcTrajVals(self):
#currently using 3 dof force and 3 dof frc loc : assisting component size for observation
self.extAssistSize = 6
self.updateFrcType()
#whether or not the trajectory should be followed
self.followTraj = True
#True : ANA height to determine location along trajectory; False : kinematically move traj forward based on frame in rollout
self.useANAHeightForTrajLoc = True
#whether to train on force multiplier (if false train on actual force, which has been status quo so far)
self.useMultNotForce = False
#whether to always use specific force set here in ctor or to randomly regenerate force
self.usePresetFrc = False
#list of x,y,z initial assist force multipliers
self.frcMult = np.array([0.05, 0.5, 0.0])
print('INIT ASSIST FORCE MULTIPLIER BEING SET TO : {}'.format(self.frcMult))
#set this to nonzero value modify random force gen result for training to test learning - ignored if self.usePresetFrc is true
self.cheatGetUpMult = np.array([0,0,0])
if((self.usePresetFrc) or ( np.any(self.cheatGetUpMult > 0))):
print('!!!!!!!!!!!!!!!!!!!!!!!! Warning : DartStandUp3dAssistEnv:ctor using hard coded force multiplier {} or augmenting the force multiplier by +{}'.format(self.frcMult,self.cheatGetUpMult))
#bounds of force mult to be used in random force generation
self.frcMultBnds = np.array([[0.0,0.3,-0.001],[0.2, 0.8, 0.001]])
self.frcBnds = self.getForceFromMult(self.frcMultBnds)
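#Rough sketch of how a multiplier becomes a force for the bounds above, assuming
#getForceFromMult (defined elsewhere) scales the multiplier by ANA's weight; the mass value
#here is purely illustrative, not taken from this file:
#   frc = frcMult * mass * 9.81    # e.g. [0.05, 0.5, 0.0] * 50 kg * 9.81 -> ~[24.5, 245.3, 0.0] N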
#return assistive component of ANA's observation - put here so can be easily modified
def getSkelAssistObs(self, skelHldr):
if self.noAssist :
return np.array([])
#frc component
frcObs = skelHldr.getObsForce()
#frc application target on traj element
tarLoc = skelHldr.cnstrntBody.to_world(x=skelHldr.cnstrntOnBallLoc)
return np.concatenate([frcObs, tarLoc])
#set essential state flags if training human policy
#If training human (or purely consuming trained policy) :
# robot should be set to not mobile, not connected to constraint ball, and not simulated/solved (although IK to ball might be good for collision info with human)
# human needs to get assist force applied -every timestep-
#_dynamicBot is simulated - if training then forced to false
#solvingBot uses IK to traj loc if not simulated, solves ID if simulated; if false while the bot is dynamic, the arm is ragdoll
#helpingBot is connected to human
def setTrainAndInitBotState(self, _train, _solvingBot=False, _dynamicBot=False, trajTyp='linear'):
#human is always mobile
self.skelHldrs[self.humanIdx].setSkelMobile(True)
#whether we are training or not
self.trainHuman = _train
#_dynamicBot bot is fwd simulated - set mobile if true, if immobile (false) either does nothing or solves IK
self.skelHldrs[self.botIdx].setSkelMobile(_dynamicBot)
#bot will solve either IK of eef if not active or ID of force gen if active
self.solvingBot = _solvingBot
#set to false to enable robot to help, otherwise, set to true if applying specific force to robot -
#external force must be applied every time step
#must be set before apply_tau unless robot is actively helping
self.skelHldrs[self.humanIdx].setAssistFrcEveryTauApply = True
if (_train):
# #set to false to enable robot to help, otherwise, set to true if training and applying specific force to robot
# #must be set before apply_tau unless robot is actively helping
# self.skelHldrs[self.humanIdx].setAssistFrcEveryTauApply = True
#set mobility - turn off mobility of bot during training
self.skelHldrs[self.botIdx].setSkelMobile(False)
#display ANA reward dbg data - slow, make false if training
self.dbgANAReward = False
#calc and display post-step ANA eef force - make false if training
self.dbgANAEefFrc = False
#display full ANA force results if dbgANAEefFrc is true
self.dbgANAFrcDet = False
#display debug information regarding force application - turn off if training
self.dbgAssistFrcData = False
#turn off all debug displays during training
for _,hndlr in self.skelHldrs.items():
hndlr.debug=False
#else :
# #if not training :
# if (_helpingBot):
# #do not apply force to human if robot is actively helping - force should be sent to robot via simulation
# self.skelHldrs[self.humanIdx].setAssistFrcEveryTauApply = False
# pass
# else :
# #force to apply assist every step - demonstrating force without policy
# #not training, bot is solving but not helping - if helping then needs to be constrained to human to exert force
# if(not _train):# and _solvingBot):
# self.skelHldrs[self.humanIdx].setAssistFrcEveryTauApply = True
#build trajectory object being used to evolve motion of constraint
self.trackTraj = self.trackTrajFactory(trajTyp)
#self.trackTraj.debug = True
self.constraintsBuilt = False
#set human and helper bot starting poses - these are just rough estimates of initial poses - poses will be changed every reset
self.skelHldrs[self.humanIdx].setStartingPose()
#set states and constraints - true means set robot init pose, so that it can IK to eef pos
if (not self.noAssist):
self._resetEefLocsAndCnstrnts(True)
#call this for bot prestep
def botPreStep(self, frc, frcMult, recip=True):
#set sphere forces to counteract gravity, to hold still in space if bot is applying appropriate force
#self.grabLink.bodynodes[0].set_ext_force(self.sphereForce)
#set the desired force the robot wants to generate
self._setTargetForce(self.skelHldrs[self.botIdx],frc,frcMult, reciprocal=recip)
#calc robot optimization tau/IK Pos per frame
self.skelHldrs[self.botIdx].preStep(np.array([0]))
from __future__ import division, absolute_import, print_function
from builtins import range
import numpy as np
import os
import sys
import esutil
import time
import matplotlib.pyplot as plt
import scipy.optimize
from astropy.time import Time
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmQeSysSlope(object):
"""
Class which computes the slope of the system QE degradation.
Parameters
----------
fgcmConfig: FgcmConfig
Config object
fgcmPars: FgcmParameters
Parameter object
fgcmStars: FgcmStars
Stars object
"""
def __init__(self, fgcmConfig, fgcmPars, fgcmStars):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.debug('Initializing FgcmQeSysSlope')
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.plotPath = fgcmConfig.plotPath
self.fgcmPars = fgcmPars
self.fgcmStars = fgcmStars
self.bandFitIndex = fgcmConfig.bandFitIndex
self.instrumentParsPerBand = fgcmConfig.instrumentParsPerBand
self.instrumentSlopeMinDeltaT = fgcmConfig.instrumentSlopeMinDeltaT
self.ccdGrayMaxStarErr = fgcmConfig.ccdGrayMaxStarErr
def computeQeSysSlope(self, name):
"""
Compute QE system slope
Parameters
----------
name: `str`
Name to put on filenames
"""
objID = snmm.getArray(self.fgcmStars.objIDHandle)
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
obsMagADUModelErr = snmm.getArray(self.fgcmStars.obsMagADUModelErrHandle)
# Select good stars and good observations of said stars
goodStars = self.fgcmStars.getGoodStarIndices(checkMinObs=True)
_, goodObs = self.fgcmStars.getGoodObsIndices(goodStars, expFlag=self.fgcmPars.expFlag, checkBadMag=True)
# Further filter good observations
ok, = np.where((obsMagADUModelErr[goodObs] < self.ccdGrayMaxStarErr) &
(obsMagADUModelErr[goodObs] > 0.0) &
(obsMagStd[goodObs] < 90.0))
goodObs = goodObs[ok]
# Make copies so we don't overwrite anything
obsMagStdGO = obsMagStd[goodObs]
obsMagErr2GO = obsMagADUModelErr[goodObs]**2.
obsExpIndexGO = obsExpIndex[goodObs]
# Remove the previously applied slope
deltaQESlopeGO = (self.fgcmPars.compQESysSlopeApplied[obsBandIndex[goodObs], self.fgcmPars.expWashIndex[obsExpIndexGO]] *
(self.fgcmPars.expMJD[obsExpIndexGO] -
self.fgcmPars.washMJDs[self.fgcmPars.expWashIndex[obsExpIndexGO]]))
obsMagStdGO -= deltaQESlopeGO
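# i.e. per good observation: m_std <- m_std - s_applied[band, wash] * (MJD_obs - MJD_wash),
# where s_applied is the previously applied QE-system slope (mag/day) and MJD_wash is the
# start of that observation's wash interval.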
# split per wash interval
washH, washRev = esutil.stat.histogram(self.fgcmPars.expWashIndex[obsExpIndexGO], rev=True, min=0)
washIndices, = np.where(washH > 0)
for washIndex in washIndices:
i1a = washRev[washRev[washIndex]: washRev[washIndex + 1]]
# Split per band, and compute the delta-T and delta-Mag
bandH, bandRev = esutil.stat.histogram(obsBandIndex[goodObs[i1a]], rev=True, min=0)
bandIndices, = np.where(bandH > 0)
deltaTAll = None
for bandIndex in bandIndices:
if not self.fgcmPars.hasExposuresInBand[bandIndex]:
continue
i2a = bandRev[bandRev[bandIndex]: bandRev[bandIndex + 1]]
# Now lump the stars together
thisObjID = objID[obsObjIDIndex[goodObs[i1a[i2a]]]]
thisMjd = self.fgcmPars.expMJD[obsExpIndexGO[i1a[i2a]]]
thisMag = obsMagStdGO[i1a[i2a]]
thisMagErr2 = obsMagErr2GO[i1a[i2a]]
minID = thisObjID.min()
maxID = thisObjID.max()
# we need to sort and take unique to get the index of the first mjd
st = np.argsort(thisMjd)
minMjd = np.zeros(maxID - minID + 1)
starIndices, firstIndex = np.unique(thisObjID[st] - minID, return_index=True)
minMjd[starIndices] = thisMjd[st[firstIndex]]
firstMag = np.zeros_like(minMjd, dtype=np.float32)
firstMag[starIndices] = thisMag[st[firstIndex]]
firstMagErr2 = np.zeros_like(firstMag)
firstMagErr2[starIndices] = thisMagErr2[st[firstIndex]]
deltaT = thisMjd - minMjd[thisObjID - minID]
deltaMag = thisMag - firstMag[thisObjID - minID]
deltaMagErr2 = thisMagErr2 + firstMagErr2[thisObjID - minID]
okDelta, = np.where((deltaT > self.instrumentSlopeMinDeltaT) &
(deltaMagErr2 < self.ccdGrayMaxStarErr))
deltaT = deltaT[okDelta]
deltaMag = deltaMag[okDelta]
deltaMagErr2 = deltaMagErr2[okDelta]
# Check if we are doing one band at a time or lumping together.
if not self.instrumentParsPerBand:
# Lump all together. Not the most efficient, may need to update.
if deltaTAll is None:
deltaTAll = deltaT
deltaMagAll = deltaMag
deltaMagErr2All = deltaMagErr2
elif bandIndex in self.bandFitIndex:
# only add if this is one of the fit bands
deltaTAll = np.append(deltaTAll, deltaT)
deltaMagAll = np.append(deltaMagAll, deltaMag)
deltaMagErr2All = np.append(deltaMagErr2All, deltaMagErr2)
else:
# Do per band
if deltaT.size < 500:
# Just do no slope
extraString = ' (Not enough observations)'
slopeMean = 0.0
slopeMeanErr = 0.0
else:
extraString = ''
slope = deltaMag / deltaT
slopeErr2 = deltaMagErr2 / np.abs(deltaT)**2.
slopeMean = np.clip(-1 * np.sum(slope / slopeErr2) / np.sum(1. / slopeErr2), -0.001, 0.0)
slopeMeanErr = np.sqrt(1. / np.sum(1. / slopeErr2))
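# The per-observation slopes s_i = deltaMag_i / deltaT_i are combined as an inverse-variance
# weighted mean with sigma_i^2 = deltaMagErr2_i / deltaT_i^2:
#   slopeMean = clip(-sum(s_i / sigma_i^2) / sum(1 / sigma_i^2), -0.001, 0.0)
# The sign flip and clip enforce a non-positive slope (QE only degrades), capped at 1 mmag/day.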
self.fgcmLog.info("Wash interval %d, computed qe slope in %s band: %.6f +/- %.6f mmag/day%s" %
(washIndex, self.fgcmPars.bands[bandIndex], slopeMean*1000.0, slopeMeanErr*1000.0, extraString))
self.fgcmPars.compQESysSlope[bandIndex, washIndex] = slopeMean
if not self.instrumentParsPerBand:
# Compute all together
if deltaTAll.size < 500:
extraString = ' (Not enough observations)'
slopeMeanAll = 0.0
slopeMeanErrAll = 0.0
else:
extraString = ''
slopeAll = deltaMagAll / deltaTAll
slopeErr2All = deltaMagErr2All / np.abs(deltaTAll)**2.
slopeMeanAll = np.clip(-1 * np.sum(slopeAll / slopeErr2All) / np.sum(1. / slopeErr2All), -0.001, 0.0)
slopeMeanErrAll = np.sqrt(1. / np.sum(1. / slopeErr2All))
self.fgcmLog.info("Wash interval %d, computed qe slope in all bands: %.6f +/- %.6f mmag/day%s" %
(washIndex, slopeMeanAll*1000.0, slopeMeanErrAll*1000.0, extraString))
self.fgcmPars.compQESysSlope[:, washIndex] = slopeMeanAll
if self.plotPath is not None:
# Make the plots
firstMJD = np.floor(np.min(self.fgcmPars.expMJD))
fig = plt.figure(1, figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(111)
colors = ['g', 'b', 'r', 'c', 'm', 'y', 'k']
started = False
for i in range(self.fgcmPars.nWashIntervals):
use, = np.where(self.fgcmPars.expWashIndex == i)
if use.size == 0:
# There are none in this interval, that's fine
continue
washMJDRange = [np.min(self.fgcmPars.expMJD[use]), np.max(self.fgcmPars.expMJD[use])]
if self.instrumentParsPerBand:
# Need to plot all of them one-by-one
for j in range(self.fgcmPars.nBands):
if not self.fgcmPars.hasExposuresInBand[j]:
continue
label = self.fgcmPars.bands[j] if not started else None
ax.plot(washMJDRange - firstMJD,
1000.0*((washMJDRange - self.fgcmPars.washMJDs[i])*self.fgcmPars.compQESysSlope[j, i] +
self.fgcmPars.parQESysIntercept[j, i]), linestyle='--', color=colors[j % len(colors)], linewidth=2, label=label)
else:
ax.plot(washMJDRange - firstMJD,
1000.0*((washMJDRange - self.fgcmPars.washMJDs[i])*self.fgcmPars.compQESysSlope[0, i] +
self.fgcmPars.parQESysIntercept[0, i]), 'r--', linewidth=3)
started = True
if self.instrumentParsPerBand:
ax.legend(loc=3)
ax.set_xlabel(r'$\mathrm{MJD}\ -\ %.0f$' % (firstMJD), fontsize=16)
ax.set_ylabel(r'$2.5 \log_{10} (S^{\mathrm{optics}})\,(\mathrm{mmag})$', fontsize=16)
ax.tick_params(axis='both', which='major', labelsize=14)
# Make the vertical wash markers
ylim = ax.get_ylim()
for i in range(self.fgcmPars.nWashIntervals):
ax.plot([self.fgcmPars.washMJDs[i] - firstMJD, self.fgcmPars.washMJDs[i]-firstMJD],
ylim, 'k--')
fig.savefig('%s/%s_qesys_washes_%s.png' % (self.plotPath,
self.outfileBaseWithCycle,
name))
plt.close(fig)
def plotQeSysRefStars(self, name):
"""
Plot reference stars (if available). Compare residuals.
Parameters
----------
name: `str`
name to give the files
"""
if not self.fgcmStars.hasRefstars:
self.fgcmLog.info("No reference stars for QE sys plots.")
return
if self.plotPath is None:
return
plt.set_cmap('viridis')
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
objRefIDIndex = snmm.getArray(self.fgcmStars.objRefIDIndexHandle)
refMag = snmm.getArray(self.fgcmStars.refMagHandle)
goodStars = self.fgcmStars.getGoodStarIndices(checkMinObs=True)
_, goodObs = self.fgcmStars.getGoodObsIndices(goodStars, expFlag=self.fgcmPars.expFlag, checkBadMag=True)
# Take out the previous slope...
obsMagStdGO = obsMagStd[goodObs]
obsExpIndexGO = obsExpIndex[goodObs]
deltaQESysGO = (self.fgcmPars.parQESysIntercept[obsBandIndex[goodObs], self.fgcmPars.expWashIndex[obsExpIndexGO]] +
self.fgcmPars.compQESysSlopeApplied[obsBandIndex[goodObs], self.fgcmPars.expWashIndex[obsExpIndexGO]] *
(self.fgcmPars.expMJD[obsExpIndexGO] -
self.fgcmPars.washMJDs[self.fgcmPars.expWashIndex[obsExpIndexGO]]))
obsMagObsGO = obsMagStdGO - deltaQESysGO
goodRefObsGO, = np.where(objRefIDIndex[obsObjIDIndex[goodObs]] >= 0)
import torch
from torch_geometric.utils import remove_self_loops
from torch_sparse import coalesce
import pandas as pd
from random import sample, seed
import numpy as np
from torch_geometric.data import Data
import networkx as nx
def read_network(features_path,edge_path,directed,reverse, convert_to_BoW = False):
if not directed:
reverse = False
feats = []
target = []
rename = {}
class_rename = {}
cnt = 0
class_cnt = 0
print('Read features: RUNNING')
print(f'file {features_path}')
with open(features_path, 'r') as f:
for line in f:
info = line.split()
if len(info) == 1:
info = line.split(',')
rename[info[0]] = cnt
feats.append(np.array([float(x) for x in info[1:-1]]))
if info[-1] not in class_rename:
class_rename[info[-1]] = class_cnt
class_cnt+=1
target.append(class_rename[info[-1]])
cnt += 1
# TF-IDF to binary BoW
if convert_to_BoW:
feats = (np.array(feats) > 0)
else:
feats = np.array(feats)
y = torch.tensor(target,dtype=torch.long)
n = len(target)
x = torch.tensor(np.array(feats), dtype=torch.float)
print('Read features: DONE')
# 3. Split similar to Planetoid
num_classes = len(set(target))
df = pd.DataFrame(target)
df.columns = ['target']
print('Read edges: RUNNING')
print(f'file {edge_path}')
# 4. Read edges
with open(edge_path) as f:
G1 = nx.DiGraph([[rename[line.split()[0]],rename[line.split()[1]]]
for line in f
if line.split()[0] in rename and line.split()[1] in rename])
with open(edge_path) as f:
G2 = nx.DiGraph([[rename[line.split()[1]],rename[line.split()[0]]]
for line in f
if line.split()[0] in rename and line.split()[1] in rename])
print(len(G1.edges()))
print(len(G2.edges()))
G1.remove_edges_from(nx.selfloop_edges(G1))
G2.remove_edges_from(nx.selfloop_edges(G2))
row = []
col = []
if not reverse:
row = row + [e[0] for e in G1.edges()]
col = col + [e[1] for e in G1.edges()]
if reverse or not directed:
row = row + [e[0] for e in G2.edges()]
col = col + [e[1] for e in G2.edges()]
print('Read edges: DONE')
print(f' {len(row)} edges')
edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)
is_rev = []
if not reverse:
is_rev = is_rev + [0] * len(G1.edges())
if reverse or not directed:
is_rev = is_rev + [1] * len(G2.edges())
assert (len(is_rev) == edge_index.shape[1])
data = Data(x=x, edge_index=edge_index, y=y)
data.is_reversed = torch.tensor(is_rev,dtype=torch.uint8)
data.node_name_mapping = rename
return data
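# Minimal usage sketch (hypothetical file names). Expected formats: one node per line in the
# features file ("<id> <feat_1> ... <feat_k> <label>", whitespace- or comma-separated) and one
# "<src> <dst>" pair per line in the edge file:
#   data = read_network('cora.content', 'cora.cites', directed=True, reverse=False)
#   print(data.num_nodes, data.num_edges, int(data.y.max()) + 1)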
def load_embedding(embFile,featFile = None):
xDict = {}
yDict = {}
rename_class = {}
cnt_class = 0
with open(featFile, 'r') as f:
for line in f:
s = line.split()
id = s[0]
emb = [float(x) for x in s[1:-1]]
xDict[id] = emb
if s[-1] not in rename_class:
rename_class[s[-1]] = cnt_class
cnt_class += 1
yDict[id] = rename_class[s[-1]]
rename_id={}
cnt = 0
for k in xDict.keys():
rename_id[cnt] = k
cnt += 1
# Read embedding
if embFile is not None:
err = 0
with open(embFile, 'r') as f:
skip = True
for line in f:
s = line.split()
if skip and len(s)==2:
skip = False
continue
skip = False
id = s[0]
emb = [float(x) for x in s[1:]]
if id in xDict:
xDict[id] = xDict[id] + emb
else:
err +=1
if err>0:
print(f'WARNING: {err} items have no embedding generated')
x = []
target = []
for i in range(cnt):
x.append(np.array(xDict[rename_id[i]]))
target.append(yDict[rename_id[i]])
x = np.array(x)
"""
A pytest module to test Galois field polynomial alternate constructors.
"""
import numpy as np
import pytest
import galois
FIELDS = [
galois.GF2, # GF(2)
galois.GF(31), # GF(p) with np.int dtypes
galois.GF(36893488147419103183), # GF(p) with object dtype
galois.GF(2**8), # GF(2^m) with np.int dtypes
galois.GF(2**100), # GF(2^m) with object dtype
galois.GF(7**3), # GF(p^m) with np.int dtypes
galois.GF(109987**4), # GF(p^m) with object dtypes
]
@pytest.mark.parametrize("field", FIELDS)
def test_zero(field):
p = galois.Poly.Zero(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 0
assert np.array_equal(p.nonzero_degrees, [])
assert np.array_equal(p.nonzero_coeffs, [])
assert np.array_equal(p.degrees, [0])
assert np.array_equal(p.coeffs, [0])
assert p.integer == 0
@pytest.mark.parametrize("field", FIELDS)
def test_one(field):
p = galois.Poly.One(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 0
assert np.array_equal(p.nonzero_degrees, [0])
assert np.array_equal(p.nonzero_coeffs, [1])
assert np.array_equal(p.degrees, [0])
assert np.array_equal(p.coeffs, [1])
assert p.integer == 1
@pytest.mark.parametrize("field", FIELDS)
def test_identity(field):
p = galois.Poly.Identity(field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 1
assert np.array_equal(p.nonzero_degrees, [1])
assert np.array_equal(p.nonzero_coeffs, [1])
assert np.array_equal(p.degrees, [1,0])
assert np.array_equal(p.coeffs, [1,0])
assert p.integer == field.order
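# Poly.integer encodes the coefficients as digits of a base-`field.order` integer, so the
# identity polynomial p(x) = x with coeffs [1, 0] maps to 1*order + 0 = field.order.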
@pytest.mark.parametrize("field", FIELDS)
def test_random(field):
p = galois.Poly.Random(2, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 2
@pytest.mark.parametrize("field", FIELDS)
def test_integer(field):
integer = field.order + 1 # Corresponds to p(x) = x + 1
p = galois.Poly.Integer(integer, field=field)
assert isinstance(p, galois.Poly)
assert p.field is field
assert p.degree == 1
assert np.array_equal(p.nonzero_degrees, [1,0])
# -*- coding: utf-8 -*-
'''
Created on 19 Nov 2014
@author: alasdaire
'''
import numpy as np
import pandas as pd
import quantipy as qp
from quantipy.core.cluster import Cluster
from quantipy.core.chain import Chain
from quantipy.core.helpers import functions as helpers
from quantipy.core.tools.dp.io import unicoder
from quantipy.core.builds.excel.formats.xlsx_formats import XlsxFormats
import quantipy.core.cluster
from xlsxwriter import Workbook
from xlsxwriter.utility import xl_rowcol_to_cell
import os
from string import ascii_uppercase
from collections import OrderedDict, Counter
from warnings import warn
from PIL import Image
import requests
from io import BytesIO
import pickle
import itertools
import re
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
TEST_SUFFIX = list(ascii_uppercase)
TEST_PREFIX = ['']+list(ascii_uppercase)
CD_TRANSMAP = {
'en-GB': {
'cc': 'Cell Contents',
'N': 'Counts',
'c%': 'Column Percentages',
'r%': 'Row Percentages',
'str': 'Statistical Test Results',
'cp': 'Column Proportions',
'cm': 'Means',
'stats': 'Statistics',
'mb': 'Minimum Base',
'sb': 'Small Base'},
'fr-FR': {
'cc': 'Contenu cellule',
'N': 'Total',
'c%': 'Pourcentage de colonne',
'r%': 'Pourcentage de ligne',
'str': 'Résultats test statistique',
'cp': 'Proportions de colonne',
'cm': 'Moyennes de colonne',
'stats': 'Statistiques',
'mb': 'Base minimum',
'sb': 'Petite base'}}
for lang in CD_TRANSMAP:
for key in CD_TRANSMAP[lang]:
CD_TRANSMAP[lang][key] = CD_TRANSMAP[lang][key]
TOT_REP = [("'@H'", '\u25BC'), ("'@L'", '\u25B2')]
ARROW_STYLE = {"'@H'": 'DOWN', "'@L'": 'UP'}
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def paint_box(worksheet, frames, format_dict, rows, cols, metas, formats_spec,
has_weighted_views=False, y_italicise=dict(), ceil=False, floor=False,
testcol_map=None, is_array=False, array_views=None, decimals=None, net_only=True):
'''
Writes a "box" of data
Parameters
----------
worksheet : xlsxwriter.Workbook.worksheet
frames : list
list of pd.DataFrame objects
format_dict : Format
The dict of all formats
rows : list
Number of rows in each pd.DataFrame
cols : list
Number of cols in each pd.DataFrame
Column range of box
metas : list
list of dict - view metas
ceil : bool
Whether ceiling view (this is overwritten for array tables)
floor : bool
Whether floor view
'''
sep = formats_spec.test_seperator
if len(metas) == 0:
rsize = rows[-1][1] - rows[0][0]
else:
rsize = rows[-1][1] - rows[0][0] + 1
csize = cols[-1][1] - cols[0][0] + 1
coords = [
[rows[0][0] + (i // csize), cols[0][0] + (i % csize)]
for i in range(rsize * csize)]
if len(metas) > 0:
is_block_0 = metas[0]['agg']['is_block']
if metas[0]['agg']['name'].startswith('NPS'): is_block_0 = False
if all(p not in metas[0]['agg']['fullname'] for p in ['}+', '+{', '*:']):
is_block_0 = False
coordsGenerator = (coord for coord in coords)
for i, coord in enumerate(coordsGenerator):
idxf = (i // csize) % len(frames)
if is_array:
ceil = (i // frames[idxf].shape[1])==0
# floor = (i // frames[idxf].shape[1])==frames[0].shape[0]-1
floor = (i // frames[idxf].shape[1])==(frames[0].shape[0]*len(frames))-1
box_coord = [coord[0] - coords[0][0], coord[1] - coords[0][1]]
# pick cell format
format_name = ''
if len(metas) == 0:
method = 'dataframe_columns'
else:
fullname, name, method, is_weighted, is_block, is_dummy = (
metas[idxf]['agg']['fullname'],
metas[idxf]['agg']['name'],
metas[idxf]['agg']['method'],
metas[idxf]['agg']['is_weighted'],
metas[idxf]['agg']['is_block'],
metas[idxf]['agg'].get('is_dummy', False))
_, _, relation, rel_to, _, shortname = fullname.split('|')
is_totalsum = metas[idxf]['agg']['name'] in ['counts_sum', 'c%_sum']
if name.startswith('NPS'): is_block = False
if all(p not in fullname for p in ['}+', '+{', '*:']): is_block = False
# cell position
if is_array:
if metas[0]['agg']['fullname'] in array_views[0:len(frames)]:
if i % csize == 0:
format_name = 'left-'
if metas[0]['agg']['fullname'] in array_views[-len(frames):]:
# if i % csize == (csize - 1) or (cols[idxf][0] == cols[idxf][1]):
if i % csize == (csize - 1):
format_name = 'right-'
else:
if i % csize == 0:
format_name = 'left-'
if i % csize == (csize - 1) or (cols[idxf][0] == cols[idxf][1]):
format_name = format_name + 'right-'
if format_name == '':
format_name = format_name + 'interior-'
if ceil:
if is_array:
format_name = format_name + 'top-'
else:
if i < (csize):
format_name = format_name + 'top-'
if floor:
if is_array:
format_name = format_name + 'bottom-'
else:
if i >= ((rsize * csize) - csize):
format_name = format_name + 'bottom-'
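# At this point format_name only holds position tokens ('left-'/'right-'/'interior-',
# 'top-'/'bottom-'); the branches below append a view-type suffix, so a typical final key
# looks like 'left-top-frow-bg-N-NET' (illustrative), which must exist in format_dict.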
# additional format spec
if method == 'dataframe_columns':
format_name = format_name + 'STR'
else:
# background color (frequency/ coltests) / top border Totalsum
if is_array:
# if (i // frames[idxf].shape[1]) % 2 == 0:
if (box_coord[0] // len(frames)) % 2 == 0:
format_name = format_name + 'bg-'
else:
cond_1 = method in ['frequency', 'coltests'] and relation in [':', 'x++:']
cond_2 = method in ['default']
cond_3 = is_block_0
if cond_1 or cond_2 or cond_3:
if not shortname.startswith('cbase'):
if box_coord[0] == 0:
format_name = format_name + 'frow-bg-'
elif (box_coord[0] // len(frames)) % 2 == 0:
format_name = format_name + 'bg-'
# first row (coltests - means)
if method == 'coltests' and relation != ':':
if box_coord[0] == 0:
format_name = format_name + 'frow-'
# choose view format type
# base
if shortname.startswith('cbase'):
if is_array:
format_name = format_name + 'N'
else:
if not ceil:
if is_weighted:
format_name = format_name + 'frow-BASE'
else:
if has_weighted_views:
format_name = format_name + 'frow-UBASE'
else:
format_name = format_name + 'frow-BASE'
else:
if is_weighted:
format_name = format_name + 'BASE'
else:
if has_weighted_views:
format_name = format_name + 'UBASE'
else:
format_name = format_name + 'BASE'
# frequency
elif method == 'frequency':
# counts
if rel_to == '':
if relation in [':', 'x++:'] or is_array or is_block:
format_name = format_name + 'N'
elif is_totalsum:
if is_array:
format_name = format_name + 'N'
elif is_dummy or idxf >= 1:
format_name = format_name + 'N'
else:
format_name = format_name + 'frow-N'
# complex logics
else:
if len(frames) == 1 or is_array:
format_name = format_name + 'N-NET'
else:
if idxf == 0:
format_name = format_name + 'frow-N-NET'
elif idxf == len(frames)-1:
format_name = format_name + 'brow-N-NET'
else:
format_name = format_name + 'mrow-N-NET'
# %
elif rel_to in ['x', 'y']:
if relation in [':', 'x++:'] or is_array or is_block:
format_name = format_name + 'PCT'
elif is_totalsum:
if is_array:
format_name = format_name + 'PCT'
elif is_dummy or idxf >= 1:
format_name = format_name + 'PCT'
else:
format_name = format_name + 'frow-PCT'
# complex logics
else:
if len(frames) == 1 or is_array:
format_name = format_name + 'PCT-NET'
else:
if idxf == 0:
format_name = format_name + 'frow-PCT-NET'
elif idxf == len(frames)-1:
format_name = format_name + 'brow-PCT-NET'
else:
format_name = format_name + 'mrow-PCT-NET'
# descriptives
elif method == 'descriptives':
if is_array:
format_name = format_name + 'DESCRIPTIVES-XT'
elif len(frames) == 1:
format_name = format_name + 'DESCRIPTIVES'
else:
if idxf == 0:
format_name = format_name + 'frow-DESCRIPTIVES'
elif idxf == len(frames)-1:
format_name = format_name + 'brow-DESCRIPTIVES'
else:
format_name = format_name + 'mrow-DESCRIPTIVES'
# coltests
elif method == 'coltests':
if relation == ':' or ('t.props' not in fullname.split('|')[1]):
format_name += 'TESTS'
else:
test_key = '{}N-NET'.format(format_name)
net_bg_color_user = format_dict[test_key].__dict__['bg_color']
net_bg_color_default = XlsxFormats().bg_color
is_bg_default = net_bg_color_user in ['#FFFFFF',
net_bg_color_default]
if rel_to == '':
format_name += 'N'
elif rel_to in ['x', 'y']:
format_name += 'PCT'
if not (is_bg_default or is_array):
format_name += '-NET'
# default
elif method == 'default':
format_name = format_name + 'DEFAULT'
# method not found...
else:
raise Exception(
"View method not recognised...\nView: {}\nMethod: {}".format(
metas[idxf]['agg']['fullname'],
method))
# net only?
if idxf==0:
if net_only and format_name.endswith('NET'):
format_name += '-ONLY'
rel_to_decimal = False
arrow = _none = object()
# Value to write into cell
# Dataframe
if method == 'dataframe_columns':
data = frames[idxf].head(
box_coord[0] // len(frames)+1
).values[-1]
# Check data for NaN and replace with '-'
if not isinstance(data, str):
if np.isnan(data) or np.isinf(data):
data = '-'
import tensorflow as tf
import numpy as np
from ... import ops
class DownsampleTest(tf.test.TestCase):
def test_downsample(self):
first = [
[1, 1, 2, 2],
[0, 0, 2, 2],
[3, 3, 4, 4],
[3, 3, 2, 2]]
second = [[0.5, 2], [3, 3]]
first = np.reshape(first, [1, 4, 4, 1])
second = np.reshape(second, [1, 2, 2, 1])
"""
This file contains a couple of S/N estimation codes
designed for use during SAMI observing runs.
UPDATED: 08.04.2013, <NAME>
- Edited to comply with new conventions in sami_utils.
- Edited to accept new target table format.
23.08.2012, <NAME>
- Changed name of "sn" function to "sn_re".
- Writing new S/N code based on the secondary star observation.
NOTES: 10.04.2013, <NAME>
- I no longer return SN_all, but sn_Re, the median SN @Re.
- Removed the SN_all array from the sn function.
26.08.2013, <NAME>
- Updated fields for the SAMI target table.
- Also changed all mentions of 'z' to 'zspec'.
- Major bug fixes in case where target not found on target table.
27.08.2013, <NAME>
- Writing surface brightness map function.
For reasons I (JTA) don't remember, this code was never quite finished
or put into action. The intention had been to use S/N measurements to aid
the observers in deciding when a field was finished, but this code is not
mentioned in the observers' instructions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pylab as py
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# use astropy for all astronomy related things.
import astropy.io.fits as pf
import astropy.io.ascii as tab
import sys
from matplotlib.patches import Circle
# Relative imports from sami package
from .. import utils
from .. import samifitting as fitting
def sn_map(rssin):
"""
Plot SNR of all 12 SAMI targets across fraction of Re.
Process:
- Deduce the noise level from the standard star:
+ obtain listed brightness,
+ use existing 2D Gauss function to get SBP,
+ (photometric aperture and aperture correction?),
+ normalise flux,
+ calculate integrated S/N for star,
+ establish noise level.
- Run the SDSS-SB fuction on all targets,
+ Convert brightness to S/N,
+ Plot all 12 targets:
- x-axis: fraction of Re (from target selection table),
- y-axis: S/N,
- horizontal lines @S/N=5, 10.
"""
print("HAY!")
def sn_list(inlist, tablein, l1, l2, ifus='all'):
"""
Wrapper function to provide S/N estimates for >1 file
inlist [ascii] list of files (format?)
tablein [ascii]
"""
#To print only two decimal places in all numpy arrays
np.set_printoptions(precision=2)
files=[]
for line in open(inlist):
cols=line.split(' ')
cols[0]=str.strip(cols[0])
files.append(str(cols[0]))
print("I have received", len(files), \
"files for which to calculate and combine S/N measurements.")
# Define the list of IFUs to display
if ifus == 'all':
IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]
else:
IFUlist = [ifus]
print("I will calculate S/N for", len(IFUlist), "IFUs.")
SN_all_sq=np.empty((len(IFUlist), len(files)))
for i in range(len(files)):
insami=files[i]
SN_all=sn_re(insami, tablein, l1, l2, plot=False, ifus=ifus, verbose=False)
SN_all_sq[:,i]=SN_all*SN_all
# Add the squared SN values and square root them
SN_tot=np.sqrt(np.sum(SN_all_sq, axis=1))
print(IFUlist)
print(SN_tot)
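# Minimal usage sketch (file names are hypothetical; l1/l2 are a rest-frame wavelength window
# in Angstroms over which the S/N is estimated):
#   sn_list('reduced_frames.list', 'sami_target_table.dat', 6400.0, 6600.0, ifus='all')
#   sn_re('12apr10042red.fits', 'sami_target_table.dat', 6400.0, 6600.0, plot=True, ifus=3)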
def sn_re(insami, tablein, l1, l2, plot=False, ifus='all',
log=True, verbose=True, output=False, seek_centroid=True):
"""
Purpose: Main function, estimates S/N for any or all probes in an RSS file.
Input variables:
insami [fits] Input RSS file.
tablein [ascii] Observations table.
l1, l2 [flt] Wavelength range for S/N estimation.
ifus [str] Probe number, or 'all' for all 13.
log [bool] Logarithimic scaling for plot -- CURRENTLY NOT ENVOKED.
verbose [bool] Toggles diagnostic verbosity.
Process:
1) Interpret input.
[Set up plot]
2) Read target table (new format for SAMI survey),
[Commence all-IFU loop, read data]
3) Identify wavelength range over which to estimate SNR,
4) Calculate SNR for all cores in the RSS file.
5) Locate galaxy centre as peak SNR core.
6) Identify cores intercepted by Re (listed).
7) Get SNR @Re as median of collapsed wavelength region.
[End all-IFU loop]
"""
# --------------------
# (1) Interpret input
# --------------------
if ifus == 'all':
IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]
else:
IFUlist = ifu_num = [int(ifus)]
n_IFU = len(IFUlist)
if verbose:
print('')
print('--------------------------------')
print('Running sami.observing.sn.sn_re.')
print('--------------------------------')
print('')
if n_IFU == 1: print('Processing', n_IFU, 'IFU. Plotting is', end=' ')
if n_IFU > 1: print('Processing', n_IFU, 'IFUs. Plotting is', end=' ')
if not plot: print('OFF.')
if plot: print('ON.')
print('')
# --------------------
# Set up plot process
# --------------------
# Define number of cores, core diameter (in arcsec).
# -- is this stored someplace in sami.utils/generic?
n_core = 61
r_core = 1.6
# Create the figure
if plot:
# Get Field RA, DEC
hdulist = pf.open(insami)
primary_header = hdulist['PRIMARY'].header
field_dec = primary_header['MEANDEC']
# To create the even grid to display the cubes on
# (accurate to 1/10th core diameter)
dx = 4.44e-5 /np.cos(np.pi *field_dec /180.)
dy = 4.44e-5
fig = py.figure()
# Number of rows and columns needed in the final display box
# This is a bit of a fudge...
if n_IFU==1:
im_n_row = 1
im_n_col = 1
elif n_IFU==2:
im_n_row = 1
im_n_col = 2
elif n_IFU==3:
im_n_row = 1
im_n_col = 3
elif n_IFU==4:
im_n_row = 2
im_n_col = 2
elif n_IFU>3 and n_IFU<=6:
im_n_row = 2
im_n_col = 3
elif n_IFU>6 and n_IFU<=9:
im_n_row = 3
im_n_col = 3
elif n_IFU>9 and n_IFU<=12:
im_n_row = 3
im_n_col = 4
elif n_IFU>12:
im_n_row = 4
im_n_col = 4
# ISK: trying to improve the rows and columns a bit:
# def isodd(num): return num & 1 and True or False
# if n <= 3:
# r = 1
# c = n
# elif n > 6:
# r = 3
# c = 3
# ----------------------
# (2) Read target table
# ----------------------
tabname = ['name', 'ra', 'dec', 'r_petro', 'r_auto', 'z_tonry', 'zspec',
'M_r', 'Re', '<mu_Re>', 'mu(Re)', 'mu(2Re)', 'ellip', 'PA', 'M*',
'g-i', 'A_g', 'CATID', 'SURV_SAMI', 'PRI_SAMI', 'BAD_CLASS']
target_table = tab.read(tablein, names=tabname, data_start=0)
CATID = target_table['CATID'].tolist()
# Start a little counter to keep track
# -- a fudge for the way the plot loop is set up...
counter = 0
# --------------------------
# Commence the all-IFU loop
# --------------------------
for ifu_num in IFUlist:
counter = counter + 1
# Read single IFU
myIFU = utils.IFU(insami, ifu_num, flag_name=False)
# And find the row index for this SAMI target.
try:
this_galaxy = CATID.index(int(myIFU.name))
no_such_galaxy = False
except:
this_galaxy = []
no_such_galaxy = True
pass
"""
There are other ways to do this with a numpy array as input.
Lists are far better at this, so have made a CATID list.
this_galaxy = np.where(target_table['CATID'] == int(myIFU.name))
this_galaxy = np.where(CATID == int(myIFU.name))
this_galaxy = [CATID == int(myIFU.name)]
"""
# ----------------------------
# (3) Define wavelength range
# ----------------------------
if no_such_galaxy:
z_target = 0.0
z_string = '0.0'
# see below for explanation of this.
idx1 = l1
idx2 = l2
print(('-- IFU #' + str(ifu_num)))
print(" This galaxy was not found in the Target Table. ")
else:
z_target = target_table['zspec'][this_galaxy]
z_string = str(z_target)
l_range = myIFU.lambda_range
l_rest = l_range/(1+z_target)
# identify array elements closest to l1, l2 **in rest frame**
idx1 = (np.abs(l_rest - l1)).argmin()
idx2 = (np.abs(l_rest - l2)).argmin()
if verbose:
print('-------------------------------------------------------')
print((' IFU #' + str(ifu_num)))
print('-------------------------------------------------------')
print((' Redshift: ' + z_string))
print((' Spectral range: ' +
str(np.around([l_rest[idx1], l_rest[idx2]]))))
print((' Observed at: ' +
str(np.around([l_range[idx1], l_range[idx2]]))))
print('')
# -------------------------
# (4) Get SNR of all cores
# -------------------------
sn_spec = myIFU.data/np.sqrt(myIFU.var)
# Median SN over lambda range (per Angstrom)
sn = np.nanmedian(sn_spec[:, idx1:idx2], axis=1) * (1./myIFU.cdelt1)
# ----------------------------------
# (5) Find galaxy centre (peak SNR)
# ----------------------------------
# Initialise a couple of arrays for this loop
core_distance = np.zeros(n_core)
good_core = np.zeros(n_core)
centroid_ra = 0.
centroid_dec = 0.
# Get target Re from table (i.e., match entry by name)
if no_such_galaxy:
print(" No Re listed, calculating SNR at centroid instead.")
re_target = 0.
else:
re_target = target_table['Re'][this_galaxy]
# Get either centroid, or table RA, DEC
if seek_centroid:
if no_such_galaxy:
centroid = np.where(myIFU.n ==1)
else:
centroid = np.where(sn == np.nanmax(sn))
centroid_ra = myIFU.xpos[centroid]
centroid_dec = myIFU.ypos[centroid]
if not seek_centroid:
if no_such_galaxy:
centroid = np.where(myIFU.n ==1)
else:
centroid_ra = target_table['ra'][this_galaxy]
centroid_dec = target_table['dec'][this_galaxy]
test_distance = 3600.* np.sqrt(
(myIFU.xpos - centroid_ra)**2 +
(myIFU.ypos - centroid_dec)**2 )
centroid = np.abs(test_distance - 0).argmin()
if verbose:
print(' S/N @Centroid =', np.round(sn[centroid]), '[/Angstrom]')
print('')
# ----------------------------------------
# (6) Identify cores at approximately Re
# ----------------------------------------
# Check that there is an Re listed, some times there isn't.
if no_such_galaxy:
sn_Re = 0.
else:
core_distance = 3600.* np.sqrt(
(myIFU.xpos - centroid_ra)**2 +
(myIFU.ypos - centroid_dec)**2 )
good_core[(core_distance > re_target - 0.5*r_core)
& (core_distance < re_target + 0.5*r_core)] = True
# Get median S/N of cores @Re:
if 1 in good_core:
sn_Re = np.nanmedian(sn[good_core == True])
sn_min = min(sn[good_core == True])
sn_max = max(sn[good_core == True])
if verbose:
if not 1 in good_core:
sn_str = str(np.round(np.nanmedian(sn)))
print("** Could not match Re")
print(('=> Median overall S/N = '+sn_str))
print('')
else:
print('=> [Min, Max, Median] S/N @Re = [', end=' ')
print('%0.2f' % min(sn[good_core == True]), ',', end=' ')
print('%0.2f' % max(sn[good_core == True]), ',', end=' ')
print('%0.2f' % sn_Re, '] [/Angstrom]')
print('')
# ----------
# DRAW PLOT
# ----------
if plot:
# Set image size to fit the bundle.
size_im = 100
N_im = np.arange(size_im)
# Create a linear grid, centred at Fibre #1.
x_ctr = myIFU.xpos[np.sum(np.where(myIFU.n == 1))]
y_ctr = myIFU.ypos[np.sum(np.where(myIFU.n == 1))]
# Set axis origin: highest RA, lowest DEC.
x_0 = x_ctr + (size_im/2)*dx
y_0 = y_ctr - (size_im/2)*dy
# Direction of each axis: RA decreases, DEC increases.
x_lin = x_0-N_im*dx
y_lin = y_0+N_im*dy
# Create image --
# 1) Find indices of nearest linear points to actual core positions.
b = 0 # (reset index)
core_x = []
core_y = []
for b in range(n_core):
nx = np.abs(x_lin - myIFU.xpos[b]).argmin()
ny = np.abs(y_lin - myIFU.ypos[b]).argmin()
from __future__ import division
from numpy.linalg import norm
import numpy as np
import utils
import pdb
import emcee
lammy = 0.01
verbose = 1
X = 0
y = 0
iteration = 1
alpha = 1e-2
d = 0
hist_grad = 0
epsilon = 0
scale = True
diffpriv = False
def init(dataset, epsilon):
passedEpsilon = epsilon
data = utils.load_dataset(dataset)
global X
X = data['X']
global y
y = data['y']
global d
d = X.shape[1]
global hist_grad
hist_grad = np.zeros(d)
global samples
samples = []
def lnprob(x, alpha):
return -(alpha / 2) * np.linalg.norm(x)
nwalkers = max(4 * d, 250)
sampler = emcee.EnsembleSampler(nwalkers, d, lnprob, args=[passedEpsilon])
p0 = [np.random.rand(d) for i in range(nwalkers)]
pos, _, state = sampler.run_mcmc(p0, 100)
sampler.reset()
sampler.run_mcmc(pos, 1000, rstate0=state)
print("Mean acceptance fraction:", np.mean(sampler.acceptance_fraction))
samples = sampler.flatchain
return d
def funObj(ww, X, y, batch_size):
yXw = y * X.dot(ww)
# Calculate the function value
f = np.sum(np.logaddexp(0, -yXw)) + 0.5 * lammy * ww.T.dot(ww)
# Calculate the gradient value
res = - y / np.exp(np.logaddexp(0, yXw))
if scale:
g = (1 / batch_size) * X.T.dot(res) / \
max(1, np.linalg.norm(X.T.dot(res))) + lammy * ww
else:
g = (1 / batch_size) * X.T.dot(res) + lammy * ww
return f, g
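# funObj evaluates the L2-regularized logistic loss
#   f(w) = sum_i log(1 + exp(-y_i * x_i.w)) + (lammy/2) * ||w||^2
# with mini-batch gradient
#   g = (1/batch_size) * X^T r + lammy * w,  where r_i = -y_i / (1 + exp(y_i * x_i.w)),
# and scale=True additionally clips the X^T r term to unit norm (useful for the DP analysis).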
# Reports the direct change to w, based on the given one.
# Batch size could be 1 for SGD, or 0 for full gradient.
def privateFun(theta, ww, batch_size=0):
global iteration
ww = np.array(ww)
from functools import reduce
from Obj3D import Point3D, Sphere, Cone, calculateBound, calScaleRatio
import numpy as np
from numpy import linalg as LA
from scipy.spatial import distance_matrix
def getObjList(nodes, graph, node_idx=None):
if node_idx:
# list of sphere indices
sphere_idxs = [node_idx]+list(graph[node_idx])
sphere_list = [Sphere(Point3D(*nodes[x].pos), nodes[x].r) for x in sphere_idxs]
# list of cone index pairs
cone_idx_pairs = [(node_idx, x) for x in graph[node_idx]]
cone_list = [Cone(Point3D(*nodes[p[0]].pos),nodes[p[0]].r,Point3D(*nodes[p[1]].pos),nodes[p[1]].r) for p in cone_idx_pairs]
else: # Return all nodes
sphere_list=[]
cone_list=[]
for node_idx in nodes.keys():
# add the sphere corresponding to the current node
sphere_list.append(Sphere(Point3D(*nodes[node_idx].pos), nodes[node_idx].r))
# list of cone index pairs
cone_idx_pairs = [(node_idx, x) for x in graph[node_idx] if node_idx<x]
cone_list_local = [Cone(Point3D(*nodes[p[0]].pos),nodes[p[0]].r,Point3D(*nodes[p[1]].pos),nodes[p[1]].r) \
for p in cone_idx_pairs]
cone_list.extend(cone_list_local)
return sphere_list, cone_list
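# getObjList converts a node/edge graph with per-node radii into renderable primitives: one
# sphere per node and one cone (frustum) per edge, either for a single node plus its
# neighbours (node_idx given) or for the whole graph. Sketch of use, assuming `nodes` maps
# index -> object with .pos/.r and `graph` maps index -> neighbour indices:
#   spheres, cones = getObjList(nodes, graph)              # whole structure
#   spheres, cones = getObjList(nodes, graph, node_idx=5)  # node 5 and its incident edges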
def checkSphereV2(mark, sphere, img_shape):
bbox = list(sphere.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>img_shape[i]):
bbox[j] = img_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
# points=img_idxs[:3, xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] # 3*M
# points=points.T # M*3
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
points=np.hstack((xs,ys,zs))
sphere_c_mat = np.array([sphere.center_point.toList()]) # 1*3
# compute the distance from every candidate point to the sphere center
dis_mat = distance_matrix(points,sphere_c_mat) # M*1
# keep the points whose distance is within the radius
res_idxs = np.where(dis_mat<=sphere.radius)[0]
mark[xmin+x_idxs[res_idxs], ymin+y_idxs[res_idxs], zmin+z_idxs[res_idxs]] = 255
def checkConeV2(mark, cone, img_shape):
bbox = list(cone.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>img_shape[i]):
bbox[j] = img_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
# points=img_idxs[:, xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] # 3*M
# points=points.T # M*3
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
#!/usr/bin/env python3
import os
import cv2
import collections
import math
import argparse
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import tensorflow as tf
import keras as K
import seaborn as sns # noqa: F401
import sklearn # noqa: F401
from IPython import embed # noqa: F401
# For issue `failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED`
# https://github.com/tensorflow/tensorflow/issues/45070
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
np.set_printoptions(precision=3, suppress=True)
TARGET_SPEED = 31.0
COLUMN_NAMES = collections.OrderedDict({
'center': 0,
'left': 1,
'right': 2,
'steering': 3,
'throttle': 4,
'brake': 5,
'speed': 6
})
class DataGenerator(K.utils.Sequence):
def __init__(self, samples, batch_size=4, dim=(160, 320, 1)):
self._batch_size = batch_size
self._samples = samples
self._indices = np.arange(samples.shape[0])
def __len__(self):
# must implement
return math.ceil(len(self._samples) / self._batch_size)
def __getitem__(self, index):
# must implement
indices = self._indices[index * self._batch_size: (index + 1) * self._batch_size]
batch_samples = self._samples[indices]
images = []
measurements = []
for sample in batch_samples:
for data in self._read_data(sample):
image, steering, throttle, brake, speed = data
images.append(image)
outputs = [
steering,
throttle,
brake,
speed
]
measurements.append(outputs)
return np.asarray(images), np.asarray(measurements)
def on_epoch_end(self):
np.random.shuffle(self._indices)
@staticmethod
def _read_data(sample, steering_correction=0.15):
# print('reaing %s' % sample[COLUMN_NAMES['center']])
image_center = cv2.imread(sample[COLUMN_NAMES['center']])
image_center = cv2.cvtColor(image_center, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
image_left = cv2.imread(sample[COLUMN_NAMES['left']])
image_left = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
image_right = cv2.imread(sample[COLUMN_NAMES['right']])
image_right = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)[..., np.newaxis]
steering = float(sample[COLUMN_NAMES['steering']])
throttle = float(sample[COLUMN_NAMES['throttle']])
brake = float(sample[COLUMN_NAMES['brake']])
speed = float(sample[COLUMN_NAMES['speed']])
yield image_left , steering + steering_correction , throttle, brake, speed # noqa: E203
yield image_center , steering , throttle, brake, speed # noqa: E203
yield image_right , steering - steering_correction , throttle, brake, speed # noqa: E203
yield np.fliplr(image_left) , -steering - steering_correction , throttle, brake, speed # noqa: E203
yield np.fliplr(image_center) , -steering , throttle, brake, speed # noqa: E203
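# Each logged frame is expanded into several training samples: the left/center/right camera
# images with a +/-0.15 steering correction for the side cameras, plus horizontally flipped
# copies with negated steering, which roughly balances left and right turns in the data.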
from .output import pretty_draw
from .misc import constant, dataframe_value_mapping
import networkx as nx
import pandas as pd
import numpy as np
from copy import copy, deepcopy
from .formats import bif_parser
import os.path
from itertools import repeat, combinations
from .factor import TableFactor, DictValueMapping
def descendants(G, x):
"""
Set of all descendants of node in a graph, not including itself.
:param G: target graph
:param x: target node
:return: set of descendants
"""
return set(nx.dfs_preorder_nodes(G, x)) - {x}
def ancestors(G, x):
"""
Set of all ancestors of node in a graph, not including itself.
:param G: target graph
:param x: target node
:return: set of ancestors
"""
G_reversed = G.reverse()
return descendants(G_reversed, x)
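# Small illustrative example (not part of the module):
#   G = nx.DiGraph([(0, 1), (1, 2), (1, 3)])
#   descendants(G, 1)  # -> {2, 3}
#   ancestors(G, 3)    # -> {0, 1}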
def are_equal_graphs(G1, G2):
"""
Check graph equality (equal node names, and equal edges between them).
:param G1: first graph
:param G2: second graph
:return: are they equal
"""
if set(G1.nodes()) != set(G2.nodes()):
return False
return all(map(lambda x: G1.has_edge(*x), G2.edges())) and all(map(lambda x: G2.has_edge(*x), G1.edges()))
class ErdosRenyiDGMGen:
def __init__(self, n=10, p=0.5, factor_gen=None):
self.n = n if hasattr(n, 'rvs') else constant(n)
self.p = p if hasattr(p, 'rvs') else constant(p)
self.factor_gen = factor_gen
def __call__(self):
result = DGM(nx.gnp_random_graph(self.n.rvs(), self.p.rvs()))
for node, node_data in result.nodes(data=True):
node_data['cpd'] = self.factor_gen(result.nodes(), ([node] + result.predecessors(node)))
node_data['cpd'].normalize(node, copy=False)
return result
class TreeDGMGen:
def __init__(self, n=10, factor_gen=None):
self.n = n if hasattr(n, 'rvs') else constant(n)
self.factor_gen = factor_gen
def __call__(self):
def random_tree(n=10):
assert n >= 1
nodes = [0]
result = nx.DiGraph()
result.add_nodes_from(list(range(n)))
counter = 1
n -= 1
while n >= 1:
node = nodes.pop()
n_children = np.random.randint(1, n + 1)
'search stuff'
import sys
import os
import pickle
import itertools as it
import numpy as np
from collections import defaultdict
from xbin import XformBinner
from homog import hinv, hrot
from concurrent.futures import ProcessPoolExecutor
from .worms import Segment, Segments, Worms
from .criteria import CriteriaList, Cyclic, WormCriteria
from . import util
# import numba
class SimpleAccumulator:
def __init__(self, max_results=1000000, max_tmp_size=1024):
self.max_tmp_size = max_tmp_size
self.max_results = max_results
self.temporary = []
def checkpoint(self):
if len(self.temporary) == 0: return
if hasattr(self, 'scores'):
sc, li, lp = [self.scores], [self.lowidx], [self.lowpos]
else:
sc, li, lp = [], [], []
scores = np.concatenate([x[0] for x in self.temporary] + sc)
lowidx = np.concatenate([x[1] for x in self.temporary] + li)
lowpos = np.concatenate([x[2] for x in self.temporary] + lp)
order = np.argsort(scores)
self.scores = scores[order[:self.max_results]]
self.lowidx = lowidx[order[:self.max_results]]
self.lowpos = lowpos[order[:self.max_results]]
self.temporary = []
def accumulate(self, gen):
for future in gen:
result = future.result()
if result is not None:
self.temporary.append(result)
if len(self.temporary) >= self.max_tmp_size:
self.checkpoint()
yield None
def final_result(self):
self.checkpoint()
try:
return self.scores, self.lowidx, self.lowpos
except AttributeError:
return None
class MakeXIndexAccumulator:
def __init__(self, sizes, thresh=1, from_seg=0, to_seg=-1,
max_tmp_size=1024, cart_resl=2.0, ori_resl=15.0):
self.sizes = sizes
self.thresh = thresh
self.max_tmp_size = max_tmp_size
self.from_seg = from_seg
self.to_seg = to_seg
self.tmp = []
self.binner = XformBinner(cart_resl, ori_resl)
self.xindex = defaultdict(list)
# self.xindex = dict()
def checkpoint(self):
print('MakeXIndexAccumulator checkpoint', end='')
sys.stdout.flush()
if len(self.tmp) == 0: return
sc = np.concatenate([x[0] for x in self.tmp])
indices = np.concatenate([x[1] for x in self.tmp])[sc <= self.thresh]
assert np.all(indices < self.sizes)
positions = np.concatenate([x[2] for x in self.tmp])[sc <= self.thresh]
from_pos = positions[:, self.from_seg]
to_pos = positions[:, self.to_seg]
xtgt = hinv(from_pos) @ to_pos
bin_idx = self.binner.get_bin_index(xtgt)
for k, v in zip(bin_idx, indices):
self.xindex[k].append(v)
# self.xindex = {**{k: v for k, v in zip(bin_idx, indices)},
# **self.xindex}
# print('IndexAcculator checkpoint, xindex size:', len(self.xindex))
self.tmp = []
print('done, xindex size:', len(self.xindex))
sys.stdout.flush()
def accumulate(self, gen):
for future in gen:
result = future.result()
if result is not None:
self.tmp.append(result)
if len(self.tmp) >= self.max_tmp_size:
self.checkpoint()
yield None
def final_result(self):
self.checkpoint()
return self.xindex, self.binner
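# The returned xindex maps a discretized relative transform - the XformBinner bin of
# hinv(from_pos) @ to_pos at (cart_resl, ori_resl) resolution - to the list of segment index
# tuples that produced it, so later stages can look up compatible partial assemblies by
# hashing a target transform instead of re-scoring every index combination.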
# GLOBAL_xindex_set = set([0])
# @numba.vectorize([numba.float64(numba.int64)])
# def is_in_xindex_set_numba(idx):
# global GLOBAL_xindex_set
# if idx in GLOBAL_xindex_set:
# return 0.
# else:
# return 999999.
class XIndexedCriteria(WormCriteria):
def __init__(self, xindex, binner, nfold, from_seg=-1):
self.xindex_set = set(xindex.keys())
self.binner = binner
self.from_seg = from_seg
self.cyclic_xform = hrot([0, 0, 1], 360 / nfold)
# global GLOBAL_xindex_set
# GLOBAL_xindex_set = self.xindex_set
def get_xform_commutator(self, from_pos, to_pos):
return np.linalg.inv(from_pos) @ to_pos
def is_in_xindex_set(self, idxary):
is_in = np.ones(idxary.size, dtype='f') * 999999.
for i, idx in enumerate(idxary.flat):
if idx in self.xindex_set:
is_in[i] = 0
return is_in.reshape(idxary.shape)
def score(self, segpos, **kw):
from_pos = segpos[self.from_seg]
to_pos = self.cyclic_xform @ from_pos
xtgt = self.get_xform_commutator(from_pos, to_pos)
bin_idx = self.binner.get_bin_index(xtgt)
return self.is_in_xindex_set(bin_idx)
def alignment(self, segpos, **kw):
return np.eye(4)
class XIndexedAccumulator:
def __init__(self, segments, tail, splitpoint, head, xindex, binner,
nfold, from_seg, to_seg,
max_tmp_size=1024, max_results=100000):
self.segments = segments
self.tail = tail
self.splitpoint = splitpoint
self.head = head
self.xindex = xindex
self.binner = binner
self.from_seg = from_seg
self.to_seg = to_seg
self.cyclic_xform = hrot([0, 0, 1], 360 / nfold)
self.max_tmp_size = max_tmp_size
self.max_results = max_results
self.temporary = []
def checkpoint(self):
if len(self.temporary) == 0: return
ntmp = sum(len(tmp[0]) for tmp in self.temporary)
print('XIndexedAccumulator checkpoint... ncandidates:', ntmp, end=' ')
sys.stdout.flush()
if hasattr(self, 'scores'):
sc, li, lp = [self.scores], [self.lowidx], [self.lowpos]
else:
sc, li, lp = [], [], []
scores = np.concatenate([x[0] for x in self.temporary])
if scores.shape[0] == 0: return
assert np.all(scores == 0)
lowidx = np.concatenate([x[1] for x in self.temporary])
lowpos = np.concatenate([x[2] for x in self.temporary])
scores = scores[:self.max_results]
lowpos = lowpos[:self.max_results]
lowidx = lowidx[:self.max_results]
from_pos = lowpos[:, -1]
to_pos = self.cyclic_xform @ from_pos
xtgt = hinv(from_pos) @ to_pos
bin_idx = self.binner.get_bin_index(xtgt)
# head_idx = np.stack([self.xindex[i] for i in bin_idx])
lowidxtmp, headidxtmp = [], []
for i, b in enumerate(bin_idx):
for headidx in self.xindex[b]:
lowidxtmp.append(lowidx[i])
headidxtmp.append(headidx)
lowidx = np.stack(lowidxtmp)
head_idx = np.stack(headidxtmp)
join_idx, valid = self.segments[self.splitpoint].merge_idx(
self.tail[-1], lowidx[:, -1],
self.head[0], head_idx[:, 0])
lowidx = lowidx[valid][join_idx >= 0]
head_idx = head_idx[valid][join_idx >= 0]
join_idx = join_idx[join_idx >= 0]
# join_idx = self.segments[self.splitpoint].merge_idx_slow(
# self.tail[-1], lowidx[:, -1],
# self.head[0], head_idx[:, 0])
# lowidx = lowidx[join_idx >= 0]
# head_idx = head_idx[join_idx >= 0]
# join_idx = join_idx[join_idx >= 0]
lowidx = np.concatenate(
[lowidx[:, :-1], join_idx[:, None], head_idx[:, 1:]], axis=1)
ifrom, ito = lowidx[:, self.from_seg], lowidx[:, self.to_seg]
site1 = self.segments[self.from_seg].entrysiteid[ifrom]
site2 = self.segments[self.from_seg].exitsiteid[ifrom]
site3 = self.segments[self.to_seg].entrysiteid[ito]
ok = (site1 != site2) * (site1 != site3) * (site2 != site3)
# print('!!!!!!!!', np.sum(ok), ok.shape)
# print('site1', *['%6i' % np.sum(site1 == i) for i in range(10)])
# print('site2', *['%6i' % np.sum(site2 == i) for i in range(10)])
# print('site3', *['%6i' % np.sum(site3 == i) for i in range(10)])
lowidx = lowidx[ok]
if hasattr(self, 'lowidx'):
self.lowidx = np.concatenate([self.lowidx, lowidx])
else:
self.lowidx = lowidx
self.temporary = []
print('done, total pre-err =', len(self.lowidx))
sys.stdout.flush()
def accumulate(self, gen):
for future in gen:
result = future.result()
if result is not None:
self.temporary.append(result)
if len(self.temporary) >= self.max_tmp_size:
self.checkpoint()
yield None
def final_result(self):
self.checkpoint()
try:
return self.lowidx
except AttributeError:
return None
def _get_chunk_end_seg(sizes, max_workers, memsize):
    end = len(sizes) - 1
    while end > 1 and (util.bigprod(sizes[end:]) < max_workers or
                       memsize <= 64 * util.bigprod(sizes[:end])):
        end -= 1
    return end
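# Rough reading of the heuristic above: `end` is walked down from the last
# segment until the trailing block sizes[end:] yields at least max_workers
# chunks while the leading block sizes[:end] stays within the memory budget
# (memsize is compared against 64 * bigprod(sizes[:end]), i.e. roughly 64
# bytes per in-memory entry).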
def grow(
segments,
criteria,
*,
thresh=2,
expert=0,
memsize=1e6,
executor=None,
executor_args=None,
max_workers=None,
verbosity=2,
chunklim=None,
max_samples=int(1e12),
max_results=int(1e4),
cart_resl=2.0,
ori_resl=15.0,
xindex_cache_file=None
):
if True: # setup
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
if isinstance(segments, list):
segments = Segments(segments)
# if isinstance(executor, (ProcessPoolExecutor, ThreadPoolExecutor)):
# raise ValueError('please use dask.distributed executor')
if verbosity > 0:
print('grow, from', criteria.from_seg, 'to', criteria.to_seg)
for i, seg in enumerate(segments):
print(' segment', i,
'enter:', seg.entrypol,
'exit:', seg.exitpol)
for sp in seg.spliceables: print(' ', sp)
elif verbosity == 0:
print('grow, nseg:', len(segments))
if verbosity > 2:
global __print_best
__print_best = True
if not isinstance(criteria, CriteriaList):
criteria = CriteriaList(criteria)
if max_workers is not None and max_workers <= 0:
max_workers = util.cpu_count()
if executor_args is None and max_workers is None:
executor_args = dict()
elif executor_args is None:
executor_args = dict(max_workers=max_workers)
elif executor_args is not None and max_workers is not None:
raise ValueError('executor_args incompatible with max_workers')
if executor is None:
executor = util.InProcessExecutor
max_workers = 1
if max_workers is None: max_workers = util.cpu_count()
nworker = max_workers or util.cpu_count()
if criteria.origin_seg is None:
matchlast = _check_topology(segments, criteria, expert)
sizes = [len(s) for s in segments]
end = _get_chunk_end_seg(sizes, max_workers, memsize)
ntot, chunksize, nchunks = (util.bigprod(x)
for x in (sizes, sizes[:end], sizes[end:]))
if max_samples is not None:
max_samples = np.clip(chunksize * max_workers, max_samples, ntot)
every_other = max(1, int(ntot / max_samples)) if max_samples else 1
njob = int(np.sqrt(nchunks / every_other) / 32) * nworker
njob = np.clip(nworker, njob, nchunks)
actual_ntot = int(ntot / every_other)
actual_nchunk = int(nchunks / every_other)
actual_perjob = int(ntot / every_other / njob)
actual_chunkperjob = int(nchunks / every_other / njob)
if verbosity >= 0:
print('tot: {:,} chunksize: {:,} nchunks: {:,} nworker: {} njob: {}'.format(
ntot, chunksize, nchunks, nworker, njob))
print('worm/job: {:,} chunk/job: {} sizes={} every_other={}'.format(
int(ntot / njob), int(nchunks / njob), sizes, every_other))
print('max_samples: {:,} max_results: {:,}'.format(
max_samples, max_results))
print('actual tot: {:,}'.format(int(actual_ntot)))
print('actual nchunks: {:,}'.format(int(actual_nchunk)))
print('actual worms/job: {:,}'.format(int(actual_perjob)))
print('actual chunks/job: {:,}'.format(int(actual_chunkperjob)))
_grow_args = dict(executor=executor, executor_args=executor_args,
njob=njob, end=end, thresh=thresh,
matchlast=matchlast, every_other=every_other,
max_results=max_results, nworker=nworker,
verbosity=verbosity)
if njob > 1e9 or nchunks >= 2**63 or every_other >= 2**63:
print('too big?!?')
print(' njob', njob)
print(' nchunks', nchunks, nchunks / 2**63)
print(' every_other', every_other, every_other / 2**63)
raise ValueError('system too big')
accum = SimpleAccumulator(max_results=max_results, max_tmp_size=1e5)
_grow(segments, criteria, accum, **_grow_args)
result = accum.final_result()
if result is None: return None
scores, lowidx, lowpos = result
lowposlist = [lowpos[:, i] for i in range(len(segments))]
score_check = criteria.score(segpos=lowposlist, verbosity=verbosity)
assert np.allclose(score_check, scores)
detail = dict(ntot=ntot, chunksize=chunksize, nchunks=nchunks,
nworker=nworker, njob=njob, sizes=sizes, end=end)
else: # hash-based protocol...
        assert len(criteria) == 1
matchlast = _check_topology(segments, criteria, expert)
splitpoint = criteria.from_seg
tail, head = segments.split_at(splitpoint)
print('HASH PROTOCOL splitting at segment', splitpoint)
print(' full:', [len(s) for s in segments])
headsizes = [len(s) for s in head]
headend = _get_chunk_end_seg(headsizes, max_workers, memsize)
ntot, chunksize, nchunks = (util.bigprod(x)
for x in (headsizes, headsizes[:headend],
headsizes[headend:]))
if max_samples is not None:
max_samples = np.clip(chunksize * max_workers, max_samples, ntot)
every_other = max(1, int(ntot / max_samples)) if max_samples else 1
njob = int(np.sqrt(nchunks / every_other) / 8 / nworker) * nworker
njob = np.clip(nworker, njob, nchunks)
_grow_args = dict(executor=executor, executor_args=executor_args,
njob=njob, end=headend, thresh=thresh,
matchlast=0, every_other=every_other,
max_results=max_results, nworker=nworker,
verbosity=verbosity)
t1 = 0
if xindex_cache_file and os.path.exists(xindex_cache_file):
print('!' * 100)
print('reading xindex, xbinner from', xindex_cache_file)
xindex, binner = pickle.load(open(xindex_cache_file, 'rb'))
else:
# if 1:
accum1 = MakeXIndexAccumulator(headsizes, from_seg=0, to_seg=-1,
cart_resl=cart_resl, ori_resl=ori_resl)
headcriteria = Cyclic(criteria[0].nfold, from_seg=0, to_seg=-1,
tol=criteria[0].tol * 1.25,
lever=criteria[0].lever)
print('STEP ONE: growing head into xindex')
print(' ntot {:,}'.format(ntot))
print(' headsizes {}'.format(headsizes))
print(' headend {:,}'.format(headend))
print(' njob {:,}'.format(njob))
print(' nchunks {:,}'.format(nchunks))
print(' chunksize {:,}'.format(chunksize))
print(' thresh {:,}'.format(thresh))
print(' matchlast {:,}'.format(0))
print(' every_other {:,}'.format(every_other))
print(' max_results {:,}'.format(max_results))
print(' nworker {:,}'.format(nworker))
print(' act. ntot {:,}'.format(int(ntot / every_other)))
print(' act. nchunks {:,}'.format(
int(nchunks / every_other)))
print(' act. worms/job {:,}'.format(
int(ntot / every_other / njob)))
print(' act. chunks/job {:,}'.format(
int(nchunks / every_other / njob)))
import time
t1 = time.time()
_grow(head, headcriteria, accum1, **_grow_args)
xindex, binner = accum1.final_result()
t1 = time.time() - t1
print('!' * 100)
print("TIME PHASE ONE", t1)
print('!' * 100)
if xindex_cache_file:
print('!' * 100)
print('dumping xindex to', xindex_cache_file)
print('!' * 100)
pickle.dump((xindex, binner), open(xindex_cache_file, 'wb'))
################### PHASE TWO ####################
tailcriteria = XIndexedCriteria(xindex, binner,
criteria[0].nfold, from_seg=-1)
accum2 = XIndexedAccumulator(segments, tail, splitpoint, head, xindex,
binner, criteria[0].nfold,
from_seg=criteria.from_seg,
to_seg=criteria.to_seg,
max_results=max_results * 20)
tailsizes = [len(s) for s in tail]
tailend = _get_chunk_end_seg(tailsizes, max_workers, memsize)
ntot, chunksize, nchunks = (util.bigprod(x)
for x in (tailsizes, tailsizes[:tailend],
tailsizes[tailend:]))
if max_samples is not None:
max_samples = np.clip(chunksize * max_workers, max_samples, ntot)
every_other = max(1, int(ntot / max_samples * 20)
) if max_samples else 1
njob = int(np.ceil(np.sqrt(nchunks / every_other) / 32 / nworker))
njob = np.clip(nworker, njob * nworker, nchunks)
_grow_args = dict(
executor=executor,
executor_args=executor_args,
njob=njob, end=tailend, thresh=thresh,
matchlast=None, every_other=every_other,
max_results=max_results, nworker=nworker,
verbosity=verbosity)
print('STEP TWO: using xindex, nentries {:,}'.format(len(xindex)))
print(' ntot {:,}'.format(ntot))
print(' tailsizes {}'.format(tailsizes))
print(' tailend {:,}'.format(tailend))
print(' njob {:,}'.format(njob))
print(' nchunks {:,}'.format(nchunks))
print(' chunksize {:,}'.format(chunksize))
print(' thresh {:,}'.format(thresh))
print(' matchlast None')
print(' every_other {:,}'.format(every_other))
print(' max_results {:,}'.format(max_results))
print(' nworker {:,}'.format(nworker))
print(' act. ntot {:,}'.format(int(ntot / every_other)))
print(' act. nchunks {:,}'.format(
int(nchunks / every_other)))
print(' act. worms/job {:,}'.format(
int(ntot / every_other / njob)))
print(' act. chunks/job {:,}'.format(
int(nchunks / every_other / njob)))
print(' executor ', type(executor()))
import time
t2 = time.time()
_grow(tail, tailcriteria, accum2, **_grow_args)
# import cProfile
# cProfile.runctx('_grow(tail, tailcriteria, accum2, **_grow_args)',
# locals(), globals(), 'grow2.stats')
# import pstats
# pst = pstats.Stats('grow2.stats')
# pst.strip_dirs().sort_stats('time').print_stats(20)
lowidx = accum2.final_result()
t2 = time.time() - t2
print('!' * 100)
print("TIME PHASE ONE", t1)
print("TIME PHASE TWO", t2)
# print(' best 28 cores 1608.94K/s small 1min job 681k/.s')
print('!' * 100)
if lowidx is None:
print('grow: no results')
return
print('refold segments')
lowpos = _refold_segments(segments, lowidx)
lowposlist = [lowpos[:, i] for i in range(len(segments))]
print('score refolded segments')
scores = criteria.score(segpos=lowposlist, verbosity=verbosity)
print('organize results')
nlow = sum(scores <= thresh)
order = np.argsort(scores)[:nlow]
scores = scores[order]
lowpos = lowpos[order]
lowidx = lowidx[order]
detail = dict(ntot=ntot, chunksize=chunksize, nchunks=nchunks,
nworker=nworker, njob=njob, sizes=tailsizes, end=tailend)
return Worms(segments, scores, lowidx, lowpos, criteria, detail)
def _refold_segments(segments, lowidx):
    pos = np.zeros_like(lowidx, dtype='(4,4)f')
import numpy as np
class Renderer:
def __init__(self, height, width, config):
self.height = height
self.width = width
self.content = None
self.zbuffer = None
self.m = None
self.f = 1.0
self.resize(height, width)
self.colors = config.colors
self.bonds = config.bonds
self.btoggle = len(self.bonds) > 0
self.pos, self.sym = np.array(config.coordinates), config.symbols
self.ztoggle = True
self.zoom = 1.0
self.rot = np.identity(3)
self.rotcounter = [0, 0, 0]
self.draw_scene()
def draw_scene(self):
"""
A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
positions.
:return: True if nothing bad happened.
"""
mx, my = self.m
rot = np.matmul(self.pos, self.rot)
self.clear()
# Draw bonds
for bond in self.bonds:
i, j = bond
# if bond is (i, j) with i == j, just draw the label (no bonds)
if i == j:
x, y, z = rot[i]
xp, yp = round(float(x) * self.f * self.zoom + mx), round(float(y) * self.zoom + my)
if 1 < xp < self.width - 2 and 1 < yp < self.height - 3 and float(z) < self.zbuffer[yp][xp]:
self.zbuffer[yp][xp] = float(z)
self.content[yp][xp] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
# else draw the bond with the labels at the end points
else:
# Draw the two labels at the end points
xa, ya, za = rot[i]
xa = float(xa) * self.f * self.zoom + mx
ya = float(ya) * self.zoom + my
xb, yb, zb = rot[j]
xb = float(xb) * self.f * self.zoom + mx
yb = float(yb) * self.zoom + my
xap, yap = round(xa), round(ya)
xbp, ybp = round(xb), round(yb)
if 1 < xap < self.width - 2 and 1 < yap < self.height - 3 and float(za) < self.zbuffer[yap][xap]:
self.zbuffer[yap][xap] = float(za)
self.content[yap][xap] = self.sym[i][0].upper() + "," + self.colors[self.sym[i].upper()]
if 1 < xbp < self.width - 2 and 1 < ybp < self.height - 3 and float(zb) < self.zbuffer[ybp][xbp]:
self.zbuffer[ybp][xbp] = float(zb)
self.content[ybp][xbp] = self.sym[j][0].upper() + "," + self.colors[self.sym[j].upper()]
if not self.btoggle:
continue
# Then start at xap+1 and go to xbp-1, drawing line segments
sy = -1 if ya > yb else 1
sx = -1 if xa > xb else 1
sz = -1 if za > zb else 1
dx = float((xb - xa) / (yb - ya)) if abs(yb - ya) > 0 else 0
dy = float((yb - ya) / (xb - xa)) if abs(xb - xa) > 0 else 0
dz = float((zb - za) / (xb - xa)) if abs(xb - xa) > 0 else 0
if abs(dy) <= 1:
for k in range(1, abs(xap - xbp)):
xk = xap + sx * k
yk = round(float(ya) + sx * k * dy)
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(xap - xbp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
else:
for k in range(1, abs(yap - ybp)):
xk = round((float(xa) + sy * k * dx))
yk = yap + sy * k
zk = round((float(za) + sz * k * dz))
if 1 < xk < self.width - 2 and 1 < yk < self.height - 3 and float(zk) < \
self.zbuffer[yk][xk]:
col = self.colors[self.sym[i].upper()] if k < abs(yap - ybp) / 2 else self.colors[
self.sym[j].upper()]
self.zbuffer[yk][xk] = float(zk)
self.content[yk][xk] = "·,%s" % col
return True
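    # Screen mapping used above: column = round(x * f * zoom + m[0]) and
    # row = round(y * zoom + m[1]), where f is an extra horizontal scale factor
    # (set in resize(), not shown here); the z-buffer keeps only the closest
    # atom/bond character per cell.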
def rotate(self, direction):
"""
Set an internal rotation matrix that is applied to the coordinates before every render.
:param direction: 1 and -1 are x and -x, 2 is either z/y, depending on whether the ztoggle is active or not
"""
if direction == 1:
            self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, 0.9962, -0.0872], [0.0, 0.0872, 0.9962]])
import numpy as np
import collections
import re
from copy import copy
import math
# from .myabc import MN
from acqdp.tensor_network.tensor_valued import TensorValued
from acqdp.tensor_network.tensor import Tensor
from acqdp.tensor_network.tensor_network import TensorNetwork
from typing import List, Dict, Optional, Set
INDENT = " "
"""The unit of indentation used for :meth:`Operation.tree_string`."""
class Operation(object):
"""Base class for quantum opertations.
By itself, it can represent a generic quantum operation, although one would
not be able to do much with it. Usually, one should use
:class:`ImmutableOperation` (or more commonly, one of its subclasses) for
simple quantum operations with an explicit tensor representation, and
:class:`Circuit` for quantum operations better represented as a composition
of simpler operations. (Technically an :class:`ImmutableOperation` can
represent a complex operation by using a :class:`TensorNetwork` as the data,
but that would be an uncommon use case.)
:ivar name: Name of the quantum operation. Defaults to "GenOp".
:vartype name: str, optional
"""
def __init__(self, name="GenOp"):
self.name = name
def __str__(self) -> str: # pragma: no cover
return str(self.name)
def tree_string(self, indent=0): # pragma: no cover
"""Return an indented string that describes the operation.
This is mainly used for visualization of :class:`Circuit` instances.
Notably, the returned string should not include the name of the
operation, which would already be included by default in the tree
string of the "parent" operation.
It is fine to return an empty string, but otherwise, the string should
be indented with `indent` copies of the string :data:`INDENT`, and
terminated with a newline.
:param indent: The amount of indent needed. Defaults to 0.
:type indent: int, optional
"""
return ""
def __repr__(self) -> str: # pragma: no cover
return repr(vars(self))
def __mul__(self, other):
"""Return two operations in parallel as a quantum circuit.
The resulting :class:`Circuit` will have a number of qubits :math:`n`
equal to the total number of qubits in both operands, indexed by
integers from 0 to :math:`n-1`. The qubits are ordered naturally, i.e.,
qubits in the left operand come first, and within both operands the
original qubit orders are preserved.
Note that this function will regard both operands as atomic operations:
No attempt is made to "expand" the operands even if one or both of them
are themselves :class:`Circuit` instances.
"""
if isinstance(other, Operation):
return Circuit().append(self, list(range(len(self.shape))), 0).append(other, list(range(len(self.shape), len(self.shape) + len(other.shape))), 0)
else:
return NotImplemented
def __or__(self, other):
"""Return the concatenation of two operations as a quantum circuit.
The left operand will happen first. Note that this ordering convention
is different from how the result would be represented as a product of
matrices. For example, ``ZeroState | HGate | ZGate`` will result in the
state :math:`ZH|0\\rangle`.
If the left operand is a :class:`Circuit` instance, then the qubit names
in it will be preserved, and the same will apply to the right operand if
it is a :class:`Circuit` instance too; otherwise qubits in the right
operand will be indexed by integers from 0. The qubit names will
determine how the two circuits are connected.
If the left operand is not a :class:`Circuit` instance, then the qubits
in *both* operands will be indexed by integers from 0, and the circuits
will be connected correspondingly.
"""
if isinstance(self, Circuit):
if isinstance(other, Circuit):
copy_self = copy(self)
for time_step in other.operations_by_time:
for op in other.operations_by_time[time_step]:
operation = other.operations_by_time[time_step][op]
copy_self.append(operation['operation'],
operation['qubits'])
return copy_self
else:
return copy(self).append(other, list(range(len(other.shape))))
else:
return Circuit().append(self, list(range(len(self.shape)))).append(other, list(range(len(other.shape))))
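    # Illustration of the two composition operators (a sketch: ZeroState is
    # assumed to be a one-qubit pure |0> state defined elsewhere in the
    # package; HGate, ZGate and XGate are the gates defined later in this
    # module):
    #   ZeroState | HGate | ZGate   # sequential composition: the state Z H |0>
    #   HGate * XGate               # parallel composition: H on qubit 0, X on qubit 1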
def _indices_with_property(self, pattern):
res = [i for i in range(len(self.shape))
if bool(re.match(pattern, self.shape[i]))]
return res, len(res)
@property
def _input_indices(self):
return self._indices_with_property("^[ibc]")
@property
def _output_indices(self):
return self._indices_with_property(".*[odc]$")
@property
def tensor_pure(self):
"""Convert the operation into a tensor network representing the action
of the operation in the pure state picture.
Examples of tensor representations in the pure state picture include
state vectors of pure states, and unitary matrices of unitary gates.
Raises an error if the operation is not pure.
"""
from .converter import Converter
return Converter.convert_pure(self)
@property
def tensor_density(self):
"""Convert the operation into a tensor network representing the action
of the operation in the density matrix picture.
Examples of tensor representations in the density matrix picture include
density matrices of general quantum states, and Choi matrices of quantum
operations. As a special case, when the operation is pure, the tensor
network returned by :attr:`tensor_density` will consist of two disjoint
components, one being the tensor network returned by :attr:`tensor_pure`
and the other being its adjoint.
"""
from .converter import Converter
return Converter.convert_density(self)
@property
def tensor_control(self):
"""Convert a controlled operation into a tensor network in the pure
state picture, but with only one open edge for each controlling qubit.
A qubit in an operation can be regarded as a controlling qubit if its
value in the computational basis is never changed by the operation. As
such, its input wire and output wire can be represented by the same edge
in a tensor network, thus simplifying the tensor network. In other
words, the :attr:`tensor_pure` for a controlled operation will be a
block diagonal matrix, and its :attr:`tensor_control` will be a more
compact representation of the same matrix.
As a special case, if the operation is a diagonal gate, then every qubit
can be regarded as a controlling qubit. See :class:`Diagonal`.
"""
from .converter import Converter
return Converter.convert_control(self)
def adjoint(self): # pragma: no cover
"""Return the adjoint of a quantum operation. ``~op`` is an alias of
``op.adjoint()``.
"""
raise NotImplementedError()
def __invert__(self):
return self.adjoint()
class ImmutableOperation(Operation):
"""Class for quantum operations with explicit tensor representations. The
operation is not supposed to be modified.
:param data: A tensor representation of the operation. For this base class,
it will be the tensor returned by :attr:`~Operation.tensor_density`.
This means that for an operation with :math:`n` input qubits and
:math:`m` output qubits, the tensor should be of rank :math:`2(n+m)`.
A derived class of this may use different representation more suitable
for a specific class of operations.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar ~.shape: A list of strings describing the input and output qubits of
the quantum operation, with each string describing a qubit involved. For
this base class, each string should be one of "i", "o" or "io",
indicating whether each qubit is an input qubit, an output qubit, or
both.
:vartype ~.shape: List[str]
:ivar name: Name of the quantum operation. Defaults to "ImOp".
:vartype name: str, optional
"""
def __init__(self,
data: np.ndarray,
shape: List[str],
name="ImOp") -> None:
Operation.__init__(self, name)
self.shape = shape
self.process(data)
def __str__(self) -> str: # pragma: no cover
return str(vars(self))
def process(self, data):
"""Convert the input data into an appropriately shaped tensor, and
initialize the operation with this tensor.
A derived class that uses a different tensor representation of the
operation should override this function in order to do shape checking
and initialization correctly.
:param data: The ``data`` parameter passed to
:meth:`ImmutableOperation.__init__`.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or
np.ndarray
"""
_, lin = self._indices_with_property("^i")
_, lout = self._indices_with_property(".*o$")
if isinstance(data, TensorValued):
if data.shape != tuple([2] * (2 * (lin + lout))):
raise ValueError("Invalid operation: Input dimensions does not match the claimed shape")
self._tensor_density = data
elif type(data) != np.ndarray or np.size(data) != np.prod(np.shape(data)):
raise TypeError("Invalid operation: Operation should be an np.ndarray.")
else:
try:
self._tensor_density = Tensor(np.reshape(data, tuple([2] * (2 * (lin + lout)))))
except ValueError:
raise ValueError("Invalid operation: Operation dimension does not match qubits.")
@classmethod
def operation_from_kraus(cls, shape, kraus, name="ImOp_Kraus"):
"""Construct a quantum operation from its Kraus operator representation.
:param shape: The shape of the operation, in the same format as the
:attr:`shape` attribute of :class:`ImmutableOperation`.
:type shape: List[str]
:param kraus: The list of Kraus operators.
:type kraus: List[acqdp.tensor_network.tensor_valued.TensorValued or
np.ndarray]
:param name: Name of the resulting quantum operation. Defaults to
"ImOp_Kraus".
:type name: str, optional
:returns: A quantum operation constructed from the given Kraus operator
representation.
:rtype: ImmutableOperation
"""
lin = len([i for i in shape if i[0] == 'i'])
lout = len([i for i in shape if i[-1] == 'o'])
shape_tensor = tuple([2] * (lin + lout))
tensor_density = np.zeros(tuple([2] * (2 * (lin + lout))))
for operator in kraus:
if not isinstance(operator, np.ndarray):
raise TypeError("Invalid operation: Kraus Operator should be an np.ndarray.")
if (operator.shape != shape_tensor) and (operator.shape != (2 ** lout, 2 ** lin)):
raise ValueError("Invalid operation: Dimensions do not match")
i = np.reshape(operator, shape_tensor)
tensor_density += np.multiply.outer(i, np.conj(i))
return cls(tensor_density, shape, name)
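    # Sketch (hypothetical values): a single-qubit bit-flip channel with flip
    # probability p, built from its two Kraus operators K0 = sqrt(1-p)*I and
    # K1 = sqrt(p)*X:
    #   p = 0.1
    #   K0 = np.sqrt(1 - p) * np.eye(2)
    #   K1 = np.sqrt(p) * np.array([[0., 1.], [1., 0.]])
    #   bit_flip = ImmutableOperation.operation_from_kraus(['io'], [K0, K1])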
@property
def is_pure(self):
"""Return True if the operation is a pure operation.
Pure operations include pure states, isometries, projections and their
combinations.
Note that currently this function determines whether an operation is
pure solely based on the class of the operation, without inspecting the
actual data. For example, if a quantum state ``s`` is initialized with
``s = State(num_qubits, data)``, then ``s.is_pure`` will always be False
and ``s.tensor_pure`` will always raise an error, even if ``data`` is
actually the density matrix of a pure state.
"""
return isinstance(self, PureOperation)
class State(ImmutableOperation):
"""Class for simple quantum states.
Quantum states are regarded as a special case of quantum operations where
there is no input qubit, and each qubit is an output qubit.
:param num_qubits: Number of qubits in the state.
:type num_qubits: int
:param data: The density matrix representation of the state.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "State".
:vartype name: str, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name="State") -> None:
ImmutableOperation.__init__(self, data, tuple(['o'] * num_qubits), name)
class Measurement(ImmutableOperation):
"""Class for simple (destructive) measurements.
Destructive measurements are regarded as a special case of quantum
operations where each qubit is an input qubit, and there is no output
qubit. Such a measurement maps an arbitrary quantum state to a number.
:param num_qubits: Number of qubits measured.
:type num_qubits: int
:param data: The POVM (positive operator-valued measure) representation of
the measurement.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "Meas".
:vartype name: str, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name="Meas") -> None:
ImmutableOperation.__init__(self, data, tuple(['i'] * num_qubits), name)
class Channel(ImmutableOperation):
"""Class for simple quantum channels.
This class is used for the common case where the input and output Hilbert
spaces are the same, i.e., each qubit is both an input qubit and an output
qubit. For channels that do not satisfy this constraint, please use
:class:`ImmutableOperation` directly.
:param num_qubits: Number of qubits the channel operates on.
:type num_qubits: int
:param data: The tensor representation of the channel in the density matrix
picture.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum operation. Defaults to "Channel".
:vartype name: str, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name='Channel') -> None:
ImmutableOperation.__init__(self, data, tuple(['io'] * num_qubits), name)
class PureOperation(ImmutableOperation):
"""Class for simple pure quantum operations.
:param data: The tensor representation of the operation in the pure state
picture. It will be the tensor returned by
:attr:`~Operation.tensor_pure`. This means that for an operation with
:math:`n` input qubits and :math:`m` output qubits, the tensor should be
of rank :math:`n+m`, i.e., half the rank of the tensor in the density
matrix picture.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar ~.shape: The shape of the operation, in the same format as the
:attr:`shape` attribute of :class:`ImmutableOperation`.
:vartype ~.shape: List[str]
:ivar name: Name of the quantum operation. Defaults to "PureOp".
:vartype name: str, optional
:ivar self_adjoint: Whether the operation is self-adjoint. Defaults to
False.
:vartype self_adjoint: bool, optional
"""
def __init__(self,
data: np.ndarray,
shape: List[str],
name="PureOp",
self_adjoint=False) -> None:
ImmutableOperation.__init__(self, data, shape, name)
self.self_adjoint = self_adjoint
def set_adjoint_op(self, adjoint_op):
"""Set the known adjoint of the operation.
Usually, the adjoint of a pure operation is constructed on demand by
calculating the :attr:`~Operation.tensor_pure` of the operation, then
constructing a new :class:`PureOperation` object, which can be an
inefficient process. By setting the adjoint of a operation to a known
value, one can bypass this procedure. Note that, for efficiency, there
is no check that ``adjoint_op`` is actually the adjoint of ``self``.
The adjoint of ``adjoint_op`` is also set to ``self`` so there is no
need to use this function twice.
:param adjoint_op: The known adjoint of ``self``.
:type adjoint_op: PureOperation
"""
self.adjoint_op = adjoint_op
adjoint_op.adjoint_op = self
def process(self, data):
_, lin = self._indices_with_property("^i")
_, lout = self._indices_with_property(".*o$")
if isinstance(data, TensorValued):
if data.shape != tuple([2] * (lin + lout)):
raise ValueError("Invalid operation: Input dimensions does not match the claimed shape")
self._tensor_pure = data
elif type(data) != np.ndarray or np.size(data) != np.prod(np.shape(data)):
raise TypeError("Invalid operation: Operation should be an np.ndarray.")
else:
try:
self._tensor_pure = Tensor(np.reshape(data, tuple([2] * (lin + lout))))
except ValueError:
raise ValueError("Invalid operation: Operation dimension does not match qubits.")
def adjoint(self):
if self.self_adjoint:
return self
elif hasattr(self, 'adjoint_op'):
return self.adjoint_op
else:
transition_dict = {'i': 'o', 'o': 'i'}
shape = ["".join([transition_dict[i] for i in s[::-1]]) for s in self.shape]
_, lin = self._indices_with_property("^i")
_, lout = self._indices_with_property(".*o$")
tensor_pure = (~self._tensor_pure) % tuple(list(range(lout, lout + lin)) + list(range(lout)))
return PureOperation(tensor_pure, shape, "~" + self.name)
class PureState(PureOperation, State):
"""Class for simple pure quantum states.
:param num_qubits: Number of qubits in the state.
:type num_qubits: int
:param data: The state vector representation of the state.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "PureState".
:vartype name: str, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name='PureState') -> None:
PureOperation.__init__(self, data, tuple(['o'] * num_qubits), name, False)
class PureMeas(PureOperation, Measurement):
"""Class for simple projective measurements.
:param num_qubits: Number of qubits measured.
:type num_qubits: int
:param data: The vector representation of the measurement. Note that it
should be the *complex conjugation* of the state vector of the state
projected onto.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "PureMeas".
:vartype name: str, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name='PureMeas') -> None:
PureOperation.__init__(self, data, tuple(['i'] * num_qubits), name, False)
class Unitary(PureOperation, Channel):
"""Class for simple unitary gates.
:param num_qubits: Number of qubits the unitary operates on.
:type num_qubits: int
:param data: The matrix representation of the unitary.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "Unitary".
:vartype name: str, optional
:ivar self_adjoint: Whether the unitary is self-adjoint. Defaults to False.
:vartype self_adjoint: bool, optional
"""
def __init__(self,
num_qubits,
data: np.ndarray,
name='Unitary',
self_adjoint=False) -> None:
PureOperation.__init__(self, data, tuple(['io'] * num_qubits), name, self_adjoint)
class ControlledOperation(PureOperation):
"""Class for simple controlled operations.
:param data: The tensor representation of the controlled operation. It will
be the tensor returned by :attr:`~Operation.tensor_control`. This means
that for an operation with :math:`k` controlling qubits, :math:`n`
non-control input qubits and :math:`m` non-control output qubits, the
tensor should be of rank :math:`k+n+m`.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar ~.shape: The shape of the operation, in the same format as the
:attr:`shape` attribute of :class:`ImmutableOperation`, but in addition
to "i", "o", and "io", the string "c" is also allowed, which indicates a
controlling qubit.
:vartype ~.shape: List[str]
:ivar name: Name of the quantum operation. Defaults to "C-Op".
:vartype name: str, optional
:ivar self_adjoint: Whether the operation is self-adjoint. Defaults to
False.
:vartype self_adjoint: bool, optional
"""
def __init__(self,
data: np.ndarray,
shape: List[str],
name="C-Op",
self_adjoint=False) -> None:
PureOperation.__init__(self, data, shape, name, self_adjoint)
def process(self, data):
_, lctrl = self._indices_with_property("^c$")
_, lin = self._indices_with_property("^i")
_, lout = self._indices_with_property(".*o$")
if isinstance(data, TensorValued):
if data.shape != tuple([2] * (lctrl + lin + lout)):
raise ValueError("Invalid operation: Input dimensions does not match the claimed shape")
self._tensor_control = data
elif type(data) != np.ndarray or np.size(data) != np.prod(np.shape(data)):
raise TypeError("Invalid operation: Operation should be an np.ndarray.")
else:
try:
self._tensor_control = Tensor(np.reshape(data, tuple([2] * (lctrl + lin + lout))))
except ValueError:
raise ValueError("Invalid operation: Operation dimension does not match qubits.")
def adjoint(self):
if self.self_adjoint:
return self
else:
transition_dict = {'i': 'o', 'o': 'i', 'c': 'c'}
shape = ["".join([transition_dict[i] for i in s[::-1]]) for s in self.shape]
_, lctrl = self._indices_with_property("^c$")
_, lin = self._indices_with_property("^i")
_, lout = self._indices_with_property(".*o$")
tensor_control = ~(self._tensor_control)\
% tuple(list(range(lctrl))
+ list(range(lctrl + lout, lctrl + lout + lin))
+ list(range(lctrl, lctrl + lout)))
return ControlledOperation(tensor_control, shape, "~" + self.name)
class Diagonal(ControlledOperation):
"""Class for simple diagonal gates.
A diagonal gate can be regarded as a controlled phase shift, where every
qubit is a controlling qubit.
:param num_qubits: Number of qubits the diagonal gate operates on.
:type num_qubits: int
:param data: The diagonal elements of the matrix representation of the gate.
:type data: acqdp.tensor_network.tensor_valued.TensorValued or np.ndarray
:ivar name: Name of the quantum state. Defaults to "Diag".
:vartype name: str, optional
:ivar self_adjoint: Whether the gate is self-adjoint. Defaults to False.
:vartype self_adjoint: bool, optional
"""
def __init__(self,
num_cbits,
data: np.ndarray,
name='Diag',
self_adjoint=False) -> None:
ControlledOperation.__init__(self, data, tuple(['c'] * num_cbits), name, self_adjoint)
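# Sketch (hypothetical angle): a controlled phase shift of exp(i*theta) on the
# |11> component can be written as a two-qubit Diagonal gate, mirroring the
# CZGate definition below:
#   theta = np.pi / 4
#   CPhaseGate = Diagonal(2, np.array([[1, 1], [1, np.exp(1j * theta)]]), "CPhase")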
XGate = Unitary(1, np.array([[0, 1], [1, 0]]), "X", True)
"""Single-qubit Pauli X gate."""
YGate = Unitary(1, np.array([[0, 1j], [-1j, 0]]), "Y", True)
"""Single-qubit Pauli Y gate."""
ZGate = Diagonal(1, np.array([1, -1]), "Z", True)
"""Single-qubit Pauli Z gate."""
HGate = Unitary(1, np.sqrt(0.5) * np.array([[1, 1], [1, -1]]), "H", True)
"""Single-qubit Hadamard gate."""
TGate = Diagonal(1, np.array([1, (1 + 1j) * np.sqrt(0.5)]), "T")
"""Single-qubit T gate, i.e., a :math:`\\pi/4` rotation around the Z axis on the Bloch sphere."""
SGate = Diagonal(1, np.array([1, 1j]), "S")
"""Single-qubit S gate, i.e., a :math:`\\pi/2` rotation around the Z axis on the Bloch sphere."""
Trace = Measurement(1, TensorNetwork(open_edges=[0, 0], bond_dim=2), "Tr")
"""The partial trace operation, defined as an measurement that maps every
normalized single-qubit state to 1.
In a circuit, a partial trace operation can simulate discarding a qubit. Note
that this operation is inherently not pure since discarding one part of an
entangled pure state will result in a mixed state.
"""
IGate = Unitary(1,
TensorNetwork(open_edges=[0, 0], bond_dim=2),
"I", True)
"""Single-qubit Identity gate."""
SWAPGate = Unitary(2,
TensorNetwork(open_edges=[0, 1, 1, 0], bond_dim=2),
"SWAP", True)
"""Two-qubit SWAP gate."""
CZGate = Diagonal(2, np.array([[1, 1], [1, -1]]), name='CZ', self_adjoint=True)
"""Two-qubit CZ gate."""
XHalfGate = Unitary(1,
                    np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]]) / 2,  # the 1/2 factor makes the matrix unitary
                    "XHalf")
"""Single-qubit X-half gate, i.e., the square root of the Pauli X gate."""
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from scipy import sparse
import itertools
from ...models import WarpedGP
def in_ipynb():
try:
cfg = get_ipython().config
return 'IPKernelApp' in cfg
except NameError:
return False
def find_best_layout_for_subplots(num_subplots):
r, c = 1, 1
while (r*c) < num_subplots:
if (c==(r+1)) or (r==c):
c += 1
elif c==(r+2):
r += 1
c -= 1
return r, c
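# For example, find_best_layout_for_subplots(5) walks 1x1 -> 1x2 -> 1x3 -> 2x2 -> 2x3
# and returns (2, 3): the smallest near-square grid with at least 5 cells.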
def helper_predict_with_model(self, Xgrid, plot_raw, apply_link, percentiles, which_data_ycols, predict_kw, samples=0):
"""
Make the right decisions for prediction with a model
based on the standard arguments of plotting.
This is quite complex and will take a while to understand,
so do not change anything in here lightly!!!
"""
# Put some standards into the predict_kw so that prediction is done automatically:
if predict_kw is None:
predict_kw = {}
if 'likelihood' not in predict_kw:
if plot_raw:
from ...likelihoods import Gaussian
from ...likelihoods.link_functions import Identity
lik = Gaussian(Identity(), 1e-9) # Make the likelihood not add any noise
else:
lik = None
predict_kw['likelihood'] = lik
if 'Y_metadata' not in predict_kw:
predict_kw['Y_metadata'] = {}
if 'output_index' not in predict_kw['Y_metadata']:
predict_kw['Y_metadata']['output_index'] = Xgrid[:,-1:].astype(np.int)
mu, _ = self.predict(Xgrid, **predict_kw)
if percentiles is not None:
percentiles = self.predict_quantiles(Xgrid, quantiles=percentiles, **predict_kw)
else: percentiles = []
if samples > 0:
fsamples = self.posterior_samples(Xgrid, size=samples, **predict_kw)
fsamples = fsamples[:, which_data_ycols, :]
else:
fsamples = None
# Filter out the ycolums which we want to plot:
retmu = mu[:, which_data_ycols]
percs = [p[:, which_data_ycols] for p in percentiles]
if plot_raw and apply_link:
for i in range(len(which_data_ycols)):
retmu[:, [i]] = self.likelihood.gp_link.transf(mu[:, [i]])
for perc in percs:
perc[:, [i]] = self.likelihood.gp_link.transf(perc[:, [i]])
if fsamples is not None:
for s in range(fsamples.shape[-1]):
fsamples[:, i, s] = self.likelihood.gp_link.transf(fsamples[:, i, s])
return retmu, percs, fsamples
def helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution):
"""
Figure out the data, free_dims and create an Xgrid for
the prediction.
This is only implemented for two dimensions for now!
"""
#work out what the inputs are for plotting (1D or 2D)
if fixed_inputs is None:
fixed_inputs = []
fixed_dims = get_fixed_dims(fixed_inputs)
free_dims = get_free_dims(self, visible_dims, fixed_dims)
if len(free_dims) == 1:
#define the frame on which to plot
resolution = resolution or 200
Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits, resolution=resolution)
Xgrid = np.zeros((Xnew.shape[0],self.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
x = Xgrid
y = None
elif len(free_dims) == 2:
#define the frame for plotting on
resolution = resolution or 35
Xnew, x, y, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution)
Xgrid = np.zeros((Xnew.shape[0], self.input_dim))
Xgrid[:,free_dims] = Xnew
#xmin = Xgrid.min(0)[free_dims]
#xmax = Xgrid.max(0)[free_dims]
for i,v in fixed_inputs:
Xgrid[:,i] = v
else:
raise TypeError("calculated free_dims {} from visible_dims {} and fixed_dims {} is neither 1D nor 2D".format(free_dims, visible_dims, fixed_dims))
return fixed_dims, free_dims, Xgrid, x, y, xmin, xmax, resolution
def scatter_label_generator(labels, X, visible_dims, marker=None):
ulabels = []
for lab in labels:
if not lab in ulabels:
ulabels.append(lab)
if marker is not None:
marker = itertools.cycle(list(marker))
else:
m = None
try:
input_1, input_2, input_3 = visible_dims
except:
try:
# tuple or int?
input_1, input_2 = visible_dims
input_3 = None
except:
input_1 = visible_dims
input_2 = input_3 = None
for ul in ulabels:
from numbers import Number
if isinstance(ul, str):
try:
this_label = unicode(ul)
except NameError:
#python3
this_label = ul
elif isinstance(ul, Number):
this_label = 'class {!s}'.format(ul)
else:
this_label = ul
if marker is not None:
m = next(marker)
index = np.nonzero(labels == ul)[0]
if input_2 is None:
x = X[index, input_1]
y = np.zeros(index.size)
z = None
elif input_3 is None:
x = X[index, input_1]
y = X[index, input_2]
z = None
else:
x = X[index, input_1]
y = X[index, input_2]
z = X[index, input_3]
yield x, y, z, this_label, index, m
def subsample_X(X, labels, num_samples=1000):
"""
Stratified subsampling if labels are given.
    Due to rounding, the number of samples actually returned may differ
    slightly from num_samples.
"""
if X.shape[0] > num_samples:
print("Warning: subsampling X, as it has more samples then {}. X.shape={!s}".format(int(num_samples), X.shape))
if labels is not None:
subsample = []
for _, _, _, _, index, _ in scatter_label_generator(labels, X, (0, None, None)):
subsample.append(np.random.choice(index, size=max(2, int(index.size*(float(num_samples)/X.shape[0]))), replace=False))
subsample = np.hstack(subsample)
else:
            subsample = np.random.choice(X.shape[0], size=num_samples, replace=False)
        X = X[subsample]
        if labels is not None:
            labels = labels[subsample]
#=======================================================================
# <<<WORK IN PROGRESS>>>
# <<<DO NOT DELETE>>>
# plt.close('all')
# fig, ax = plt.subplots(1,1)
# from GPy.plotting.matplot_dep.dim_reduction_plots import most_significant_input_dimensions
# import matplotlib.patches as mpatches
# i1, i2 = most_significant_input_dimensions(m, None)
# xmin, xmax = 100, -100
# ymin, ymax = 100, -100
# legend_handles = []
#
# X = m.X.mean[:, [i1, i2]]
# X = m.X.variance[:, [i1, i2]]
#
# xmin = X[:,0].min(); xmax = X[:,0].max()
# ymin = X[:,1].min(); ymax = X[:,1].max()
# range_ = [[xmin, xmax], [ymin, ymax]]
# ul = np.unique(labels)
#
# for i, l in enumerate(ul):
# #cdict = dict(red =[(0., colors[i][0], colors[i][0]), (1., colors[i][0], colors[i][0])],
# # green=[(0., colors[i][0], colors[i][1]), (1., colors[i][1], colors[i][1])],
# # blue =[(0., colors[i][0], colors[i][2]), (1., colors[i][2], colors[i][2])],
# # alpha=[(0., 0., .0), (.5, .5, .5), (1., .5, .5)])
# #cmap = LinearSegmentedColormap('{}'.format(l), cdict)
# cmap = LinearSegmentedColormap.from_list('cmap_{}'.format(str(l)), [colors[i], colors[i]], 255)
# cmap._init()
# #alphas = .5*(1+scipy.special.erf(np.linspace(-2,2, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# alphas = (scipy.special.erf(np.linspace(0,2.4, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# cmap._lut[:, -1] = alphas
# print l
# x, y = X[labels==l].T
#
# heatmap, xedges, yedges = np.histogram2d(x, y, bins=300, range=range_)
# #heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
#
# im = ax.imshow(heatmap, extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cmap, aspect='auto', interpolation='nearest', label=str(l))
# legend_handles.append(mpatches.Patch(color=colors[i], label=l))
# ax.set_xlim(xmin, xmax)
# ax.set_ylim(ymin, ymax)
# plt.legend(legend_handles, [l.get_label() for l in legend_handles])
# plt.draw()
# plt.show()
#=======================================================================
return X, labels
def update_not_existing_kwargs(to_update, update_from):
"""
    This function updates the keyword arguments from update_from in
to_update, only if the keys are not set in to_update.
This is used for updated kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update
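# For example, update_not_existing_kwargs({'color': 'r'}, {'color': 'b', 'lw': 2})
# returns {'color': 'r', 'lw': 2}: defaults only fill in keys the caller left unset.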
def get_x_y_var(model):
"""
    Extract the data from a model as
    X, the inputs,
    X_variance, the variance of the inputs (default: None),
    and Y, the outputs.
    If (X, X_variance, Y) is given, this just returns it.
:returns: (X, X_variance, Y)
"""
# model given
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values
X_variance = model.X.variance.values
else:
try:
X = model.X.values
except AttributeError:
X = model.X
X_variance = None
try:
Y = model.Y.values
except AttributeError:
Y = model.Y
if isinstance(model, WarpedGP) and not model.predict_in_warped_space:
Y = model.Y_normalized
if sparse.issparse(Y): Y = Y.todense().view(np.ndarray)
return X, X_variance, Y
def get_free_dims(model, visible_dims, fixed_dims):
"""
    Work out which input dimensions to use for plotting (1D or 2D).
    visible_dims are the dimensions selected for plotting,
    fixed_dims are the dimensions held at fixed input values,
    and free_dims are the visible dims minus the fixed dims.
"""
if visible_dims is None:
visible_dims = np.arange(model.input_dim)
dims = np.asanyarray(visible_dims)
if fixed_dims is not None:
dims = [dim for dim in dims if dim not in fixed_dims]
    return np.asanyarray([dim for dim in dims if dim is not None])
import math
from pathlib import Path
from scipy import interpolate
import numpy as np
import os
from .. import use_desim
from ..desim import minidesim as dsm
# import DESHIMA.use_desim as use_desim
# import DESHIMA.desim.minidesim as dsm
# import SubplotAnimationSlider as aniS
# plt.style.use('dark_background')
class filterbank(object):
"""
Class that represents the filterbank in an MKID chip.
Properties
------------
Fmin : scalar
Resonance frequency of the filter with the smallest resonance frequency
Unit: Hz
R: scalar
        F / FWHM, where FWHM stands for full width at half maximum
Unit: -
Fmax : scalar
Resonance frequency of the filter with the largest resonance frequency
Unit: Hz
num_filters: scalar
Number of filters in the filterbank of the MKID
Unit: -
"""
def __init__(self, F_min, R, num_filters = 1, f_spacing = 380, num_bins = 1500, D1 = 0):
self.F_min = F_min
self.F0 = F_min
self.R = R
self.num_filters = num_filters
self.f_spacing = f_spacing
self.num_bins = num_bins
self.F_max = F_min * (1 + 1/f_spacing)**(num_filters - 1)
F = np.logspace(np.log10(self.F_min), np.log10(self.F_max), num_filters)
self.filters = F
self.FWHM = self.filters/R
self.D1 = D1
self.path_model = Path(__file__).parent.parent.parent
def calcLorentzian(self, x_array):
"""Calculates values of a Lorentzian curve.
Parameters
------------
x_array: vector
Frequencies of which the corresponding value of the Lorentzian curve
is calculated
Unit: Hz
Returns
------------
y_array: vector
Values of Lorentzian curve, calculated with the values given in x_array
Unit: -
"""
y_array = 1/math.pi * 1/2 * self.FWHM / ((x_array-self.F0)**2 + (1/2 * self.FWHM)**2)
return y_array
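    # Sketch (hypothetical values): response of a single filter at 350 GHz with
    # R = 500, evaluated on a narrow band around resonance; the Lorentzian
    # integrates to ~1 over frequency.
    #   fb = filterbank(F_min=350e9, R=500)
    #   F = np.linspace(349e9, 351e9, 1001)
    #   response = fb.calcLorentzian(F)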
def getPoints_etaF_curve(self, pwv, EL):
"""Obtains values of the atmospheric transmission eta_atm from desim,
with given values of the precipitable water vapor and elevation.
Parameters
------------
pwv: vector or scalar
Values of the precipitable water vapor for which the atmospheric
transmission is calculated.
Unit: mm
EL: vector or scalar
Values of the elevation for which the atmospheric
transmission is calculated.
Unit: degrees
Returns
------------
eta_atm: vector or scalar
Values of the atmospheric transmission, calculated with the given
values of pwv and EL
Unit: -
"""
eta_atm = dsm.eta_atm_func(self.filters, pwv, EL)
return eta_atm
def getPoints_TP_curve(self, EL_vector, pwv):
"""Obtains values of the KID power Pkid_summed and the sky temperature Tb_sky from desim,
with given values of the precipitable water vapor and elevation.
Parameters
------------
EL_vector: vector or scalar
Values of the elevation for which the KID power and sky temperature
are to be calculated.
Unit: degrees
pwv: vector or scalar
Values of the precipitable water vapor for which the KID power and
sky temperature are to be calculated.
Unit: mm
Returns
------------
Pkid_summed: vector or scalar
Values of the KID power, calculated with the given values of pwv and
EL. The filter response of the filters in the filterbank of the KID
is taken into account and is integrated to obtain the KID power.
Unit: W
Tb_sky: vector or scalar
Values of the sky temperature, calculated with the given
values of pwv and EL.
Unit: K
"""
use_desim_instance = use_desim.use_desim()
self.eta_atm_df, self.F_highres = dsm.load_eta_atm()
self.eta_atm_func_zenith = dsm.eta_atm_interp(self.eta_atm_df)
Tb_sky, psd_KID_desim, F_bins = use_desim_instance.calcT_psd_P(self.eta_atm_df, self.F_highres, self.eta_atm_func_zenith, self.filters, EL_vector, self.num_filters, pwv, self.R, self.num_bins, self.D1)
first_dif = F_bins[1] - F_bins[0]
last_dif = F_bins[-1] - F_bins[-2]
# delta_F = np.concatenate((np.array([0.]), np.logspace(np.log10(first_dif), np.log10(last_dif), self.num_bins-1)))
# delta_F = delta_F.reshape([1, delta_F.shape[0]])
delta_F = first_dif
Pkid = np.zeros(psd_KID_desim.shape)
for i in range(psd_KID_desim.shape[2]):
Pkid[:, :, i] = psd_KID_desim[:, :, i] * delta_F
length_EL_vector = len(EL_vector)
Pkid_summed = np.zeros([self.num_filters, length_EL_vector])
for j in range(0, self.num_filters):
Pkid_summed[j, :] = np.sum(Pkid[j, :, :], axis=0)
return Pkid_summed, Tb_sky
def save_TP_data(self, EL_vector, pwv_vector):
"""
Saves values of the KID power Pkid_summed and the sky temperature Tb_sky, that are obtained by the 'getPoints_TP_curve' method.
"""
for i in range(0, len(pwv_vector)):
Pkid, Tb_sky = self.getPoints_TP_curve(EL_vector, pwv_vector[i])
# filename_Pkid = "C:/Users/Esmee/Documents/BEP/DESHIMA/Python/BEP/Data/Pkid/Pkid_for_pwv_" \
# + str(pwv_vector[i]) + ".txt"
# filename_Tb_sky = "C:/Users/Esmee/Documents/BEP/DESHIMA/Python/BEP/Data/Tb_sky/Tb_sky_for_pwv_" \
# + str(pwv_vector[i]) + ".txt"
self.path_model.joinpath('Data/Pkid/').mkdir(parents = True, exist_ok = True)
self.path_model.joinpath('Data/Tb_sky/').mkdir(parents = True, exist_ok = True)
if self.D1:
filename_Pkid = self.path_model.joinpath('Data/Pkid/Pkid_for_pwv_' + str(pwv_vector[i]) + '_D1.txt')
filename_Tb_sky = self.path_model.joinpath('Data/Tb_sky/Tb_sky_for_pwv_' + str(pwv_vector[i]) + "_D1.txt")
else:
filename_Pkid = self.path_model.joinpath('Data/Pkid/Pkid_for_pwv_' + str(pwv_vector[i]) + '.txt')
filename_Tb_sky = self.path_model.joinpath('Data/Tb_sky/Tb_sky_for_pwv_' + str(pwv_vector[i]) + ".txt")
np.savetxt(filename_Pkid, Pkid)
np.savetxt(filename_Tb_sky, Tb_sky)
Pkid = 0; Tb_sky = 0
def save_etaF_data(self, pwv_vector, EL):
"""
Saves values of the atmospheric transmission eta_atm, that are obtained by the 'getPoints_etaF_curve' method.
"""
        eta_atm = np.zeros([len(pwv_vector), len(self.filters)])  # num_filters can also be a different (larger) number
for k in range(0, len(pwv_vector)):
eta_atm[k, :] = self.getPoints_etaF_curve(pwv_vector[k], EL)
# filename_eta_atm = "C:/Users/Esmee/Documents/BEP/DESHIMA/Python/BEP/Data/eta_atm/eta_atm.txt"
# filename_F= "C:/Users/Esmee/Documents/BEP/DESHIMA/Python/BEP/Data/F/F.txt"
self.path_model.joinpath('Data/eta_atm/').mkdir(parents = True, exist_ok = True)
self.path_model.joinpath('Data/F/').mkdir(parents = True, exist_ok = True)
filename_eta_atm = self.path_model.joinpath('Data/eta_atm/eta_atm.txt')
filename_F = self.path_model.joinpath('Data/F/F.txt')
np.savetxt(filename_eta_atm, eta_atm)
        np.savetxt(filename_F, self.filters)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 07 14:42:32 2021
@author: silviapagliarini
"""
import os
import numpy as np
import pandas as pd
import csv
from pydub import AudioSegment
import scipy.io.wavfile as wav
def opensmile_executable(data, baby_id, classes, args):
"""
    Generate a shell-executable text file with one openSMILE command line per
    extracted sound, so the features can be computed for each sound in the directory.
    If args.labels_creation is True, it also generates a csv file pairing each
    sound's index with its label.
INPUT
- path to directory
- type of dataset (can be a single directory, or a dataset keywords): see args.baby_id
OUTPUT
A text file for each directory with the command lines to compute MFCC for each extracted sound in the directory.
"""
f = open(args.data_dir + '/' + 'executable_opensmile_' + baby_id + '.txt', 'w+')
i = 0
while i < len(data):
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/InitialDatasets/singleVoc/single_vocalizations/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/completeDataset/'
name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/subsetSilence/'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/Datasets/HumanLabels/exp1'
#name = './build/progsrc/smilextract/SMILExtract -C config/mfcc/MFCC12_0_D_A.conf -I /Users/silviapagliarini/Documents/BabbleNN/interspeech_Wave'
if baby_id == 'AnneModel':
f.write(name + '/' + os.path.basename(data[i]) + ' -csvoutput ' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
else:
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/humanVSlena/human'
#output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/completeDataset'
output_dir = '/Users/silviapagliarini/Documents/opensmile/HumanData_analysis/subsetSilence'
os.makedirs(output_dir + '/' + baby_id, exist_ok=True)
for c in range(0,len(classes)):
os.makedirs(output_dir + '/' + baby_id + '/' + classes[c], exist_ok=True)
f.write(name + baby_id[0:4] + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
#f.write(name + '/' + baby_id + '_segments/' + os.path.basename(data[i]) + ' -csvoutput ' + output_dir + '/' + baby_id + '/' + os.path.basename(data[i])[0:-3] + 'mfcc.csv')
f.write('\n')
i = i + 1
f.close()
if args.labels_creation == True:
# writing the data rows
labels = []
i = 0
while i < len(data):
j = 0
while j < len(classes):
if os.path.basename(data[i]).find(classes[j]) != -1:
labels.append(classes[j])
j = j + 1
i = i + 1
with open(args.data_dir + '/' + 'LENAlabels_' + baby_id + '.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'Label'])
i = 0
while i < len(data):
csvwriter.writerow([str(i), labels[i]])
i = i + 1
print('Done')
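# Illustrative call (not part of the original script): `args` is the argparse namespace used
# throughout (providing data_dir and labels_creation), `wav_files` is a list of paths to the
# extracted .wav segments, and the class names mirror the LENA labels used in merge_labels() below.
# >>> opensmile_executable(wav_files, baby_id, classes=['CHNSP', 'CHNNSP', 'NOF'], args=args)  # doctest: +SKIP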
def list(args):
"""
Create a list of all the babies in the dataset in order to simplify the following steps of the analysis.
INPUT
- path to directory (subdirectories should be the single family directories).
OUTPUT
- .csv file with name of the baby and age of the baby in days.
"""
listDir = glob2.glob(args.data_dir + '/0*')
with open(args.data_dir + '/baby_list_basic.csv', 'w') as csvfile:
# creating a csv writer object
csvwriter = csv.writer(csvfile)
# writing the fields
csvwriter.writerow(['ID', 'AGE'])
i = 0
while i<len(listDir):
name = os.path.basename(listDir[i])
age = int(name[6])*365 + int(name[8]) * 30 + int(name[10])
csvwriter.writerow([name, age])
i = i + 1
print('Done')
def merge_labels(babies, args):
"""
Create a LENA-like .csv with the human corrections included. When a label has been identified as wrong, it is substituted with the
noise label NOF.
INPUT
- path to directory
- list of babies
OUTPUT
.csv file containing cleaned labels.
"""
for i in range(0,len(babies)):
print(babies[i])
lena = pd.read_csv(args.data_dir + '/' + babies[i] + '_segments.csv')
human = pd.read_csv(args.data_dir + '/' + babies[i] + '_scrubbed_CHNrelabel_lplf_1.csv')
time_stamp_lena_start = lena["startsec"]
time_stamp_lena_end = lena["endsec"]
prominence = human["targetChildProminence"]
lena_labels = lena["segtype"]
CHNSP_pos = np.where(lena_labels == 'CHNSP')[0]
CHNNSP_pos = np.where(lena_labels == 'CHNNSP')[0]
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for basic mappers'''
import numpy as np
# for repr
from numpy import array
from mvpa2.testing.tools import ok_, assert_raises, assert_false, assert_equal, \
assert_true, assert_array_equal, nodebug
from mvpa2.testing import sweepargs
from mvpa2.testing.datasets import datasets
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.base import ChainMapper, IdentityMapper
from mvpa2.featsel.base import StaticFeatureSelection
from mvpa2.mappers.slicing import SampleSliceMapper, StripBoundariesSamples
from mvpa2.support.copy import copy
from mvpa2.datasets.base import Dataset
from mvpa2.base.collections import ArrayCollectable
from mvpa2.datasets.base import dataset_wizard
from mvpa2.mappers.flatten import ProductFlattenMapper
import itertools
import operator
from mvpa2.base import externals
# arbitrary ndarray subclass for testing
class myarray(np.ndarray):
pass
def test_flatten():
samples_shape = (2, 2, 4)
data_shape = (4,) + samples_shape
data = np.arange(np.prod(data_shape)).reshape(data_shape).view(myarray)
pristinedata = data.copy()
target = [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]]
target = np.array(target).view(myarray)
index_target = np.array([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3],
[0, 1, 0], [0, 1, 1], [0, 1, 2], [0, 1, 3],
[1, 0, 0], [1, 0, 1], [1, 0, 2], [1, 0, 3],
[1, 1, 0], [1, 1, 1], [1, 1, 2], [1, 1, 3]])
# test only flattening the first two dimensions
fm_max = FlattenMapper(maxdims=2)
fm_max.train(data)
assert_equal(fm_max(data).shape, (4, 4, 4))
# array subclass survives
ok_(isinstance(data, myarray))
# actually, there should be no difference between a plain FlattenMapper and
# a chain that only has a FlattenMapper as the one element
for fm in [FlattenMapper(space='voxel'),
ChainMapper([FlattenMapper(space='voxel'),
StaticFeatureSelection(slice(None))])]:
# not working if untrained
assert_raises(RuntimeError,
fm.forward1,
np.arange(np.sum(samples_shape) + 1))
fm.train(data)
ok_(isinstance(fm.forward(data), myarray))
ok_(isinstance(fm.forward1(data[2]), myarray))
assert_array_equal(fm.forward(data), target)
assert_array_equal(fm.forward1(data[2]), target[2])
assert_raises(ValueError, fm.forward, np.arange(4))
# all of that leaves that data unmodified
assert_array_equal(data, pristinedata)
# reverse mapping
ok_(isinstance(fm.reverse(target), myarray))
ok_(isinstance(fm.reverse1(target[0]), myarray))
ok_(isinstance(fm.reverse(target[1:2]), myarray))
assert_array_equal(fm.reverse(target), data)
assert_array_equal(fm.reverse1(target[0]), data[0])
assert_array_equal(fm.reverse(target[1:2]), data[1:2])
assert_raises(ValueError, fm.reverse, np.arange(14))
# check one dimensional data, treated as scalar samples
oned = np.arange(5)
import base64
from collections import defaultdict
from typing import TYPE_CHECKING, Type
import numpy as np
if TYPE_CHECKING:
from pydantic import BaseModel
from ...typing import T
from ..pydantic_model import PydanticDocument
class PydanticMixin:
"""Provide helper functions to convert to/from a Pydantic model"""
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of Document class."""
from ..pydantic_model import PydanticDocument as DP
from pydantic import schema_json_of
return schema_json_of(DP, title='Document Schema', indent=indent)
def to_pydantic_model(self) -> 'PydanticDocument':
"""Convert a Document object into a Pydantic model."""
from ..pydantic_model import PydanticDocument as DP
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_pydantic_model()
elif f in ('scores', 'evaluations'):
_p_dict[f] = {k: v.to_dict() for k, v in v.items()}
elif f == 'blob':
_p_dict[f] = base64.b64encode(v).decode('utf8')
else:
_p_dict[f] = v
return DP(**_p_dict)
@classmethod
def from_pydantic_model(cls: Type['T'], model: 'BaseModel') -> 'T':
"""Build a Document object from a Pydantic model
:param model: the pydantic data model object that represents a Document
:return: a Document object
"""
from ... import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_pydantic_model(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_pydantic_model(d) for d in model.matches]
for (field, value) in model.dict(
exclude_none=True, exclude={'chunks', 'matches'}
).items():
f_name = field
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(v)
elif f_name == 'embedding' or f_name == 'tensor':
fields[f_name] = np.array(value)
"""
geoutils.vectortools provides a toolset for working with vector data.
"""
from __future__ import annotations
import warnings
from collections import abc
from numbers import Number
from typing import TypeVar
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import rasterio as rio
import shapely
from rasterio import features, warp
from rasterio.crs import CRS
from scipy.spatial import Voronoi
from shapely.geometry.polygon import Polygon
import geoutils as gu
# This is a generic Vector-type (if subclasses are made, this will change appropriately)
VectorType = TypeVar("VectorType", bound="Vector")
class Vector:
"""
Create a Vector object from a fiona-supported vector dataset.
"""
def __init__(self, filename: str | gpd.GeoDataFrame):
"""
Load a fiona-supported dataset, given a filename.
:param filename: The filename or GeoDataFrame of the dataset.
:return: A Vector object
"""
if isinstance(filename, str):
with warnings.catch_warnings():
# This warning shows up in numpy 1.21 (2021-07-09)
warnings.filterwarnings("ignore", ".*attribute.*array_interface.*Polygon.*")
ds = gpd.read_file(filename)
self.ds = ds
self.name: str | gpd.GeoDataFrame | None = filename
elif isinstance(filename, gpd.GeoDataFrame):
self.ds = filename
self.name = None
else:
raise ValueError("filename argument not recognised.")
self.crs = self.ds.crs
def __repr__(self) -> str:
return str(self.ds.__repr__())
def __str__(self) -> str:
"""Provide string of information about Raster."""
return self.info()
def info(self) -> str:
"""
Returns string of information about the vector (filename, coordinate system, number of layers, features, etc.).
:returns: text information about Vector attributes.
:rtype: str
"""
as_str = [ # 'Driver: {} \n'.format(self.driver),
f"Filename: {self.name} \n",
f"Coordinate System: EPSG:{self.ds.crs.to_epsg()}\n",
f"Number of features: {len(self.ds)} \n",
f"Extent: {self.ds.total_bounds.tolist()} \n",
f"Attributes: {self.ds.columns.tolist()} \n",
self.ds.__repr__(),
]
return "".join(as_str)
@property
def bounds(self) -> rio.coords.BoundingBox:
"""Get a bounding box of the total bounds of the Vector."""
return rio.coords.BoundingBox(*self.ds.total_bounds)
def copy(self: VectorType) -> VectorType:
"""Return a copy of the Vector."""
# Utilise the copy method of GeoPandas
new_vector = self.__new__(type(self))
new_vector.__init__(self.ds.copy())
return new_vector # type: ignore
def crop2raster(self, rst: gu.Raster) -> None:
"""
Update self so that features outside the extent of a raster file are cropped.
Reprojection is done on the fly if both data set have different projections.
:param rst: A Raster object or string to filename
"""
# If input is string, open as Raster
if isinstance(rst, str):
rst = gu.Raster(rst)
# Convert raster extent into self CRS
# Note: could skip this if we could test if projections are the same
# Note: should include a method in Raster to get extent in other projections, not only using corners
left, bottom, right, top = rst.bounds
x1, y1, x2, y2 = warp.transform_bounds(rst.crs, self.ds.crs, left, bottom, right, top)
self.ds = self.ds.cx[x1:x2, y1:y2]
def create_mask(
self,
rst: str | gu.georaster.RasterType | None = None,
crs: CRS | None = None,
xres: float | None = None,
yres: float | None = None,
bounds: tuple[float, float, float, float] | None = None,
buffer: int | float | np.number = 0,
) -> np.ndarray:
"""
Rasterize the vector features into a boolean raster which has the extent/dimensions of \
the provided raster file.
Alternatively, user can specify a grid to rasterize on using xres, yres, bounds and crs.
Only xres is mandatory, by default yres=xres and bounds/crs are set to self's.
Vector features which fall outside the bounds of the raster file are not written to the new mask file.
:param rst: A Raster object or string to filename
:param crs: A pyproj or rasterio CRS object (Default to rst.crs if not None then self.crs)
:param xres: Output raster spatial resolution in x. Only if rst is None.
:param yres: Output raster spatial resolution in y. Only if rst is None. (Default to xres)
:param bounds: Output raster bounds (left, bottom, right, top). Only if rst is None (Default to self bounds)
:param buffer: Size of buffer to be added around the features, in the raster's projection units.
If a negative value is set, will erode the features.
:returns: array containing the mask
"""
# If input rst is string, open as Raster
if isinstance(rst, str):
rst = gu.Raster(rst) # type: ignore
# If no rst given, use provided dimensions
if rst is None:
# At minimum, xres must be set
if xres is None:
raise ValueError("at least rst or xres must be set")
if yres is None:
yres = xres
# By default, use self's CRS and bounds
if crs is None:
crs = self.ds.crs
if bounds is None:
bounds = self.ds.total_bounds
# Calculate raster shape
left, bottom, right, top = bounds
width = abs((right - left) / xres)
height = abs((top - bottom) / yres)
if width % 1 != 0 or height % 1 != 0:
warnings.warn("Bounds not a multiple of xres/yres, use rounded bounds")
width = int(np.round(width))
height = int(np.round(height))
out_shape = (height, width)
# Calculate raster transform
transform = rio.transform.from_bounds(left, bottom, right, top, width, height)
# otherwise use directly rst's dimensions
elif isinstance(rst, gu.Raster):
out_shape = rst.shape
transform = rst.transform
crs = rst.crs
bounds = rst.bounds
else:
raise ValueError("`rst` must be either a str, geoutils.Raster or None")
# Copying GeoPandas dataframe before applying changes
gdf = self.ds.copy()
# Crop vector geometries to avoid issues when reprojecting
left, bottom, right, top = bounds # type: ignore
x1, y1, x2, y2 = warp.transform_bounds(crs, gdf.crs, left, bottom, right, top)
gdf = gdf.cx[x1:x2, y1:y2]
# Reproject vector into rst CRS
gdf = gdf.to_crs(crs)
# Create a buffer around the features
if not isinstance(buffer, (int, float, np.number)):
raise ValueError(f"`buffer` must be a number, currently set to {type(buffer)}")
if buffer != 0:
gdf.geometry = [geom.buffer(buffer) for geom in gdf.geometry]
elif buffer == 0:
pass
# Rasterize geometry
mask = features.rasterize(
shapes=gdf.geometry, fill=0, out_shape=out_shape, transform=transform, default_value=1, dtype="uint8"
).astype("bool")
# Force output mask to be of same dimension as input rst
if rst is not None:
mask = mask.reshape((rst.count, rst.height, rst.width)) # type: ignore
return mask
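# Minimal usage sketch (hypothetical file name, for illustration only): without `rst`, the
# vector is rasterized on a grid of resolution `xres` (in the CRS's units) spanning its own
# bounds, and the result is a 2D boolean array.
# >>> outlines = Vector("outlines.shp")        # doctest: +SKIP
# >>> mask = outlines.create_mask(xres=100)    # doctest: +SKIP
# >>> mask.dtype, mask.ndim                    # doctest: +SKIP
# (dtype('bool'), 2)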
def rasterize(
self,
rst: str | gu.georaster.RasterType | None = None,
crs: CRS | None = None,
xres: float | None = None,
yres: float | None = None,
bounds: tuple[float, float, float, float] | None = None,
in_value: int | float | abc.Iterable[int | float] | None = None,
out_value: int | float = 0,
) -> np.ndarray:
"""
Return an array with input geometries burned in.
By default, output raster has the extent/dimensions of the provided raster file.
Alternatively, user can specify a grid to rasterize on using xres, yres, bounds and crs.
Only xres is mandatory, by default yres=xres and bounds/crs are set to self's.
Burn value is set by user and can be either a single number, or an iterable of same length as self.ds.
Default is an index from 1 to len(self.ds).
:param rst: A raster to be used as reference for the output grid
:param crs: A pyproj or rasterio CRS object (Default to rst.crs if not None then self.crs)
:param xres: Output raster spatial resolution in x. Only if rst is None.
Must be in units of crs, if set.
:param yres: Output raster spatial resolution in y. Only if rst is None.
Must be in units of crs, if set. (Default to xres)
:param bounds: Output raster bounds (left, bottom, right, top). Only if rst is None
Must be in same system as crs, if set. (Default to self bounds).
:param in_value: Value(s) to be burned inside the polygons (Default is self.ds.index + 1)
:param out_value: Value to be burned outside the polygons (Default is 0)
:returns: array containing the burned geometries
"""
# If input rst is string, open as Raster
if isinstance(rst, str):
rst = gu.Raster(rst) # type: ignore
if (rst is not None) and (crs is not None):
raise ValueError("Only one of rst or crs can be provided.")
# Reproject vector into requested CRS or rst CRS first, if needed
# This has to be done first so that width/height calculated below are correct!
if crs is None:
crs = self.ds.crs
if rst is not None:
crs = rst.crs # type: ignore
vect = self.ds.to_crs(crs)
# If no rst given, now use provided dimensions
if rst is None:
# At minimum, xres must be set
if xres is None:
raise ValueError("at least rst or xres must be set")
if yres is None:
yres = xres
# By default, use self's bounds
if bounds is None:
bounds = vect.total_bounds
# Calculate raster shape
left, bottom, right, top = bounds
width = abs((right - left) / xres)
height = abs((top - bottom) / yres)
if width % 1 != 0 or height % 1 != 0:
warnings.warn("Bounds not a multiple of xres/yres, use rounded bounds")
width = int(np.round(width))
height = int(np.round(height))
out_shape = (height, width)
# Calculate raster transform
transform = rio.transform.from_bounds(left, bottom, right, top, width, height)
# otherwise use directly rst's dimensions
else:
out_shape = rst.shape # type: ignore
transform = rst.transform # type: ignore
# Set default burn value, index from 1 to len(self.ds)
if in_value is None:
in_value = self.ds.index + 1
# Rasterize geometry
if isinstance(in_value, abc.Iterable):
if len(in_value) != len(vect.geometry): # type: ignore
raise ValueError(
"in_value must have same length as self.ds.geometry, currently {} != {}".format(
len(in_value), len(vect.geometry) # type: ignore
)
)
out_geom = ((geom, value) for geom, value in zip(vect.geometry, in_value))
mask = features.rasterize(shapes=out_geom, fill=out_value, out_shape=out_shape, transform=transform)
elif isinstance(in_value, Number):
mask = features.rasterize(
shapes=vect.geometry, fill=out_value, out_shape=out_shape, transform=transform, default_value=in_value
)
else:
raise ValueError("in_value must be a single number or an iterable with same length as self.ds.geometry")
return mask
def query(self: VectorType, expression: str, inplace: bool = False) -> VectorType:
"""
Query the Vector dataset with a valid Pandas expression.
:param expression: A python-like expression to evaluate. Example: "col1 > col2"
:param inplace: Whether the query should modify the data in place or return a modified copy.
:returns: Vector resulting from the provided query expression or itself if inplace=True.
"""
# Modify inplace if wanted and return the self instance.
if inplace:
self.ds.query(expression, inplace=True)
return self
# Otherwise, create a new Vector from the queried dataset.
new_vector = self.__new__(type(self))
new_vector.__init__(self.ds.query(expression))
return new_vector # type: ignore
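# Usage sketch (assumes the dataset has numeric columns named "col1" and "col2", matching the
# docstring example above):
# >>> subset = vect.query("col1 > col2")           # returns a new Vector  # doctest: +SKIP
# >>> vect.query("col1 > col2", inplace=True)      # modifies vect.ds in place  # doctest: +SKIP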
def buffer_without_overlap(self, buffer_size: int | float, plot: bool = False) -> Vector:
"""
Returns a Vector object containing self's geometries extended by a buffer, without overlapping each other.
The algorithm is based upon this tutorial: https://statnmap.com/2020-07-31-buffer-area-for-nearest-neighbour/.
The buffered polygons are created using Voronoi polygons in order to delineate the "area of influence" \
of each geometry.
The buffer is slightly inaccurate where two geometries touch, due to the nature of the Voronoi polygons,\
hence one geometry "steps" slightly on the neighbor buffer in some cases.
The algorithm may also yield unexpected results on very simple geometries.
Note: A similar functionality is provided by momepy (http://docs.momepy.org) and is probably more robust.
It could be implemented in GeoPandas in the future: https://github.com/geopandas/geopandas/issues/2015
:examples:
>>> outlines = gu.Vector(gu.datasets.get_path('glacier_outlines'))
>>> outlines = gu.Vector(outlines.ds.to_crs('EPSG:32645'))
>>> buffer = outlines.buffer_without_overlap(500)
>>> ax = buffer.ds.plot() # doctest: +SKIP
>>> outlines.ds.plot(ax=ax, ec='k', fc='none') # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
:param buffer_size: Buffer size in self's coordinate system units.
:param plot: Set to True to show intermediate plots, useful for understanding or debugging.
:returns: A Vector containing the buffered geometries.
"""
# Dissolve all geometries into one
gdf = self.ds
merged = gdf.dissolve()
# Add buffer around geometries
merged_buffer = merged.buffer(buffer_size)
# Extract only the buffered area
buffer = merged_buffer.difference(merged)
# Crop Voronoi polygons to bound geometry and add missing polygons
bound_poly = gu.projtools.bounds2poly(gdf)
bound_poly = bound_poly.buffer(buffer_size)
voronoi_all = generate_voronoi_with_bounds(gdf, bound_poly)
if plot:
plt.figure(figsize=(16, 4))
ax1 = plt.subplot(141)
voronoi_all.plot(ax=ax1)
gdf.plot(fc="none", ec="k", ax=ax1)
ax1.set_title("Voronoi polygons, cropped")
# Extract Voronoi polygons only within the buffer area
voronoi_diff = voronoi_all.intersection(buffer.geometry[0])
# Split all polygons, and join attributes of original geometries into the Voronoi polygons
# Splitting, i.e. explode, is needed when Voronoi generate MultiPolygons that may extend over several features.
voronoi_gdf = gpd.GeoDataFrame(geometry=voronoi_diff.explode(index_parts=True)) # requires geopandas>=0.10
joined_voronoi = gpd.tools.sjoin(gdf, voronoi_gdf, how="right")
# Plot results -> some polygons are duplicated
if plot:
ax2 = plt.subplot(142, sharex=ax1, sharey=ax1)
joined_voronoi.plot(ax=ax2, column="index_left", alpha=0.5, ec="k")
gdf.plot(ax=ax2, column=gdf.index.values)
ax2.set_title("Buffer with duplicated polygons")
# Find non unique Voronoi polygons, and retain only first one
_, indexes = np.unique(joined_voronoi.index, return_index=True)
unique_voronoi = joined_voronoi.iloc[indexes]
# Plot results -> unique polygons only
if plot:
ax3 = plt.subplot(143, sharex=ax1, sharey=ax1)
unique_voronoi.plot(ax=ax3, column="index_left", alpha=0.5, ec="k")
gdf.plot(ax=ax3, column=gdf.index.values)
ax3.set_title("Buffer with unique polygons")
# Dissolve all polygons by original index
merged_voronoi = unique_voronoi.dissolve(by="index_left")
# Plot
if plot:
ax4 = plt.subplot(144, sharex=ax1, sharey=ax1)
gdf.plot(ax=ax4, column=gdf.index.values)
merged_voronoi.plot(column=merged_voronoi.index.values, ax=ax4, alpha=0.5)
ax4.set_title("Final buffer")
plt.show()
return gu.Vector(merged_voronoi)
# -----------------------------------------
# Additional stand-alone utility functions
# -----------------------------------------
def extract_vertices(gdf: gpd.GeoDataFrame) -> list[list[tuple[float, float]]]:
r"""
Function to extract the exterior vertices of all shapes within a gpd.GeoDataFrame.
:param gdf: The GeoDataFrame from which the vertices need to be extracted.
:returns: A list containing a list of (x, y) positions of the vertices. The length of the primary list is equal \
to the number of geometries inside gdf, and length of each sublist is the number of vertices in the geometry.
"""
vertices = []
# Loop on all geometries within gdf
for geom in gdf.geometry:
# Extract geometry exterior(s)
if geom.geom_type == "MultiPolygon":
exteriors = [p.exterior for p in geom]
elif geom.geom_type == "Polygon":
exteriors = [geom.exterior]
elif geom.geom_type == "LineString":
exteriors = [geom]
elif geom.geom_type == "MultiLineString":
exteriors = geom
else:
raise NotImplementedError(f"Geometry type {geom.geom_type} not implemented.")
vertices.extend([list(ext.coords) for ext in exteriors])
return vertices
def generate_voronoi_polygons(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Generate Voronoi polygons (tessellation) from the vertices of all geometries in a GeoDataFrame.
Uses scipy.spatial.voronoi.
:param gdf: The GeoDataFrame whose vertices are used for the Voronoi polygons.
:returns: A GeoDataFrame containing the Voronoi polygons.
"""
# Extract the coordinates of the vertices of all geometries in gdf
vertices = extract_vertices(gdf)
coords = np.concatenate(vertices)
# Create the Voronoi diagram and extract ridges
vor = Voronoi(coords)
lines = [shapely.geometry.LineString(vor.vertices[line]) for line in vor.ridge_vertices if -1 not in line]
polys = list(shapely.ops.polygonize(lines))
if len(polys) == 0:
raise ValueError("Invalid geometry, cannot generate finite Voronoi polygons")
# Convert into GeoDataFrame
voronoi = gpd.GeoDataFrame(geometry=gpd.GeoSeries(polys))
voronoi.crs = gdf.crs
return voronoi
def generate_voronoi_with_bounds(gdf: gpd.GeoDataFrame, bound_poly: Polygon) -> gpd.GeoDataFrame:
"""
Generate Voronoi polygons that are bounded by the polygon bound_poly, to avoid Voronoi polygons that extend \
far beyond the original geometry.
Voronoi polygons are created using generate_voronoi_polygons, cropped to the extent of bound_poly and gaps \
are filled with new polygons.
:param gdf: The GeoDataFrame whose vertices are used for the Voronoi polygons.
:param bound_poly: A shapely Polygon to be used for bounding the Voronoi diagrams.
:returns: A GeoDataFrame containing the Voronoi polygons.
"""
# Create Voronoi polygons
voronoi = generate_voronoi_polygons(gdf)
# Crop Voronoi polygons to input bound_poly extent
voronoi_crop = voronoi.intersection(bound_poly)
voronoi_crop = gpd.GeoDataFrame(geometry=voronoi_crop) # convert to DataFrame
# Dissolve all Voronoi polygons and subtract from bounds to get gaps
voronoi_merged = voronoi_crop.dissolve()
bound_gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(bound_poly))
bound_gdf.crs = gdf.crs
gaps = bound_gdf.difference(voronoi_merged)
# Merge cropped Voronoi with gaps, if not empty, otherwise return cropped Voronoi
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Geometry is in a geographic CRS. Results from 'area' are likely incorrect.")
tot_area = np.sum(gaps.area.values)
# coding=utf-8
"""
Plot the plasma zoo diagram (plasma types in a T-n diagram).
Simply run this script to produce a png plot:
$ python plasma_zoo.py
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'University of Stuttgart'
__license__ = 'MIT'
# import standard modules
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as consts
from matplotlib.colors import LogNorm
from matplotlib import ticker
# credit string to include at top of plot, to ensure people know they can use the plot
# (someone once told me, every plot appearing somewhere on the internet
# should contain information on how to use it, otherwise it is useless)
# note that the license refers only to that specific plot
# the license for the code is mentioned in the LICENSE file (and above)
credit_str = f'{__author__}, CC BY-SA 4.0'
def calc_debye( n=1e20, T=1, unit='eV' ):
#;{{{
"""
Calculate the Debye length.
Parameters
----------
n: float
plasma density in m^-3
T: float
plasma temperature in K (or eV, see parameter 'unit')
unit: str
if set to 'eV', plasma temperature is assumed to be in eV
Returns
-------
float
Debye length in meters.
"""
if unit == 'eV':
T *= consts.e/consts.k
return np.sqrt( consts.epsilon_0 * consts.k * T / (consts.e**2 * n) )
#;}}}
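# Worked example (approximate value, shown for orientation only): a plasma with
# n = 1e20 m^-3 and T = 10 eV has a Debye length of roughly 2.4 micrometres.
# >>> calc_debye(n=1e20, T=10, unit='eV') # doctest: +SKIP
# ~2.4e-06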
def calc_ND( n=1e20, T=1, unit='eV' ):
#;{{{
"""
Calculate the plasma parameter (number of particles in Debye sphere).
Parameters
----------
n: float
plasma density in m^-3
T: float
plasma temperature in K (or eV, see parameter 'unit')
unit: str
if set to 'eV', plasma temperature is assumed to be in eV
Returns
-------
float
Number of particles in Debye sphere.
"""
lambda_D = calc_debye(n,T,unit=unit)
return n * 4./3. * np.pi * lambda_D**3
#;}}}
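# For the same example plasma (n = 1e20 m^-3, T = 10 eV) the plasma parameter is roughly
# 5e3 particles per Debye sphere (approximate value, shown for orientation only).
# >>> calc_ND(n=1e20, T=10, unit='eV') # doctest: +SKIP
# ~5.4e+03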
def calc_Trel():
#;{{{
"""
Calculate the temperature when a plasma becomes relativistic.
Parameters
----------
Returns
-------
float
Temperature in eV above which the plasma becomes relativistic.
"""
return consts.m_e*consts.c**2 / consts.e
#;}}}
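# Sanity check: this is the electron rest energy m_e*c^2/e, i.e. about 5.11e5 eV (511 keV).
# >>> calc_Trel() # doctest: +SKIP
# ~5.11e+05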
def calc_Tdeg( plasma_density ):
#;{{{
"""
Calculate the plasma temperature at which the plasma becomes degenerate.
Parameters
----------
plasma_density: float
plasma density in m^-3
Returns
-------
float
temperature in eV
"""
return consts.hbar**2/(2.*consts.m_e) * (3.*np.pi**2*plasma_density)**(2./3.) / consts.e
#;}}}
def calc_Tnonideal( plasma_density ):
#;{{{
"""
Calculate the plasma temperature at which the plasma becomes non-ideal.
Parameters
----------
plasma_density: float
plasma density in m^-3
Returns
-------
float
temperature in eV
"""
# non-ideal plasmas with strong coupling parameter
return consts.e**2/(4.*np.pi*consts.epsilon_0) * plasma_density**(1./3.) / consts.e
#;}}}
def build_plasma_zoo():
#;{{{
"""
Return a dictionary containing the plasma zoo.
The keys of the dictionary are strings labelling the plasma type.
For each key, a numpy array with two elements is returned,
where the first element corresponds to the plasma density,
the second to the plasma temperature.
Parameters
----------
Returns
-------
dictionary
"""
plasma_zoo = {
'interstellar\nmedium': np.array([1e7, .8e0]),
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
elapsed = (i * 3) / 60
print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
return data
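# Hypothetical request sketch (reference designator taken from M2M_URLs below; the dates are
# placeholders and valid OOI API credentials must be set in AUTH above):
# >>> data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
# ...                 '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')  # doctest: +SKIP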
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
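# Typical downstream flow, assuming `data` came from M2M_Call above and `var_list` was built by
# M2M_URLs below (the regex tag and the printed values are illustrative, not guaranteed):
# >>> files = M2M_Files(data, '.*METBK.*\\.nc$')           # doctest: +SKIP
# >>> var_list, times = M2M_Data(files, var_list)          # doctest: +SKIP
# >>> var_list[1].name, var_list[1].units                  # doctest: +SKIP
# ('sea_surface_temperature', 'degC')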
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
    #ZPLSC - bio-acoustic sonar; only the time coordinate is requested for this instrument
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    #WAVSS - surface wave spectra (bulk wave statistics)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
    #VELPT - single-point velocity meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
        var_list[5].data = np.array([])
#!/usr/bin/python
# coding: UTF-8
#
# Author: <NAME>
# Contact: <EMAIL>
#
# Feel free to contact for any information.
from __future__ import division, print_function
import logging
import numpy as np
import scipy.optimize as opt
from scipy.linalg import norm as matrix_norm
########################################
## Declaring Class
class Preprocessor(object):
logger = logging.getLogger(__name__)
_peak_types = ["triang", "norm", "lorentz"]
def __init__(self, max_osc=-1, nH=1, energy_ratio=0.1):
self.nH = nH
self.max_osc = max_osc
self.ptype = "norm"
self.energy_ratio = energy_ratio
self.f_min = 0
self.f_max = 1e10
self.theta_init = None
@classmethod
def _remove_peak(cls, t, s, ptype="norm"):
"""Fit and remove peak of a given type"""
if ptype=="norm":
def peak(t, *p):
_t = (t-p[0])/p[2]
return p[1]*np.exp(-_t*_t)
_wd = 0.5
_amp = np.max(s)
_pos = t[s==_amp][0]
elif ptype=="triang":
def peak(t, *p):
s = 1-np.abs((t-p[0])/p[2])
s[s<0] = 0
return p[1]*s
_wd = 1.0
            _amp = np.max(s)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import numpy as np
from ogb.lsc.pcqm4m_pyg import PygPCQM4MDataset
from rdkit import Chem
import rdkit.Chem.AllChem as AllChem
import joblib
import math
from scipy.spatial.distance import cdist
# ===================== NODE START =====================
atomic_num_list = list(range(119))
chiral_tag_list = list(range(4))
degree_list = list(range(11))
possible_formal_charge_list = list(range(16))
possible_numH_list = list(range(9))
possible_number_radical_e_list = list(range(5))
possible_hybridization_list = ['SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'S']
possible_is_aromatic_list = [False, True]
possible_is_in_ring_list = [False, True]
explicit_valence_list = list(range(13))
implicit_valence_list = list(range(13))
total_valence_list = list(range(26))
total_degree_list = list(range(32))
def simple_atom_feature(atom):
atomic_num = atom.GetAtomicNum()
assert atomic_num in atomic_num_list
chiral_tag = int(atom.GetChiralTag())
assert chiral_tag in chiral_tag_list
degree = atom.GetTotalDegree()
assert degree in degree_list
possible_formal_charge = atom.GetFormalCharge()
possible_formal_charge_transformed = possible_formal_charge + 5
assert possible_formal_charge_transformed in possible_formal_charge_list
possible_numH = atom.GetTotalNumHs()
assert possible_numH in possible_numH_list
# 5
possible_number_radical_e = atom.GetNumRadicalElectrons()
assert possible_number_radical_e in possible_number_radical_e_list
possible_hybridization = str(atom.GetHybridization())
assert possible_hybridization in possible_hybridization_list
possible_hybridization = possible_hybridization_list.index(possible_hybridization)
possible_is_aromatic = atom.GetIsAromatic()
assert possible_is_aromatic in possible_is_aromatic_list
possible_is_aromatic = possible_is_aromatic_list.index(possible_is_aromatic)
possible_is_in_ring = atom.IsInRing()
assert possible_is_in_ring in possible_is_in_ring_list
possible_is_in_ring = possible_is_in_ring_list.index(possible_is_in_ring)
explicit_valence = atom.GetExplicitValence()
assert explicit_valence in explicit_valence_list
# 10
implicit_valence = atom.GetImplicitValence()
assert implicit_valence in implicit_valence_list
total_valence = atom.GetTotalValence()
assert total_valence in total_valence_list
total_degree = atom.GetTotalDegree()
assert total_degree in total_degree_list
sparse_features = [
atomic_num, chiral_tag, degree, possible_formal_charge_transformed, possible_numH,
possible_number_radical_e, possible_hybridization, possible_is_aromatic, possible_is_in_ring, explicit_valence,
implicit_valence, total_valence, total_degree,
]
return sparse_features
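# Note on ordering: the 13 integer codes returned above are, in order, atomic number,
# chiral tag, total degree, formal charge shifted by +5, total hydrogen count, radical
# electron count, hybridization index, aromaticity flag, ring-membership flag, explicit
# valence, implicit valence, total valence, and total degree (which duplicates the
# earlier degree entry).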
def easy_bin(x, bin):
x = float(x)
cnt = 0
if math.isinf(x):
return 120
if math.isnan(x):
return 121
while True:
if cnt == len(bin):
return cnt
if x > bin[cnt]:
cnt += 1
else:
return cnt
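# Illustrative note: easy_bin maps a scalar onto the number of sorted bin edges it strictly
# exceeds (len(bin) if it exceeds them all), with 120 reserved for +/-inf and 121 for NaN.
# For example, easy_bin(1.65, [1.2, 1.5, 1.55, 1.6, 1.7, 1.8, 2.4]) returns 4.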
def peri_features(atom, peri):
rvdw = peri.GetRvdw(atom.GetAtomicNum())
default_valence = peri.GetDefaultValence(atom.GetAtomicNum())
n_outer_elecs = peri.GetNOuterElecs(atom.GetAtomicNum())
rb0 = peri.GetRb0(atom.GetAtomicNum())
sparse_features = [
default_valence,
n_outer_elecs,
easy_bin(rvdw, [1.2 , 1.5 , 1.55, 1.6 , 1.7 , 1.8 , 2.4]),
easy_bin(rb0, [0.33 , 0.611, 0.66 , 0.7 , 0.77 , 0.997, 1.04 , 1.54])
]
return sparse_features
def envatom_feature(mol, radius, atom_idx):
env= Chem.FindAtomEnvironmentOfRadiusN(mol, radius, atom_idx, useHs=True)
submol=Chem.PathToSubmol(mol, env, atomMap={})
return submol.GetNumAtoms()
def envatom_features(mol, atom):
return [
envatom_feature(mol, r, atom.GetIdx()) for r in range(2, 9)
]
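# Taken together these seven counts describe the size of each atom's local environment:
# the number of atoms in the circular substructure within topological radius 2 through 8.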
def atom_to_feature_vector(atom, peri, mol):
sparse_features = []
sparse_features.extend(simple_atom_feature(atom))
sparse_features.extend(peri_features(atom, peri))
sparse_features.extend(envatom_features(mol, atom))
sparse_features.append(easy_bin(atom.GetProp('_GasteigerCharge'),
[-0.87431233, -0.47758285, -0.38806704, -0.32606976, -0.28913129,
-0.25853269, -0.24494531, -0.20136365, -0.12197541, -0.08234462,
-0.06248558, -0.06079668, -0.05704827, -0.05296379, -0.04884997,
-0.04390136, -0.03881107, -0.03328515, -0.02582824, -0.01916618,
-0.01005982, 0.0013529 , 0.01490858, 0.0276433 , 0.04070013,
0.05610381, 0.07337645, 0.08998278, 0.11564625, 0.14390777,
0.18754518, 0.27317209, 1. ]))
return sparse_features
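# Minimal usage sketch (an illustration, not part of the original pipeline): the helper
# name, the example SMILES string and the explicit ComputeGasteigerCharges call are
# assumptions made here for demonstration; atom_to_feature_vector only requires that the
# '_GasteigerCharge' atom property has been populated before it is called.
def example_node_features(smiles='CCO'):
    mol = Chem.MolFromSmiles(smiles)
    AllChem.ComputeGasteigerCharges(mol)  # fills the '_GasteigerCharge' atom property
    peri = Chem.GetPeriodicTable()
    feats = [atom_to_feature_vector(atom, peri, mol) for atom in mol.GetAtoms()]
    return np.array(feats, dtype=np.int64)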
import os.path as osp
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures
fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
chem_feature_factory = ChemicalFeatures.BuildFeatureFactory(fdef_name)
def donor_acceptor_feature(x_num, mol):
chem_feature_factory_feats = chem_feature_factory.GetFeaturesForMol(mol)
features = np.zeros([x_num, 2], dtype = np.int64)
for i in range(len(chem_feature_factory_feats)):
if chem_feature_factory_feats[i].GetFamily() == 'Donor':
node_list = chem_feature_factory_feats[i].GetAtomIds()
for j in node_list:
features[j, 0] = 1
elif chem_feature_factory_feats[i].GetFamily() == 'Acceptor':
node_list = chem_feature_factory_feats[i].GetAtomIds()
for j in node_list:
features[j, 1] = 1
return features
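# Column 0 of the returned matrix flags hydrogen-bond donor atoms and column 1 flags
# acceptor atoms, as assigned by RDKit's BaseFeatures.fdef feature factory.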
chiral_centers_list = ['R', 'S']
def chiral_centers_feature(x_num, mol):
features = np.zeros([x_num, 1], dtype = np.int64)
t = Chem.FindMolChiralCenters(mol)
for i in t:
idx, type = i
        features[idx] = chiral_centers_list.index(type) + 1  # 0 is reserved for atoms that are not chiral centers
return features
# ===================== NODE END =====================
# ===================== BOND START =====================
possible_bond_type_list = list(range(32))
possible_bond_stereo_list = list(range(16))
possible_is_conjugated_list = [False, True]
possible_is_in_ring_list = [False, True]
possible_bond_dir_list = list(range(16))
def bond_to_feature_vector(bond):
# 0
bond_type = int(bond.GetBondType())
assert bond_type in possible_bond_type_list
bond_stereo = int(bond.GetStereo())
assert bond_stereo in possible_bond_stereo_list
is_conjugated = bond.GetIsConjugated()
assert is_conjugated in possible_is_conjugated_list
is_conjugated = possible_is_conjugated_list.index(is_conjugated)
is_in_ring = bond.IsInRing()
assert is_in_ring in possible_is_in_ring_list
is_in_ring = possible_is_in_ring_list.index(is_in_ring)
bond_dir = int(bond.GetBondDir())
assert bond_dir in possible_bond_dir_list
bond_feature = [
bond_type,
bond_stereo,
is_conjugated,
is_in_ring,
bond_dir,
]
return bond_feature
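# Minimal usage sketch (an illustration, not part of the original file; the helper name is
# an assumption): building the usual edge_index / edge_attr pair, storing each bond in both
# directions as is customary for undirected molecular graphs.
def example_edge_features(mol):
    rows, cols, feats = [], [], []
    for bond in mol.GetBonds():
        i, j = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
        f = bond_to_feature_vector(bond)
        rows += [i, j]
        cols += [j, i]
        feats += [f, f]
    edge_index = np.array([rows, cols], dtype=np.int64)
    edge_attr = np.array(feats, dtype=np.int64)
    return edge_index, edge_attr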
# ===================== BOND END =====================
# ===================== ATTN START =====================
def get_rel_pos(mol):
try:
new_mol = Chem.AddHs(mol)
res = AllChem.EmbedMultipleConfs(new_mol, numConfs=10)
### MMFF generates multiple conformations
res = AllChem.MMFFOptimizeMoleculeConfs(new_mol)
new_mol = Chem.RemoveHs(new_mol)
        index = np.argmin([x[1] for x in res])
import unittest
import numpy as np
from nptest import nptest
class LargeArrayTests(unittest.TestCase):
def test_largearray_matmul_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat)
print(z)
def test_largearray_matmul_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_1(self):
width = 2048
height = 2048
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_2(self):
width = 4096
height = 4096
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(1, width), y_range.reshape(height, 1))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_copy_int64_1(self):
length = 268435435 # (Int32.MaxValue) / sizeof(double) - 20;
x = np.arange(0, length, 1, dtype = np.int64);
z = np.sum(x);
print(z)
y = x.copy()
z = np.sum(y)
print(z)
def test_largearray_copy_int64_2(self):
length = 268435434 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
z = np.sum(x, axis=0);
z = np.sum(z)
print(z)
y = x.copy()
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_meshgrid_int64_2(self):
length = 100 * 100
x = np.arange(0,length, 1, dtype = np.int64)
x1, x2 = np.meshgrid(x,x)
print(x1.shape)
print(x2.shape)
z = np.sum(x1)
print(z)
z = np.sum(x2)
print(z)
def test_largearray_checkerboard_1(self):
x = np.zeros((2048,2048),dtype=int)
x[1::2,::2] = 1
x[::2,1::2] = 1
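        # the two strided assignments cover disjoint halves of the grid, so the printed sum
        # should be 2048*2048/2 = 2097152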
print(np.sum(x))
def test_largearray_byteswap_int64_2(self):
length = 1024 * 1024* 32 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
y = x.byteswap();
z = np.sum(y, axis=0);
z = np.sum(z)
print(z)
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_unique_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
matrix = matrix[1:40:2, 1:-2:1]
uvalues, indexes, inverse, counts = np.unique(matrix, return_counts = True, return_index=True, return_inverse=True);
print(np.sum(uvalues))
print(np.sum(indexes))
print(np.sum(inverse))
print(np.sum(counts))
def test_largearray_where_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
print(np.sum(matrix))
indices = np.where(matrix % 2 == 0);
m1 = matrix[indices]
print(np.sum(m1))
def test_largearray_insert_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.insert(matrix, 0, [999,100,101])
print(np.sum(m1))
def test_largearray_append_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.append(matrix, [999,100,101])
print(np.sum(m1))
def test_largearray_concatenate_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.arange(1, 16000001, dtype=np.int64).reshape((40, -1));
c = np.concatenate((a, b), axis=0)
print(np.sum(c))
#d = np.concatenate((a.T, b), axis=1)
#print(np.sum(d))
e = np.concatenate((a, b), axis=None)
print(np.sum(e))
def test_largearray_min_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amin(a)
print(np.sum(b))
b = np.amin(a, axis=0)
print(np.sum(b))
b = np.amin(a, axis=1)
print(np.sum(b))
def test_largearray_max_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amax(a)
print(np.sum(b))
b = np.amax(a, axis=0)
print(np.sum(b))
b = np.amax(a, axis=1)
print(np.sum(b))
def test_largearray_setdiff1d_INT64(self):
a = np.arange(16000000, dtype=np.int64);
b = np.array([3, 4, 5, 6])
c = np.setdiff1d(a, b)
print(np.sum(a))
print(np.sum(b))
print(np.sum(c))
def test_largearray_copyto_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape(-1, 5);
print(np.sum(a))
b = np.array([1, 2, 3, 4, 5])
np.copyto(a, b)
print(np.sum(a))
a = np.arange(16000000, dtype=np.int64).reshape(-1, 5);
b = np.array([1, 2, 3, 4, 5])
np.copyto(a, b, where = b % 2 == 0)
print(np.sum(a))
def test_largearray_sin_DOUBLE(self):
a = np.ones(16000000, dtype=np.float64).reshape(-1, 5);
b = np.sin(a)
print(np.sum(b))
def test_largearray_diff_INT64(self):
a = np.arange(0, 16000000 * 3, 3, dtype=np.int64).reshape(-1, 5);
b = np.diff(a)
print(np.sum(b))
def test_largearray_ediff1d_INT64(self):
a = np.arange(0, 16000000 * 3, 3, dtype=np.int64).reshape(-1, 5);
        b = np.ediff1d(a)
# -*- coding: utf-8 -*-
"""
Immersion factor calibration.
"""
# Importation of modules
import os
import time
import h5py
import string
import deepdish
import datetime
import numpy as np
from scipy import stats
import matplotlib
import matplotlib.pyplot as plt
from refractivesqlite import dboperations as DB # https://github.com/HugoGuillen/refractiveindex.info-sqlite
# Importation of other modules
from source.processing import ProcessImage, FigureFunctions
from source.geometric_rolloff import MatlabGeometricMengine
# Functions
def radiance_increase(nw, na):
"""
    Radiance increase below water (n-squared law).
    :param nw: refractive index of water
    :param na: refractive index of air
    :return: in-water to in-air radiance ratio
"""
return transmittance(nw, na) * nw ** 2
def transmittance(n1, n2):
"""
    Fresnel equation for transmittance (normal incidence) from medium 1 toward medium 2.
    :param n1: refractive index of medium 1
    :param n2: refractive index of medium 2
    :return: transmitted fraction
"""
return 1 - (((n1 - n2) ** 2) / ((n1 + n2) ** 2))
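# Worked example (illustrative values, assuming nw of about 1.34 for seawater and na = 1.0
# for air): transmittance(1.34, 1.0) is about 0.979 at normal incidence, so
# radiance_increase(1.34, 1.0) is about 0.979 * 1.34**2, i.e. roughly 1.76.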
def imagestack_averaging(imagelist, framenumber, dframe, which):
"""
:param imagelist:
:param framenumber:
:param dframe:
:param which:
:return:
"""
p = ProcessImage()
iternumber = int(len(imagelist) / framenumber)
    imstack = np.zeros((darkframe.shape[0], darkframe.shape[0], iternumber))
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass keyword of the form: `mark_writer_reader=reason`,
or `mark_engine=reason` to apply to all parameters with that engine."""
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
# attempt to load as category that which is not so encoded
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
# attempt to load as category unknown column
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_filters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
def check_compression(engine, filename, compression):
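    # Added annotation: inspects the on-disk parquet metadata (per engine) to
    # confirm whether the requested compression codec was actually applied to the
    # column chunks.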
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
out = dd.read_parquet(fn, engine=engine)
assert_eq(out, ddf)
check_compression(engine, fn, compression)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=["x"])
check_compression(engine, fn, compression)
@pytest.fixture(
params=[
# fastparquet 0.1.3
{
"columns": [
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.7.1
{
"columns": [
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.8.0
{
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
},
# TODO: fastparquet update
]
)
def pandas_metadata(request):
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(
pandas_metadata
)
assert index_names == ["idx"]
assert column_names == ["A"]
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata["index_columns"] == ["__index_level_0__"]:
assert mapping == {"__index_level_0__": "idx", "A": "A"}
else:
assert mapping == {"idx": "idx", "A": "A"}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ["x"]
e_mapping = {"__index_level_0__": None, "x": "x"}
e_column_index_names = [None]
md = {
"columns": [
{
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "__index_level_0__",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
# pyarrow 0.8.0 None for index
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "x",
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": None,
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
@PYARROW_MARK
def test_read_no_metadata(tmpdir, engine):
# use pyarrow.parquet to create a parquet file without
# pandas metadata
tmp = str(tmpdir) + "table.parq"
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=["A", "B"]
)
pq.write_table(table, tmp)
result = dd.read_parquet(tmp, engine=engine)
expected = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
assert_eq(result, expected)
def test_parse_pandas_metadata_duplicate_index_columns():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_parse_pandas_metadata_column_with_index_name():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_writing_parquet_with_kwargs(tmpdir, engine):
fn = str(tmpdir)
path1 = os.path.join(fn, "normal")
path2 = os.path.join(fn, "partitioned")
pytest.importorskip("snappy")
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
engine_kwargs = {
"pyarrow-dataset": {
"compression": "snappy",
"coerce_timestamps": None,
"use_dictionary": True,
},
"fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
}
engine_kwargs["pyarrow-legacy"] = engine_kwargs["pyarrow-dataset"]
ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
out = dd.read_parquet(path1, engine=engine)
assert_eq(out, ddf, check_index=(engine != "fastparquet"))
# Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
with dask.config.set(scheduler="sync"):
ddf.to_parquet(
path2, engine=engine, partition_on=["a"], **engine_kwargs[engine]
)
out = dd.read_parquet(path2, engine=engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):
fn = str(tmpdir)
with pytest.raises(TypeError):
ddf.to_parquet(fn, engine=engine, unknown_key="unknown_value")
@ANY_ENGINE_MARK
def test_to_parquet_with_get(tmpdir):
from dask.multiprocessing import get as mp_get
tmpdir = str(tmpdir)
flag = [False]
def my_get(*args, **kwargs):
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, compute_kwargs={"scheduler": my_get})
assert flag[0]
result = dd.read_parquet(os.path.join(tmpdir, "*"))
assert_eq(result, df, check_index=False)
def test_select_partitioned_column(tmpdir, engine):
pytest.importorskip("snappy")
fn = str(tmpdir)
size = 20
d = {
"signal1": np.random.normal(0, 0.3, size=size).cumsum() + 50,
"fake_categorical1": | np.random.choice(["A", "B", "C"], size=size) | numpy.random.choice |
import numpy as np
import numpy.testing as npt
import pytest
from quara.objects import matrix_basis
from quara.objects.composite_system import CompositeSystem
from quara.objects.elemental_system import ElementalSystem
from quara.objects.gate import get_h, get_i, get_x, get_cnot, get_swap, get_cz
from quara.objects.mprocess_typical import generate_mprocess_from_name
from quara.objects.povm import (
get_x_povm,
get_y_povm,
get_z_povm,
get_xx_povm,
get_xy_povm,
get_yy_povm,
get_zz_povm,
)
from quara.objects.state import get_x0_1q, get_y0_1q, get_z0_1q
from quara.qcircuit.experiment import (
Experiment,
QuaraScheduleItemError,
QuaraScheduleOrderError,
)
from quara.objects.operators import compose_qoperations, tensor_product
class TestExperiment:
def array_states_povms_gates(self):
# Array
e_sys = ElementalSystem(0, matrix_basis.get_comp_basis())
c_sys = CompositeSystem([e_sys])
# State
state_0 = get_x0_1q(c_sys)
state_1 = get_y0_1q(c_sys)
states = [state_0, state_1]
# POVM
povm_0 = get_x_povm(c_sys)
povm_1 = get_x_povm(c_sys)
povms = [povm_0, povm_1]
# Gate
gate_0 = get_i(c_sys)
gate_1 = get_h(c_sys)
gates = [gate_0, gate_1]
return states, povms, gates
def array_experiment_data(self):
# Array
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys1 = CompositeSystem([e_sys1])
state_list = [get_x0_1q(c_sys1), get_y0_1q(c_sys1)]
gate_list = [get_i(c_sys1), get_x(c_sys1)]
povm_list = [get_x_povm(c_sys1), get_y_povm(c_sys1)]
schedules = [
[("state", 0), ("gate", 0), ("povm", 0)],
[("state", 0), ("gate", 0), ("povm", 1)],
]
seed = 7
exp = Experiment(
states=state_list,
povms=povm_list,
gates=gate_list,
schedules=schedules,
seed_data=seed,
)
return exp
def array_experiment_data_with_all_mode(self):
# Array
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys1 = CompositeSystem([e_sys1])
state_list = [get_x0_1q(c_sys1), get_y0_1q(c_sys1)]
gate_list = [get_i(c_sys1), get_x(c_sys1)]
povm_list = [get_x_povm(c_sys1), get_y_povm(c_sys1)]
mprocess_list = [
generate_mprocess_from_name(c_sys1, "x-type1"),
generate_mprocess_from_name(c_sys1, "y-type1"),
]
schedules = [
[("state", 0), ("gate", 0), ("mprocess", 0), ("povm", 0)],
[("state", 0), ("gate", 0), ("mprocess", 0), ("povm", 1)],
]
seed = 7
exp = Experiment(
states=state_list,
povms=povm_list,
gates=gate_list,
mprocesses=mprocess_list,
schedules=schedules,
seed_data=seed,
)
return exp
def array_experiment_data_2qubit(self):
# Array
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys1 = CompositeSystem([e_sys1])
e_sys2 = ElementalSystem(2, matrix_basis.get_normalized_pauli_basis())
c_sys2 = CompositeSystem([e_sys2])
c_sys12 = CompositeSystem([e_sys1, e_sys2])
# Gate
cnot = get_cnot(c_sys12, e_sys1)
swap = get_swap(c_sys12)
cz = get_cz(c_sys12)
# POVM
povm_xx = get_xx_povm(c_sys12)
povm_xy = get_xy_povm(c_sys12)
povm_yy = get_yy_povm(c_sys12)
povm_zz = get_zz_povm(c_sys12)
# State
state1 = get_z0_1q(c_sys1)
state2 = get_z0_1q(c_sys2)
h1 = get_h(c_sys1)
state1 = compose_qoperations(h1, state1)
state12 = tensor_product(state1, state2)
state_list = [state12]
povm_list = [povm_xx, povm_xy, povm_yy, povm_zz]
gate_list = [cnot, swap, cz]
schedules = [
[("state", 0), ("gate", 0), ("povm", 0)],
[("state", 0), ("gate", 0), ("povm", 1)],
[("state", 0), ("gate", 0), ("povm", 2)],
[("state", 0), ("gate", 1), ("povm", 0)],
]
seed = 7
exp = Experiment(
states=state_list,
povms=povm_list,
gates=gate_list,
schedules=schedules,
seed_data=seed,
)
return exp
def array_experiment_data_2qubit_2gate(self):
# Array
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys1 = CompositeSystem([e_sys1])
e_sys2 = ElementalSystem(2, matrix_basis.get_normalized_pauli_basis())
c_sys2 = CompositeSystem([e_sys2])
c_sys12 = CompositeSystem([e_sys1, e_sys2])
# Gate
cnot = get_cnot(c_sys12, e_sys2)
swap = get_swap(c_sys12)
# State
state1 = get_z0_1q(c_sys1)
state2 = get_z0_1q(c_sys2)
h = get_h(c_sys2)
state2 = compose_qoperations(h, state2)
state12 = tensor_product(state1, state2)
# POVM
povm_xx = get_xx_povm(c_sys12)
povm_xy = get_xy_povm(c_sys12)
povm_yy = get_yy_povm(c_sys12)
state_list = [state12]
povm_list = [povm_xx, povm_xy, povm_yy]
gate_list = [cnot, swap]
schedules = [
[("state", 0), ("gate", 0), ("gate", 1), ("povm", 0)],
[("state", 0), ("gate", 0), ("gate", 1), ("povm", 1)],
[("state", 0), ("gate", 0), ("gate", 1), ("povm", 2)],
]
seed = 7
exp = Experiment(
states=state_list,
povms=povm_list,
gates=gate_list,
schedules=schedules,
seed_data=seed,
)
return exp
def test_qoperations(self):
# Arrange
experiment = self.array_experiment_data_with_all_mode()
# Act & Assert
assert len(experiment.qoperations("state")) == 2
# Act & Assert
assert len(experiment.qoperations("povm")) == 2
# Act & Assert
assert len(experiment.qoperations("gate")) == 2
# Act & Assert
assert len(experiment.qoperations("mprocess")) == 2
# Act & Assert
with pytest.raises(ValueError):
experiment.qoperations("unsupported")
def test_num_qoperations(self):
# Arrange
experiment = self.array_experiment_data_with_all_mode()
# Act & Assert
assert experiment.num_qoperations("state") == 2
# Act & Assert
assert experiment.num_qoperations("povm") == 2
# Act & Assert
assert experiment.num_qoperations("gate") == 2
# Act & Assert
assert experiment.num_qoperations("mprocess") == 2
# Act & Assert
with pytest.raises(ValueError):
experiment.num_qoperations("unsupported")
def test_reset_seed(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
states = [get_z0_1q(c_sys)]
gates = [get_x(c_sys)]
povm_x = get_x_povm(c_sys)
povm_y = get_y_povm(c_sys)
povm_z = get_z_povm(c_sys)
povms = [povm_x, povm_y, povm_z]
schedules = [
[("state", 0), ("povm", 0)],
[("state", 0), ("povm", 1)],
[("state", 0), ("povm", 2)],
]
seed = 7
experiment = Experiment(
states=states, gates=gates, povms=povms, schedules=schedules, seed_data=seed
)
# init
actual = experiment.generate_data(0, 10)
expected = [0, 1, 0, 1, 1, 1, 1, 0, 0, 0]
assert np.all(actual == expected)
# reset
seed = 77
experiment.reset_seed_data(seed)
actual = experiment.generate_data(0, 10)
expected = [1, 1, 1, 0, 0, 1, 0, 1, 0, 1]
assert np.all(actual == expected)
def test_copy(self):
e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys = CompositeSystem([e_sys])
states = [get_z0_1q(c_sys)]
gates = [get_x(c_sys)]
povm_x = get_x_povm(c_sys)
povm_y = get_y_povm(c_sys)
povm_z = get_z_povm(c_sys)
povms = [povm_x, povm_y, povm_z]
schedules = [
[("state", 0), ("povm", 0)],
[("state", 0), ("povm", 1)],
[("state", 0), ("povm", 2)],
]
seed = 7
experiment = Experiment(
states=states, gates=gates, povms=povms, schedules=schedules, seed_data=seed
)
experiment_copy = experiment.copy()
assert experiment_copy.states is not experiment.states
for actual, expected in zip(experiment_copy.states, experiment.states):
assert np.all(actual.vec == expected.vec)
assert experiment_copy.gates is not experiment.gates
for actual, expected in zip(experiment_copy.gates, experiment.gates):
assert np.all(actual.hs == expected.hs)
assert experiment_copy.povms is not experiment.povms
for actual, expected in zip(experiment_copy.povms, experiment.povms):
            assert np.all(actual.vecs == expected.vecs)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchquantum as tq
import copy
from torchquantum.macro import C_DTYPE
from torchpack.utils.logging import logger
from typing import List, Dict, Iterable
from torchpack.utils.config import Config
from qiskit.providers.aer.noise.device.parameters import gate_error_values
from qiskit import IBMQ
from qiskit.exceptions import QiskitError
def pauli_eigs(n) -> np.ndarray:
r"""Eigenvalues for :math:`A^{\o times n}`, where :math:`A` is
Pauli operator, or shares its eigenvalues.
As an example if n==2, then the eigenvalues of a tensor product consisting
of two matrices sharing the eigenvalues with Pauli matrices is returned.
Args:
n (int): the number of qubits the matrix acts on
Returns:
        np.ndarray: the eigenvalues of the specified observable
"""
if n == 1:
return np.array([1, -1])
return np.concatenate([pauli_eigs(n - 1), -pauli_eigs(n - 1)])
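# Added illustration for pauli_eigs above (values assumed, easy to verify by hand):
# the recursion interleaves signs exactly like the eigenvalues of a Kronecker
# product of Pauli-Z matrices, e.g.
#   pauli_eigs(1) -> array([ 1, -1])
#   pauli_eigs(2) -> array([ 1, -1, -1,  1])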
def diag(x):
# input tensor, output tensor with diagonal as the input
# manual implementation because torch.diag does not support autograd of
# complex number
diag_len = x.shape[-1]
x = x.unsqueeze(-1)
dims = list(x.shape)
x = torch.cat([x, torch.zeros(dims[:-1] + [diag_len]).to(x.device)],
dim=-1)
x = x.view(dims[:-2] + [diag_len * (diag_len + 1)])[..., :-diag_len]
x = x.view(dims[:-2] + [diag_len, diag_len])
return x
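# Added illustration for diag above (shapes assumed): a batch of diagonals of
# shape (bsz, d) becomes a batch of diagonal matrices of shape (bsz, d, d), e.g.
#   diag(torch.tensor([[1., 2.]])) -> tensor([[[1., 0.],
#                                              [0., 2.]]])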
class Timer(object):
def __init__(self, device='gpu', name='', times=100):
self.device = device
self.name = name
self.times = times
if device == 'gpu':
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
def __enter__(self):
if self.device == 'gpu':
self.start.record()
def __exit__(self, exc_type, exc_value, tb):
if self.device == 'gpu':
self.end.record()
torch.cuda.synchronize()
print(f"Task: {self.name}: "
f"{self.start.elapsed_time(self.end) / self.times} ms")
def get_unitary_loss(model: nn.Module):
loss = 0
for name, params in model.named_parameters():
if 'TrainableUnitary' in name:
U = params
like_identity = U.matmul(U.conj().permute(0, 2, 1))
identity = torch.eye(U.shape[0], dtype=C_DTYPE,
device=U.device)
loss += F.mse_loss(torch.view_as_real(identity),
torch.view_as_real(like_identity))
return loss
def legalize_unitary(model: nn.Module):
with torch.no_grad():
for name, params in model.named_parameters():
if 'TrainableUnitary' in name:
U = params
                U, Sigma, V = torch.svd(U)
                # Drop the singular values: U @ V^H is the closest unitary matrix
                # (polar decomposition) to the original parameter matrix.
                params.data.copy_(U.matmul(V.conj().permute(0, 2, 1)))
def switch_little_big_endian_matrix(mat):
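    # Added annotation: reverses the qubit (endianness) ordering of a 2^n x 2^n
    # matrix, optionally batched along the first axis, by viewing it as a rank-2n
    # tensor of twos, reversing the qubit axes within both the row and column
    # halves, and reshaping back to the original shape.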
if len(mat.shape) % 2 == 1:
is_batch_matrix = True
bsz = mat.shape[0]
reshape = [bsz] + [2] * int(np.log2(mat[0].size))
else:
is_batch_matrix = False
reshape = [2] * int(np.log2(mat.size))
original_shape = mat.shape
mat = mat.reshape(reshape)
axes = list(range(len(mat.shape) // 2))
axes.reverse()
axes += [axis + len(mat.shape) // 2 for axis in axes]
if is_batch_matrix:
axes = [0] + [axis + 1 for axis in axes]
mat = np.transpose(mat, axes=axes).reshape(original_shape)
return mat
def switch_little_big_endian_state(state):
if len(state.shape) > 1:
is_batch_state = True
bsz = state.shape[0]
reshape = [bsz] + [2] * int(np.log2(state[0].size))
elif len(state.shape) == 1:
is_batch_state = False
reshape = [2] * int(np.log2(state.size))
else:
logger.exception(f"Dimension of statevector should be 1 or 2")
raise ValueError
original_shape = state.shape
state = state.reshape(reshape)
if is_batch_state:
axes = list(range(1, len(state.shape)))
axes.reverse()
axes = [0] + axes
else:
axes = list(range(len(state.shape)))
axes.reverse()
mat = np.transpose(state, axes=axes).reshape(original_shape)
return mat
def switch_little_big_endian_matrix_test():
logger.info(switch_little_big_endian_matrix(np.ones((16, 16))))
logger.info(switch_little_big_endian_matrix(np.ones((5, 16, 16))))
def switch_little_big_endian_state_test():
logger.info(switch_little_big_endian_state(np.ones((5, 16))))
    logger.info(switch_little_big_endian_state(np.arange(8)))
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 16:32:55 2020
IN DEVELOPMENT
atm - automated test measurements
Utility toolset that will eventually enable automated measurement of test structure devices.
Consists of a few important classes:
MeasurementControl - performs and analyses measurements according to a process
MeasurementAnalysis - used to analyse measurement datasets
MeasurementProcess - an algorithm that specifies the sequence of measurements to be performed
MeasurementMap - contains functions to work out correct gates and instruments for a given measurement
MeasurementGeometry - physical geometry of the sample, for now we will only consider linear arrays
@author: krolljg
"""
import qcodes
from qcodes import Instrument
import qtt
from qtt.measurements.scans import scanjob_t, scan1D, scan2D, scan1Dfeedback
from qtt.automation.measurement_analysis import MeasurementAnalysis
import time
import numpy as np
import scipy.optimize as optimisation
class MeasurementControl(Instrument):
"""
Class that allows for control of measurements.
"""
def __init__(
self,
sample_name: str,
station: object,
datadir: str,
autoanalysis: bool = True, #autoanalysis to be implemented
liveplotting: bool = False,
verbose: bool = True,
**kwargs
):
super().__init__(sample_name+'Control', **kwargs)
qcodes.DataSet.default_io = qcodes.DiskIO(datadir)
self.station = station
self.gates = station.gates
self.autoanalysis = autoanalysis
self.liveplotting = liveplotting
self.verbose = verbose
def scan_1D(self, scan_gate, start, end, step, meas_instr, pause_before_start=None, wait_time=0.02,
abort_controller=None,plot_param=None,sub_plots=None):
''' Used to sweep a gate and measure on some instruments '''
if pause_before_start is not None:
try:
self.gates.set(scan_gate, start)
except:
scan_gate(start)
time.sleep(pause_before_start)
scanjob = scanjob_t({'sweepdata': dict({'param': scan_gate,
'start': start,
'end': end,
'step': step,
'wait_time': wait_time}), 'minstrument': meas_instr})
if abort_controller is not None:
dataset = scan1Dfeedback(self.station, scanjob, location=None, verbose=self.verbose, abort_controller=abort_controller, plotparam=plot_param,subplots=sub_plots)
else:
dataset = scan1D(self.station, scanjob, location=None, verbose=self.verbose, plotparam=plot_param,subplots=sub_plots)
return dataset
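    # Example usage of scan_1D above (sketch only; the gate name, instrument
    # parameter and voltages below are assumptions, not part of this module):
    #   mc = MeasurementControl('sample1', station, datadir)
    #   ds = mc.scan_1D('SD1_P', start=0, end=-400, step=-2,
    #                   meas_instr=[station.keithley.amplitude])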
def scan_2D(self, sweep_gate, sweep_start, sweep_end, sweep_step, step_gate, step_start, step_end, step_step,
meas_instr, pause_before_start=None, sweep_wait=0.02, step_wait=0.02, plot_param=None):
''' Used to sweep a gate and measure on some instruments '''
if pause_before_start is not None:
try:
self.gates.set(step_gate, step_start)
except:
step_gate(step_start)
time.sleep(pause_before_start)
scanjob = scanjob_t({'sweepdata': dict({'param': sweep_gate,
'start': sweep_start,
'end': sweep_end,
'step': sweep_step,
'wait_time': sweep_wait}),
'stepdata': dict({'param': step_gate,
'start': step_start,
'end': step_end,
'step': step_step,
'wait_time': step_wait}),
'minstrument': meas_instr})
dataset = qtt.measurements.scans.scan2D(self.station, scanjob, plotparam=plot_param)
return dataset
def drift_scan(self, scan_gate, start, end_voltage_list, step, meas_instr, forward_datasets = None,
backward_datasets= None, auto_plot=False, threshold=None):
''' Used to perform 1D sweeps up to increasingly higher voltages to look at drift '''
try:
self.gates.set(scan_gate, start)
except:
scan_gate(start)
time.sleep(0.5)
if forward_datasets is None:
forward_datasets = []
if backward_datasets is None:
backward_datasets = []
MA = MeasurementAnalysis()
for end in end_voltage_list:
dataset_forward = self.scan_1D(scan_gate, start, end, step, meas_instr)
forward_datasets.append(dataset_forward)
dataset_backward = self.scan_1D(scan_gate, end, start, step, meas_instr)
backward_datasets.append(dataset_backward)
if auto_plot:
MA.plot_multiple_scans(forward_datasets,backward_datasets)
MA.plot_drift_scans(forward_datasets,backward_datasets)
if threshold is not None:
                forward_max = np.max(MA.forward_diff_list)
import numpy as np
from shapreg import utils, games, stochastic_games
from tqdm.auto import tqdm
def default_min_variance_samples(game):
'''Determine min_variance_samples.'''
return 5
def default_variance_batches(game, batch_size):
'''
Determine variance_batches.
    This value tries to ensure that enough samples are included to make the
    approximation of A non-singular.
'''
if isinstance(game, games.CooperativeGame):
return int(np.ceil(10 * game.players / batch_size))
else:
# Require more intermediate samples for stochastic games.
return int(np.ceil(25 * game.players / batch_size))
def calculate_result(A, b, total):
'''Calculate the regression coefficients.'''
num_players = A.shape[1]
try:
if len(b.shape) == 2:
A_inv_one = np.linalg.solve(A, np.ones((num_players, 1)))
else:
A_inv_one = np.linalg.solve(A, np.ones(num_players))
A_inv_vec = np.linalg.solve(A, b)
values = (
A_inv_vec -
A_inv_one * (np.sum(A_inv_vec, axis=0, keepdims=True) - total)
/ np.sum(A_inv_one))
except np.linalg.LinAlgError:
raise ValueError('singular matrix inversion. Consider using larger '
'variance_batches')
return values
def ShapleyRegression(game,
batch_size=512,
detect_convergence=True,
thresh=0.01,
n_samples=None,
paired_sampling=True,
return_all=False,
min_variance_samples=None,
variance_batches=None,
bar=True,
verbose=False):
# Verify arguments.
if isinstance(game, games.CooperativeGame):
stochastic = False
elif isinstance(game, stochastic_games.StochasticCooperativeGame):
stochastic = True
else:
raise ValueError('game must be CooperativeGame or '
'StochasticCooperativeGame')
if min_variance_samples is None:
min_variance_samples = default_min_variance_samples(game)
else:
assert isinstance(min_variance_samples, int)
assert min_variance_samples > 1
if variance_batches is None:
variance_batches = default_variance_batches(game, batch_size)
else:
assert isinstance(variance_batches, int)
assert variance_batches >= 1
# Possibly force convergence detection.
if n_samples is None:
n_samples = 1e20
if not detect_convergence:
detect_convergence = True
if verbose:
print('Turning convergence detection on')
if detect_convergence:
assert 0 < thresh < 1
# Weighting kernel (probability of each subset size).
num_players = game.players
weights = np.arange(1, num_players)
weights = 1 / (weights * (num_players - weights))
weights = weights / np.sum(weights)
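    # Added worked example (illustrative): with num_players = 4 the unnormalized
    # weights over subset sizes 1..3 are [1/3, 1/4, 1/3], so sizes 1 and 3 are
    # sampled slightly more often than size 2.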
# Calculate null and grand coalitions for constraints.
if stochastic:
null = game.null(batch_size=batch_size)
grand = game.grand(batch_size=batch_size)
else:
null = game.null()
grand = game.grand()
# Calculate difference between grand and null coalitions.
total = grand - null
# Set up bar.
n_loops = int(np.ceil(n_samples / batch_size))
if bar:
if detect_convergence:
bar = tqdm(total=1)
else:
bar = tqdm(total=n_loops * batch_size)
# Setup.
n = 0
b = 0
A = 0
estimate_list = []
# For variance estimation.
A_sample_list = []
b_sample_list = []
# For tracking progress.
var = np.nan * np.ones(num_players)
if return_all:
N_list = []
std_list = []
val_list = []
# Begin sampling.
for it in range(n_loops):
# Sample subsets.
S = np.zeros((batch_size, num_players), dtype=bool)
num_included = np.random.choice(num_players - 1, size=batch_size,
p=weights) + 1
for row, num in zip(S, num_included):
inds = np.random.choice(num_players, size=num, replace=False)
row[inds] = 1
# Sample exogenous (if applicable).
if stochastic:
U = game.sample(batch_size)
# Update estimators.
if paired_sampling:
# Paired samples.
A_sample = 0.5 * (
np.matmul(S[:, :, np.newaxis].astype(float),
S[:, np.newaxis, :].astype(float))
+ np.matmul(np.logical_not(S)[:, :, np.newaxis].astype(float),
np.logical_not(S)[:, np.newaxis, :].astype(float)))
if stochastic:
game_eval = game(S, U) - null
S_comp = np.logical_not(S)
comp_eval = game(S_comp, U) - null
b_sample = 0.5 * (
S.astype(float).T * game_eval[:, np.newaxis].T
+ S_comp.astype(float).T * comp_eval[:, np.newaxis].T).T
else:
game_eval = game(S) - null
S_comp = np.logical_not(S)
comp_eval = game(S_comp) - null
b_sample = 0.5 * (
S.astype(float).T * game_eval[:, np.newaxis].T +
S_comp.astype(float).T * comp_eval[:, np.newaxis].T).T
else:
# Single sample.
A_sample = np.matmul(S[:, :, np.newaxis].astype(float),
S[:, np.newaxis, :].astype(float))
if stochastic:
b_sample = (S.astype(float).T
* (game(S, U) - null)[:, np.newaxis].T).T
else:
b_sample = (S.astype(float).T
* (game(S) - null)[:, np.newaxis].T).T
        # Welford's algorithm: incremental update of the running means of A and b.
n += batch_size
b += np.sum(b_sample - b, axis=0) / n
A += np.sum(A_sample - A, axis=0) / n
# Calculate progress.
values = calculate_result(A, b, total)
A_sample_list.append(A_sample)
b_sample_list.append(b_sample)
if len(A_sample_list) == variance_batches:
# Aggregate samples for intermediate estimate.
A_sample = np.concatenate(A_sample_list, axis=0).mean(axis=0)
b_sample = np.concatenate(b_sample_list, axis=0).mean(axis=0)
A_sample_list = []
b_sample_list = []
# Add new estimate.
estimate_list.append(calculate_result(A_sample, b_sample, total))
# Estimate current var.
if len(estimate_list) >= min_variance_samples:
var = np.array(estimate_list).var(axis=0)
# Convergence ratio.
std = np.sqrt(var * variance_batches / (it + 1))
ratio = np.max(
np.max(std, axis=0) / (values.max(axis=0) - values.min(axis=0)))
# Print progress message.
if verbose:
if detect_convergence:
print(f'StdDev Ratio = {ratio:.4f} (Converge at {thresh:.4f})')
else:
print(f'StdDev Ratio = {ratio:.4f}')
# Check for convergence.
if detect_convergence:
if ratio < thresh:
if verbose:
print('Detected convergence')
# Skip bar ahead.
if bar:
bar.n = bar.total
bar.refresh()
break
# Forecast number of iterations required.
if detect_convergence:
N_est = (it + 1) * (ratio / thresh) ** 2
if bar and not np.isnan(N_est):
bar.n = np.around((it + 1) / N_est, 4)
bar.refresh()
elif bar:
bar.update(batch_size)
# Save intermediate quantities.
if return_all:
val_list.append(values)
std_list.append(std)
if detect_convergence:
N_list.append(N_est)
# Return results.
if return_all:
# Dictionary for progress tracking.
iters = (
            (np.arange(it + 1)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
import capsule_distance
capsule = capsule_distance.Capsule(0.18, -0.5, 0.45)
k_robot = 0.25
k_pedestrian = 0.5
dt = 0.05
def integrate_reference_motion(xy_zero, gain_k, xy_ref, dt):
# [dx/dt = d(x_ref)/dt + k*(x_ref - x)]
# x_ref_new = x_ref + c*exp(-k*t)
# dx/dt = d(x_ref_new)/dt + k*(x_ref_new - x) =
# = d(x_ref)/dt - k*c*exp(-k*t) + k*(x_ref + c*exp(-k*t) - x) =
# = d(x_ref)/dt + k*(x_ref - x)
# The motion law is invariant under addition of c*exp(-k*t) to the reference trajectory
# x_ref_new(0) = x_ref(0) + c
# x_ref_new(0) - x(0) = x_ref(0) + c - x(0)
# Setting c = x(0) - x_ref(0) yields x_ref_new(0) - x(0) = 0, i.e. the new ref. traj.
# passes through the initial position. The reference motion which the robot would follow
# from its initial position is therefore equal to x_ref(t) + (x(0) - x_ref(0))*exp(-k*t)
t = np.arange(0, xy_ref.shape[0])*dt
return xy_ref + (np.reshape(xy_zero, (1,2)) - np.reshape(xy_ref[0, :], (1,2)))*np.reshape(np.exp(-gain_k*t), (xy_ref.shape[0],1))
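# Added sanity check for integrate_reference_motion above (illustrative values,
# not part of the original script): the shifted reference passes exactly through
# xy_zero at t = 0 and the offset decays like exp(-k*t) back onto xy_ref.
#   xy_ref = np.stack([np.linspace(0.0, 1.0, 20), np.zeros(20)], axis=1)
#   traj = integrate_reference_motion(np.array([0.5, 0.2]), k_robot, xy_ref, dt)
#   traj[0] == np.array([0.5, 0.2])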
def integrate_crowd_reference_motion(X_zero, Y_zero, gain_k, X_ref, Y_ref, dt):
t = np.arange(0, X_ref.shape[0])*dt
X_int = X_ref + np.reshape(X_zero - X_ref[0, :], (1, X_ref.shape[1]))*np.reshape(np.exp(-gain_k*t), (X_ref.shape[0], 1))
Y_int = Y_ref + np.reshape(Y_zero - Y_ref[0, :], (1, X_ref.shape[1]))*np.reshape(np.exp(-gain_k*t), (X_ref.shape[0], 1))
return (X_int, Y_int)
def add_circles(X, Y, r, ax):
for i in range(X.shape[0]):
circle = plt.Circle((X[i], Y[i]), r, fill=False, color="k")#[0.8,0.8,0.0])
ax.add_artist(circle)
def plot_index_range(ax, index_range_bounds, baseline_method,
xy_robot, orientation_robot, xy_robot_ref, X_crowd, Y_crowd, X_crowd_ref, Y_crowd_ref):
# consider sub-range
sub_range = np.arange(index_range_bounds[0], index_range_bounds[1], 1)
xy_robot = xy_robot[sub_range, :]
orientation_robot = orientation_robot[sub_range]
xy_robot_ref = xy_robot_ref[sub_range, :]
X_crowd = X_crowd[sub_range, :]
Y_crowd = Y_crowd[sub_range, :]
X_crowd_ref = X_crowd_ref[sub_range, :]
Y_crowd_ref = Y_crowd_ref[sub_range, :]
# shift the data to center the robot
x_robot_zero = xy_robot[0, 0]
y_robot_zero = xy_robot[0, 1]
orientation_robot_zero = orientation_robot[0]
shift = np.array([[x_robot_zero, y_robot_zero]])
xy_robot -= shift
xy_robot_ref -= shift
X_crowd -= shift[0, 0]
Y_crowd -= shift[0, 1]
X_crowd_ref -= shift[0, 0]
Y_crowd_ref -= shift[0, 1]
x_robot_zero = 0.0
y_robot_zero = 0.0
# plot the future trajectories for the pedestrians and the robot
    t_normalized = np.linspace(0.0, 1.0, xy_robot.shape[0])
"""
genpolar
========
Generates a sequence of plane polar coordinates (r and theta).
Functions defined here:
rtpairs -- generates a list of radii r as specified and angles theta
determined by the number of points as specified
rtuniform -- generates a list of sequence of radii r and angles theta
uniformly distributed over a disk
"""
import numpy as np
def rtpairs(R, N):
"""
    Return arrays of radii and angles: each radius in R is repeated and paired
    with the corresponding number of evenly spaced angles specified in N.
Parameters
----------
R : list
The list of different radii r of the points.
N : list
The list containing the number of angles theta for each value of r.
The sizes of R and N must be equal.
Returns
-------
radius : ndarray
Shows the radius of each pair. Contains the radii as defined in array R
repeated a number of times as defined in the corresponding value in
array N.
theta : ndarray
Shows the angle of each pair. For a given radius, the angles are evenly
spaced out with the number of angles as defined in N.
Graphical illustration:
>>> import pylab as pl
>>> R = [0.0, 0.1, 0.2]
>>> N = [1, 10, 20]
>>> for r, n in genpolar.rtpairs(R, N):
    ...     pl.plot(r * pl.cos(n), r * pl.sin(n), 'bo')
"""
R = np.array(R)
N = np.array(N)
if R.size != N.size:
raise Exception("Size mismatch")
for r, n in zip(R, N): # zip takes values from same position in arrays R &
# N and pairs them up
        theta = np.linspace(0, 2*np.pi, n + 1)
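
# The loop above is cut off at this point. A minimal sketch of how it could be
# completed, assuming the Returns section of the docstring (each radius repeated,
# paired with the matching number of evenly spaced angles); the endpoint handling
# is a guess and not the original author's code:
def _rtpairs_sketch(R, N):
    R, N = np.array(R), np.array(N)
    radius = np.repeat(R, N)  # each radius repeated N[i] times
    theta = np.concatenate(
        [np.linspace(0, 2 * np.pi, n, endpoint=False) for n in N])
    return radius, theta
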
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.special import gammaln as logg
from functools import partial
from numba import jit, float64, int64, int32, boolean
from gensim.models.coherencemodel import CoherenceModel
from hdp_py.StirlingEngine import *
from hdp_py.mixture_functions import *
class HDPTopic:
"""
Model which clusters a corpus of documents nonparametrically via a
Hierarchical Dirichlet Process.
CONSTRUCTOR PARAMETERS
- vocab: a gensim Dictionary object which maps integer ids -> tokens
- h_con: |vocab| length vector of concentration parameters for Dirichlet prior
- gamma, alpha0: scaling parameters > 0 for base measures H and G0
PRIVATE ATTRIBUTES (volatile)
- q_: (J x Kmax) matrix specifying counts of customers (gibbs_direct)
- m_: (J x Kmax) matrix specifying counts of tables
- fk_cust_, fk_cust_new_: functions to compute mixing components for Gibbs sampling
- stir_: an object of class StirlingEngine which computes Stirling numbers
PUBLIC ATTRIBUTES
- direct_samples: (S x N) matrix of k values for each data point i;
exists only after gibbs_direct() has been called
- beta_samples: (S x Kmax+1) matrix representing topic distribution over corpus
exists only after gibbs_direct() has been called
- pi_samples: (S x J x Kmax+1) matrix representing topic distribution over each document
exists only after gibbs_direct() has been called
"""
def __init__(self, vocab, h_con=None, gamma=1, alpha0=1):
self.g_ = gamma
self.a0_ = alpha0
# Set a weak, uniform concentration if none provided
if h_con is None:
L = len(vocab)
self.hypers_ = (L, np.full(L, 0.5))
else:
self.hypers_ = (len(h_con), h_con)
self.vocab = vocab
self.fk_cust_ = cat_fk_cust4
self.fk_cust_new_ = cat_fk_cust4_new
def initial_tally(self, j):
"""
Helper function for computing cluster counts q_ following the
initial random cluster allocation for the tokens.
"""
jk_pairs = np.stack([j, self.direct_samples[0,:]], axis=1)
# Counts words in document j assigned to cluster k
pair_counts = pd.Series(map(tuple, jk_pairs)).value_counts()
j_idx, k_idx = tuple(map(np.array, zip(*pair_counts.index)))
self.q_ *= 0
self.q_[j_idx, k_idx] = pair_counts
def get_dist(self, old, new, used, size):
"""
Helper function which standardizes the operation of computing a
full conditional distribution, for both t and k values.
Also normalizes and ensures there are no NANs.
- old: a (size,) vector of probability values for used values
- new: a scalar representing the combined probability of all unused values
- used: a (size,) mask encoding which values in the sample space are being used
- size: the size of the sample space
"""
num_unused = size - np.sum(used)
dist = None
if num_unused == 0:
# In our truncated sample space, there is no room for "new" values
dist = old
else:
dist = old * used + (new / num_unused) * np.logical_not(used)
# Remove nans and add epsilon so that distribution is all positive
#print(f"{dist.round(3)} (sum = {np.sum(dist)})")
dist[np.logical_not(np.isfinite(dist))] = 0
dist += 1e-10
return dist / np.sum(dist)
@staticmethod
@jit(float64[:](float64[:], float64, boolean[:], int32), nopython=True)
def get_dist_compiled(old, new, used, size):
"""
Helper function which standardizes the operation of computing a
full conditional distribution, for both t and k values.
Also normalizes and ensures there are no NANs.
This version is compiled via numba to make repeated calls more efficient.
- old: a (size,) vector of probability values for used values
- new: a scalar representing the combined probability of all unused values
- used: a (size,) mask encoding which values in the sample space are being used
- size: the size of the sample space
"""
dist = np.zeros(size)
dist_sum, num_unused = 0, 0
# Spread out the probability of a "new" value over all unused slots
for k in range(size):
if not used[k]:
num_unused += 1
for k in range(size):
dist[k] = old[k] if used[k] else new / num_unused
if not np.isfinite(dist[k]): dist[k] = 0
dist[k] += 1e-10
dist_sum += dist[k]
# Return normalized distribution vector
for k in range(size):
dist[k] /= dist_sum
return dist
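
    # Worked illustration of the helper above (numbers are made up): with
    # old = [0.2, 0.5, 0.0], new = 0.3, used = [True, True, False] and size = 3,
    # the single unused slot receives all of the "new" mass, giving an
    # unnormalized vector [0.2, 0.5, 0.3]; after the 1e-10 floor and
    # normalization the returned distribution is approximately [0.2, 0.5, 0.3].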
def draw_z(self, it, x, j, Kmax, verbose):
"""
Helper function which does the draws from the z_ij full conditional,
assigning clusters to each individual token.
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_direct()
"""
k_next = self.direct_samples[it,:]
k_counts = np.sum(self.q_, axis=0)
# Cycle through the k values of each customer
for i in np.random.permutation(len(j)):
jj, kk0 = j[i], k_next[i]
# Get vector of customer f_k values (dependent on model specification)
old_mixes = self.fk_cust_(i, x, k_next, Kmax, *self.hypers_)
new_mixes = self.fk_cust_new_(i, x, k_next, Kmax, *self.hypers_)
cust_offset = np.zeros(Kmax)
cust_offset[kk0] = 1
old_k = (self.q_[jj, :] - cust_offset +
self.a0_ * self.beta_samples[it, :-1]) * old_mixes
new_k = self.a0_ * self.beta_samples[it, -1] * new_mixes
k_used = k_counts > 0
k_dist = self.get_dist_compiled(old_k, new_k, k_used, Kmax)
kk1 = np.random.choice(Kmax, p=k_dist)
# Update the necessary count vectors
k_next[i] = kk1
self.q_[jj, kk0] -= 1
k_counts[kk0] -= 1
self.q_[jj, kk1] += 1
k_counts[kk1] += 1
# If kk0 is now unused, must remove its beta_k component and re-normalize
if k_counts[kk0] == 0:
self.beta_samples[it, kk0] = 0
self.beta_samples[it, :] /= np.sum(self.beta_samples[it, :])
# If kk1 was previously unused, must set a new beta_k component
if k_counts[kk1] == 1:
b = np.random.beta(1, self.g_)
beta_u = self.beta_samples[it, -1]
self.beta_samples[it, kk1] = b * beta_u
self.beta_samples[it, -1] = (1-b) * beta_u
def draw_m(self, it, x, j, Kmax, verbose):
"""
Helper function which does the draws from the m_jt full conditional,
which implicitly determines the overall clustering structure for each document.
Updates the counts and the samples matrices at iteration `it`.
Called by gibbs_direct()
"""
k_next = self.direct_samples[it,:]
self.m_ *= 0 # reset the m counts
# Cycle through the k values of each restaurant
j_idx, k_idx = np.where(self.q_ > 0) # find the occupied clusters
for i in np.random.permutation(len(j_idx)):
jj, kk = j_idx[i], k_idx[i]
max_m = self.q_[jj, kk]
abk = self.a0_ * self.beta_samples[it, kk]
m_range = np.arange(max_m) + 1
log_s = np.array([self.stir_.stirlog(max_m, m) for m in m_range])
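            # This is the (unnormalized) Antoniak distribution: the probability that
            # max_m customers occupy m tables in a CRP with concentration abk,
            # proportional to s(max_m, m) * abk**m * Gamma(abk) / Gamma(abk + max_m).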
m_dist = np.exp( logg(abk) - logg(abk + max_m) +
log_s + m_range * np.log(abk) )
m_dist[np.logical_not(np.isfinite(m_dist))] = 0
m_dist += 1e-10
m_dist /= np.sum(m_dist)
mm1 = np.random.choice(m_range, p=m_dist)
self.m_[jj, kk] = mm1
def gibbs_direct(self, x, j, iters, Kmax=None, init_clusters=0.1, resume=False,
verbose=False, every=1):
"""
Runs the Gibbs sampler to generate posterior estimates of k.
x: vector of ints mapping to individual tokens, in a bag of words format
j: vector of ints labeling document number for each token
iters: number of iterations to run
Kmax: maximum number of clusters that can be used (set much higher than needed)
        init_clusters: fraction of Kmax to fill with initial cluster assignments
resume: if True, will continue from end of previous direct_samples, if dimensions match up
modifies this HDPTopic object's direct_samples attribute
"""
group_counts = pd.Series(j).value_counts()
J, N = np.max(j) + 1, len(j)
if Kmax is None: Kmax = min(100, N)
prev_direct, prev_beta = None, None
start = 0
if resume == True:
# Make sure the x passed in is the same size as it previously was
assert (N == self.direct_samples.shape[1] and
Kmax == self.beta_samples.shape[1] - 1), "Cannot resume with different data."
iters += self.direct_samples.shape[0]
prev_direct, prev_beta = self.direct_samples, self.beta_samples
start = self.direct_samples.shape[0]
self.direct_samples = np.zeros((iters+1, N), dtype='int')
self.beta_samples = np.zeros((iters+1, Kmax+1))
self.stir_ = StirlingEngine(np.max(group_counts) + 1)
np.seterr('ignore')
if resume == True:
# Fill in the start of the samples with the previously computed samples
self.direct_samples[1:start+1,:] = prev_direct
self.beta_samples[1:start+1,:] = prev_beta
# q_ and m_ attributes should already still exist within the object
else:
self.q_ = np.zeros((J, Kmax), dtype='int') # counts number of tokens in doc j, cluster k
self.m_ = np.zeros((J, Kmax), dtype='int') # counts number of k-clusters in doc j
# Initialize all tokens to a random cluster, to improve MCMC mixing
# But only allocate a portion of Kmax, for efficiency
K0_max = min(Kmax, int(init_clusters * Kmax))
self.direct_samples[0,:] = np.random.randint(0, K0_max, size=N)
self.initial_tally(j)
# Initialize to one cluster of type k per document: m_jk = 1
self.m_[:, :] = (self.q_ > 0).astype('int')
# Compute the corresponding beta values from m assignments
Mk = np.sum(self.m_, axis=0)
self.beta_samples[0,:] = np.random.dirichlet(np.append(Mk, self.g_) + 1e-100)
for s in range(start, iters):
# Copy over the previous iteration as a starting point
self.direct_samples[s+1,:] = self.direct_samples[s,:]
self.beta_samples[s+1,:] = self.beta_samples[s,:]
self.draw_z(s+1, x, j, Kmax, verbose)
self.draw_m(s+1, x, j, Kmax, verbose)
Mk = np.sum(self.m_, axis=0)
# Dirichlet weights must be > 0, so in case some k is unused, add epsilon
            self.beta_samples[s+1,:] = np.random.dirichlet(np.append(Mk, self.g_) + 1e-100)
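
# Illustrative usage sketch for HDPTopic (toy corpus and made-up parameter values;
# gensim provides the Dictionary vocabulary object referenced in the class docstring):
def _hdp_topic_demo():
    from gensim.corpora import Dictionary
    docs = [["cat", "dog", "cat"], ["dog", "fish"], ["cat", "fish", "fish"]]
    vocab = Dictionary(docs)
    # Flatten the corpus into parallel token-id / document-id vectors.
    x = np.array([vocab.token2id[w] for doc in docs for w in doc])
    j = np.array([d for d, doc in enumerate(docs) for _ in doc])
    model = HDPTopic(vocab, gamma=1, alpha0=1)
    model.gibbs_direct(x, j, iters=50, Kmax=20)
    return model.direct_samples, model.beta_samples
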
#! /usr/env/python
"""
A class used to create and manage regular raster grids for 2D numerical models
in Landlab.
Do NOT add new documentation here. Grid documentation is now built in a semi-
automated fashion. To modify the text seen on the web, edit the files
`docs/text_for_[gridfile].py.txt`.
"""
import numpy as np
import six
from six.moves import range
from landlab.testing.decorators import track_this_method
from landlab.utils import structured_grid as sgrid
from landlab.utils import count_repeated_values
from .base import ModelGrid
from .base import (CORE_NODE, FIXED_VALUE_BOUNDARY,
FIXED_GRADIENT_BOUNDARY, LOOPED_BOUNDARY,
CLOSED_BOUNDARY, FIXED_LINK, BAD_INDEX_VALUE, ACTIVE_LINK,
INACTIVE_LINK)
from landlab.field.scalar_data_fields import FieldError
from landlab.utils.decorators import make_return_array_immutable, deprecated
from . import raster_funcs as rfuncs
from ..io import write_esri_ascii
from ..io.netcdf import write_netcdf
from landlab.grid.structured_quad import links as squad_links
from landlab.grid.structured_quad import faces as squad_faces
from landlab.grid.structured_quad import cells as squad_cells
from ..core.utils import as_id_array
from ..core.utils import add_module_functions_to_class
from .decorators import return_id_array, return_readonly_id_array
from . import gradients
@deprecated(use='grid.node_has_boundary_neighbor', version='0.2')
def _node_has_boundary_neighbor(mg, id, method='d8'):
"""Test if a node is next to a boundary.
Test if one of the neighbors of node *id* is a boundary node.
Parameters
----------
mg : ModelGrid
Source grid
node_id : int
ID of node to test.
method: string, optional
default is d8 neighbor, other method is 'd4'
Returns
-------
boolean
``True`` if node has a neighbor on the boundary, ``False`` otherwise.
"""
for neighbor in mg.active_neighbors_at_node[id]:
try:
if mg.status_at_node[neighbor] != CORE_NODE:
return True
except IndexError:
return True
if method == 'd8':
for neighbor in mg._get_diagonal_list(id):
try:
if mg.status_at_node[neighbor] != CORE_NODE:
return True
except IndexError:
return True
return False
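
# Illustrative sketch (not part of the original module; grid size and node ids are
# chosen only for the example): exercising the helper above on a small grid.
def _node_has_boundary_neighbor_demo():
    from landlab import RasterModelGrid
    mg = RasterModelGrid((5, 6))
    # Node 7 sits one row in from the bottom-left corner, so it touches boundary
    # nodes; node 14 is in the middle of the grid, away from every edge.
    return (_node_has_boundary_neighbor(mg, 7),
            _node_has_boundary_neighbor(mg, 14))
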
def _make_arg_into_array(arg):
"""Make an argument into an iterable.
This function tests if the provided object is a Python list or a numpy
array. If not, attempts to cast the object to a list. If it cannot, it will
raise a TypeError.
Parameters
----------
arg : array_like
Input array.
Returns
-------
array_like
The input array converted to an iterable.
Examples
--------
>>> from landlab.grid.raster import _make_arg_into_array
>>> _make_arg_into_array(1)
[1]
>>> _make_arg_into_array((1, ))
[1]
>>> _make_arg_into_array([1, 2])
[1, 2]
>>> import numpy as np
>>> _make_arg_into_array(np.arange(3))
array([0, 1, 2])
"""
ids = arg
if not isinstance(ids, list) and not isinstance(ids, np.ndarray):
try:
ids = list(ids)
except TypeError:
ids = [ids]
return ids
_node_has_boundary_neighbor = np.vectorize(_node_has_boundary_neighbor,
excluded=['mg'])
class RasterModelGridPlotter(object):
"""MixIn that provides plotting functionality.
Inhert from this class to provide a ModelDataFields object with the
method function, ``imshow``, that plots a data field.
"""
def imshow(self, group, var_name, **kwds):
"""Plot a data field.
This is a wrapper for `plot.imshow_grid`, and can take the same
keywords. See that function for full documentation.
Parameters
----------
group : str
Name of group.
var_name : str
Name of field
See Also
--------
landlab.plot.imshow_grid
LLCATS: GINF
"""
from landlab.plot import imshow_grid
kwds['values_at'] = group
imshow_grid(self, var_name, **kwds)
def grid_edge_is_closed_from_dict(boundary_conditions):
"""Get a list of closed-boundary status at grid edges.
Get a list that indicates grid edges that are closed boundaries. The
returned list provides a boolean that gives the boundary condition status
    for edges in the order [*right*, *top*, *left*, *bottom*].
*boundary_conditions* is a dict whose keys indicate edge location (as
"bottom", "left", "top", "right") and values must be one of "open", or
"closed". If an edge location key is missing, that edge is assumed to be
*open*.
Parameters
----------
boundary_conditions : dict
Boundary condition for grid edges.
Returns
-------
list
List of booleans indicating if an edge is a closed boundary.
Examples
--------
>>> from landlab.grid.raster import grid_edge_is_closed_from_dict
>>> grid_edge_is_closed_from_dict(dict(bottom='closed', top='open'))
[False, False, False, True]
>>> grid_edge_is_closed_from_dict({})
[False, False, False, False]
"""
for condition in boundary_conditions.values():
if condition not in ['open', 'closed']:
            raise ValueError('%s: boundary condition type not understood'
                             % condition)
return [boundary_conditions.get(loc, 'open') == 'closed'
for loc in ['right', 'top', 'left', 'bottom']]
def _old_style_args(args):
"""Test if arguments are the old-style RasterModelGrid __init__ method.
The old way of initializing a :any:`RasterModelGrid` was like,
.. code::
grid = RasterModelGrid(n_rows, n_cols)
The new way passes the grid shape as a tuple, like numpy functions,
.. code::
grid = RasterModelGrid((n_rows, n_cols))
Parameters
----------
args : iterable
Arguments to a function.
Examples
--------
>>> from landlab.grid.raster import _old_style_args
>>> _old_style_args((4, 5))
True
>>> _old_style_args(((4, 5), ))
False
>>> _old_style_args(([4, 5], ))
False
"""
return len(args) in (2, 3) and isinstance(args[0], int)
def _parse_grid_shape_from_args(args):
"""Get grid shape from args.
Parameters
----------
args : iterable
Arguments to a function.
Examples
--------
>>> from landlab.grid.raster import _parse_grid_shape_from_args
>>> _parse_grid_shape_from_args((3, 4))
(3, 4)
>>> _parse_grid_shape_from_args(((3, 4), ))
(3, 4)
"""
if _old_style_args(args):
rows, cols = args[0], args[1]
else:
try:
(rows, cols) = args[0]
except ValueError:
raise ValueError('grid shape must be tuple')
return rows, cols
def _parse_grid_spacing_from_args(args):
"""Get grid spacing from args.
Parameters
----------
args : iterable
Arguments to a function.
Examples
--------
>>> from landlab.grid.raster import _parse_grid_spacing_from_args
>>> _parse_grid_spacing_from_args((3, 4, 5))
5
>>> _parse_grid_spacing_from_args(((3, 4), 5))
5
"""
try:
if _old_style_args(args):
return args[2]
else:
return args[1]
except IndexError:
return None
class RasterModelGrid(ModelGrid, RasterModelGridPlotter):
"""A 2D uniform rectilinear grid.
Create a uniform rectilinear grid that has *num_rows* and *num_cols*
of grid nodes, with a row and column spacing of *dx*.
Use the *bc* keyword to specify boundary_conditions along the edge nodes
of the grid. *bc* is a dict whose keys indicate edge location (as
"bottom", "left", "top", "right") and values must be one of "open", or
"closed". If an edge location key is missing, that edge is assumed to be
*open*.
Parameters
----------
shape : tuple of int
Shape of the grid in nodes.
spacing : float, optional
Row and column node spacing.
bc : dict, optional
Edge boundary conditions.
Examples
--------
Create a uniform rectilinear grid that has 4 rows and 5 columns of nodes.
Nodes along the edges will be *open*. That is, links connecting these
nodes to core nodes are *active*.
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
>>> rmg.number_of_node_rows, rmg.number_of_node_columns
(4, 5)
>>> rmg.number_of_active_links
17
Set the nodes along the top edge of the grid to be *closed* boundaries.
This means that any links touching these nodes will be *inactive*.
>>> rmg = RasterModelGrid((4, 5), 1.0, bc={'top': 'closed'})
>>> rmg.number_of_node_rows, rmg.number_of_node_columns
(4, 5)
>>> rmg.number_of_active_links
14
A `RasterModelGrid` can have different node spacings in the *x* and *y*
directions.
>>> grid = RasterModelGrid((4, 5), spacing=(1, 2))
>>> grid.dy, grid.dx
(1.0, 2.0)
>>> grid.node_y # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0., 0.,
1., 1., 1., 1., 1.,
2., 2., 2., 2., 2.,
3., 3., 3., 3., 3.])
>>> grid.node_x # doctest: +NORMALIZE_WHITESPACE
array([ 0., 2., 4., 6., 8.,
0., 2., 4., 6., 8.,
0., 2., 4., 6., 8.,
0., 2., 4., 6., 8.])
Notes
-----
The option for NOT giving rows, cols, and dx no longer works,
because the *field* init requires num_active_cells, etc., to be
defined. Either we force users to give arguments on instantiation,
or set it up such that one can create a zero-node grid.
"""
def __init__(self, *args, **kwds):
"""Create a 2D grid with equal spacing.
Optionally takes numbers of rows and columns and cell size as
inputs. If this are given, calls initialize() to set up the grid.
At the moment, num_rows and num_cols MUST be specified. Both must be
>=3 to allow correct automated setup of boundary conditions.
Parameters
----------
shape : tuple of int
Shape of the grid in nodes.
spacing : tuple or float, optional
Row and column node spacing.
bc : dict, optional
Edge boundary conditions.
Returns
-------
RasterModelGrid
A newly-created grid.
Notes
-----
The option for NOT giving rows, cols, and dx no longer works,
because the *field* init requires num_active_cells, etc., to be
defined. Either we force users to give arguments on instantiation,
or set it up such that one can create a zero-node grid.
"""
dx = kwds.pop('dx', None)
num_rows = kwds.pop('num_rows', None)
num_cols = kwds.pop('num_cols', None)
if num_rows is None and num_cols is None:
num_rows, num_cols = _parse_grid_shape_from_args(args)
elif len(args) > 0:
raise ValueError(
'number of args must be 0 when using keywords for grid shape')
if dx is None:
dx = kwds.pop('spacing', _parse_grid_spacing_from_args(args) or 1.)
if num_rows <= 0 or num_cols <= 0:
raise ValueError('number of rows and columns must be positive')
self._node_status = np.empty(num_rows * num_cols, dtype=np.int8)
# Set number of nodes, and initialize if caller has given dimensions
self._initialize(num_rows, num_cols, dx)
self.set_closed_boundaries_at_grid_edges(
*grid_edge_is_closed_from_dict(kwds.pop('bc', {})))
super(RasterModelGrid, self).__init__(**kwds)
self.looped_node_properties = {}
@classmethod
def from_dict(cls, params):
"""Create a RasterModelGrid from a dictionary.
Parameters
----------
params : dict_like
Initialization parameters for a RasterModelGrid.
Returns
-------
RasterModelGrid
A newly-created grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid.from_dict(
... {'shape': (3, 4), 'bc': {'top': 'closed'}})
>>> grid.number_of_nodes
12
LLCATS: GINF
"""
shape = params['shape']
spacing = params.get('spacing', (1., ) * len(shape))
bc = params.get('bc', {})
return cls(shape, spacing=spacing, bc=bc)
def _initialize(self, num_rows, num_cols, spacing):
"""Set up a raster grid.
Sets up a *num_rows* by *num_cols* grid with cell *spacing* and
(by default) regular boundaries (that is, all perimeter cells are
boundaries and all interior cells are active).
To be consistent with unstructured grids, the raster grid is
managed not as a 2D array but rather as a set of vectors that
describe connectivity information between nodes, links, active links,
cells, active cells, faces, patches, junctions, and corners.
By default, all interior nodes are set to active, and all perimeter
nodes are set as fixed value, open boundaries (type 1, see supporting
documentation).
Note that by default, a RasterModelGrid ONLY has links to
orthogonal neighboring nodes. However, if you wish to work with the
diagonal links (e.g., D8 flow routing), these functions are available
as methods, and the diagonal links can readily be created after
initialization.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> numrows = 20 # number of rows in the grid
>>> numcols = 30 # number of columns in the grid
>>> dx = 10.0 # grid cell spacing
>>> rmg = RasterModelGrid((numrows, numcols), dx)
>>> (rmg.number_of_nodes, rmg.number_of_cells, rmg.number_of_links,
... rmg.number_of_active_links)
(600, 504, 1150, 1054)
>>> rmg = RasterModelGrid((4, 5))
>>> (rmg.number_of_nodes, rmg.number_of_cells, rmg.number_of_links,
... rmg.number_of_active_links)
(20, 6, 31, 17)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1], dtype=int8)
>>> rmg._node_numinlink # doctest: +NORMALIZE_WHITESPACE
array([0, 1, 1, 1, 1,
1, 2, 2, 2, 2,
1, 2, 2, 2, 2,
1, 2, 2, 2, 2])
>>> rmg._node_inlink_matrix # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 22,
23, 24, 25, 26],
[-1, 0, 1, 2, 3, -1, 9, 10, 11, 12, -1, 18, 19, 20, 21, -1,
27, 28, 29, 30]])
>>> rmg._node_numoutlink # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 1,
2, 2, 2, 2, 1,
2, 2, 2, 2, 1,
1, 1, 1, 1, 0])
>>> rmg._node_outlink_matrix[0] # doctest: +NORMALIZE_WHITESPACE
array([ 4, 5, 6, 7, 8, 13, 14, 15, 16, 17, 22, 23, 24, 25, 26,
-1, -1, -1, -1, -1])
>>> rmg._node_numactiveinlink # doctest: +NORMALIZE_WHITESPACE
array([0, 0, 0, 0, 0,
0, 2, 2, 2, 1,
0, 2, 2, 2, 1,
0, 1, 1, 1, 0])
>>> rmg._node_active_inlink_matrix # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, -1, -1, -1, -1, 0, 1, 2, -1, -1, 3, 4, 5, -1, -1,
6, 7, 8, -1],
[-1, -1, -1, -1, -1, -1, 9, 10, 11, 12, -1, 13, 14, 15, 16, -1,
-1, -1, -1, -1]])
>>> rmg._node_numactiveoutlink # doctest: +NORMALIZE_WHITESPACE
array([0, 1, 1, 1, 0,
1, 2, 2, 2, 0,
1, 2, 2, 2, 0,
0, 0, 0, 0, 0])
>>> rmg._node_active_outlink_matrix # doctest: +NORMALIZE_WHITESPACE
array([[-1, 0, 1, 2, -1, -1, 3, 4, 5, -1, -1, 6, 7, 8, -1, -1,
-1, -1, -1, -1],
[-1, -1, -1, -1, -1, 9, 10, 11, 12, -1, 13, 14, 15, 16, -1, -1,
-1, -1, -1, -1]])
>>> rmg.node_at_cell # doctest: +NORMALIZE_WHITESPACE
array([ 6, 7, 8,
11, 12, 13])
>>> rmg.node_at_link_tail # doctest: +NORMALIZE_WHITESPACE
array([ 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7,
8, 9, 10, 11, 12, 13, 10, 11, 12, 13, 14, 15, 16, 17, 18])
>>> rmg.node_at_link_head # doctest: +NORMALIZE_WHITESPACE
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10, 11, 12,
13, 14, 11, 12, 13, 14, 15, 16, 17, 18, 19, 16, 17, 18, 19])
>>> rmg.face_at_link[20]
12
>>> rmg.active_links # doctest: +NORMALIZE_WHITESPACE
array([ 5, 6, 7, 9, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 23, 24,
25])
"""
if isinstance(spacing, float) or isinstance(spacing, int):
spacing = (spacing, spacing)
# Basic info about raster size and shape
self._nrows = num_rows
self._ncols = num_cols
self._dy, self._dx = float(spacing[0]), float(spacing[1])
self.cellarea = self._dy * self._dx
self._node_at_cell = sgrid.node_at_cell(self.shape)
self._cell_at_node = squad_cells.cell_id_at_nodes(
self.shape).reshape((-1, ))
# We need at least one row or column of boundary cells on each
# side, so the grid has to be at least 3x3
assert(np.min((num_rows, num_cols)) >= 3)
# Assign and store node (x,y,z) coordinates.
#
# The relation between node (x,y) coordinates and position is
# illustrated here for a five-column, four-row grid. The numbers show
# node positions, and the - and | symbols show the links connecting
# the nodes.
#
# 15------16------17------18------19
# | | | | |
# | | | | |
# | | | | |
# 10------11------12------13------14
# | | | | |
# | | | | |
# | | | | |
# 5-------6-------7-------8-------9
# | | | | |
# | | | | |
# | | | | |
# 0-------1-------2-------3-------4
#
(self._node_x, self._node_y) = sgrid.node_coords(
(num_rows, num_cols), (self._dy, self._dx), (0., 0.))
# Node boundary/active status:
# Next, we set up an array of "node status" values, which indicate
# whether a given node is an active, non-boundary node, or some type of
# boundary. Here we default to having all perimeter nodes be active
# fixed-value boundaries.
self._node_status[:] = sgrid.status_at_node(
self.shape, boundary_status=FIXED_VALUE_BOUNDARY)
# Cell lists:
# For all cells, we create a list of the corresponding node ID for
# each cell.
#
# Cells and faces in a five-column, four-row grid look like this
# (where the numbers are cell IDs and lines show faces):
#
# |-------|-------|-------|
# | | | |
# | 3 | 4 | 5 |
# | | | |
# |-------|-------|-------|
# | | | |
# | 0 | 1 | 2 |
# | | | |
# |-------|-------|-------|
#
# While we're at it, we will also build the node_activecell list. This
# list records, for each node, the ID of its associated active cell,
# or None if it has no associated active cell (i.e., it is a boundary)
# #self._node_at_cell = sgrid.node_at_cell(self.shape)
# #self._cell_at_node = squad_cells.cell_id_at_nodes(
# self.shape).reshape((-1, ))
self._core_cells = sgrid.core_cell_index(self.shape)
self._neighbors_at_node = (
sgrid.neighbor_node_ids(self.shape).transpose().copy())
self.__diagonal_neighbors_at_node = sgrid.diagonal_node_array(self.shape,
contiguous=True)
self._links_at_node = squad_links.links_at_node(self.shape)
# Link lists:
# For all links, we encode the "tail" and "head" nodes, and the face
# (if any) associated with the link. If the link does not intersect a
# face, then face is assigned None.
# For active links, we store the corresponding link ID.
#
# The numbering scheme for links in RasterModelGrid is illustrated with
# the example of a five-column by four-row grid (each * is a node,
# the lines show links, and the ^ and > symbols indicate the direction
# of each link: up for vertical links, and right for horizontal ones):
#
# *--27-->*--28-->*--29-->*--30-->*
# ^ ^ ^ ^ ^
# 22 23 24 25 26
# | | | | |
# *--18-->*--19-->*--20-->*--21-->*
# ^ ^ ^ ^ ^
# 13 14 15 16 17
# | | | | |
# *---9-->*--10-->*--11-->*--12-->*
# ^ ^ ^ ^ ^
# 4 5 6 7 8
# | | | | |
# *---0-->*---1-->*---2-->*---3-->*
#
# create the tail-node and head-node lists
(self._node_at_link_tail,
self._node_at_link_head) = sgrid.node_index_at_link_ends(self.shape)
self._status_at_link = np.full(squad_links.number_of_links(self.shape),
INACTIVE_LINK, dtype=int)
# Sort them by midpoint coordinates
self._sort_links_by_midpoint()
# set up in-link and out-link matrices and numbers
self._setup_inlink_and_outlink_matrices()
# Flag indicating whether we have created diagonal links.
self._diagonal_links_created = False
# set up the list of active links
self._reset_link_status_list()
# Create 2D array containing, for each node, direction of connected
# link (1=incoming, -1=outgoing, 0=no link present at this position)
# needs to come after BC setting
self._create_link_dirs_at_node()
# set up link unit vectors and node unit-vector sums
self._create_link_unit_vectors()
# set up link faces
#
# Here we assume that we've already created a list of active links
# in which all 4 boundaries are "open", such that each boundary node
# (except the 4 corners) is connected to an adjacent interior node. In
# this case, there will be the same number of faces as active links,
# and the numbering of faces will be the same as the corresponding
# active links. We start off creating a list of all None values. Only
# those links that cross a face will have this None value replaced with
# a face ID.
self._face_at_link = sgrid.face_at_link(self.shape,
actives=self.active_links)
self._create_cell_areas_array()
# List of neighbors for each cell: we will start off with no
# list. If a caller requests it via active_neighbors_at_node or
# _create_neighbor_list, we'll create it if necessary.
self._neighbor_node_dict = {}
# List of diagonal neighbors. As with the neighbor list, we'll only
# create it if requested.
self.diagonal_list_created = False
# List of looped neighbor cells (all 8 neighbors) for
# given *cell ids* can be created if requested by the user.
self._looped_cell_neighbor_list = None
# List of second ring looped neighbor cells (all 16 neighbors) for
# given *cell ids* can be created if requested by the user.
self._looped_second_ring_cell_neighbor_list_created = False
def _setup_nodes(self):
self._nodes = np.arange(self.number_of_nodes,
dtype=int).reshape(self.shape)
return self._nodes
@property
@make_return_array_immutable
def nodes(self):
"""Get a shaped array of nodes.
Returns
-------
ndarray
Node IDs in an array shaped as *number_of_node_rows* by
*number_of_node_columns*.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.nodes
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
You can't change node ids.
>>> grid.nodes[0] = 99 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: assignment destination is read-only
LLCATS: NINF
"""
return super(RasterModelGrid, self).nodes
@property
def nodes_at_right_edge(self):
"""Get nodes along the right edge of a grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> vals = np.array([ 0, 1, 2, 3,
... 4, 5, 6, 7,
... 8, 9, 10, 11])
>>> vals[grid.nodes_at_right_edge]
array([ 3, 7, 11])
LLCATS: NINF BC SUBSET
"""
return self.nodes[:, -1]
@property
def nodes_at_top_edge(self):
"""Get nodes along the top edge of a grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> vals = np.array([ 0, 1, 2, 3,
... 4, 5, 6, 7,
... 8, 9, 10, 11])
>>> vals[grid.nodes_at_top_edge]
array([ 8, 9, 10, 11])
LLCATS: NINF BC SUBSET
"""
return self.nodes[-1, :]
@property
def nodes_at_left_edge(self):
"""Get nodes along the left edge of a grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> vals = np.array([ 0, 1, 2, 3,
... 4, 5, 6, 7,
... 8, 9, 10, 11])
>>> vals[grid.nodes_at_left_edge]
array([0, 4, 8])
LLCATS: NINF BC SUBSET
"""
return self.nodes[:, 0]
@property
def nodes_at_bottom_edge(self):
"""Get nodes along the bottom edge of a grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> vals = np.array([ 0, 1, 2, 3,
... 4, 5, 6, 7,
... 8, 9, 10, 11])
>>> vals[grid.nodes_at_bottom_edge]
array([0, 1, 2, 3])
LLCATS: NINF BC SUBSET
"""
return self.nodes[0, :]
def nodes_at_edge(self, edge):
"""Get edge nodes by edge name.
Parameters
----------
edge : {'right', 'top', 'left', 'bottom'}
Edge location.
Returns
-------
slice
Slice of the nodes on an edge.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> vals = np.array([ 0, 1, 2, 3,
... 4, 5, 6, 7,
... 8, 9, 10, 11])
>>> vals[grid.nodes_at_edge('left')]
array([0, 4, 8])
LLCATS: NINF BC SUBSET
"""
if edge not in ('right', 'top', 'left', 'bottom'):
raise ValueError('value for edge not understood')
return getattr(self, 'nodes_at_{edge}_edge'.format(edge=edge))
def _create_cell_areas_array(self):
"""Set up array of cell areas.
This method supports the creation of the array that stores cell areas.
It is not meant to be called manually.
"""
self._area_of_cell = np.full(self.number_of_cells, self.dx * self.dy,
dtype=float)
return self._area_of_cell
def _create_cell_areas_array_force_inactive(self):
"""Set up array cell areas including extra cells for perimeter nodes.
This method supports the creation of the array that stores cell areas.
It differs from _create_cell_areas_array in that it forces ALL nodes to
have a surrounding cell, which is not actually the case for the generic
perimeter node (these are unbounded). This is only possible because the
grid is a raster.
It is not meant to be called manually.
"""
self._forced_cell_areas = np.full(self.shape, self.dx * self.dy,
dtype=float)
self._forced_cell_areas[(0, -1), :] = 0.
self._forced_cell_areas[:, (0, -1)] = 0.
self._forced_cell_areas.shape = (-1, )
return self._forced_cell_areas
@property
def shape(self):
"""Get the shape of the grid.
Returns
-------
shape : tuple of ints
The shape of the grid as number of node rows and node columns.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.shape
(3, 4)
LLCATS: GINF NINF
"""
return (self.number_of_node_rows, self.number_of_node_columns)
@property
def cell_grid_shape(self):
"""Get the shape of the cellular grid (grid with only cells).
Returns
-------
shape : tuple of ints
The shape of the cellular grid as number of cell rows and cell
columns.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.cell_grid_shape
(1, 2)
LLCATS: GINF CINF
"""
return (self.number_of_cell_rows, self.number_of_cell_columns)
@property
def dx(self):
"""Get node spacing in the column direction.
Returns
-------
float
Spacing of node columns.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.dx
1.0
>>> grid = RasterModelGrid((4, 5), 2.0)
>>> grid.dx
2.0
LLCATS: GINF MEAS
"""
return self._dx
@property
def dy(self):
"""Get node spacing in the row direction.
        Note that in a RasterModelGrid, dy and dx need not be equal.
Returns
-------
float
Spacing of node rows.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.dy
1.0
>>> grid = RasterModelGrid((4, 5), spacing=(2, 4))
>>> grid.dy
2.0
LLCATS: GINF MEAS
"""
return self._dy
@property
@make_return_array_immutable
def _diagonal_neighbors_at_node(self):
"""Get diagonally neighboring nodes.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
methods like this may be soon superceded.
Order is LL standard, CCW from east. i.e., [NE, NW, SW, SE].
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 3))
>>> diagonals = grid._diagonal_neighbors_at_node.copy()
>>> diagonals[diagonals == BAD_INDEX_VALUE] = -1
>>> diagonals # doctest: +NORMALIZE_WHITESPACE
array([[ 4, -1, -1, -1], [ 5, 3, -1, -1], [-1, 4, -1, -1],
[ 7, -1, -1, 1], [ 8, 6, 0, 2], [-1, 7, 1, -1],
[10, -1, -1, 4], [11, 9, 3, 5], [-1, 10, 4, -1],
[-1, -1, -1, 7], [-1, -1, 6, 8], [-1, -1, 7, -1]])
LLCATS: DEPR NINF CONN
"""
return self.__diagonal_neighbors_at_node
@deprecated(use='vals[links_at_node]*active_link_dirs_at_node',
version=1.0)
def _active_links_at_node(self, *args):
"""_active_links_at_node([node_ids])
Active links of a node.
Parameters
----------
node_ids : int or list of ints
ID(s) of node(s) for which to find connected active links
Returns
-------
(4, N) ndarray
The ids of active links attached to grid nodes with
*node_ids*. If *node_ids* is not given, return links for all of the
nodes in the grid. Link ids are listed in clockwise order starting
with the south link. Diagonal links are never returned.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.links_at_node[5]
array([ 8, 11, 7, 4])
>>> rmg._active_links_at_node((5, 6))
array([[ 4, 5],
[ 7, 8],
[11, 12],
[ 8, 9]])
>>> rmg._active_links_at_node()
array([[-1, -1, -1, -1, -1, 4, 5, -1, -1, 11, 12, -1],
[-1, -1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1],
[-1, 4, 5, -1, -1, 11, 12, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1, -1]])
LLCATS: DEPR LINF NINF
"""
if len(args) == 0:
return np.vstack((self._node_active_inlink_matrix2,
self._node_active_outlink_matrix2))
elif len(args) == 1:
node_ids = np.broadcast_arrays(args[0])[0]
return (
np.vstack((self._node_active_inlink_matrix2[:, node_ids],
self._node_active_outlink_matrix2[:, node_ids])
).reshape(4, -1))
else:
raise ValueError('only zero or one arguments accepted')
@property
def _number_of_d8_links(self):
"""
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
LLCATS: GINF LINF
"""
return self.number_of_links + self._number_of_diagonal_links
@property
def _number_of_d8_active_links(self):
"""
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
LLCATS: GINF NINF BC
"""
try:
return self._num_d8_active_links
except AttributeError:
self._num_d8_active_links = self._d8_active_links()[0].size
# this creates the diagonals as well, but that's appropriate if
# you're already asking for this property
return self._num_d8_active_links
@property
@return_readonly_id_array
def _diagonal_links_at_node(self, *args):
"""Diagonal links attached to nodes.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Link ids are listed in counterclockwise order starting from east
(i.e., [NE, NW, SW, SE]).
(was formerly clockwise from south; [SW,NW,NE,SE])
This method only returns diagonal links.
Call links_at_node for all links, and orthogonal_links_at_node for
orthogonal links.
Returns
-------
(N, 4) ndarray
Diagonal neighbor node IDs for the source nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg._diagonal_links_at_node.shape == (12, 4)
True
>>> mg._diagonal_links_at_node[5]
array([25, 24, 17, 20])
>>> mg._diagonal_links_at_node[7]
array([-1, 28, 21, -1])
LLCATS: NINF LINF CONN
"""
try:
return self._diag_links_at_node
except AttributeError:
self._create_diag_links_at_node()
return self._diag_links_at_node
def _create_diag_links_at_node(self):
"""
Create the diagonal link list.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
"""
n_diagonal_links = 2 * (self._nrows - 1) * (self._ncols - 1)
self._diag_link_fromnode = np.zeros(n_diagonal_links, dtype=int)
self._diag_link_tonode = np.zeros(n_diagonal_links, dtype=int)
i = 0
for r in range(self._nrows - 1):
for c in range(self._ncols - 1):
self._diag_link_fromnode[i] = c + r * self._ncols
self._diag_link_tonode[i] = (c + 1) + (r + 1) * self._ncols
i += 1
self._diag_link_fromnode[i] = (c + 1) + r * self._ncols
self._diag_link_tonode[i] = c + (r + 1) * self._ncols
i += 1
self._diagonal_links_created = True
self._reset_list_of_active_diagonal_links()
self._diag_links_at_node = np.empty((self.number_of_nodes, 4),
dtype=int)
self._diag_links_at_node.fill(-1)
        # Number of patches is number of diagonal links / 2
self._diag_links_at_node[:, 0][np.setdiff1d(np.arange(
self.number_of_nodes), np.union1d(
self.nodes_at_right_edge, self.nodes_at_top_edge))] = \
(np.arange(0, self.number_of_patches*2, 2) +
self.number_of_links)
self._diag_links_at_node[:, 1][np.setdiff1d(np.arange(
self.number_of_nodes), np.union1d(
self.nodes_at_left_edge, self.nodes_at_top_edge))] = \
(np.arange(0, self.number_of_patches*2, 2) + 1 +
self.number_of_links)
self._diag_links_at_node[:, 2][np.setdiff1d(np.arange(
self.number_of_nodes), np.union1d(
self.nodes_at_left_edge, self.nodes_at_bottom_edge))] = \
(np.arange(0, self.number_of_patches*2, 2) +
self.number_of_links)
self._diag_links_at_node[:, 3][np.setdiff1d(np.arange(
self.number_of_nodes), np.union1d(
self.nodes_at_right_edge, self.nodes_at_bottom_edge))] = \
(np.arange(0, self.number_of_patches*2, 2) + 1 +
self.number_of_links)
# now set up the supporting data strs:
self._diag__link_dirs_at_node = np.empty((self.number_of_nodes, 4),
dtype=int)
self._diag__link_dirs_at_node[:, :] = [-1, -1, 1, 1] # default inside
self._diag__link_dirs_at_node[self.nodes_at_bottom_edge] = [
-1, -1, 0, 0]
self._diag__link_dirs_at_node[self.nodes_at_top_edge] = [
0, 0, 1, 1]
self._diag__link_dirs_at_node[self.nodes_at_left_edge] = [
-1, 0, 0, 1]
self._diag__link_dirs_at_node[self.nodes_at_right_edge] = [
0, -1, 1, 0]
self._diag__link_dirs_at_node[self.nodes_at_corners_of_grid] = [
[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]
self._diag__active_link_dirs_at_node = \
self._diag__link_dirs_at_node.copy()
inactive_diags = np.ones(self._number_of_d8_links+1, dtype=bool)
inactive_diags[self._diag_active_links] = False
        # note the extended array True-at-end trick is in play here
inactive_links = inactive_diags[self._diag_links_at_node]
self._diag__active_link_dirs_at_node[inactive_links] = 0
self._reset_diag_active_link_dirs()
@property
@make_return_array_immutable
def horizontal_links(self):
"""
LLCATS: LINF
"""
try:
return self._horizontal_links
except AttributeError:
self._horizontal_links = squad_links.horizontal_link_ids(
self.shape)
return self._horizontal_links
@property
@make_return_array_immutable
def vertical_links(self):
"""
LLCATS: LINF
"""
try:
return self._vertical_links
except AttributeError:
self._vertical_links = squad_links.vertical_link_ids(
self.shape)
return self._vertical_links
@property
@return_readonly_id_array
def patches_at_node(self):
"""Get array of patches attached to nodes.
Returns a (N, 4) array of the patches associated with each node in the
grid.
The four possible patches are returned in order CCW from east, i.e.,
NE, NW, SW, SE.
Missing patches are indexed -1.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 3))
>>> mg.patches_at_node
array([[ 0, -1, -1, -1],
[ 1, 0, -1, -1],
[-1, 1, -1, -1],
[ 2, -1, -1, 0],
[ 3, 2, 0, 1],
[-1, 3, 1, -1],
[-1, -1, -1, 2],
[-1, -1, 2, 3],
[-1, -1, 3, -1]])
LLCATS: PINF NINF CONN
"""
try:
return self.node_patch_matrix
except AttributeError:
self.node_patch_matrix = np.full((self.number_of_nodes, 4),
-1, dtype=int)
self.node_patch_matrix[:, 2][
np.setdiff1d(np.arange(self.number_of_nodes),
np.union1d(self.nodes_at_left_edge,
self.nodes_at_bottom_edge))] = \
np.arange(self.number_of_patches)
self.node_patch_matrix[:, 3][
np.setdiff1d(np.arange(self.number_of_nodes),
np.union1d(self.nodes_at_right_edge,
self.nodes_at_bottom_edge))] = \
np.arange(self.number_of_patches)
self.node_patch_matrix[:, 1][
np.setdiff1d(np.arange(self.number_of_nodes),
np.union1d(self.nodes_at_left_edge,
self.nodes_at_top_edge))] = \
np.arange(self.number_of_patches)
self.node_patch_matrix[:, 0][
np.setdiff1d(np.arange(self.number_of_nodes),
np.union1d(self.nodes_at_right_edge,
self.nodes_at_top_edge))] = \
np.arange(self.number_of_patches)
# we no longer blank out any patches that have a closed node as any
# vertex, per modern LL style. Instead, we will make a closed/open
# mask
self._patches_created = True
return self.node_patch_matrix
@property
@return_readonly_id_array
def nodes_at_patch(self):
"""Get array of nodes of a patch.
Returns the four nodes at the corners of each patch in a regular grid.
Shape of the returned array is (nnodes, 4). Returns in order CCW from
east, i.e., [NE, NW, SW, SE].
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 3))
>>> mg.nodes_at_patch
array([[4, 3, 0, 1],
[5, 4, 1, 2],
[7, 6, 3, 4],
[8, 7, 4, 5]])
LLCATS: NINF PINF CONN
"""
self._patches_created = True
base = np.arange(self.number_of_patches)
bottom_left_corner = base + base // (self._ncols - 1)
return np.column_stack((bottom_left_corner + self._ncols + 1,
bottom_left_corner + self._ncols,
bottom_left_corner,
bottom_left_corner + 1))
@property
@return_readonly_id_array
def links_at_patch(self):
"""Get array of links defining each patch.
Examples
--------
>>> mg = RasterModelGrid((3, 4))
>>> mg.links_at_patch
array([[ 4, 7, 3, 0],
[ 5, 8, 4, 1],
[ 6, 9, 5, 2],
[11, 14, 10, 7],
[12, 15, 11, 8],
[13, 16, 12, 9]])
LLCATS: PINF LINF CONN
"""
self._patches_created = True
base = np.arange(self.number_of_patches)
bottom_edge = base + (base // (self._ncols - 1)) * self._ncols
return np.column_stack((bottom_edge + self._ncols,
bottom_edge + 2 * self._ncols - 1,
bottom_edge + self._ncols - 1,
bottom_edge))
@property
@return_readonly_id_array
def patches_at_link(self):
"""Get array of patches adjoined to each link.
        Missing patches are indexed as -1.
Examples
--------
>>> mg = RasterModelGrid((3, 4))
>>> mg.patches_at_link
array([[ 0, -1],
[ 1, -1],
[ 2, -1],
[ 0, -1],
[ 0, 1],
[ 1, 2],
[ 2, -1],
[ 0, 3],
[ 1, 4],
[ 2, 5],
[ 3, -1],
[ 3, 4],
[ 4, 5],
[ 5, -1],
[ 3, -1],
[ 4, -1],
[ 5, -1]])
LLCATS: PINF LINF CONN
"""
try:
return self._patches_at_link
except AttributeError:
self._create_patches_at_link()
return self._patches_at_link
def _create_patches_at_link(self):
from .cfuncs import create_patches_at_element
self._patches_created = True
self._patches_at_link = np.empty((self.number_of_links, 2),
dtype=int)
self._patches_at_link.fill(-1)
create_patches_at_element(self.links_at_patch, self.number_of_links,
self._patches_at_link)
# a sort of the links will be performed here once we have corners
def _create_link_dirs_at_node(self):
"""Make array with link directions at each node
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg._links_at_node
array([[ 0, 3, -1, -1],
[ 1, 4, 0, -1],
[ 2, 5, 1, -1],
[-1, 6, 2, -1],
[ 7, 10, -1, 3],
[ 8, 11, 7, 4],
[ 9, 12, 8, 5],
[-1, 13, 9, 6],
[14, -1, -1, 10],
[15, -1, 14, 11],
[16, -1, 15, 12],
[-1, -1, 16, 13]])
>>> rmg._link_dirs_at_node
array([[-1, -1, 0, 0],
[-1, -1, 1, 0],
[-1, -1, 1, 0],
[ 0, -1, 1, 0],
[-1, -1, 0, 1],
[-1, -1, 1, 1],
[-1, -1, 1, 1],
[ 0, -1, 1, 1],
[-1, 0, 0, 1],
[-1, 0, 1, 1],
[-1, 0, 1, 1],
[ 0, 0, 1, 1]], dtype=int8)
"""
# Create arrays for link-at-node information
self._link_dirs_at_node = np.zeros((self.number_of_nodes, 4),
dtype=np.int8)
num_links_per_row = (self.number_of_node_columns * 2) - 1
# Sweep over all links
for lk in range(self.number_of_links):
# Find the orientation
is_horiz = ((lk % num_links_per_row) <
(self.number_of_node_columns - 1))
# Find the IDs of the tail and head nodes
t = self.node_at_link_tail[lk]
h = self.node_at_link_head[lk]
# If the link is horizontal, the index (row) in the links_at_node
# array should be 0 (east) for the tail node, and 2 (west) for the
# head node.
# If vertical, the index should be 1 (north) for the tail node and
# 3 (south) for the head node.
if is_horiz:
tail_index = 0
head_index = 2
else:
tail_index = 1
head_index = 3
# Add this link to the list for this node, set the direction
# (outgoing, indicated by -1), and increment the number found so
# far
self._link_dirs_at_node[t][tail_index] = -1
self._link_dirs_at_node[h][head_index] = 1
# setup the active link equivalent
self._active_link_dirs_at_node = self._link_dirs_at_node.copy()
inactive_links = (self.status_at_link[self.links_at_node] ==
INACTIVE_LINK)
inactive_links[self.link_dirs_at_node == 0] = False
self._active_link_dirs_at_node[inactive_links] = 0
@deprecated(use='no replacement', version=1.0)
def _setup_inlink_and_outlink_matrices(self):
"""Set up matrices that hold the inlinks and outlinks for each node.
Creates data structures to record the numbers of inlinks and outlinks
for each node. An inlink of a node is simply a link that has the node
as its "to" node, and an outlink is a link that has the node as its
"from".
We store the inlinks in a 2-row by num_nodes-column matrix called
_node_inlink_matrix. It has two rows because we know that the nodes in
        our raster grid will never have more than two inlinks and two outlinks
each (a given node could also have zero or one of either). The outlinks
are stored in a similar matrix.
The order of inlinks is [SOUTH, WEST].
The order of outlinks is [NORTH, EAST].
We also keep track of the total number of inlinks and outlinks at each
node in the num_inlinks and num_outlinks arrays.
The inlink and outlink matrices are useful in numerical calculations.
Each row of each matrix contains one inlink or outlink per node. So, if
you have a corresponding "flux" matrix, you can map incoming or
outgoing fluxes onto the appropriate nodes. More information on this is
in the various calculate_flux_divergence... functions.
What happens if a given node does not have two inlinks or outlinks? We
simply put the default value -1 in this case. This allows us to use a
cute little trick when computing inflows and outflows. We make our
"flux" array one element longer than the number of links, with the last
element containing the value 0. Thus, any time we add an influx from
link number -1, Python takes the value of the last element in the
array, which is zero. By doing it this way, we maintain the efficiency
that comes with the use of numpy. Again, more info can be found in the
description of the flux divergence functions.
DEJH notes that we may be using BAD_INDEX_VALUE (an arbitrary very
large number), not -1, now.
        If you want to use this trick, you'll have to search for BAD_INDEX_VALUE
manually now.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
"""
(self._node_inlink_matrix,
self._node_numinlink) = sgrid.setup_inlink_matrix(self.shape)
(self._node_outlink_matrix,
self._node_numoutlink) = sgrid.setup_outlink_matrix(self.shape)
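
    # Worked illustration of the "-1 index" trick described above (illustrative
    # variable names, not part of the original code): append a trailing zero to a
    # per-link flux array, and the -1 entries that mark missing links then pick
    # up that zero when the inlink matrix is used for fancy indexing, e.g.
    #   flux = np.append(flux_at_link, 0.)
    #   inflow_at_node = flux[self._node_inlink_matrix].sum(axis=0)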
@deprecated(use='no replacement', version=1.0)
def _setup_active_inlink_and_outlink_matrices(self):
"""Set up matrices that hold active inlinks and outlinks for each node.
Creates data structures to record the numbers of active inlinks and
active outlinks for each node. These data structures are equivalent to
the "regular" inlink and outlink matrices, except that it uses the IDs
of active links (only).
"""
node_status = self._node_status != CLOSED_BOUNDARY
(self._node_active_inlink_matrix,
self._node_numactiveinlink) = sgrid.setup_active_inlink_matrix(
self.shape, node_status=node_status)
(self._node_active_outlink_matrix,
self._node_numactiveoutlink) = sgrid.setup_active_outlink_matrix(
self.shape, node_status=node_status)
(self._node_active_inlink_matrix2,
self._node_numactiveinlink) = sgrid.setup_active_inlink_matrix2(
self.shape, node_status=node_status)
(self._node_active_outlink_matrix2,
self._node_numactiveoutlink) = sgrid.setup_active_outlink_matrix2(
self.shape, node_status=node_status)
def _reset_list_of_active_diagonal_links(self):
"""Reset the active diagonal links.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Assuming the diagonal links have already been created elsewhere, this
helper method checks their statuses (active/inactive) for internal
consistency after the BC status of some nodes has been changed.
Note that the IDs of the diagonal links need to be compatible with the
"normal" links - so we add self.number_links to these IDs.
Assumes _create_diag_links_at_node() has been called, either explicitly
or by another grid method (e.g., _d8_active_links()).
"""
assert(self._diagonal_links_created), 'Diagonal links not created'
self._diag_activelink_fromnode = []
self._diag_activelink_tonode = []
diag_fromnode_status = self.status_at_node[self._diag_link_fromnode]
diag_tonode_status = self.status_at_node[self._diag_link_tonode]
diag_active_links = (((diag_fromnode_status == CORE_NODE) & ~
(diag_tonode_status == CLOSED_BOUNDARY)) |
((diag_tonode_status == CORE_NODE) & ~
(diag_fromnode_status == CLOSED_BOUNDARY)))
(_diag_active_links, ) = np.where(diag_active_links)
_diag_active_links = as_id_array(_diag_active_links)
diag_fixed_links = ((((diag_fromnode_status ==
FIXED_GRADIENT_BOUNDARY) &
(diag_tonode_status == CORE_NODE)) |
((diag_tonode_status == FIXED_GRADIENT_BOUNDARY) &
(diag_fromnode_status == CORE_NODE))))
(_diag_fixed_links, ) = np.where(diag_fixed_links)
_diag_fixed_links = as_id_array(_diag_fixed_links)
self._diag_activelink_fromnode = self._diag_link_fromnode[
_diag_active_links]
self._diag_activelink_tonode = self._diag_link_tonode[
_diag_active_links]
self._diag_active_links = _diag_active_links + self.number_of_links
        self._diag_fixed_links = _diag_fixed_links + self.number_of_links
self._diag_inactive_links = np.setdiff1d(np.arange(
self.number_of_links, self._number_of_d8_links),
self._diag_active_links)
self._diag_inactive_links = np.setdiff1d(
self._diag_inactive_links, self._diag_fixed_links)
self._all__d8_active_links = np.concatenate((self.active_links,
self._diag_active_links))
normal_inactive = np.where(self.status_at_link == INACTIVE_LINK)[0]
self._all__d8_inactive_links = np.concatenate(
(normal_inactive, self._diag_inactive_links))
def _reset_diagonal_link_statuses(self):
"""Rest the statuses of diagonal links.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Assuming the diagonal links have already been created elsewhere, this
helper method checks their statuses (active/inactive/fixed) for
internal consistency after the BC status of some nodes has been
changed.
Note that the IDs of the diagonal links need to be compatible with the
"normal" links - so we add self.number_links to these IDs.
Assumes _create_diag_links_at_node() has been called, either
explicitly or by another grid method (e.g., _d8_active_links()).
"""
assert(self._diagonal_links_created), 'Diagonal links not created'
self._diag_activelink_fromnode = []
self._diag_activelink_tonode = []
try:
already_fixed = self._status_at_link == FIXED_LINK
except AttributeError:
already_fixed = np.zeros(self.number_of_links, dtype=bool)
diag_fromnode_status = self._node_status[self._diag_link_fromnode]
diag_tonode_status = self._node_status[self._diag_link_tonode]
if not np.all((diag_fromnode_status[already_fixed] ==
FIXED_GRADIENT_BOUNDARY) |
(diag_tonode_status[already_fixed] ==
FIXED_GRADIENT_BOUNDARY)):
assert np.all(diag_fromnode_status[already_fixed] ==
CLOSED_BOUNDARY != diag_tonode_status[
already_fixed] == CLOSED_BOUNDARY)
diag_fromnode_status[already_fixed] = np.where(
diag_fromnode_status[already_fixed] == CLOSED_BOUNDARY,
FIXED_GRADIENT_BOUNDARY,
diag_fromnode_status[already_fixed])
diag_tonode_status[already_fixed] = np.where(
diag_tonode_status[already_fixed] == CLOSED_BOUNDARY,
FIXED_GRADIENT_BOUNDARY,
diag_tonode_status[already_fixed])
diag_active_links = (((diag_fromnode_status == CORE_NODE) & ~
(diag_tonode_status == CLOSED_BOUNDARY)) |
((diag_tonode_status == CORE_NODE) & ~
(diag_fromnode_status == CLOSED_BOUNDARY)))
# ...this still includes things that will become fixed_link
diag_fixed_links = ((((diag_fromnode_status ==
FIXED_GRADIENT_BOUNDARY) &
(diag_tonode_status == CORE_NODE)) |
((diag_tonode_status == FIXED_GRADIENT_BOUNDARY) &
(diag_fromnode_status == CORE_NODE))) |
already_fixed)
        (_diag_active_links, ) = np.where(np.logical_and(
            diag_active_links, np.logical_not(diag_fixed_links)))
        _diag_active_links = as_id_array(_diag_active_links)
self._diag_activelink_fromnode = self._diag_link_fromnode[
_diag_active_links]
self._diag_activelink_tonode = self._diag_link_tonode[
_diag_active_links]
self._diag_active_links = _diag_active_links + self.number_of_links
        (_diag_fixed_links, ) = np.where(diag_fixed_links)
        self._diag_fixed_links = as_id_array(
            _diag_fixed_links) + self.number_of_links
self._reset_diag_active_link_dirs()
def _reset_diag_active_link_dirs(self):
self._diag__active_link_dirs_at_node = \
self._diag_link_dirs_at_node.copy()
inactive_diags = np.ones(self._number_of_d8_links+1, dtype=bool)
inactive_diags[self._diag_active_links] = False
        # note the extended-array True-at-end trick is in play here
inactive_links = inactive_diags[self._diag_links_at_node]
self._diag__active_link_dirs_at_node[inactive_links] = 0
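    # The "extra True at the end" trick used above deserves a note: because
    # missing links are stored as -1, padding the inactive mask with one
    # trailing True slot makes a -1 index land on that padding rather than on
    # a real link. A hedged, self-contained sketch (array sizes are made up):
    #
    #     import numpy as np
    #     n_links = 4
    #     active = np.array([0, 2])                    # IDs of active links
    #     inactive = np.ones(n_links + 1, dtype=bool)  # extra True at end
    #     inactive[active] = False
    #     links_at_node = np.array([[0, 1], [2, -1]])  # -1 means "no link"
    #     dirs_at_node = np.array([[1, -1], [1, 0]])
    #     dirs_at_node[inactive[links_at_node]] = 0    # -1 hits the padding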
def _reset_link_status_list(self):
"""Rest the status of links.
Assuming the link_status array has already been created elsewhere, this
helper method checks link statuses for internal
consistency after the BC status of some nodes has been changed.
"""
super(RasterModelGrid, self)._reset_link_status_list()
if self._diagonal_links_created:
self._reset_list_of_active_diagonal_links()
self._reset_diag_active_link_dirs()
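    # These consistency resets are normally reached indirectly: as the doctest
    # examples elsewhere in this class show, changing node boundary status is
    # enough to refresh the active-link bookkeeping. A hedged sketch of that
    # workflow:
    #
    #     from landlab import RasterModelGrid, CLOSED_BOUNDARY
    #     grid = RasterModelGrid((4, 5))
    #     grid.status_at_node[grid.nodes_at_left_edge] = CLOSED_BOUNDARY
    #     grid.active_links  # now excludes links into the closed left edge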
def _create_link_unit_vectors(self):
"""Make arrays to store the unit vectors associated with each link.
Creates self.link_unit_vec_x and self.link_unit_vec_y. These contain,
for each link, the x and y components of the link's unit vector (that
is, the link's x and y dimensions if it were shrunk to unit length but
retained its orientation). The length of these arrays is the number of
links plus one. The last entry in each array is set to zero, and is
used to handle references to "link -1" (meaning, a non-existent link,
whose unit vector is (0,0)).
Also builds arrays to store the unit-vector component sums for each
node: node_unit_vector_sum_x and node_unit_vector_sum_y. These are
designed to be used when mapping link vector values to nodes (one takes
the average of the x- and y-components of all connected links).
Notes
-----
.. note::
Overrides ModelGrid._create_link_unit_vectors().
Creates the following:
* `self.link_unit_vec_x`, `self.link_unit_vec_y` : `ndarray`
x and y components of unit vectors at each link (extra 0
entries at end)
        * `self.node_unit_vector_sum_x`, `self.node_unit_vector_sum_y` : `ndarray`
Sums of x & y unit vector components for each node. Sum is over all
links connected to a given node.
Examples
--------
        In the example below, links are numbered row by row: each node row
        contributes three horizontal links with unit vector (1,0), followed by
        four vertical links with unit vector (0,1) rising to the next row.
The middle columns have x-component vector sums equal to 2 (one
horizontal inlink and one horizontal outlink), while the middle rows
have y-component vector sums equal to 2 (one vertical inlink and one
vertical outlink). The rest of the entries have ones, representing the
left and right columns (only one horizontal link) and top and bottom
rows (only one vertical link).
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4), spacing=(2.0, 2.0))
>>> mg.link_unit_vec_x # doctest: +NORMALIZE_WHITESPACE
array([ 1., 1., 1., 0., 0., 0., 0.,
1., 1., 1., 0., 0., 0., 0.,
1., 1., 1., 0.])
>>> mg.link_unit_vec_y # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 1., 1., 1., 1.,
0., 0., 0., 1., 1., 1., 1.,
0., 0., 0., 0.])
>>> mg.node_unit_vector_sum_x
array([ 1., 2., 2., 1., 1., 2., 2., 1., 1., 2., 2., 1.])
>>> mg.node_unit_vector_sum_y
array([ 1., 1., 1., 1., 2., 2., 2., 2., 1., 1., 1., 1.])
"""
        # Create the unit vectors for each link.
        # Horizontal and vertical link IDs are looked up explicitly, so this
        # works with the current row-by-row link ordering.
self._link_unit_vec_x = np.zeros(self.number_of_links + 1, dtype=float)
self._link_unit_vec_y = np.zeros(self.number_of_links + 1, dtype=float)
# n_vert_links = (self.number_of_node_rows - 1) * \
# self.number_of_node_columns
# self._link_unit_vec_y[:n_vert_links] = 1.0
# self._link_unit_vec_x[n_vert_links:self.number_of_links] = 1.0
self._link_unit_vec_x[squad_links.horizontal_link_ids(self.shape)] = 1.
self._link_unit_vec_y[squad_links.vertical_link_ids(self.shape)] = 1.
# While we're at it, calculate the unit vector sums for each node.
# These will be useful in averaging link-based vectors at the nodes.
# To do this, we take advantage of the node inlink and outlink
# matrices, each of which has 2 rows, corresponding to the maximum
# possible 2 inlinks and 2 outlinks in a raster grid.
#
# Create the arrays
self._node_unit_vector_sum_x = np.zeros(self.number_of_nodes)
self._node_unit_vector_sum_y = np.zeros(self.number_of_nodes)
# x-component contribution from inlinks
self._node_unit_vector_sum_x += np.abs(
self._link_unit_vec_x[self._node_inlink_matrix[0, :]])
self._node_unit_vector_sum_x += np.abs(
self._link_unit_vec_x[self._node_inlink_matrix[1, :]])
# x-component contribution from outlinks
self._node_unit_vector_sum_x += np.abs(
self._link_unit_vec_x[self._node_outlink_matrix[0, :]])
self._node_unit_vector_sum_x += np.abs(
self._link_unit_vec_x[self._node_outlink_matrix[1, :]])
# y-component contribution from inlinks
self._node_unit_vector_sum_y += np.abs(
self._link_unit_vec_y[self._node_inlink_matrix[0, :]])
self._node_unit_vector_sum_y += np.abs(
self._link_unit_vec_y[self._node_inlink_matrix[1, :]])
# y-component contribution from outlinks
self._node_unit_vector_sum_y += np.abs(
self._link_unit_vec_y[self._node_outlink_matrix[0, :]])
self._node_unit_vector_sum_y += np.abs(
self._link_unit_vec_y[self._node_outlink_matrix[1, :]])
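    # The per-node unit-vector sums built above exist to support mapping
    # link-based vector fields onto nodes. A hedged sketch of that use (this
    # is not a grid method; 'flux_at_link' is a hypothetical link field):
    #
    #     grid = RasterModelGrid((3, 4))
    #     flux_at_link = np.arange(grid.number_of_links, dtype=float)
    #     fx = np.zeros(grid.number_of_nodes)
    #     for matrix in (grid._node_inlink_matrix, grid._node_outlink_matrix):
    #         for row in matrix:  # each row holds one link ID per node
    #             # -1 entries index the zero-padded tail of link_unit_vec_x,
    #             # so absent links contribute nothing
    #             fx += np.abs(grid.link_unit_vec_x[row]) * flux_at_link[row]
    #     fx /= grid.node_unit_vector_sum_x  # x-weighted average at nodes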
def _make_faces_at_cell(self, *args):
"""faces_at_cell([cell_id])
Get array of faces of a cell.
Return an array of the face IDs for the faces of a cell with ID,
*cell_id*. The faces are listed clockwise, starting with the bottom
face. *cell_id* can be either a scalar or an array. If an array,
return the faces for each cell of the array.
Parameters
----------
cell_id : array_like
Grid cell ids.
Returns
-------
(N, 4) ndarray
Face IDs
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.faces_at_cell[0]
array([4, 7, 3, 0])
>>> rmg.faces_at_cell
array([[ 4, 7, 3, 0],
[ 5, 8, 4, 1],
[ 6, 9, 5, 2],
[11, 14, 10, 7],
[12, 15, 11, 8],
[13, 16, 12, 9]])
"""
if len(args) == 0:
cell_ids = np.arange(self.number_of_cells)
elif len(args) == 1:
cell_ids = np.broadcast_arrays(args[0])[0].ravel()
else:
raise ValueError()
node_ids = self.node_at_cell[cell_ids]
inlinks = self._node_inlink_matrix[:, node_ids].T
outlinks = self._node_outlink_matrix[:, node_ids].T
        self._faces_at_cell = np.squeeze(np.concatenate(
(self._face_at_link[inlinks],
self._face_at_link[outlinks]), axis=1))
def _setup_link_at_face(self):
"""Set up links associated with faces.
Returns an array of the link IDs for the links which intersect the
        faces specified by *face_id*. *face_id* can be either a scalar or an
array.
Parameters
----------
face_id : int
Face of a cell.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5))
>>> mg.link_at_face[0]
5
>>> mg.link_at_face[(0, 4, 13), ]
array([ 5, 10, 21])
"""
self._link_at_face = squad_faces.link_at_face(self.shape)
return self._link_at_face
def _create_face_at_link(self):
"""Set up array of faces associated with links.
Return an array of the face IDs for the faces that intersect the links
specified by *link_id*. *link_id* can be either a scalar or array. If
*link_id* is not given, return the faces of all links.
If a link does not have an associated face (e.g., some inactive links),
that entry in the returned array is set to `BAD_INDEX_VALUE`.
Parameters
----------
link_id : array-like, optional
Grid links.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.face_at_link[5]
0
>>> faces = rmg.face_at_link[(0, 1, 15, 19, 12, 26), ]
>>> faces[faces == BAD_INDEX_VALUE] = -1
>>> faces
array([-1, -1, 8, 11, 6, -1])
"""
self._face_at_link = squad_faces.face_at_link(self.shape)
return self._face_at_link
@property
def extent(self):
"""Extent of the grid in the y and x-dimensions.
Return the y and x-dimension of the grid. Because boundary nodes
don't have cells, the dimension of the grid is
``((num_rows - 1) * dy, (num_columns - 1) * dx)``, not
``(num_rows * dy, num_cols * dx)``.
Returns
-------
(y_extent, x_extent) : tuple of float
Length of the grid in the y and x-dimensions.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.extent
(3.0, 4.0)
>>> grid = RasterModelGrid((4, 5), 2.)
>>> grid.extent
(6.0, 8.0)
>>> grid = RasterModelGrid((4, 5), spacing=(2, 3))
>>> grid.extent
(6.0, 12.0)
LLCATS: GINF MEAS
"""
        # Method added 5/1/13 by DEJH, modified DEJH 4/3/14 to reflect the
        # fact that boundary nodes don't have defined cells.
return (
(self.number_of_node_rows - 1) * self._dy,
(self.number_of_node_columns - 1) * self._dx)
@property
def grid_xdimension(self):
"""Length of the grid in the x-dimension.
Return the x-dimension of the grid. Because boundary nodes don't have
cells, the dimension of the grid is num_cols-1, not num_cols.
Returns
-------
float
Length of the grid in the x-dimension.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.grid_xdimension
4.0
>>> grid = RasterModelGrid((4, 5), 0.5)
>>> grid.grid_xdimension
2.0
>>> grid = RasterModelGrid((4, 5), spacing=(2, 3))
>>> grid.grid_xdimension
12.0
LLCATS: GINF MEAS
"""
return ((self.number_of_node_columns - 1) * self._dx)
@property
def grid_ydimension(self):
"""Length of the grid in the y-dimension.
Return the y-dimension of the grid. Because boundary nodes don't have
cells, the dimension of the grid is num_rows-1, not num_rows.
Returns
-------
float
Length of the grid in the y-dimension.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.grid_ydimension
3.0
>>> grid = RasterModelGrid((4, 5), 0.5)
>>> grid.grid_ydimension
1.5
>>> grid = RasterModelGrid((4, 5), spacing=(2, 3))
>>> grid.grid_ydimension
6.0
LLCATS: GINF MEAS
"""
# Method added 5/1/13 by DEJH, modified DEJH 4/3/14, as above.
return ((self.number_of_node_rows - 1) * self._dy)
@property
def number_of_interior_nodes(self):
"""Number of interior nodes.
Returns the number of interior nodes on the grid, i.e., non-perimeter
nodes. Compare self.number_of_core_nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_interior_nodes
6
LLCATS: NINF
"""
return sgrid.interior_node_count(self.shape)
@property
def number_of_node_columns(self):
"""Number of node columns.
Returns the number of columns, including boundaries.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_node_columns
5
LLCATS: GINF NINF
"""
return self._ncols
@property
def number_of_node_rows(self):
"""Number of node rows.
Returns the number of rows, including boundaries.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_node_rows
4
LLCATS: GINF NINF
"""
return self._nrows
@property
def number_of_cell_columns(self):
"""Number of cell columns.
        Returns the number of cell columns, which excludes the boundary
        columns.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_cell_columns
3
LLCATS: GINF NINF
"""
return self._ncols - 2
@property
def number_of_cell_rows(self):
"""Number of cell rows.
        Returns the number of cell rows, which excludes the boundary rows.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_cell_rows
2
LLCATS: GINF CINF
"""
return self._nrows - 2
@property
def number_of_patches(self):
"""Number of patches.
Returns the number of patches over the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_patches
12
LLCATS: PINF
"""
return (self._nrows - 1) * (self._ncols - 1)
@property
def _number_of_diagonal_links(self):
"""Number of diagonal links.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Returns the number of diagonal links (only) over the grid.
        If the diagonal links have not yet been created, an AssertionError
        is raised.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid._number_of_diagonal_links
Traceback (most recent call last):
...
AssertionError: No diagonal links have been created in the grid yet!
>>> _ = grid._diagonal_links_at_node
>>> grid._number_of_diagonal_links
24
LLCATS: GINF LINF
"""
assert self._diagonal_links_created, \
"No diagonal links have been created in the grid yet!"
return 2 * self.number_of_patches
@property
@make_return_array_immutable
def _diag_link_dirs_at_node(self):
"""
Link flux directions at each diagonal node: 1=incoming flux,
        -1=outgoing flux, 0=no link present.
Returns
-------
(NODES, 4) ndarray of int
Diagonal link directions relative to the nodes of a grid.
A zero indicates no link at this position.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 3))
>>> grid._create_diag_links_at_node()
>>> grid.status_at_node[grid.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> grid._diag_link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[-1, 0, 0, 0], [-1, -1, 0, 0], [ 0, -1, 0, 0],
[-1, 0, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 0],
[-1, 0, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 0],
[ 0, 0, 0, 1], [ 0, 0, 1, 1], [ 0, 0, 1, 0]])
"""
return self._diag__link_dirs_at_node
@property
@make_return_array_immutable
def _diag_active_link_dirs_at_node(self):
"""
Link flux directions at each diagonal node: 1=incoming flux,
-1=outgoing flux, 0=no flux. Note that inactive links receive zero.
Returns
-------
(NODES, 4) ndarray of int
Diagonal link directions relative to the nodes of a grid.
A zero indicates no link at this position OR that the link at that
position is inactive.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 3))
>>> grid._create_diag_links_at_node()
>>> grid.status_at_node[grid.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> grid._diag_active_link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 0, 0, 0], [ 0, 0, 0, 0], [ 0, -1, 0, 0],
[ 0, 0, 0, 0], [-1, 0, 0, 1], [ 0, -1, 0, 0],
[ 0, 0, 0, 0], [-1, 0, 0, 1], [ 0, 0, 1, 0],
[ 0, 0, 0, 0], [ 0, 0, 0, 0], [ 0, 0, 1, 0]])
"""
return self._diag__active_link_dirs_at_node
@property
@deprecated(use='dx', version='0.5')
def node_spacing(self):
"""Spacing betweem node rows and columns.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.node_spacing
1.0
>>> grid = RasterModelGrid((4, 5), 3.0)
>>> grid.node_spacing
3.0
LLCATS: DEPR GINF NINF MEAS
"""
if self._dx != self._dy:
raise RuntimeError('dx and dy are not the same')
return self._dx
@property
@deprecated(use='nodes_at_corners_of_grid', version=1.0)
def corner_nodes(self):
"""
LLCATS: DEPR GINF NINF SUBSET
"""
return self.nodes_at_corners_of_grid
@property
def nodes_at_corners_of_grid(self):
"""Get array of the nodes in grid corners.
Return the IDs to the corner nodes of the grid, sorted by ID.
Returns
-------
(4, ) ndarray
Array of corner node IDs.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.nodes_at_corners_of_grid
array([ 0, 4, 15, 19])
LLCATS: GINF NINF SUBSET
"""
return sgrid.corners((self._nrows, self._ncols))
@property
@deprecated(use='cells_at_corners_of_grid', version=1.0)
def corner_cells(self):
"""
LLCATS: DEPR GINF CINF SUBSET
"""
return self.cells_at_corners_of_grid
@property
def cells_at_corners_of_grid(self):
"""Get array of cells in cellular grid (grid with only cells) corners.
Return the IDs to the corner cells of the cellular grid, sorted by ID.
Returns
-------
(4, ) ndarray
            Array of corner cell IDs.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.cells_at_corners_of_grid
array([0, 2, 3, 5])
LLCATS: GINF CINF SUBSET
"""
return sgrid.corners(self.cell_grid_shape)
def is_point_on_grid(self, xcoord, ycoord):
"""Check if a point is on the grid.
This method takes x, y coordinates and tests whether they lie within
the grid. The limits of the grid are taken to be links connecting the
boundary nodes. We perform a special test to detect looped boundaries.
Coordinates can be ints or arrays of ints. If arrays, will return an
array of the same length of boolean truth values.
Parameters
----------
xcoord : float or array_like
The point's x-coordinate.
ycoord : float or array_like
The point's y-coordinate.
Returns
-------
bool
``True`` if the point is on the grid. Otherwise, ``False``.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5), spacing=(2, 1))
>>> grid.is_point_on_grid(1, 1)
True
>>> grid.is_point_on_grid((1, 1, 1,), (1, 3.1, 6.1))
array([ True, True, False], dtype=bool)
>>> grid.is_point_on_grid((-.1, .1, 3.9, 4.1), (1, 1, 1, 1))
array([False, True, True, False], dtype=bool)
LLCATS: GINF MEAS SUBSET
"""
xcoord, ycoord = np.asarray(xcoord), np.asarray(ycoord)
x_condition = (xcoord > 0.) & (xcoord < (self.shape[1] - 1) * self.dx)
y_condition = (ycoord > 0.) & (ycoord < (self.shape[0] - 1) * self.dy)
if np.all(self._node_status[self.nodes_at_left_edge] == 3) or np.all(
self._node_status[self.nodes_at_right_edge] == 3):
try:
x_condition[:] = 1
except:
x_condition = 1
if np.all(self._node_status[self.nodes_at_top_edge] == 3) or np.all(
self._node_status[self.nodes_at_bottom_edge] == 3):
try:
y_condition[:] = 1
except:
y_condition = 1
return x_condition & y_condition
def nodes_around_point(self, xcoord, ycoord):
"""Get the nodes surrounding a point.
Return IDs of the four nodes of the area around a point with
coordinates *xcoord*, *ycoord*. Node IDs are returned
counter-clockwise order starting from the southwest node.
If either *xcoord* or *ycoord* are arrays the usual numpy broadcasting
rules apply.
Parameters
----------
xcoord : float, array-like
x-coordinate of point
ycoord : float, array-like
y-coordinate of point
Returns
-------
(4, N) ndarray
IDs of nodes around the point.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.nodes_around_point(.4, 1.2)
array([4, 8, 9, 5])
>>> grid.nodes_around_point([.9, 1.1], 1.2)
array([[ 4, 5],
[ 8, 9],
[ 9, 10],
[ 5, 6]])
>>> grid = RasterModelGrid((3, 4), spacing=(2, 1))
>>> grid.nodes_around_point(.5, 1.5)
array([0, 4, 5, 1])
>>> grid = RasterModelGrid((3, 4))
>>> grid.nodes_around_point(.5, 1.5)
array([4, 8, 9, 5])
LLCATS: NINF SUBSET
"""
xcoord, ycoord = np.broadcast_arrays(xcoord, ycoord)
# Method added 4/29/13 by DEJH, modified 9/24/13.
id_ = (ycoord // self._dy * self.number_of_node_columns +
xcoord // self._dx)
try:
id_ = int(id_)
except:
id_ = as_id_array(id_)
return np.array([id_, id_ + self.number_of_node_columns,
id_ + self.number_of_node_columns + 1, id_ + 1])
@deprecated(use='find_nearest_node', version='0.2')
def snap_coords_to_grid(self, xcoord, ycoord):
"""Snap coordinates to the nearest node.
This method takes existing coordinates, inside the grid, and returns
the ID of the closest grid node. That node can be a boundary node.
LLCATS: DEPR NINF SUBSET
"""
# DEJH, 9/24/13.
# This testing suppressed for speed. While suppressed, coordinates
# provided MUST be within the grid or silent instability will occur.
# if type(xcoord) == int:
# if not self.is_point_on_grid(xcoord, ycoord):
# raise LookupError(
# 'Coordinates specified are outside the grid area')
# else: #it's an array
# if not np.all(self.is_point_on_grid(xcoord, ycoord)):
# raise LookupError(
# 'One or more pairs of coordinates specified are outside '
# 'the grid area')
vertices_array = self.nodes_around_point(xcoord, ycoord)
# vertices_array.reshape((4,-1))
xdir_displacement = np.tile(
xcoord, (4, 1)) - self.node_x[vertices_array]
ydir_displacement = np.tile(
ycoord, (4, 1)) - self.node_y[vertices_array]
distances_to_vertices = np.sqrt(
xdir_displacement * xdir_displacement +
ydir_displacement * ydir_displacement)
try:
return vertices_array[(np.argmin(distances_to_vertices, axis=0),
range(distances_to_vertices.shape[1]))]
except:
return vertices_array[np.argmin(distances_to_vertices)]
# ...per fancy indexing
def find_nearest_node(self, coords, mode='raise'):
"""Node nearest a point.
Find the index to the node nearest the given x, y coordinates.
Coordinates are provided as numpy arrays in the *coords* tuple.
Use the *mode* keyword to specify what to do if the given coordinates
are out-of-bounds. See :func:`np.ravel_multi_index` for a
description of possible values for *mode*. Note that a coordinate is
out-of-bounds if it is beyond one half the node spacing from the
exterior nodes.
Returns the indices of the nodes nearest the given coordinates.
Parameters
----------
coords : tuple of array-like
Coordinates of points.
mode : {'raise', 'wrap', 'clip'}, optional
What to do if a point is off the grid.
Returns
-------
array-like
IDs of the nearest nodes.
Notes
-----
For coordinates that are equidistant to two or more nodes, see
the rounding rules for :func:`numpy.around`.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.find_nearest_node([0.2, 0.2])
0
>>> rmg.find_nearest_node((np.array([1.6, 3.6]), np.array([2.3, .7])))
array([12, 9])
>>> rmg.find_nearest_node((-.4999, 1.))
5
LLCATS: NINF SUBSET
"""
return rfuncs.find_nearest_node(self, coords, mode=mode)
@property
def length_of_link(self):
"""Get lengths of links.
Return the link lengths in the grid, as a nlinks-long array.
Returns
-------
(4, N) ndarray
Link lengths.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3), spacing=(3, 4))
>>> grid.length_of_link
array([ 4., 4., 3., 3., 3., 4., 4., 3., 3., 3., 4., 4.])
        Since LL version 1, this method is unaffected by the existence or
otherwise of diagonal links on the grid:
>>> grid = RasterModelGrid((3, 3), spacing=(4, 3))
>>> _ = grid._diagonal_links_at_node
>>> grid.length_of_link # doctest: +NORMALIZE_WHITESPACE
array([ 3., 3., 4., 4., 4., 3., 3., 4., 4., 4., 3., 3.])
LLCATS: LINF MEAS
"""
        if self._link_length is None:
            self._create_length_of_link()
        return self._link_length[:self.number_of_links]
@property
def _length_of_link_with_diagonals(self):
"""Get lengths of links, with diagonal IDs following orthogonal IDs.
Return the link lengths in the grid, as a nlinks-plus-ndiagonallinks-
long array, if diagonals are already present. This method *does* test
if diagonal links are present in the grid already; if they are,
returns a longer array where the orthogonal links are listed first,
in ID order, then the diagonal links (i.e., diagonal
links have effective ID numbers which count up from the number of
orthogonal links).
If diagonals have not been created, returns the same array as
:func:`length_of_link`.
Returns
-------
(4, N) ndarray
Link lengths.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3), spacing=(3, 4))
>>> grid.length_of_link
array([ 4., 4., 3., 3., 3., 4., 4., 3., 3., 3., 4., 4.])
>>> grid._length_of_link_with_diagonals # doctest: +NORMALIZE_WHITESPACE
array([ 4., 4., 3., 3., 3.,
4., 4., 3., 3., 3.,
4., 4., 5., 5., 5.,
5., 5., 5., 5., 5.])
LLCATS: LINF MEAS
"""
if self._link_length is None:
return self._create_length_of_link()
else:
return self._link_length
def _create_length_of_link(self):
"""Calculate link lengths for a raster grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), spacing=(2, 3))
>>> grid._create_length_of_link()[
... :grid.number_of_links] # doctest: +NORMALIZE_WHITESPACE
array([ 3., 3., 3.,
2., 2., 2., 2.,
3., 3., 3.,
2., 2., 2., 2.,
3., 3., 3.])
>>> grid = RasterModelGrid((3, 3), spacing=(1, 2))
>>> grid._create_length_of_link() # doctest: +NORMALIZE_WHITESPACE
array([ 2. , 2. , 1. , 1. ,
1. , 2. , 2. , 1. ,
1. , 1. , 2. , 2. ,
2.23606798, 2.23606798, 2.23606798, 2.23606798,
2.23606798, 2.23606798, 2.23606798, 2.23606798])
Notes
-----
        We initially set all orthogonal link lengths to dx, then overwrite the
        vertical links with dy. Diagonal links are assigned
        sqrt(dx**2 + dy**2).
"""
if self._link_length is None:
self._create_diag_links_at_node()
self._link_length = np.empty(
self.number_of_links + self._number_of_diagonal_links)
self._link_length[self.number_of_links:] = np.sqrt(
self._dy ** 2. + self._dx ** 2.)
vertical_links = squad_links.vertical_link_ids(self.shape)
self._link_length[:self.number_of_links] = self.dx
self._link_length[vertical_links] = self._dy
return self._link_length
def _d8_active_links(self):
"""Get active links, including diagonals.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Return a set of active links that include diagonal connections between
grid cells, for use with link-based water-routing schemes.
Diagonal links are listed sequentially after the *regular* orthogonal
links in the return arrays.
Returns
-------
tuple of arrays
Tuple of (link_ids, link_from_nodes, link_to_nodes)
Notes
-----
        Calling this method also means that the individual arrays of diagonal
links and their from- and tonodes are held as properties of the class
instance (see return line below).
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> (links, from_nodes, to_nodes) = grid._d8_active_links()
>>> links
array([ 3, 5, 6, 8, 12, 15, 17, 18])
>>> from_nodes
array([1, 3, 4, 4, 0, 2, 4, 4])
>>> to_nodes
array([4, 4, 5, 7, 4, 4, 6, 8])
LLCATS: LINF NINF GEOM BC
"""
if not self._diagonal_links_created:
self._create_diag_links_at_node()
return (
np.concatenate((self.active_links, self._diag_active_links)),
np.concatenate((self._activelink_fromnode,
self._diag_activelink_fromnode)),
np.concatenate((self._activelink_tonode,
self._diag_activelink_tonode))
)
@property
@make_return_array_immutable
def _all_d8_active_links(self):
"""Return all the active links, both orthogonal and diagonal.
LLCATS: LINF NINF GEOM BC
"""
try:
return self._all__d8_active_links
except AttributeError:
            self._create_diag_links_at_node()
return self._all__d8_active_links
@property
@make_return_array_immutable
def _all_d8_inactive_links(self):
"""Return all the inactive links, both orthogonal and diagonal.
LLCATS: LINF NINF GEOM BC
"""
try:
return self._all__d8_inactive_links
except AttributeError:
            self._create_diag_links_at_node()
return self._all__d8_inactive_links
@deprecated(use='set_closed_boundaries_at_grid_edges', version='0.5')
def set_inactive_boundaries(self, right_is_inactive, top_is_inactive,
left_is_inactive, bottom_is_inactive):
"""Set boundary nodes to be inactive.
Handles boundary conditions by setting each of the four sides of the
rectangular grid to either 'inactive' or 'active (fixed value)' status.
        Arguments are booleans indicating whether the right, top, left, and
        bottom edges are inactive (True) or not (False).
For an inactive boundary:
* the nodes are flagged CLOSED_BOUNDARY (normally status type 4)
* the links between them and the adjacent interior nodes are
inactive (so they appear on link-based lists, but not
active_link-based lists)
This means that if you call the calc_grad_at_active_link
method, the inactive boundaries will be ignored: there can be no
gradients or fluxes calculated, because the links that connect to that
edge of the grid are not included in the calculation. So, setting a
grid edge to CLOSED_BOUNDARY is a convenient way to impose a no-flux
boundary condition. Note, however, that this applies to the grid as a
whole, rather than a particular variable that you might use in your
application. In other words, if you want a no-flux boundary in one
variable but a different boundary condition for another, then use
another method.
Examples
--------
The following example sets the top and left boundaries as inactive in a
four-row by five-column grid that initially has all boundaries active
and all boundary nodes coded as FIXED_VALUE_BOUNDARY (=1):
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0) # rows, columns, spacing
>>> rmg.number_of_active_links
17
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
dtype=int8)
>>> rmg.set_inactive_boundaries(False, True, True, False)
>>> rmg.number_of_active_links
12
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 4, 0, 0, 0, 1, 4, 0, 0, 0, 1, 4, 4, 4, 4, 4],
dtype=int8)
Notes
-----
The four corners are treated as follows:
- bottom left = BOTTOM
- bottom right = BOTTOM
- top right = TOP
- top left = TOP
This scheme is necessary for internal consistency with looped
boundaries.
LLCATS: DEPR BC SUBSET
"""
if self._DEBUG_TRACK_METHODS:
six.print_('ModelGrid.set_inactive_boundaries')
bottom_edge = range(0, self.number_of_node_columns)
right_edge = range(2 * self.number_of_node_columns - 1,
self.number_of_nodes - 1,
self.number_of_node_columns)
top_edge = range((self.number_of_node_rows - 1) *
self.number_of_node_columns, self.number_of_nodes)
left_edge = range(self.number_of_node_columns,
self.number_of_nodes - self.number_of_node_columns,
self.number_of_node_columns)
if bottom_is_inactive:
self._node_status[bottom_edge] = CLOSED_BOUNDARY
else:
self._node_status[bottom_edge] = FIXED_VALUE_BOUNDARY
if right_is_inactive:
self._node_status[right_edge] = CLOSED_BOUNDARY
else:
self._node_status[right_edge] = FIXED_VALUE_BOUNDARY
if top_is_inactive:
self._node_status[top_edge] = CLOSED_BOUNDARY
else:
self._node_status[top_edge] = FIXED_VALUE_BOUNDARY
if left_is_inactive:
self._node_status[left_edge] = CLOSED_BOUNDARY
else:
self._node_status[left_edge] = FIXED_VALUE_BOUNDARY
self._update_links_nodes_cells_to_new_BCs()
def set_closed_boundaries_at_grid_edges(self, right_is_closed,
top_is_closed,
left_is_closed,
bottom_is_closed):
"""Set boundary not to be closed.
Sets the status of nodes along the specified side(s) of a raster
grid (bottom, right, top, and/or left) to ``CLOSED_BOUNDARY``.
Arguments are booleans indicating whether the bottom, left, top, and
right are closed (``True``) or not (``False``).
For a closed boundary:
* the nodes are flagged ``CLOSED_BOUNDARY`` (status type 4)
* all links that connect to a ``CLOSED_BOUNDARY`` node are
flagged as inactive (so they appear on link-based lists, but
not active_link-based lists)
This means that if you call the calc_grad_at_active_link
method, links connecting to closed boundaries will be ignored: there
can be no gradients or fluxes calculated, because the links that
connect to that edge of the grid are not included in the calculation.
So, setting a grid edge to CLOSED_BOUNDARY is a convenient way to
impose a no-flux boundary condition. Note, however, that this applies
to the grid as a whole, rather than a particular variable that you
might use in your application. In other words, if you want a no-flux
boundary in one variable but a different boundary condition for
another, then use another method.
This method is a replacement for the now-deprecated method
set_inactive_boundaries(). Unlike that method, this one ONLY sets nodes
to CLOSED_BOUNDARY; it does not set any nodes to FIXED_VALUE_BOUNDARY.
Parameters
----------
right_is_closed : boolean
If ``True`` right-edge nodes are closed boundaries.
top_is_closed : boolean
If ``True`` top-edge nodes are closed boundaries.
left_is_closed : boolean
If ``True`` left-edge nodes are closed boundaries.
bottom_is_closed : boolean
If ``True`` bottom-edge nodes are closed boundaries.
Notes
-----
Note that the four corners are treated as follows:
* bottom left = BOTTOM
* bottom right = BOTTOM
* top right = TOP
* top left = TOP
This scheme is necessary for internal consistency with looped
boundaries.
Examples
--------
        The following example sets the top and right boundaries as closed in a
four-row by five-column grid that initially has all boundaries open
and all boundary nodes coded as FIXED_VALUE_BOUNDARY (=1):
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0) # rows, columns, spacing
>>> rmg.number_of_active_links
17
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
dtype=int8)
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, False, False)
>>> rmg.number_of_active_links
12
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 1, 0, 0, 0, 4, 1, 0, 0, 0, 4, 4, 4, 4, 4, 4],
dtype=int8)
LLCATS: BC SUBSET
"""
if self._DEBUG_TRACK_METHODS:
six.print_('ModelGrid.set_closed_boundaries_at_grid_edges')
bottom_edge = range(0, self.number_of_node_columns)
right_edge = range(2 * self.number_of_node_columns - 1,
self.number_of_nodes - 1,
self.number_of_node_columns)
top_edge = range((self.number_of_node_rows - 1) *
self.number_of_node_columns, self.number_of_nodes)
left_edge = range(self.number_of_node_columns,
self.number_of_nodes - self.number_of_node_columns,
self.number_of_node_columns)
if bottom_is_closed:
self._node_status[bottom_edge] = CLOSED_BOUNDARY
if right_is_closed:
self._node_status[right_edge] = CLOSED_BOUNDARY
if top_is_closed:
self._node_status[top_edge] = CLOSED_BOUNDARY
if left_is_closed:
self._node_status[left_edge] = CLOSED_BOUNDARY
self._update_links_nodes_cells_to_new_BCs()
def set_fixed_value_boundaries_at_grid_edges(
self, right_is_fixed_val, top_is_fixed_val, left_is_fixed_val,
bottom_is_fixed_val, value=None,
value_of='topographic__elevation'):
"""Create fixed values boundaries.
Sets the status of nodes along the specified side(s) of a raster
grid---bottom, right, top, and/or left---to FIXED_VALUE_BOUNDARY.
Arguments are booleans indicating whether the bottom, right, top, and
left sides are to be set (True) or not (False).
*value* controls what values are held constant at these nodes. It can
be either a float, an array of length number_of_fixed_nodes or
number_of_nodes (total), or left blank. If left blank, the values will
be set from the those already in the grid fields, according to
'value_of'.
*value_of* controls the name of the model field that contains the
values. Remember, if you don't set value, the fixed values will be set
from the field values ***at the time you call this method***. If no
values are present in the field, the module will complain but accept
this, warning that it will be unable to automatically update boundary
conditions (and such methods, e.g.,
``RasterModelGrid.update_boundary_nodes()``, will raise exceptions
if you try).
The status of links (active or inactive) is automatically updated to
reflect the changes.
        The following example sets the top and right boundaries as
fixed-value in a four-row by five-column grid that initially has all
boundaries closed (i.e., flagged as node_status=4):
Parameters
----------
bottom_is_fixed_val : boolean
Set bottom edge as fixed boundary.
left_is_fixed_val : boolean
Set left edge as fixed boundary.
top_is_fixed_val : boolean
Set top edge as fixed boundary.
right_is_fixed_val : boolean
Set right edge as fixed boundary.
value : float, array or None (default).
Override value to be kept constant at nodes.
value_of : string.
The name of the grid field containing the values of interest.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), spacing=(1, 1))
>>> rmg.number_of_active_links
17
Put some arbitrary values in the grid fields:
>>> import numpy as np
>>> rmg.at_node['topographic__elevation'] = np.random.rand(20)
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 4,
4, 0, 0, 0, 4,
4, 4, 4, 4, 4], dtype=int8)
>>> rmg.set_fixed_value_boundaries_at_grid_edges(
... True, True, False, False)
>>> rmg.number_of_active_links
12
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 1,
4, 0, 0, 0, 1,
1, 1, 1, 1, 1], dtype=int8)
Note that the four corners are treated as follows:
* bottom left = BOTTOM
* bottom right = BOTTOM
* top right = TOP
* top left = TOP
This scheme is necessary for internal consistency with looped
boundaries.
LLCATS: BC SUBSET
"""
if self._DEBUG_TRACK_METHODS:
            six.print_('ModelGrid.set_fixed_value_boundaries_at_grid_edges')
bottom_edge = range(0, self.number_of_node_columns)
right_edge = range(2 * self.number_of_node_columns - 1,
self.number_of_nodes - 1,
self.number_of_node_columns)
top_edge = range((self.number_of_node_rows - 1) *
self.number_of_node_columns, self.number_of_nodes)
left_edge = range(self.number_of_node_columns,
self.number_of_nodes - self.number_of_node_columns,
self.number_of_node_columns)
if bottom_is_fixed_val:
self._node_status[bottom_edge] = FIXED_VALUE_BOUNDARY
if right_is_fixed_val:
self._node_status[right_edge] = FIXED_VALUE_BOUNDARY
if top_is_fixed_val:
self._node_status[top_edge] = FIXED_VALUE_BOUNDARY
if left_is_fixed_val:
self._node_status[left_edge] = FIXED_VALUE_BOUNDARY
self._update_links_nodes_cells_to_new_BCs()
# save some internal data to speed updating:
self.fixed_value_node_properties = {}
self.fixed_value_node_properties['boundary_node_IDs'] = as_id_array(
np.where(self._node_status == FIXED_VALUE_BOUNDARY)[0])
        if value is not None:
if type(value) == float or type(value) == int:
values_to_use = float(value)
elif type(value) == np.ndarray:
if value.size == self.fixed_value_node_properties[
'boundary_node_IDs'].size:
values_to_use = value
elif value.size == self.number_of_nodes:
values_to_use = value.take(
self.fixed_value_node_properties['boundary_node_IDs'])
else:
                    raise TypeError(
                        "'value' must have a size equal to either the "
                        "number of fixed-value boundary nodes or the total "
                        "number of nodes!")
else:
try:
values_to_use = self.at_node[value_of].take(
self.fixed_value_node_properties['boundary_node_IDs'])
except FieldError:
pass # we catch this case below
else:
# set a flag to indicate successful setting of internal values
self.fixed_value_node_properties['internal_flag'] = True
if not self.has_field('node', value_of):
six.print_("""
*************************************************
WARNING: set_fixed_value_boundaries_at_grid_edges
has not been provided with a grid field name to
allow internal boundary condition control. You
will not be able to automate BC control with grid
methods like update_boundary_nodes()!
Not expecting this error? Try calling this method
after loading the starting conditions into the
grid fields.
*************************************************
""")
# set a flag to indicate no internal values
self.fixed_value_node_properties['internal_flag'] = False
else:
self.fixed_value_node_properties['internal_flag'] = True
self.fixed_value_node_properties['fixed_value_of'] = value_of
try:
self.fixed_value_node_properties['values'] = values_to_use
except NameError:
pass # the flag will catch this case
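    # When no explicit *value* is passed, the method above snapshots the
    # current field values at the fixed-value nodes, which is what lets
    # update_boundary_nodes() restore them later. A hedged sketch (the field
    # contents are arbitrary):
    #
    #     grid = RasterModelGrid((4, 5))
    #     z = grid.add_zeros('topographic__elevation', at='node')
    #     z += 1.5
    #     grid.set_fixed_value_boundaries_at_grid_edges(
    #         True, True, True, True)  # snapshots 1.5 at the boundary nodes
    #     z[grid.fixed_value_node_properties['boundary_node_IDs']] = 99.
    #     grid.update_boundary_nodes()  # restores the stored 1.5 values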
def set_looped_boundaries(self, top_bottom_are_looped, sides_are_looped):
"""Create wrap-around boundaries.
Handles boundary conditions by setting corresponding parallel grid
edges as looped "tracks_cell" (==3) status, linked to each other.
If top_bottom_are_looped is True, the top and bottom edges will link
to each other. If sides_are_looped is True, the left and right edges
will link to each other.
Looped boundaries are experimental, and not as yet well integrated into
the Landlab framework. Many functions may not recognise them, or
silently create unforeseen errors. Use at your own risk!
Note that because of the symmetries this BC implies, the corner nodes
are all paired with the bottom/top edges, not the sides.
Parameters
----------
top_bottom_are_looped : bool
Top and bottom are wrap-around.
sides_are_looped : bool
Left and right sides are wrap-around.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0) # rows, columns, spacing
>>> rmg.number_of_active_links
17
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1],
dtype=int8)
>>> rmg.add_zeros('topographic__elevation', at='node')
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0.])
>>> rmg.set_looped_boundaries(True, True)
>>> rmg.looped_node_properties['boundary_node_IDs']
array([ 0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 16, 17, 18, 19])
>>> rmg.looped_node_properties['linked_node_IDs']
array([10, 11, 12, 13, 14, 8, 6, 13, 11, 5, 6, 7, 8, 9])
LLCATS: BC SUBSET
"""
# Added DEJH Feb 2014
# TODO: Assign BC_statuses also to *links*
bottom_edge = np.array(range(0, self.number_of_node_columns))
right_edge = np.array(range(2 * self.number_of_node_columns - 1,
self.number_of_nodes - 1,
self.number_of_node_columns))
top_edge = np.array(
range((self.number_of_node_rows - 1) * self.number_of_node_columns,
self.number_of_nodes))
left_edge = np.array(range(self.number_of_node_columns,
(self.number_of_nodes -
self.number_of_node_columns),
self.number_of_node_columns))
these_boundary_IDs = np.array([])
these_linked_nodes = np.array([])
if top_bottom_are_looped:
self._node_status[bottom_edge] = LOOPED_BOUNDARY
self._node_status[top_edge] = LOOPED_BOUNDARY
these_boundary_IDs = np.concatenate((these_boundary_IDs,
bottom_edge, top_edge))
these_linked_nodes = np.concatenate((
these_linked_nodes,
top_edge - self.number_of_node_columns,
bottom_edge + self.number_of_node_columns))
if sides_are_looped:
self._node_status[right_edge] = LOOPED_BOUNDARY
self._node_status[left_edge] = LOOPED_BOUNDARY
these_boundary_IDs = np.concatenate((these_boundary_IDs,
left_edge, right_edge))
these_linked_nodes = np.concatenate((
these_linked_nodes,
right_edge - 1, left_edge + 1))
self._update_links_nodes_cells_to_new_BCs()
if not self.looped_node_properties:
existing_IDs = np.array([])
existing_links = np.array([])
else:
unrepeated_node_entries = np.logical_not(
np.in1d(self.looped_node_properties['boundary_node_IDs'],
these_linked_nodes))
existing_IDs = self.looped_node_properties[
'boundary_node_IDs'][unrepeated_node_entries]
existing_links = self.looped_node_properties[
'linked_node_IDs'][unrepeated_node_entries]
self.looped_node_properties = {}
all_the_IDs = np.concatenate((these_boundary_IDs, existing_IDs))
ID_ordering = np.argsort(all_the_IDs)
self.looped_node_properties['boundary_node_IDs'] = (
as_id_array(all_the_IDs[ID_ordering]))
self.looped_node_properties['linked_node_IDs'] = as_id_array(
np.concatenate((these_linked_nodes, existing_links))[ID_ordering])
if np.any(self._node_status[self.looped_node_properties[
'boundary_node_IDs']] == 2):
raise AttributeError(
'Switching a boundary between fixed gradient and looped will '
'result in bad BC handling! Bailing out...')
@deprecated(use='_update_links_nodes_cells_to_new_BCs', version=1.0)
def update_boundary_nodes(self):
"""Update the boundary nodes.
This method updates all the boundary nodes in the grid field on which
they are set (i.e., it updates the field
rmg.at_node[rmg.fixed_gradient_node_properties['fixed_gradient_of']]).
It currently works only with fixed value (type 1) and fixed gradient
(type 2) conditions. Looping must be handled internally to a component,
and is not dealt with here.
LLCATS: DEPR NINF BC
"""
try:
fixed_nodes = self.fixed_value_node_properties['boundary_node_IDs']
except AttributeError:
# no fixed value boundaries have been set
pass
else:
assert self.fixed_value_node_properties['internal_flag'], \
'Values were not supplied to the method that set the ' \
                'boundary conditions! You cannot update automatically!'
values_val = self.at_node[
self.fixed_value_node_properties['fixed_value_of']]
values_val[self.fixed_value_node_properties[
'boundary_node_IDs']] = self.fixed_value_node_properties[
'values']
try:
values_grad = self.at_node[
self.fixed_gradient_node_properties['fixed_gradient_of']]
values_grad[self.fixed_gradient_node_properties[
'boundary_node_IDs']] = (values_grad[
self.fixed_gradient_node_properties[
'anchor_node_IDs']] +
self.fixed_gradient_node_properties['values_to_add'])
except AttributeError:
# no fixed grad boundaries have been set
pass
# DEJH believes this needs deprecating, but it's pretty hard wired into
# the flow router. So I've restored it for now.
def _calculate_gradients_at_d8_active_links(self, node_values):
"""Calculate gradients over D8 active links.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Parameters
----------
node_values : ndarray
Values at nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((3, 4), spacing=(3, 4))
>>> z = np.array([3., 3., 3., 3.,
... 3., 3., 0., 0.,
... 3., 0., 0., 0.])
>>> grid._calculate_gradients_at_d8_active_links(z)
... # doctest: +NORMALIZE_WHITESPACE
array([ 0. , -1. , 0. , -0.75, 0. , -1. , 0. , 0. , -0.6 ,
0. , -0.6 , 0. , -0.6 , 0. , 0. ])
LLCATS: LINF GRAD
"""
(active_links, _, _) = self._d8_active_links()
diagonal_links = squad_links.is_diagonal_link(self.shape, active_links)
active_links = active_links[~ diagonal_links]
vertical_links = squad_links.is_vertical_link(
self.shape, active_links)
horizontal_links = squad_links.is_horizontal_link(
self.shape, active_links)
diffs = (node_values[self._activelink_tonode] -
node_values[self._activelink_fromnode])
diffs[vertical_links] /= self.dy
diffs[horizontal_links] /= self.dx
diag_dist = np.sqrt(self.dy ** 2. + self.dx ** 2.)
diagonal_link_slopes = (
(node_values[self._diag_activelink_tonode] -
node_values[self._diag_activelink_fromnode]) / diag_dist)
return np.concatenate((diffs, diagonal_link_slopes))
def _calculate_gradients_at_d8_links(self, node_values):
"""Calculate gradients over all D8 links.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Parameters
----------
node_values : ndarray
Values at nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((3, 4), spacing=(3, 4))
>>> z = np.array([3., 3., 3., 3.,
... 3., 3., 0., 0.,
... 3., 0., 0., 0.])
>>> grid._calculate_gradients_at_d8_links(z)
... # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 0. , -0.6, 0. , -0.6, -0.6, -0.6, 0. , -0.6, 0. , 0. ,
0. ])
LLCATS: LINF GRAD
"""
self._create_diag_links_at_node()
diag_dist = np.sqrt(self.dy ** 2. + self.dx ** 2.)
diagonal_link_slopes = (
(node_values[self._diag_link_tonode] -
node_values[self._diag_link_fromnode]) / diag_dist)
return diagonal_link_slopes
@deprecated(use='calc_flux_div_at_node', version=1.0)
def calculate_flux_divergence_at_nodes(self, active_link_flux, out=None):
"""Flux divergence at nodes.
Same as calculate_flux_divergence_at_active_cells, but works with and
returns a list of net unit fluxes that corresponds to all nodes, rather
than just active cells.
Note that we DO compute net unit fluxes at boundary nodes (even though
these don't have active cells associated with them, and often don't
have cells of any kind, because they are on the perimeter). It's up to
the user to decide what to do with these boundary values.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
>>> u = [0., 1., 2., 3., 0.,
... 1., 2., 3., 2., 3.,
... 0., 1., 2., 1., 2.,
... 0., 0., 2., 2., 0.]
>>> u = np.array(u)
>>> grad = rmg.calc_grad_at_link(u)[rmg.active_links]
>>> grad
array([ 1., 1., -1., 1., 1., -1., 1., -1., -1., -1., 1., 1., -1.,
1., -1., 0., 1.])
>>> flux = -grad # downhill flux proportional to gradient
>>> df = rmg.calculate_flux_divergence_at_nodes(flux)
>>> df
array([ 0., -1., -1., 1., 0., -1., 2., 4., -2., 1., -1., 0., 1.,
-4., 1., 0., -1., 0., 1., 0.])
        If calculate_flux_divergence_at_nodes is called inside a loop, you can
improve speed by creating an array outside the loop. For example, do
this once, before the loop:
>>> df = rmg.zeros(centering='node') # outside loop
>>> rmg.number_of_nodes
20
Then do this inside the loop:
>>> df = rmg.calculate_flux_divergence_at_nodes(flux, df)
In this case, the function will not have to create the df array.
LLCATS: DEPR NINF GRAD
"""
if out is None:
out = self.zeros(at='node')
return rfuncs.calculate_flux_divergence_at_nodes(
self, active_link_flux, out=out)
@deprecated(use='calc_flux_div_at_node', version=1.0)
def calculate_flux_divergence(self, q, id):
"""Flux divergence.
        Candidate for deprecation, DEJH 5/14
.. todo:: UPDATE THIS TO USE NEW DATA STRUCTURES!
This is like calculate_flux_divergences (plural!), but only does
it for cell "id".
LLCATS: DEPR NINF GRAD
"""
if self._DEBUG_TRACK_METHODS:
            six.print_('RasterModelGrid.calculate_flux_divergence here with '
                       'cell ' + str(id))
            six.print_('q: ' + str(q[self.faces[id, 0:4]]))
fd = (
(q[self.faces[id, 0]] - q[self.faces[id, 2]]) / self.dx +
(q[self.faces[id, 1]] - q[self.faces[id, 3]]) / self.dy
)
return fd
@deprecated(use='set_closed_boundaries_at_grid_edges', version='0.1')
def update_noflux_boundaries(self, u, bc=None):
"""Deprecated.
Sets the value of u at all noflux boundary cells equal to the
value of their interior neighbors, as recorded in the
"boundary_nbrs" array.
LLCATS: DEPR BC
"""
if bc is None:
bc = self.default_bc
        inds = (bc.boundary_code == bc.LOOPED_BOUNDARY)
u[self.boundary_cells[inds]] = u[bc.tracks_cell[inds]]
return u
def node_vector_to_raster(self, u, flip_vertically=False):
"""Unravel an array of node values.
Converts node vector *u* to a 2D array and returns it, so that it
can be plotted, output, etc.
If the *flip_vertically* keyword is True, this function returns an
array that has the rows in reverse order. This is useful for use in
plot commands (such as the image display functions) that puts the
first row at the top of the image. In the landlab coordinate system,
the first row is thought to be at the bottom. Thus, a flipped matrix
will plot in the landlab style with the first row at the bottom.
The returned array is a view of *u*, not a copy.
See also
--------
RasterModelGrid.nodes
An equivalent property, but without the option to flip the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
>>> u = rmg.zeros(centering='node')
>>> u = u + range(0, len(u))
>>> u # doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
11., 12., 13., 14., 15., 16., 17., 18., 19.])
>>> ur = rmg.node_vector_to_raster(u)
>>> ur
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.],
[ 10., 11., 12., 13., 14.],
[ 15., 16., 17., 18., 19.]])
>>> ur = rmg.node_vector_to_raster(u, flip_vertically=True)
>>> ur
array([[ 15., 16., 17., 18., 19.],
[ 10., 11., 12., 13., 14.],
[ 5., 6., 7., 8., 9.],
[ 0., 1., 2., 3., 4.]])
LLCATS: GINF NINF
"""
return sgrid.reshape_array(self.shape, u,
flip_vertically=flip_vertically)
def cell_vector_to_raster(self, u, flip_vertically=False):
"""Unravel a 1D array.
Converts cell vector u to a 2D array and returns it,
so that it can be plotted, output, etc.
If the optional argument flip_vertically=True, the function returns an
array that has the rows in reverse order, for use in plot commands
(such as the image display functions) that put the (0,0) axis at the
top left instead of the bottom left.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
>>> u = rmg.zeros(centering='cell')
>>> u = u + range(0, len(u))
>>> u
array([ 0., 1., 2., 3., 4., 5.])
>>> ur = rmg.cell_vector_to_raster(u)
>>> ur
array([[ 0., 1., 2.],
[ 3., 4., 5.]])
>>> ur = rmg.cell_vector_to_raster(u, flip_vertically=True)
>>> ur
array([[ 3., 4., 5.],
[ 0., 1., 2.]])
LLCATS: GINF CINF
"""
return sgrid.reshape_array((self.shape[0] - 2, self.shape[1] - 2),
u, flip_vertically=flip_vertically)
def roll_nodes_ud(self, data_name, shift, interior_only=False):
"""Roll (shift) specified data on nodes up or down in a raster grid.
Similar to the Numpy roll() function, in that it shifts node values up
by *shift* rows, wrapping the data in the top row(s) around to the
bottom. If the *interior_only* is set, data along the left and right
grid edges are not changed.
Note that the contents of the *data_name* field are changed.
Parameters
----------
data_name : string
Name of node-data item attached to grid.
shift : int
Number of rows to shift upward.
interior_only : bool, optional
If True, data along left and right edges are not shifted
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 3), 1.)
>>> data = rmg.add_zeros('test_data', at='node')
>>> data[:] = np.arange(12)
>>> rmg.roll_nodes_ud('test_data', 1)
>>> data # doctest: +NORMALIZE_WHITESPACE
array([ 9., 10., 11., 0., 1., 2., 3., 4., 5., 6., 7.,
8.])
>>> rmg.roll_nodes_ud('test_data', 2)
>>> data # doctest: +NORMALIZE_WHITESPACE
array([ 3., 4., 5., 6., 7., 8., 9., 10., 11., 0., 1.,
2.])
>>> rmg.roll_nodes_ud('test_data', 1, interior_only=True)
>>> data # doctest: +NORMALIZE_WHITESPACE
array([ 3., 1., 5., 6., 4., 8., 9., 7., 11., 0., 10.,
2.])
LLCATS: NINF
"""
# Get the data
data = self.at_node[data_name]
# Get the IDs of the nodes in the top row, and number of rows and cols
top_ids = self.nodes_at_top_edge
ncols = self.number_of_node_columns
nrows = self.number_of_node_rows
# To handle "interior only" option, we use the variable *offset*,
# which is zero if shifting everything, and 1 if shifting just the
# interior -- we use this to go from column 1 to column N-2 (instead
# of 0 to N-1) when interior_only is True.
if interior_only:
offset = 1
top_ids = top_ids[1:ncols - 1]
else:
offset = 0
# Remember the top N rows
top_rows_to_move = np.zeros((shift, ncols - 2 * offset))
for i in range(0, shift):
top_rows_to_move[shift - (i + 1), :] = data[top_ids - i * ncols]
# Go row by row, starting from top
for i in range(nrows - shift):
to_row = nrows - (i + 1)
from_row = to_row - shift
data[ncols * to_row + offset:ncols * (to_row + 1) - offset] = \
data[ncols * from_row + offset:ncols * (from_row + 1) - offset]
# now replace the bottom *shift* rows
for i in range(0, shift):
data[ncols * i + offset:ncols *
(i + 1) - offset] = top_rows_to_move[i, :]
def _create_neighbor_list(self, bad_index=BAD_INDEX_VALUE):
"""Create list of neighbor node IDs.
Creates a list of IDs of neighbor nodes for each node, as a
2D array. Only record neighbor nodes that are on the other end of an
*active* link. Nodes attached to *inactive* links or neighbor nodes
that would be outside of the grid are given an ID of
:const:`~landlab.grid.base.BAD_INDEX_VALUE`.
Neighbors are ordered as [*right*, *top*, *left*, *bottom*].
"""
# assert(self.neighbor_list_created == False)
# this method can now be called to create multiple neighbor lists with
# different BAD_INDEX_VALUES
        # note self.neighbor_nodes is no longer created... but nobody should be
        # calling it directly anyway.
neighbor_nodes = sgrid.neighbor_node_array(
self.shape, closed_boundary_nodes=self.closed_boundary_nodes,
open_boundary_nodes=self.open_boundary_nodes,
inactive=bad_index).T
self.neighbor_list_created = True
return neighbor_nodes
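    # The neighbor ordering and BAD_INDEX_VALUE convention described above
    # can be seen on a small grid. A hedged sketch (output not reproduced):
    #
    #     from landlab import RasterModelGrid, BAD_INDEX_VALUE, CLOSED_BOUNDARY
    #     grid = RasterModelGrid((3, 4))
    #     grid.status_at_node[grid.nodes_at_right_edge] = CLOSED_BOUNDARY
    #     nbrs = grid._create_neighbor_list()
    #     # nbrs[6] lists node 6's neighbors as [right, top, left, bottom];
    #     # the entry across the now-inactive link toward the closed right
    #     # edge holds BAD_INDEX_VALUE
    #     nbrs[6]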
@deprecated(use='node_has_boundary_neighbor', version=1.0)
def has_boundary_neighbor(self, ids, method='d8'):
"""
LLCATS: DEPR NINF CONN BC
"""
return self.node_has_boundary_neighbor(ids, method=method)
def node_has_boundary_neighbor(self, ids, method='d8'):
"""Check if nodes have neighbors that are boundary nodes.
        Checks to see if any of the eight neighbor nodes of the node(s) with
        *ids* is a boundary node. Returns True if a node has at least one
        boundary neighbor, False if all of its neighbors are interior.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((5, 5))
>>> mg.node_has_boundary_neighbor(6)
True
>>> mg.node_has_boundary_neighbor(12)
False
>>> mg.node_has_boundary_neighbor([12, -1])
array([False, True], dtype=bool)
>>> mg.node_has_boundary_neighbor(25)
Traceback (most recent call last):
...
IndexError: index 25 is out of bounds for axis 0 with size 25
LLCATS: NINF CONN BC
"""
ans = _node_has_boundary_neighbor(self, ids, method=method)
if ans.ndim == 0:
return bool(ans)
else:
return ans
@deprecated(use='_diagonal_neighbors_at_node', version=1.0)
def _get_diagonal_list(self, *args, **kwds):
"""_get_diagonal_list([ids], bad_index=BAD_INDEX_VALUE)
Get list of diagonal node IDs.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
        methods like this may soon be superseded.
Return lists of diagonals nodes for nodes with given *ids*. If *ids*
is not given, return the diagonals for all of the nodes in the grid.
For each node, the list gives diagonal ids as [topright, topleft,
        bottomleft, bottomright]. All diagonals for boundary nodes are set
        to -1.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5))
>>> mg._get_diagonal_list([-1, 6])
array([[-1, -1, 13, -1],
[12, 10, 0, 2]])
>>> mg._get_diagonal_list(7)
array([13, 11, 1, 3])
.. todo:: could use inlink_matrix, outlink_matrix
LLCATS: NINF CONN
"""
# Added DEJH 051513
bad_index = kwds.get('bad_index', BAD_INDEX_VALUE)
try:
self.diagonal_node_dict
except AttributeError:
self.diagonal_node_dict = {}
self.diagonal_node_dict[
bad_index] = self._create_diagonal_list(bad_index=bad_index)
try:
diagonal_nodes = self.diagonal_node_dict[bad_index]
except KeyError:
diagonal_nodes = self._create_diagonal_list(bad_index=bad_index)
self.diagonal_node_dict[bad_index] = diagonal_nodes
if len(args) == 0:
return diagonal_nodes
elif len(args) == 1:
return diagonal_nodes[args[0], :]
else:
raise ValueError('only zero or one arguments accepted')
def _create_diagonal_list(self, bad_index=BAD_INDEX_VALUE):
"""Create list of diagonal node IDs.
MAY 16: Landlab's handling of diagonal links may soon be enhanced;
methods like this may soon be superseded.
Creates a list of IDs of the diagonal nodes to each node, as a 2D
array. Only interior nodes are assigned diagonal neighbors; boundary
nodes get -1 for each neighbor. The order of the diagonal nodes is
[topright, topleft, bottomleft, bottomright].
.. note::
This is equivalent to the diagonals of all cells,
and setting the neighbors of boundary-node cells to -1. In such a
case, each node has one cell and each node-cell pair has the
same ID. However, this is the old-style grid structure as
boundary nodes no longer have associated cells.
DEJH: As of 6/12/14, this method now uses BAD_INDEX_VALUE, and
boundary nodes now have neighbors, where they are found at the ends
of active links.
"""
self.diagonal_list_created = True
self.diagonal_cells = sgrid.diagonal_node_array(
self.shape, out_of_bounds=bad_index)
closed_boundaries = np.empty(4, dtype=int)
closed_boundaries.fill(bad_index)
self.diagonal_cells[self.closed_boundary_nodes, :] = closed_boundaries
self.diagonal_cells.ravel()[
np.in1d(self.diagonal_cells.ravel(),
self.closed_boundary_nodes)] = bad_index
return self.diagonal_cells
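# Illustrative sketch (not part of the original module): the ordering
# [topright, topleft, bottomleft, bottomright] can be checked against the
# _get_diagonal_list example above, which goes through this method:
#   from landlab import RasterModelGrid
#   mg = RasterModelGrid((4, 5))
#   mg._create_diagonal_list()[7]   # interior node -> array([13, 11,  1,  3])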
@deprecated(use='node_is_core', version='0.5')
def is_interior(self, *args):
"""is_interior([ids])
Check if a node is an interior node.
Returns a boolean array of truth values for each node ID provided;
True if the node is an interior node, False otherwise.
If no IDs are provided, method returns a boolean array for every node.
(Interior status is typically indicated by a value of 0 in
node_status.)
LLCATS: DEPR NINF BC
"""
# NG changed this.
# Modified DEJH May 2014 to accept simultaneous tests of multiple nodes;
# should still be back-compatible.
try:
node_ids = args[0]
except IndexError: # return all nodes
return np.equal(self._node_status, CORE_NODE)
else:
return np.equal(self._node_status[node_ids], CORE_NODE)
@deprecated(use='node_is_core', version=1.0)
def is_core(self, *args):
"""
LLCATS: DEPR NINF BC
"""
return self.node_is_core(*args)
def node_is_core(self, *args):
"""node_is_core([ids])
Check if a node is a core node.
Returns a boolean array of truth values for each node ID provided;
True if the node is a core node, False otherwise.
If no IDs are provided, method returns a boolean array for every node.
(Core status is typically indicated by a value of 0 in node_status.)
LLCATS: NINF BC
"""
# NG changed this.
# Modified DEJH May 2014 to accept simultaneous tests of multiple nodes;
# should still be back-compatible.
try:
node_ids = args[0]
except IndexError: # return all nodes
return np.equal(self._node_status, CORE_NODE)
else:
return np.equal(self._node_status[node_ids], CORE_NODE)
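# Illustrative sketch (not part of the original module): on a default
# RasterModelGrid the perimeter nodes are boundary nodes and the rest are
# core, so for example:
#   from landlab import RasterModelGrid
#   mg = RasterModelGrid((4, 5))
#   mg.node_is_core(0)   # corner node   -> False
#   mg.node_is_core(6)   # interior node -> True
#   mg.node_is_core()    # boolean array for every node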
@deprecated(use='nodes_are_all_core', version=1.0)
def are_all_interior(self, IDs):
"""Check if nodes are interior.
Returns a single boolean truth value, True if all nodes with *IDs* are
interior nodes, False if not.
LLCATS: DEPR NINF BC
"""
return np.all(np.equal(self._node_status[IDs], CORE_NODE))
@deprecated(use='nodes_are_all_core', version=1.0)
def are_all_core(self, ids):
"""
LLCATS: DEPR NINF BC
"""
return self.nodes_are_all_core(ids)
def nodes_are_all_core(self, ids):
"""Check if nodes are all core.
Returns a single boolean truth value, True if all nodes with *IDs* are
core nodes, False if not.
Parameters
----------
ids : array-like
Grid nodes.
Returns
-------
boolean
``True`` if all the given nodes are *core* nodes.
LLCATS: NINF BC
"""
return np.all(np.equal(self._node_status[ids], CORE_NODE))
@deprecated(use='no replacement', version=1.0)
def face_connecting_cell_pair(self, cell_a, cell_b):
"""Get the face that connects two cells.
Returns an array of face indices that *cell_a* and *cell_b* share.
If the cells do not share any faces, returns an empty array.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5))
>>> mg.face_connecting_cell_pair(0, 1)
array([4])
>>> mg.face_connecting_cell_pair(0, 2).size # empty array returned
0
LLCATS: DEPR FINF CINF CONN
"""
cell_faces = self.faces_at_cell[[cell_a, cell_b]]
return as_id_array(np.intersect1d(cell_faces[0], cell_faces[1],
assume_unique=True))
@return_id_array
def grid_coords_to_node_id(self, row, col, **kwds):
"""Convert node indices to node ID.
Returns the ID of the node at the specified *row* and *col* of the
raster grid. Since this is a wrapper for the numpy ravel_multi_index
function, the keyword arguments are the same as that function. In
addition, *row* and *col* can both be either scalars or arrays (of the
same length) to get multiple ids.
As with ravel_multi_index use the *mode* keyword to change the
behavior of the method when passed an out-of-range *row* or *col*.
The default is to raise ValueError (not IndexError, as you might
expect).
.. note::
The syntax assumes that first row and column are 0,
so max entry for a mg with 4 rows and 5 cols is row=3, col=4
Parameters
----------
row : array-like
Row of node.
col : array-like
Column of node.
Returns
-------
ndarray
Node IDs.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5))
>>> mg.grid_coords_to_node_id(2, 3)
13
>>> mg.grid_coords_to_node_id([2, 0], [3, 4])
array([13, 4])
LLCATS: NINF SUBSET MEAS
"""
return np.ravel_multi_index((row, col), self.shape, **kwds)
def _create_face_width(self):
"""Set up array of face widths.
Produces an array of length nfaces containing the face width.
Returns
-------
ndarray of float
Width of faces (listed as horizontal, then vertical).
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> grid.width_of_face
array([ 1., 1., 1., 1.])
"""
n_horizontal_faces = (self.shape[0] - 2) * (self.shape[1] - 1)
self._face_width = np.empty(squad_faces.number_of_faces(self.shape))
self._face_width[:n_horizontal_faces] = self.dx
self._face_width[n_horizontal_faces:] = self.dy
return self._face_width
def _unit_test(self):
"""Stub for adding unit tests to RasterModelGrid."""
pass
def calc_unit_normal_at_patch(self, elevs='topographic__elevation'):
"""Calculate and return the unit normal vector <a, b, c> to a patch.
This method is not defined on a raster, as there is no unique unit
normal for a square patch. Use
`_calc_unit_normals_to_patch_subtriangles` instead.
LLCATS: PINF GRAD
"""
raise NotImplementedError(
'This method is not defined on a raster, as there is no unique '
'unit normal for a square patch. Use '
'`_calc_unit_normals_to_patch_subtriangles` instead.')
@deprecated(use='calc_aspect_at_node', version=1.0)
def calculate_aspect_at_nodes_bestFitPlane(self, id, val):
"""Aspect at nodes.
.. codeauthor:: <NAME> <<EMAIL>>
Calculates the aspect at each node based on the elevation of
the node and its neighbors using a best fit plane calculated
using singular value decomposition.
Parameters
----------
id : array-like
ID of nodes at which to calculate the aspect.
val : ndarray
Elevation at all nodes
Returns
-------
ndarray
Aspect at the nodes given by id
LLCATS: DEPR NINF SURF
"""
# additional note, KRB has written three codes in raster.py
# one to calculate slope, one to calculate aspect, and one
# to calculate both
# get the list of neighboring nodes for the nodes given by id
n = self.active_neighbors_at_node[id]
a = []
# for each node in id make a list with the node id and the ids of
# its neighbors.
# determine the values for the x, y, and z coordinates of each node,
# pass these to rfuncs.calculate_slope_aspect_bfp to calculate the
# slope and aspect.
indBool = (n != BAD_INDEX_VALUE)
for i in range(len(id)):
# make a list of the neighbor nodes and
# check that none of the nodes are bad
ns = list(n[0][indBool[0]])
ns.append(id[i])
x = self.node_x[ns]
y = self.node_y[ns]
z = val[ns]
slope, aspect = rfuncs.calculate_slope_aspect_bfp(x, y, z)
a.append(aspect)
del ns
# return aspect alone
return a
@deprecated(use='calc_slope_at_node', version=1.0)
def calculate_slope_at_nodes_bestFitPlane(self, id, val):
"""Slope of best-fit plane at nodes.
.. codeauthor:: <NAME> <<EMAIL>>
Calculates the slope at each node based on the elevation of
the node and its neighbors using a best fit plane calculated
using singular value decomposition.
Parameters
----------
id : array-like
ID of nodes at which to calculate the aspect
val : ndarray
Elevation at all nodes
Returns
-------
ndarray
Slope at the nodes given by id
LLCATS: DEPR NINF GRAD SURF
"""
#
# additional note, KRB has written three codes in raster.py
# one to calculate slope, one to calculate aspect, and one
# to calculate both
# get the list of neighboring nodes for the nodes given by id
n = self.active_neighbors_at_node[id]
s = []
# for each node in id make a list with the node id and the ids of
# its neighbors.
# determine the values for the x, y, and z coordinates of each node,
# pass these to rfuncs.calculate_slope_aspect_bfp to calculate the
# slope and aspect.
indBool = (n != BAD_INDEX_VALUE)
for i in range(len(id)):
# make a list of the neighbor nodes and
# check that none of the nodes are bad
ns = list(n[0][indBool[0]])
ns.append(id[i])
x = self.node_x[ns]
y = self.node_y[ns]
z = val[ns]
slope, _ = rfuncs.calculate_slope_aspect_bfp(x, y, z)
s.append(slope)
del ns
# return slope alone
return s
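# Illustrative sketch (not part of the original module): both best-fit-plane
# helpers above expect node IDs plus a full elevation array, e.g. (assuming a
# grid `mg` and an elevation array `z` of length mg.number_of_nodes):
#   slopes = mg.calculate_slope_at_nodes_bestFitPlane(mg.core_nodes, z)
#   aspects = mg.calculate_aspect_at_nodes_bestFitPlane(mg.core_nodes, z)
# Each returns one value per requested node, computed from the node and its
# active neighbors via rfuncs.calculate_slope_aspect_bfp.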
@deprecated(use='calc_slope_at_node, calc_aspect_at_node', version=1.0)
def calculate_slope_aspect_at_nodes_burrough(self, ids=None,
vals='Elevation'):
"""Calculate topographic slope.
Calculates the local topographic slope (i.e., the down-dip slope, and
presented as positive), and the aspect (dip direction in degrees
clockwise from north), at the given nodes, *ids*. All *ids* must be
core nodes.
This method uses Burrough's 1998 Pg. 190 method similar to the methods
used by ArcMap to calculate slope and aspect.
If *ids* is not provided, the slope will be returned for nodes at all
cells.
*vals* is either the name of an existing grid field from which to draw
topographic data, or an array of values to use. If an array of values
is passed, it must be nnodes long.
If *vals* is not provided, this method will default to trying to use
the field 'Elevation'.
Returns
-------
(slope, aspect) : tuple of float
*slope*, a len(ids) array of slopes at each node provided.
*aspect*, a len(ids) array of aspects at each node provided.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), (4, 4))
>>> z = np.array([0., 0., 0., 0.,
... 3., 3., 3., 3,
... 6., 6., 6., 6.])
>>> (slope,
... aspect) = grid.calculate_slope_aspect_at_nodes_burrough(vals=z)
>>> np.tan(slope)
array([ 0.75, 0.75])
>>> np.degrees(aspect)
array([ 180., 180.])
This method is *deprecated*. Use ``calc_slope_at_node`` and
``calc_aspect_at_node`` instead. Notice that ``calc_slope_at_node``
and ``calc_aspect_at_node`` return values for all nodes, not just
core nodes. In addition, ``calc_aspect_at_node`` returns compass-style
angles in degrees.
>>> np.tan(grid.calc_slope_at_node(elevs=z)[grid.core_nodes])
array([ 0.75, 0.75])
>>> grid.calc_aspect_at_node(elevs=z)[grid.core_nodes]
array([ 180., 180.])
LLCATS: DEPR NINF SURF GRAD
"""
if ids is None:
ids = self.node_at_cell
if not isinstance(ids, np.ndarray):
ids = np.array([ids])
if isinstance(vals, str):
vals = self.at_node[vals]
else:
if len(vals) != self.number_of_nodes:
raise IndexError('*vals* was not of a compatible length!')
neighbors = np.zeros([ids.shape[0], 4], dtype=int)
diagonals = np.zeros([ids.shape[0], 4], dtype=int)
# [right, top, left, bottom]
neighbors[:, ] = self.active_neighbors_at_node[ids]
# [topright, topleft, bottomleft, bottomright]
diagonals[:, ] = self._get_diagonal_list(ids)
right = vals[neighbors[:, 0]]
top = vals[neighbors[:, 1]]
left = vals[neighbors[:, 2]]
bottom = vals[neighbors[:, 3]]
top_right = vals[diagonals[:, 0]]
top_left = vals[diagonals[:, 1]]
bottom_left = vals[diagonals[:, 2]]
bottom_right = vals[diagonals[:, 3]]
dz_dx = ((top_right + 2 * right + bottom_right) -
(top_left + 2 * left + bottom_left)) / (8. * self._dx)
dz_dy = ((bottom_left + 2 * bottom + bottom_right) -
(top_left + 2 * top + top_right)) / (8. * self._dy)
slope = np.zeros([ids.shape[0]], dtype=float)
aspect = np.zeros([ids.shape[0]], dtype=float)
slope = np.arctan(np.sqrt(dz_dx ** 2 + dz_dy ** 2))
aspect = np.arctan2(dz_dy, - dz_dx)
aspect = np.pi * .5 - aspect
aspect[aspect < 0.] = aspect[aspect < 0.] + 2. * np.pi
aspect[slope == 0.] = -1.
return slope, aspect
@deprecated(use='calc_slope_at_node, calc_aspect_at_node', version=1.0)
def calculate_slope_aspect_at_nodes_best_fit_plane(self, nodes, val):
r"""Calculate slope aspect.
Slope aspect of best-fit plane at nodes.
.. codeauthor:: <NAME> <<EMAIL>>
.. note::
THIS CODE HAS ISSUES (SN 25-Sept-14): This code didn't perform
well on a NS facing elevation profile. Please check
slope_aspect_routines_comparison.py under landlab\examples before
using this. Suggested alternative:
calculate_slope_aspect_at_nodes_burrough
Calculates both the slope and aspect at each node based on the
elevation of the node and its neighbors using a best fit plane
calculated using singular value decomposition.
Parameters
----------
nodes : array-like
ID of nodes at which to calculate the aspect
val : ndarray
Elevation at all nodes
Returns
-------
(slope, aspect) : tuple of floats
Tuple containing (*slope*, *aspect*)
LLCATS: DEPR NINF GRAD SURF
"""
# additional note, KRB has written three codes in raster.py
# one to calculate slope, one to calculate aspect, and one
# to calculate both
# get the list of neighboring nodes for the nodes given by id
node_neighbors = self.active_neighbors_at_node[nodes]
aspects = []
slopes = []
# for each node in id make a list with the node id and the ids of
# its neighbors.
# determine the values for the x, y, and z coordinates of each node,
# pass these to rfuncs.calculate_slope_aspect_bfp to calculate the
# slope and aspect.
indBool = (node_neighbors != BAD_INDEX_VALUE)
for id_ in range(len(nodes)):
# make a list of the neighbor nodes and
# check that none of the nodes are bad
neighbors = list(node_neighbors[0, indBool[0]])
neighbors.append(nodes[id_])
node_x = self.node_x[neighbors]
node_y = self.node_y[neighbors]
node_z = val[neighbors]
slope, aspect = rfuncs.calculate_slope_aspect_bfp(node_x, node_y,
node_z)
aspects.append(aspect)
slopes.append(slope)
del neighbors
return slopes, aspects
def save(self, path, names=None, format=None, at=None):
"""Save a grid and fields.
If more than one field name is specified for names when saving to ARC
ascii, multiple files will be produced, suffixed with the field names.
When saving to netCDF (.nc), the fields are incorporated into the
single named .nc file.
Parameters
----------
path : str
Path to output file.
names : iterable of strings, optional
List of field names to save, defaults to all if not specified.
format : {'netcdf', 'esri-ascii'}, optional
Output file format. Guess from file extension if not given.
at : str
Grid element where values are defined.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import os
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.save('./mysave.nc')
>>> os.remove('mysave.nc') #to remove traces of this test
LLCATS: GINF
"""
format = format or _guess_format_from_name(path)
path = _add_format_extension(path, format)
if format == 'netcdf':
write_netcdf(path, self, format='NETCDF3_64BIT', names=names,
at=at)
elif format == 'esri-ascii':
write_esri_ascii(path, self, names=names)
else:
raise ValueError('format not understood')
@property
@make_return_array_immutable
def looped_neighbors_at_cell(self):
"""
For each cell in a raster, return the D8 neighboring cells, looping
across grid boundaries as necessary.
Returns lists of looped neighbor cell IDs of given *cell ids*.
If *cell ids* are not given, it returns a 2D array of size
(self.number_of_cells, 8).
Order of neighbors is [ E, NE, N, NW, W, SW, S, SE ]
Output is looped, regardless of boundary conditions! (see examples)
Returns
-------
ndarray (num_cells, 8)
The eight neighbors of each cell.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> neighbors = grid.looped_neighbors_at_cell
>>> neighbors[1, :]
array([2, 5, 4, 3, 0, 3, 4, 5])
>>> neighbors[5, :]
array([3, 0, 2, 1, 4, 1, 2, 0])
>>> grid.looped_neighbors_at_cell[np.array([1, 5]), :]
array([[2, 5, 4, 3, 0, 3, 4, 5],
[3, 0, 2, 1, 4, 1, 2, 0]])
LLCATS: DEPR CINF CONN BC
"""
if self._looped_cell_neighbor_list is not None:
return self._looped_cell_neighbor_list
else:
self._looped_cell_neighbor_list = \
self._create_looped_cell_neighbor_list()
return self.looped_neighbors_at_cell
def _create_looped_cell_neighbor_list(self):
"""Create a list of looped immediate cell neighbors (8 adjacent cells).
Creates a list of looped immediate cell neighbors (*cell ids*) for each
cell as a 2D array of size ( self.number_of_cells, 8 ).
Order of neighbors is [ E, NE, N, NW, W, SW, S, SE ]
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> neighbors = grid._create_looped_cell_neighbor_list()
>>> neighbors[1]
array([2, 5, 4, 3, 0, 3, 4, 5])
>>> neighbors[5]
array([3, 0, 2, 1, 4, 1, 2, 0])
"""
# CAUTION: Some terminology concerning cells in this module
# differs from its general usage. This is intentionally
# left as is until further discussion among the dev group.
# Any such instances are marked with (*TC - Terminology Caution)
nrows, ncols = self.cell_grid_shape
interior_cells = sgrid.interior_nodes(self.cell_grid_shape) # *TC
cells_at_corners_of_grid = self.cells_at_corners_of_grid # *TC
# The cells along the edges minus the corner cells.
top_edge_cells = self.cell_at_node[self.nodes[-2, :]][2:-2]
bottom_edge_cells = self.cell_at_node[self.nodes[1, :]][2:-2]
left_edge_cells = self.cell_at_node[self.nodes[:, 1]][2:-2]
right_edge_cells = self.cell_at_node[self.nodes[:, -2]][2:-2]
looped_cell_neighbors = np.empty([self.number_of_cells, 8], dtype=int)
# order = [E,NE,N,NW,W,SW,S,SE]
for cell in range(0, self.number_of_cells):
if cell in interior_cells:
neighbor_ = [
cell + 1, cell + 1 + ncols, cell + ncols, cell + ncols - 1,
cell - 1, cell - ncols - 1, cell - ncols, cell - ncols + 1]
elif cell in bottom_edge_cells:
neighbor_ = [
cell + 1, cell + 1 + ncols, cell + ncols, cell + ncols - 1,
cell - 1, cell + (nrows - 1) * ncols - 1,
cell + (nrows - 1) * ncols, cell + (nrows - 1) * ncols + 1]
elif cell in top_edge_cells:
neighbor_ = [
cell + 1, cell - (nrows - 1) * ncols + 1,
cell - (nrows - 1) * ncols, cell - (nrows - 1) * ncols - 1,
cell - 1, cell - ncols - 1, cell - ncols, cell - ncols + 1]
elif cell in right_edge_cells:
neighbor_ = [
cell - ncols + 1, cell + 1, cell + ncols, cell + ncols - 1,
cell - 1, cell - ncols - 1, cell - ncols,
cell - 2 * ncols + 1]
elif cell in left_edge_cells:
neighbor_ = [
cell + 1, cell + ncols + 1, cell + ncols,
cell + 2 * ncols - 1, cell + ncols - 1, cell - 1,
cell - ncols, cell - ncols + 1]
elif cell == cells_at_corners_of_grid[0]: # SW corner
neighbor_ = [
cell + 1, cell + ncols + 1, cell + ncols,
cell + 2 * ncols - 1, cell + ncols - 1,
cell + nrows * ncols - 1, cell + (nrows - 1) * ncols,
cell + (nrows - 1) * ncols + 1]
elif cell == cells_at_corners_of_grid[1]: # SE corner
neighbor_ = [
cell - ncols + 1, cell + 1, cell + ncols, cell + ncols - 1,
cell - 1, cell + (nrows - 1) * ncols - 1,
cell + (nrows - 1) * ncols, cell + (nrows - 2) * ncols + 1]
elif cell == cells_at_corners_of_grid[2]: # NW corner
neighbor_ = [
cell + 1, cell - (nrows - 1) * ncols + 1,
cell - (nrows - 1) * ncols, cell - (nrows - 2) * ncols - 1,
cell + ncols - 1, cell - 1, cell - ncols, cell - ncols + 1]
elif cell == cells_at_corners_of_grid[3]: # NE corner
neighbor_ = [
cell - ncols + 1, cell - nrows * ncols + 1,
cell - (nrows - 1) * ncols, cell - (nrows - 1) * ncols - 1,
cell - 1, cell - ncols - 1, cell - ncols,
cell - 2 * ncols + 1]
looped_cell_neighbors[cell] = neighbor_
return looped_cell_neighbors
@property
@make_return_array_immutable
def second_ring_looped_neighbors_at_cell(self):
"""Get list of second ring looped neighbor cell IDs (all 16 neighbors).
Returns lists of looped second ring neighbor cell IDs of
given *cell ids*. If *cell ids* are not given, it returns
a 2D array of size ( self.number_of_cells, 16 ).
The cells are the 16 which encircle the nine true neighbor cells.
Order of neighbors: starts with E and goes counterclockwise
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((10, 10))
>>> mg.second_ring_looped_neighbors_at_cell[36, :]
array([38, 46, 54, 53, 52, 51, 50, 42, 34, 26, 18, 19, 20, 21, 22, 30])
>>> mg.second_ring_looped_neighbors_at_cell[8, :]
array([10, 18, 26, 25, 24, 31, 30, 22, 14, 6, 62, 63, 56, 57, 58, 2])
...take a look at the cell grid to understand why:
[56, 57, 58, 59, 60, 61, 62, 63]
[48, 49, 50, 51, 52, 53, 54, 55]
[40, 41, 42, 43, 44, 45, 46, 47]
[32, 33, 34, 35, 36, 37, 38, 39]
[24, 25, 26, 27, 28, 29, 30, 31]
[16, 17, 18, 19, 20, 21, 22, 23]
[ 8, 9, 10, 11, 12, 13, 14, 15]
[ 0, 1, 2, 3, 4, 5, 6, 7]
LLCATS: CINF CONN BC
"""
if self._looped_second_ring_cell_neighbor_list_created:
return self.second_ring_looped_cell_neighbor_list
else:
self.second_ring_looped_cell_neighbor_list = \
self._create_second_ring_looped_cell_neighbor_list()
return self.second_ring_looped_neighbors_at_cell
def _create_second_ring_looped_cell_neighbor_list(self):
"""Create list of looped second ring cell neighbors (16 cells).
Creates a list of looped immediate cell neighbors for each cell as a
2D array of size ( self.number_of_cells, 16 ).
Order of neighbors: starts with E and goes counterclockwise
"""
inf = self.looped_neighbors_at_cell
second_ring = np.empty([self.number_of_cells, 16], dtype=int)
order = np.arange(-1, 15)
order[0] = 15
for cell in range(0, self.number_of_cells):
cell1, cell2, cell3, cell4 = (inf[cell][1], inf[cell][3],
inf[cell][5], inf[cell][7])
ring_tw = np.concatenate((inf[cell1][0:4], inf[cell2][2:6],
inf[cell3][4:8], inf[cell4][6:8],
inf[cell4][0:2]))[order]
second_ring[cell] = ring_tw
self._looped_second_ring_cell_neighbor_list_created = True
return second_ring
def set_fixed_link_boundaries_at_grid_edges(
self, right_is_fixed, top_is_fixed, left_is_fixed, bottom_is_fixed,
link_value=None, node_value=None,
fixed_node_value_of='topographic__elevation',
fixed_link_value_of='topographic__slope'):
"""Create fixed link boundaries at the grid edges.
Sets the status of links along the specified side(s) of a raster
grid (bottom vertical links, right horizontal links, top vertical links,
and/or left horizontal links) to FIXED_LINK.
By definition, fixed links exist between fixed gradient nodes
(status_at_node == 2) and core nodes (status_at_node == 0). Because the
outer ring of nodes are fixed gradient (status_at_node == 2), the links
between them are inactive (status_at_link == 4) and are not set using
this function (the inactive links are the top and bottom horizontal
edge links, and the left and right vertical edge links).
Arguments are booleans indicating whether the bottom, right, top, and
left sides are to be set (True) or not (False).
*node_value* controls what values are held constant at the fixed
gradient nodes (status_at_node == 2). It can be either a float, an
array of length number_of_fixed_nodes or number_of_nodes (total), or
left blank. If left blank, the values will be set from those
already in the grid fields, according to 'fixed_node_value_of'.
*link_value* controls what values are held constant at the fixed
links (status_at_link == 2). It can be either a float, an array of
length number_of_fixed_links or number_of_links (total), or
left blank. If left blank, the values will be set from those
already in the grid fields, according to 'fixed_link_value_of'.
*fixed_node_value_of* controls the name of the model field that
contains the node values. Remember, if you don't set a value, the fixed
gradient node values will be set from the field values ***at the time
you call this method***. If no values are present in the field, the
module will complain but accept this, warning that it will be unable to
automatically update boundary conditions (and such methods, e.g.,
``RasterModelGrid.update_boundary_nodes()``, will raise exceptions
if you try).
*fixed_link_value_of* controls the name of the model field that
contains the fixed link values. Remember, if you don't set a value, the
fixed link values will be set from the field values ***at the time you
call this method***. If no values are present in the field, the module
will complain but accept this, warning that it will be unable to
automatically update boundary conditions (and such methods, e.g.,
``RasterModelGrid.update_boundary_nodes()``, will raise exceptions
if you try).
The following example sets the bottom and right link boundaries as
fixed-value in a four-row by nine-column grid that initially has all
boundaries set to fixed_gradient (nodes, i.e. flagged with
status_at_node == 2) and fixed_link (links, i.e. flagged with
status_at_link == 2).
Parameters
----------
right_is_fixed : boolean
Set right edge horizontal links as fixed boundary.
top_is_fixed : boolean
Set top edge vertical links as fixed boundary.
left_is_fixed : boolean
Set left edge horizontal links as fixed boundary.
bottom_is_fixed : boolean
Set bottom edge vertical links as fixed boundary.
link_value : float, array or None (default).
Override value to be kept constant at links.
node_value : float, array or None (default).
Override value to be kept constant at nodes.
fixed_node_value_of : string.
The name of the grid field containing the values of interest at
nodes.
fixed_link_value_of : string.
The name of the grid field containing the values of interest at
links.
Examples
--------
The following grid is used in the example::
*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I X X X X X X X I
| | | | | | | | |
*--X--->o o o o o o o--X--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I | | | | | | | I
| | | | | | | | |
*--X--->o o o o o o o--X--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I X X X X X X X I
| | | | | | | | |
*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*
.. note::
Links set to :any:`ACTIVE_LINK` are not indicated in this diagram.
``*`` indicates the nodes that are set to
:any:`FIXED_GRADIENT BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``X`` indicates the links that are set to :any:`FIXED_LINK`
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 9), 1.0) # rows, columns, spacing
>>> import numpy as np
>>> z = np.arange(0, rmg.number_of_nodes)
>>> s = np.arange(0, rmg.number_of_links)
>>> rmg['node']['topographic__elevation'] = z
>>> rmg['link']['topographic__slope'] = s
>>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0,
0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], dtype=int8)
>>> rmg.status_at_link # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 4, 2, 0, 0, 0,
0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 0, 0, 4, 2, 0, 0, 0, 0, 0, 0, 2,
4, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4])
>>> rmg.fixed_link_properties['fixed_gradient_of']
'topographic__slope'
>>> rmg.fixed_gradient_node_properties['fixed_gradient_of']
'topographic__elevation'
LLCATS: BC SUBSET
"""
# THIS HAS TO SET THE RING AROUND IT AS FIXED-VALUE (NODE_STATUS = 2)
# IF NOT ALREADY SET.
if self._DEBUG_TRACK_METHODS:
six.print_('ModelGrid.set_fixed_link_boundaries_at_grid_edges')
# Fixed link boundaries are found between core nodes (node_status==0)
# and fixed gradient nodes (node_status==2). To assure these conditions
# are met, we store link and node boundary IDs in arrays...
fixed_nodes = np.array([])
#!/usr/bin/env python
from __future__ import print_function
import argparse
import numpy as np
import os, sys, shutil, subprocess, glob
import re
from numpy import pi
from scipy import *
import json
from tabulate import tabulate
from itertools import chain
import flapwmbpt_ini
import prepare_realaxis
# from scipy.interpolate import interp1d
# trans_basis_mode: 0, use wannier function as basis set
# trans_basis_mode: 1, use transformation matrix to rotate the basis set. this matrix doesn't change as a function of iteration.
# trans_basis_mode: 2, use transformation matrix to rotate the basis set. this matrix does change as a function of iteration; it diagonalizes the spectral function at the chemical potential.
def open_h_log(control):
if (control['restart']):
control['h_log']=open('./cmd.log', 'a')
else:
control['h_log']=open('./cmd.log', 'w')
print('', file=control['h_log'],flush=True)
print('*********************************',file=control['h_log'],flush=True)
print(' ComDMFT', file=control['h_log'],flush=True)
print('*********************************',file=control['h_log'],flush=True)
print('', file=control['h_log'],flush=True)
#DEBUG
control['h_log'].flush()
os.fsync(control['h_log'].fileno())
#DEBUG
return None
def close_h_log(control):
control['h_log'].close()
return None
def read_comdmft_ini_control():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control=vlocal['control']
return control
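# Illustrative sketch (not part of the original script): comdmft.ini is plain
# Python that is exec'd, so read_comdmft_ini_control() only requires it to
# define a dictionary named `control`; a minimal (hypothetical) example:
#   control = {
#       'method': 'lqsgw+dmft',
#       'mpi_prefix': 'mpirun -np 16',
#       'initial_lattice_dir': '../lqsgw',
#       'spin_orbit': False,
#       'impurity_problem': [[1, 'd']],
#       'impurity_problem_equivalence': [1],
#   }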
def read_comdmft_ini_postprocessing():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control=vlocal['control']
postprocessing_dict=vlocal['postprocessing']
check_key_in_string('mpi_prefix', control)
check_key_in_string('comsuite_dir', postprocessing_dict)
if (control['method']=='spectral') | (control['method']=='band'):
with open(postprocessing_dict['comsuite_dir']+'/comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control_temp=vlocal['control']
postprocessing_dict['kpoints']=postprocessing_dict.get('kpoints', os.path.abspath(postprocessing_dict['comsuite_dir']+'/'+control_temp['initial_lattice_dir'])+'/kpoints')
if ((control['method']=='dos') | (control['method']=='dos_qp')):
check_key_in_string('kmesh', postprocessing_dict)
if ((control['method']=='spectral') | (control['method']=='dos')):
check_key_in_string('self energy', postprocessing_dict)
postprocessing_dict['broadening']=postprocessing_dict.get('broadening', 0.01)
return control, postprocessing_dict
def read_comdmft_ini():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
# print vglobl
# print 'here'
control=vlocal['control']
wan_hmat=vlocal['wan_hmat']
imp=vlocal['imp']
control['name']='control'
wan_hmat['name']='wan_hmat'
imp['name']='imp'
control['restart']=control.get('restart', False)
open_h_log(control)
control['comsuitedir']=os.environ.get('COMSUITE_BIN')
if not control['comsuitedir']:
print("Error: Environment variable COMSUITE_BIN is not defined.", file=control['h_log'],flush=True)
sys.exit()
print('comsuitedir', control['comsuitedir'])
control['conv_table']=[]
### in control
control['cal_mu']=control.get('cal_mu', True)
control['top_dir']=os.path.abspath('./')
check_key_in_string('method', control)
control['sigma_mix_ratio']=control.get('sigma_mix_ratio', 0.5)
control['doping']=control.get('doping', 0.0)
control['dc_mode']=control.get('dc_mode', 'dc_at_gw')
control['u_mode']=control.get('u_mode', 'bnse')
control['trans_basis_mode']=control.get('trans_basis_mode', 0)
if (control['trans_basis_mode']==1):
check_key_in_string('trans_basis', control)
elif (control['trans_basis_mode']==2):
check_key_in_string('metal_threshold', control)
check_key_in_string('spin_orbit', control)
check_key_in_string('impurity_problem', control)
check_key_in_string('impurity_problem_equivalence', control)
check_key_in_string('initial_lattice_dir', control)
control['initial_lattice_dir']=os.path.abspath(control['initial_lattice_dir'])
control['allfile']=find_allfile(control['initial_lattice_dir'])
if ('dc_directory' not in control):
control['dc_directory']='./dc'
control['dc_directory']=os.path.abspath(control['dc_directory'])
if ('impurity_directory' not in control):
control['impurity_directory']='./impurity'
control['impurity_directory']=os.path.abspath(control['impurity_directory'])
if ('lowh_directory' not in control):
control['lowh_directory']='./lowh'
control['lowh_directory']=os.path.abspath(control['lowh_directory'])
if ('wannier_directory' not in control):
control['wannier_directory']='./wannier'
control['wannier_directory']=os.path.abspath(control['wannier_directory'])
if ('initial_self_energy' in control):
control['initial_self_energy'] =os.path.abspath(control['initial_self_energy'])
if (control['trans_basis_mode']!=0):
check_key_in_string('trans_basis', control)
if ('dc_mat_to_read' in control):
control['dc_mat_to_read'] =os.path.abspath(control['dc_mat_to_read'])
if (control['method']=='lda+dmft'):
control['convergence_header']=['step','i_outer','i_latt','i_imp','causality','delta_rho','w_sp_min','w_sp_max', 'mu', 'std_sig', 'n_imp', 'histo_1', 'histo_2', 'ctqmc_sign']
if (control['method']=='lqsgw+dmft'):
control['convergence_header']=['step','i_imp','causality','static_f0','w_sp_min','w_sp_max', 'mu', 'std_sig', 'n_imp', 'histo_1', 'histo_2', 'ctqmc_sign']
# mpi_prefix
if ('mpi_prefix' in control):
control['mpi_prefix_flapwmbpt']=control.get('mpi_prefix_flapwmbpt', control['mpi_prefix'])
control['mpi_prefix_lowh']=control.get('mpi_prefix_lowh', control['mpi_prefix'])
control['mpi_prefix_impurity']=control.get('mpi_prefix_impurity', control['mpi_prefix'])
control['mpi_prefix_wannier']=control.get('mpi_prefix_wannier', control['mpi_prefix'])
if (control['method']=='lda+dmft'):
control['mpi_prefix_lattice']=control.get('mpi_prefix_lattice', control['mpi_prefix'])
if (control['method']=='lqsgw+dmft'):
control['mpi_prefix_dc']=control.get('mpi_prefix_dc', control['mpi_prefix'])
# mpi_prefix_coulomb
if ('mpi_prefix_coulomb' in control):
check_key_in_string('nproc_k_coulomb', control)
check_key_in_string('nproc_tau_coulomb', control)
else:
# temp=[int(x) for x in np.loadtxt(control['initial_lattice_dir']+'/k_tau_freq.dat')]
temp=list(map(int,np.loadtxt(control['initial_lattice_dir']+'/k_tau_freq.dat')))
control['mpi_prefix_coulomb'], control['nproc_k_coulomb'],control['nproc_tau_coulomb']=optimized_nproc_for_comcoulomb(control['mpi_prefix'], temp[0], temp[1],temp[2],temp[3])
# print('mpi_prefix_coulomb', control['mpi_prefix_coulomb'], file=control['h_log'],flush=True)
# max iteration
if (control['method']=='lda+dmft'):
control['max_iter_num_impurity']=control.get('max_iter_num_impurity', 1)
control['max_iter_num_outer']=control.get('max_iter_num_outer', 50)
elif (control['method']=='lqsgw+dmft'):
control['max_iter_num_impurity']=control.get('max_iter_num_impurity', 50)
# directory_name
if (control['method']=='lda+dmft'):
if ('lattice_directory' not in control):
control['lattice_directory']='./lattice'
control['lattice_directory']=os.path.abspath(control['lattice_directory'])
if (control['method']=='lqsgw+dmft'):
if ('coulomb_directory' not in control):
control['coulomb_directory']='./coulomb'
control['coulomb_directory']=os.path.abspath(control['coulomb_directory'])
if (control['method']=='lqsgw+dmft'):
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
control['iter_num_outer']=1
elif (control['method']=='lda+dmft'):
control['iter_num_outer']=1
control['iter_num_impurity']=0
if (control['restart']):
find_place_to_restart(control)
if (control['method']=='lqsgw+dmft'):
print('do_wannier', control['do_wannier'], file=control['h_log'],flush=True)
print('do_coulomb', control['do_coulomb'], file=control['h_log'],flush=True)
print('do_dc', control['do_dc'], file=control['h_log'],flush=True)
# in wan_hmat
check_key_in_string('kgrid', wan_hmat)
check_key_in_string('froz_win_min', wan_hmat)
check_key_in_string('froz_win_max', wan_hmat)
wan_hmat['write_wan']=wan_hmat.get('write_wan', False)
wan_hmat['dis_win_min']=wan_hmat.get('dis_win_min', wan_hmat['froz_win_min'])
wan_hmat['dis_win_max']=wan_hmat.get('dis_win_max', wan_hmat['froz_win_max']+40.0)
control['proj_win_min']=control.get('proj_win_min', wan_hmat['dis_win_min'])
control['proj_win_max']=control.get('proj_win_max', wan_hmat['dis_win_max'])
wan_hmat['num_iter']=wan_hmat.get('num_iter', 0)
wan_hmat['dis_num_iter']=wan_hmat.get('dis_num_iter', 100)
wan_hmat['cut_low']=wan_hmat.get('cut_low', 0.4)
wan_hmat['cut_froz']=wan_hmat.get('cut_froz', 0.10)
wan_hmat['cut_total']=wan_hmat.get('cut_total', 0.0)
if (control['method']=='lqsgw+dmft'):
wan_hmat['rmode']=wan_hmat.get('rmode', 0)
wan_hmat['radfac']=wan_hmat.get('radfac', 1.0)
if (control['method']=='lda+dmft'):
wan_hmat['rmode']=wan_hmat.get('rmode', 0)
wan_hmat['radfac']=wan_hmat.get('radfac', 1.0)
# in imp
check_key_in_string('temperature', imp)
imp['beta']=1.0/(8.6173303*10**-5*imp['temperature'])
if ('initial_self_energy' in control):
control['n_omega']=np.shape(np.loadtxt(control['initial_self_energy']))[0]
else:
control['n_omega']=int(300.0/(2*pi/imp['beta']))
control['omega']=(np.arange(control['n_omega'])*2+1)*pi/imp['beta']
for key, value in imp.items():
if (not (isinstance(imp[key], dict))):
continue
imp[key]['name']=key
# imp[key]['para']=True
# for ktemp in control['impurity_problem_equivalence'] :
# if (ktemp == -1):
# imp[key]['para']=False
if (-1*int(key) in control['impurity_problem_equivalence']):
imp[key]['para']=False
else:
imp[key]['para']=True
imp[key]['problem']=control['impurity_problem'][control['impurity_problem_equivalence'].index(int(key))][1]
if (control['method']=='lda+dmft'):
check_key_in_string('f0', imp[key])
if ((imp[key]['problem']=='p') | (imp[key]['problem']=='d') | (imp[key]['problem']=='f')):
check_key_in_string('f2', imp[key])
if ((imp[key]['problem']=='d') | (imp[key]['problem']=='f')):
check_key_in_string('f4', imp[key])
if (imp[key]['problem']=='f'):
check_key_in_string('f6', imp[key])
# elif (control['method']=='lqsgw+dmft'):
# check_key_in_string('boson_low_truncation', imp[key])
check_key_in_string('thermalization_time', imp[key])
check_key_in_string('measurement_time', imp[key])
check_key_in_string('impurity_matrix', imp[key])
if (control['trans_basis_mode']<2):
imp[key]['impurity_matrix']=np.array(imp[key]['impurity_matrix'])
else:
print("impurity_matrix reset", file=control['h_log'],flush=True)
nimp_orb=len(imp[key]['impurity_matrix'])
imp[key]['impurity_matrix']=np.zeros((nimp_orb,nimp_orb), dtype='int')
for ii in range(nimp_orb):
imp[key]['impurity_matrix'][ii,ii]=ii+1
print('here', file=control['h_log'],flush=True)
print(type(imp[key]['impurity_matrix']), file=control['h_log'],flush=True)
print(imp[key]['impurity_matrix'], file=control['h_log'],flush=True)
print('here', file=control['h_log'],flush=True)
if (control['method']=='lda+dmft'):
check_key_in_string('nominal_n', imp[key])
check_key_in_string('green_cutoff', imp[key])
imp[key]['susceptibility_cutoff']=imp[key].get('susceptibility_cutoff', 50)
imp[key]['susceptibility_tail']=imp[key].get('susceptibility_tail', 300)
if ('coulomb' not in imp[key]):
imp[key]["coulomb"]='full'
control['sig_header']=['# omega(eV)']
for ii in sorted(set(control['impurity_problem_equivalence'])):
for jj in sorted(set(imp[str(abs(ii))]['impurity_matrix'].flatten().tolist())-{0}):
control['sig_header'].append("Re Sig_{"+str(ii)+','+str(jj)+'}(eV)')
control['sig_header'].append("Im Sig_{"+str(ii)+','+str(jj)+'}(eV)')
# check hdf5
if (os.path.isdir(control['initial_lattice_dir']+"/checkpoint/")):
control['hdf5']=False
else:
control['hdf5']=True
print('hdf5', control['hdf5'],file=control['h_log'],flush=True)
# print
print('top_dir', control['top_dir'], file=control['h_log'],flush=True)
if (control['method']=='lda+dmft'):
print('lattice_directory', control['lattice_directory'], file=control['h_log'],flush=True)
elif (control['method']=='lqsgw+dmft'):
print('coulomb_directory', control['coulomb_directory'], file=control['h_log'],flush=True)
print('wannier_directory', control['wannier_directory'], file=control['h_log'],flush=True)
print('dc_directory', control['dc_directory'], file=control['h_log'],flush=True)
print('impurity_directory', control['impurity_directory'], file=control['h_log'],flush=True)
print('lowh_directory', control['lowh_directory'], file=control['h_log'],flush=True)
return control,wan_hmat,imp
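# Illustrative sketch (not part of the original script): besides `control`,
# comdmft.ini must also define `wan_hmat` and `imp`; the keys below are the
# ones read_comdmft_ini() checks for explicitly (all values hypothetical):
#   wan_hmat = {'kgrid': [8, 8, 8],
#               'froz_win_min': -10.0, 'froz_win_max': 10.0}
#   imp = {'temperature': 300.0,
#          '1': {'impurity_matrix': [[1, 0, 0, 0, 0],
#                                    [0, 2, 0, 0, 0],
#                                    [0, 0, 3, 0, 0],
#                                    [0, 0, 0, 4, 0],
#                                    [0, 0, 0, 0, 5]],
#                'thermalization_time': 1,
#                'measurement_time': 10,
#                'green_cutoff': 50}}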
def find_impurity_wan(control, wan_hmat):
num_wann=np.shape(wan_hmat['basis'])[0]
control['impurity_wan']=[]
for ip in range(np.shape(control['impurity_problem'])[0]):
if (control['spin_orbit']):
if (control['impurity_problem'][ip][1].lower()=='f'):
control['impurity_wan'].append([0]*14)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==3)):
if (int(wan_hmat['basis'][iwan]['i']*2)==-1):
if (int(wan_hmat['basis'][iwan]['m']*2)==-5):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-3):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==1):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==3):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==5):
control['impurity_wan'][ip][5]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['i']*2)==1):
if (int(wan_hmat['basis'][iwan]['m']*2)==-7):
control['impurity_wan'][ip][6]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-5):
control['impurity_wan'][ip][7]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-3):
control['impurity_wan'][ip][8]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-1):
control['impurity_wan'][ip][9]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==1):
control['impurity_wan'][ip][10]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==3):
control['impurity_wan'][ip][11]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==5):
control['impurity_wan'][ip][12]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==7):
control['impurity_wan'][ip][13]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
else:
if (control['impurity_problem'][ip][1].lower()=='s'):
control['impurity_wan'].append([0]*1)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==0)):
if (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='p'):
control['impurity_wan'].append([0]*3)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==1)):
if (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='d'):
control['impurity_wan'].append([0]*5)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==2)):
if (wan_hmat['basis'][iwan]['m']==-2):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==2):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='f'):
control['impurity_wan'].append([0]*7)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==3)):
if (wan_hmat['basis'][iwan]['m']==-3):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-2):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==2):
control['impurity_wan'][ip][5]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==3):
control['impurity_wan'][ip][6]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
return None
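# Illustrative sketch (not part of the original script): after
# find_impurity_wan() runs, control['impurity_wan'] holds one list of Wannier
# orbital indices per impurity problem, ordered by m (5 entries for a d shell
# without spin-orbit, 14 for an f shell with spin-orbit), e.g. (hypothetical
# indices for a single d-shell impurity):
#   control['impurity_wan']   # -> [[3, 4, 5, 6, 7]]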
def initial_file_directory_setup(control):
directory_setup(control)
if (control['method'] == 'lda+dmft'):
print('iter_num_impurity', control['iter_num_impurity'], ' max_iter_num_impurity', control['max_iter_num_impurity'], file=control['h_log'],flush=True)
print('iter_num_outer', control['iter_num_outer'], ' max_iter_num_outer', control['max_iter_num_outer'], file=control['h_log'],flush=True)
elif (control['method'] == 'lqsgw+dmft'):
print('iter_num_impurity', control['iter_num_impurity'], file=control['h_log'],flush=True)
print('max_iter_num_impurity', control['max_iter_num_impurity'], file=control['h_log'],flush=True)
return None
def find_place_to_restart(control):
if (control['method']=='lqsgw+dmft'):
control['conv_table']=read_convergence_table(control)
# print(control['conv_table'], file=control['h_log'],flush=True)
if (len(control['conv_table'])>0):
n_imp_problem=np.amax(control['impurity_problem_equivalence'])
last_step=control['conv_table'][-1][0].strip().split('_')[0]
last_imp_iter=control['conv_table'][-1][1].strip()
if (len(control['conv_table'][-1][0].strip().split('_')) > 1):
last_imp=control['conv_table'][-1][0].strip().split('_')[1]
print(last_step, last_imp, last_imp_iter, file=control['h_log'],flush=True)
else:
print(last_step, last_imp_iter, file=control['h_log'],flush=True)
if last_step == 'wannier':
control['do_wannier']=False
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
elif last_step == 'coulomb':
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=1
elif last_step == 'dc':
if (int(last_imp) == n_imp_problem):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=1
else:
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=1
for ii in range(int(last_imp)):
control['conv_table'].pop(-1)
elif (last_step == 'delta'):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=int(last_imp_iter)
control['conv_table'].pop(-1)
elif (last_step == 'impurity'):
if (int(last_imp) == n_imp_problem):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=int(last_imp_iter)+1
else:
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=int(last_imp_iter)
for ii in range(int(last_imp)):
control['conv_table'].pop(-1)
else:
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
else:
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
elif (control['method']=='lda+dmft'):
control['conv_table']=read_convergence_table(control)
if (len(control['conv_table'])>0):
linecnt=0
for ii in range(np.shape(control['conv_table'])[0]):
if control['conv_table'][ii][0].strip()=='dft':
linecnt=ii
control['iter_num_outer']=int(control['conv_table'][ii][1])
for ii in range(linecnt, np.shape(control['conv_table'])[0]):
control['conv_table'].pop(-1)
return None
# def find_iter_num_for_restart(control):
# if (control['restart']):
# line_count=sum(1 for line in open(control['top_dir']+'/convergence.log'))
# if (line_count <=1):
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# else:
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# ff=open(control['top_dir']+'/convergence.log', 'r')
# firstline=ff.readline()
# for line in ff:
# temp=line.split()
# if (temp[0] == 'dft'):
# iter_num_outer=int(temp[1])
# ff.close()
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# ff=open(control['top_dir']+'/convergence.log', 'r')
# firstline=ff.readline()
# for line in ff:
# temp=line.split()
# temp1=temp[0]
# if (temp1 == 'impurity'):
# iter_num_impurity=int(temp[2])
# ff.close()
# else:
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# if (control['method']=='lda+dmft'):
# return iter_num_outer
# elif (control['method']=='lqsgw+dmft'):
# return iter_num_impurity
def initial_lattice_directory_setup(control):
os.chdir(control['lattice_directory'])
if control['hdf5']:
files = glob.iglob(control['initial_lattice_dir']+"/*.rst")
for filename in files:
shutil.copy(filename, './')
else:
files = glob.iglob(control['initial_lattice_dir']+"/checkpoint/*.rst")
for filename in files:
shutil.copy(filename, './checkpoint/')
files = glob.iglob(control['initial_lattice_dir']+"/*el_density")
for filename in files:
shutil.copy(filename, './')
if os.path.exists(control['initial_lattice_dir']+'/kpath'):
shutil.copy(control['initial_lattice_dir']+'/kpath', './')
if os.path.exists(control['initial_lattice_dir']+'/ini'):
shutil.copy(control['initial_lattice_dir']+'/ini', './')
if os.path.exists(control['initial_lattice_dir']+'/symmetry_operations'):
shutil.copy(control['initial_lattice_dir']+'/symmetry_operations', './')
if os.path.exists(control['initial_lattice_dir']+'/kpoints'):
shutil.copy(control['initial_lattice_dir']+'/kpoints', './')
files = glob.iglob(control['initial_lattice_dir']+"/*.cif")
for filename in files:
shutil.copy(filename, './')
iter_string='_'+str(control['iter_num_outer'])
shutil.copy(control['initial_lattice_dir']+'/'+control['allfile']+'.out', control['allfile']+iter_string+'.out')
print("initial dft directory setup done", file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def create_comwann_ini(control, wan_hmat):
f=open('comwann.ini','w')
if (control['method']=='lda+dmft'):
f.write(control['lattice_directory']+'\n')
f.write('dft\n')
elif (control['method']=='lqsgw+dmft'):
f.write(control['initial_lattice_dir']+'\n')
f.write('qp\n')
elif (control['method']=='dft'):
f.write('../\n')
f.write('dft\n')
elif (control['method']=='lqsgw'):
f.write('../\n')
f.write('qp\n')
f.write(str(wan_hmat['dis_win_max'])+'\n')
f.write(str(wan_hmat['dis_win_min'])+'\n')
f.write(str(wan_hmat['froz_win_max'])+'\n')
f.write(str(wan_hmat['froz_win_min'])+'\n')
f.write(str(wan_hmat['num_iter'])+'\n')
f.write(str(wan_hmat['dis_num_iter'])+'\n')
if (wan_hmat['write_wan']):
f.write('1\n')
else:
f.write('0\n')
f.write(str(wan_hmat['cut_low'])+'\n')
f.write(str(wan_hmat['cut_froz'])+'\n')
f.write(str(wan_hmat['cut_total'])+'\n')
f.write(str(wan_hmat['rmode'])+'\n')
f.write(str(wan_hmat['radfac'])+'\n')
f.close()
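# Illustrative sketch (not part of the original script): for
# control['method'] == 'lqsgw+dmft', create_comwann_ini() writes comwann.ini
# with one value per line in this order (values hypothetical):
#   /path/to/lqsgw    <- initial_lattice_dir
#   qp
#   50.0              <- dis_win_max
#   -10.0             <- dis_win_min
#   10.0              <- froz_win_max
#   -10.0             <- froz_win_min
#   0                 <- num_iter
#   100               <- dis_num_iter
#   0                 <- write_wan (1 if True)
#   0.4               <- cut_low
#   0.1               <- cut_froz
#   0.0               <- cut_total
#   0                 <- rmode
#   1.0               <- radfac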
def create_comcoulomb_ini(control):
f=open('comcoulomb.ini','w')
f.write(control['initial_lattice_dir']+'\n')
f.write(control['wannier_directory']+'\n')
f.write(str(control['nproc_tau_coulomb'])+'\n')
f.write(str(control['nproc_k_coulomb'])+'\n')
f.write(str(control['proj_win_min'])+'\n')
f.write(str(control['proj_win_max'])+'\n')
f.write('F\n')
f.write(control['u_mode']+'\n')
nimp_orb=0
natom=len(control['impurity_wan'])
for ii in range(natom):
nimp_orb=nimp_orb+len(control['impurity_wan'][ii])
f.write(str(nimp_orb)+'\n')
for iatom in range(natom):
f.write(' '.join(map(str,control['impurity_wan'][iatom]))+' ')
f.write('\n')
f.write('1\n')
f.write('F\n')
f.write('3.0\n')
f.write('F\n')
f.close()
# def create_wannier_inip(wan_hmat):
# # in the wannier directory
# g=open('wannier.inip', 'w')
# num_wann=np.shape(wan_hmat['basis'])[0]
# g.write(str(num_wann)+'\n')
# for ii in range(num_wann):
# if (control['spin_orbit']==False):
# tempstr=[wan_hmat['basis'][ii]['atom'], wan_hmat['basis'][ii]['l'], wan_hmat['basis'][ii]['m'], wan_hmat['basis'][ii]['xaxis'][0], wan_hmat['basis'][ii]['xaxis'][1], wan_hmat['basis'][ii]['xaxis'][2], wan_hmat['basis'][ii]['zaxis'][0], wan_hmat['basis'][ii]['zaxis'][1], wan_hmat['basis'][ii]['zaxis'][2]]
# else:
# tempstr=[wan_hmat['basis'][ii]['atom'], wan_hmat['basis'][ii]['l'], wan_hmat['basis'][ii]['i'], wan_hmat['basis'][ii]['m'], wan_hmat['basis'][ii]['xaxis'][0], wan_hmat['basis'][ii]['xaxis'][1], wan_hmat['basis'][ii]['xaxis'][2], wan_hmat['basis'][ii]['zaxis'][0], wan_hmat['basis'][ii]['zaxis'][1], wan_hmat['basis'][ii]['zaxis'][2]]
# g.write(' '.join(map(str, tempstr))+'\n')
# g.close()
# return None
def read_wan_hmat_basis(control):
# in the wannier directory
inip=np.loadtxt(control['wannier_directory']+'/wannier.inip')
basis_info=[]
if (control['spin_orbit']):
for ii in range(np.shape(inip)[0]):
basis_info.append({'atom':int(inip[ii,0]), 'l':int(inip[ii,1]), 'i':inip[ii,2],'m':inip[ii,3],'xaxis':inip[ii,4:7],'zaxis':inip[ii,7:10], 'ind':ii+1})
else:
for ii in range(np.shape(inip)[0]):
basis_info.append({'atom':int(inip[ii,0]), 'l':int(inip[ii,1]), 'm':int(inip[ii,2]),'xaxis':inip[ii,3:6],'zaxis':inip[ii,6:9], 'ind':ii+1})
print(basis_info, file=control['h_log'],flush=True)
print('reading wannier.inip to get basis information', file=control['h_log'],flush=True)
return basis_info
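# Illustrative sketch (not part of the original script): without spin-orbit
# each row of wannier.inip is parsed as
#   atom  l  m  xaxis(3)  zaxis(3)
# and with spin-orbit as
#   atom  l  i  m  xaxis(3)  zaxis(3)
# e.g. a single pz-like orbital on atom 1 could come from a (hypothetical) row
#   1  1  0  1.0 0.0 0.0  0.0 0.0 1.0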
def check_key_in_string(key,dictionary):
if (key not in dictionary):
print('missing \''+key+'\' in '+dictionary['name'],flush=True)
sys.exit()
return None
def overwrite_key_in_string(key,dictionary,dictionaryname,value,h_log):
if (key in dictionary):
print('\''+key+'\' in '+dictionaryname+' is overwritten', file=h_log,flush=True)
return value
# def dft_rst_file_check():
# check_for_files('*acc_core_dft.rst', h_log)
# check_for_files('*chemical_potential_dft.rst', h_log)
# check_for_files('*cor_norm_dft.rst', h_log)
# check_for_files('*dfi_dft.rst', h_log)
# check_for_files('*dfidot2_dft.rst', h_log)
# check_for_files('*dfidot_dft.rst', h_log)
# check_for_files('*e_bnd_dft.rst', h_log)
# check_for_files('*e_core_dft.rst', h_log)
# check_for_files('*el_density_dft.rst', h_log)
# check_for_files('*eny_dft.rst', h_log)
# check_for_files('*etot_dft.rst', h_log)
# check_for_files('*ev_bnd_*_dft.rst', h_log)
# check_for_files('*ffsmt_dft.rst', h_log)
# check_for_files('*fi_dft.rst', h_log)
# check_for_files('*fidot2_dft.rst', h_log)
# check_for_files('*fidot_dft.rst', h_log)
# check_for_files('*g_full_00_*_dft.rst', h_log)
# check_for_files('*g_loc_0_dft.rst', h_log)
# check_for_files('*gfun_dft.rst', h_log)
# check_for_files('*gfun_old_dft.rst', h_log)
# check_for_files('*gfund_dft.rst', h_log)
# check_for_files('*gfund_old_dft.rst', h_log)
# check_for_files('*n_bnd_dft.rst', h_log)
# check_for_files('*p_f_dft.rst', h_log)
# check_for_files('*pcor_dft.rst', h_log)
# check_for_files('*pcor_old_dft.rst', h_log)
# check_for_files('*pd2_f_dft.rst', h_log)
# check_for_files('*pd_f_dft.rst', h_log)
# check_for_files('*ptnl_dft.rst', h_log)
# check_for_files('*q_f_dft.rst', h_log)
# check_for_files('*qcor_dft.rst', h_log)
# check_for_files('*qcor_old_dft.rst', h_log)
# check_for_files('*qd2_f_dft.rst', h_log)
# check_for_files('*qd_f_dft.rst', h_log)
# check_for_files('*restart_ubi.rst', h_log)
# check_for_files('*ro_core_dft.rst', h_log)
# check_for_files('*v_intr_h_dft.rst', h_log)
# check_for_files('*v_intr_xc_dft.rst', h_log)
# check_for_files('*v_mt_h_dft.rst', h_log)
# check_for_files('*v_mt_xc_dft.rst', h_log)
# check_for_files('*z_bnd_*_dft.rst', h_log)
# return None
# def string_addwhitespace(string, stringsize):
# stringout=string
# if stringsize > len(string):
# stringout=string+' '*(stringsize-len(string))
# return stringout
def find_all_in_string(str, ch):
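    """Yield every index at which character ch occurs in str, e.g. list(find_all_in_string('a b c', ' ')) == [1, 3]."""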
for i, ltr in enumerate(str):
if ltr == ch:
yield i
def read_convergence_table(control):
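    """Parse convergence.log in the top directory into a list of rows, using the header line to define
    fixed-width column boundaries; return an empty list if the file is missing or has no data lines."""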
if os.path.exists(control['top_dir']+'/convergence.log'):
with open(control['top_dir']+'/convergence.log', 'r') as logfile:
tmp=logfile.readlines()
nstep=len(tmp)-2
if (nstep>0):
endind=list(find_all_in_string(tmp[1],' '))[::2]+[len(tmp[1])-1]
startind=[0]+(np.array(list(find_all_in_string(tmp[1],' '))[1::2])+1).tolist()
ncolumn=len(endind)
            f=open(control['top_dir']+'/convergence.log', 'r')
f.readline()
f.readline()
convergence_table=[]
for lines in f:
eachline=[]
for ii in range(ncolumn):
eachline.append(lines.rstrip()[startind[ii]:endind[ii]])
if (len(eachline[0])>0):
convergence_table.append(eachline)
f.close()
else:
convergence_table=[]
else:
convergence_table=[]
return convergence_table
def generate_initial_self_energy(control,imp):
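    """Create the initial sig.dat for the impurity solver: either copy a user-provided self-energy
    (and optional impurity configuration directories), or build one from the double-counting values
    on the Matsubara frequency grid; the result is tagged with the current iteration label."""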
os.chdir(control['impurity_directory'])
if ('initial_self_energy' in control):
shutil.copy(control['initial_self_energy'], './sig.dat')
if ('initial_impurity_dir' in control):
initial_impurity_dirname=os.path.abspath(os.path.dirname(control['initial_impurity_dir']))
directories = glob.glob(initial_impurity_dirname+"/*/")
for directory_name in directories:
dest_dir=directory_name.split('/')[-2]
files = glob.iglob(os.path.abspath(directory_name)+"/config*")
for filename in files:
shutil.copy(filename, control['impurity_directory']+'/'+dest_dir)
else:
dc=np.loadtxt(control['dc_directory']+'/dc.dat')
beta=imp['beta']
n_omega=control['n_omega']
omega=control['omega']
cnt=0
dclist=[]
for ii in sorted(set(control['impurity_problem_equivalence'])):
for jj in sorted(set(imp[str(abs(ii))]['impurity_matrix'].flatten().tolist())-{0}):
if (imp[str(abs(ii))]['para']):
dclist=dclist+list(dc[(2*cnt):(2*cnt+2)])
else:
dclist=dclist+list(dc[(2*cnt):(2*cnt+2)]-np.array([0.001*np.sign(ii), 0.0]))
cnt=cnt+1
sig_table=[]
for jj in range(control['n_omega']):
sig_omega=[control['omega'][jj]]+dclist
sig_table.append(sig_omega)
with open('./sig.dat', 'w') as outputfile:
outputfile.write(tabulate(sig_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
if (control['method']=='lqsgw+dmft'):
iter_string='_0'
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_0'
labeling_file('./sig.dat', iter_string)
print('initial_self_energy generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def prepare_initial_ef(control):
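    """Write a zero initial guess for the Fermi level to ef.dat in the lowh directory."""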
os.chdir(control['lowh_directory'])
f=open('ef.dat','w')
f.write('0.0\n')
f.close()
os.chdir(control['top_dir'])
return None
def delta_postprocessing(control,imp):
write_transformation_matrix(control,control['lowh_directory']+'/local_spectral_matrix_ef.dat')
cal_projected_mean_field_diagonal(control,imp)
cal_dc_diagonal(control)
cal_zinv_m1_diagonal(control)
cal_e_imp_diagonal(control)
delta_causality=cal_hyb_diagonal(control,imp)
if (delta_causality ==0):
print('delta causality broken', file=control['h_log'],flush=True)
sys.exit()
return delta_causality
def cal_dc_diagonal(control):
os.chdir(control['dc_directory'])
dc_mat=read_impurity_mat_static(control,control['dc_directory']+'/dc_mat.dat')
h=open('./dc.dat', 'w')
for ii in sorted(set(control['impurity_problem_equivalence'])):
dc_vec=imp_from_mat_to_array(dc_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
for jj in range(len(dc_vec)):
h.write(str(np.real(dc_vec[jj]))+' '+str(np.imag(dc_vec[jj]))+' ')
h.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./dc.dat', iter_string)
print('dc.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
# def cal_dc_diagonal_new(control):
# os.chdir(control['dc_directory'])
# dc_mat=read_impurity_mat_static(control,control['dc_directory']+'/dc_mat.dat')
# h=open('./dc.dat', 'w')
# for ii in sorted(set(control['impurity_problem_equivalence'])):
# dc_vec=imp_from_mat_to_array(dc_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
# for jj in range(len(dc_vec)):
# h.write(str(np.real(dc_vec[jj]))+' '+str(np.imag(dc_vec[jj]))+' ')
# h.close()
# if (control['method']=='lqsgw+dmft'):
# iter_string='_'+str(control['iter_num_impurity'])
# elif (control['method']=='lda+dmft'):
# iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
# labeling_file('./dc.dat', iter_string)
# print('dc.dat generation done', file=control['h_log'],flush=True)
# os.chdir(control['top_dir'])
# return None
def cal_zinv_m1_diagonal(control):
os.chdir(control['dc_directory'])
if os.path.isfile(control['dc_directory']+'/zinv_m1_mat.dat'):
zinv_m1_mat=read_impurity_mat_static(control,control['dc_directory']+'/zinv_m1_mat.dat')
h=open('./zinv_m1.dat', 'w')
for ii in sorted(set(control['impurity_problem_equivalence'])):
zinv_m1_vec=imp_from_mat_to_array(zinv_m1_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
for jj in range(len(zinv_m1_vec)):
h.write(str(np.real(zinv_m1_vec[jj]))+' '+str(np.imag(zinv_m1_vec[jj]))+' ')
h.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./zinv_m1.dat', iter_string)
print('zinv_m1.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def vec_from_mat_dynamic(mat,trans):
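    """Rotate a frequency-dependent matrix into the basis defined by trans and return the diagonal at each frequency."""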
    vec=np.zeros((np.shape(mat)[0], np.shape(mat)[1]), dtype='complex')
    for ii in range(np.shape(mat)[0]):
        vec[ii,:]=np.diag(np.dot(np.transpose(np.conj(trans)), np.dot(mat[ii,:,:], trans)))
return vec
def prepare_impurity_solver(control,wan_hmat,imp):
# cal_trans_from_patrick(control, imp)
delta=array_impurity_dynamic(control,imp,control['lowh_directory']+'/delta.dat')
write_json_all(control,imp,delta,'hyb.json')
e_imp=generate_mat_from_array_impurity_static(control,imp,control['lowh_directory']+'/e_imp.dat')
trans_basis=read_impurity_mat_static(control,control['lowh_directory']+'/trans_basis.dat')
for key, value in imp.items():
if (not (isinstance(imp[key], dict))):
continue
nimp_orb=len(imp[key]['impurity_matrix'])
os.chdir(control['impurity_directory']+'/'+key)
if (control['spin_orbit']):
ndim=nimp_orb
e_imp_key=np.zeros((ndim, ndim))
trans_key=np.zeros((ndim, ndim))
# equivalence_key=np.zeros((ndim,ndim),dtype='int')
e_imp_key=np.real(e_imp[key])
trans_key=np.real(trans_basis[key])
# equivalence_key=array([[(lambda ii: str(ii) if str(ii)!='0' else '')(ii) for ii in row] for row in imp[key]['impurity_matrix']])
equivalence_key=list(map(lambda row: list(map(lambda x: str(x) if x!='0' else '', list(map(str, row)))), imp[key]['impurity_matrix']))
else:
ndim=nimp_orb*2
e_imp_key=np.zeros((ndim, ndim))
trans_key=np.zeros((ndim, ndim))
equivalence_key_int_mat=np.array(imp[key]['impurity_matrix'])
equivalence_key_int_mat_all=np.zeros((ndim, ndim),dtype='int')
if (imp[key]['para']):
mkey=key
shiftval=0
else:
mkey=str(-int(key))
shiftval=np.amax(equivalence_key_int_mat)
print(mkey, shiftval, file=control['h_log'],flush=True)
#
# On the next line ii>0 evaluates to 1 if ii>0 and evaluates to 0 otherwise
# equivalence_mkey_int_mat=equivalence_key_int_mat+shiftval*array([[(lambda ii: ii>0)(ii) for ii in row] for row in equivalence_key_int_mat])
# equivalence_mkey_int_mat=equivalence_key_int_mat+shiftval*array(map(lambda row: map(int,row), equivalence_key_int_mat>0))
equivalence_mkey_int_mat=equivalence_key_int_mat+shiftval*(equivalence_key_int_mat>0)
e_imp_key[0:nimp_orb,0:nimp_orb]=np.real(e_imp[key])
e_imp_key[nimp_orb:(2*nimp_orb),nimp_orb:(2*nimp_orb)]=np.real(e_imp[mkey])
trans_key[0:nimp_orb,0:nimp_orb]=np.real(trans_basis[key])
trans_key[nimp_orb:(2*nimp_orb),nimp_orb:(2*nimp_orb)]=np.real(trans_basis[mkey])
equivalence_key_int_mat_all[0:nimp_orb,0:nimp_orb]=equivalence_key_int_mat
equivalence_key_int_mat_all[nimp_orb:(2*nimp_orb),nimp_orb:(2*nimp_orb)]=equivalence_mkey_int_mat
equivalence_key=list(map(lambda row: list(map(lambda x: str(x) if x!='0' else '', list(map(str, row)))), equivalence_key_int_mat_all))
write_params_json(control,imp[key],e_imp_key,trans_key,equivalence_key,imp['beta'])
if (control['method']=='lqsgw+dmft'):
write_dynamical_f0_json(imp[key])
os.chdir(control['top_dir'])
return None
def run_impurity_solver(control,imp):
green={}
sigma_bare={}
sigma={}
sigma_to_delta={}
for key, value in imp.items():
if (not (isinstance(imp[key], dict))):
continue
os.chdir(control['impurity_directory']+'/'+key)
solve_impurity_patrick(control)
measure_impurity_patrick(control)
green[key], sigma_bare[key], sigma[key], sigma_to_delta[key]=impurity_postprocessing(control, imp, key)
os.chdir(control['impurity_directory'])
green_table=[]
sigma_table=[]
sigma_to_delta_table=[]
sigma_bare_table=[]
for jj in range(control['n_omega']):
green_omega=[control['omega'][jj]]
sigma_omega=[control['omega'][jj]]
sigma_to_delta_omega=[control['omega'][jj]]
sigma_bare_omega=[control['omega'][jj]]
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
for kk in range(n_iio):
if (ii<0):
pp=kk+n_iio
else:
pp=kk
green_omega=green_omega+[np.real(green[str(abs(ii))][jj,pp]),np.imag(green[str(abs(ii))][jj,pp])]
sigma_omega=sigma_omega+[np.real(sigma[str(abs(ii))][jj,pp]),np.imag(sigma[str(abs(ii))][jj,pp])]
sigma_to_delta_omega=sigma_to_delta_omega+[np.real(sigma_to_delta[str(abs(ii))][jj,pp]),np.imag(sigma_to_delta[str(abs(ii))][jj,pp])]
sigma_bare_omega=sigma_bare_omega+[np.real(sigma_bare[str(abs(ii))][jj,pp]),np.imag(sigma_bare[str(abs(ii))][jj,pp])]
green_table.append(green_omega)
sigma_table.append(sigma_omega)
sigma_to_delta_table.append(sigma_to_delta_omega)
sigma_bare_table.append(sigma_bare_omega)
with open('./gimp.dat', 'w') as outputfile:
outputfile.write(tabulate(green_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
with open('./sig_bare.dat', 'w') as outputfile:
outputfile.write(tabulate(sigma_bare_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
with open('./sig_smth.dat', 'w') as outputfile:
outputfile.write(tabulate(sigma_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
with open('./sig.dat', 'w') as outputfile:
outputfile.write(tabulate(sigma_to_delta_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
shutil.copy('./sig.dat', control['top_dir'])
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./gimp.dat',iter_string)
labeling_file('./sig_bare.dat',iter_string)
labeling_file('./sig_smth.dat',iter_string)
labeling_file('./sig.dat',iter_string)
os.chdir(control['top_dir'])
def generate_mat_from_array_impurity_dynamic(control,imp, filename):
os.chdir(control['impurity_directory'])
dat=np.loadtxt(filename)
start_array={}
end_array={}
last_index=1
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
start_array[ii]=last_index
end_array[ii]=last_index+2*n_iio
last_index=last_index+2*n_iio
# print(start_array)
# print(end_array)
matout={}
for ii in sorted(set(control['impurity_problem_equivalence'])):
nimp_orb=len(imp[str(abs(ii))]['impurity_matrix'])
tempmat=np.zeros((control['n_omega'],nimp_orb,nimp_orb), dtype='complex')
for iomega in range(control['n_omega']):
tempmat2=dat[iomega,start_array[ii]:end_array[ii]]
tempmat[iomega,:,:]=imp_from_array_to_mat(tempmat2[0::2]+tempmat2[1::2]*1j,imp[str(abs(ii))]['impurity_matrix'])
matout[str(ii)]=tempmat
return matout
def generate_mat_from_array_impurity_static(control,imp, filename):
os.chdir(control['impurity_directory'])
dat=np.loadtxt(filename)
start_array={}
end_array={}
last_index=0
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
start_array[ii]=last_index
end_array[ii]=last_index+2*n_iio
last_index=last_index+2*n_iio
# print(start_array)
# print(end_array)
matout={}
for ii in sorted(set(control['impurity_problem_equivalence'])):
tempmat2=dat[start_array[ii]:end_array[ii]]
matout[str(ii)]=imp_from_array_to_mat(tempmat2[0::2]+tempmat2[1::2]*1j,imp[str(abs(ii))]['impurity_matrix'])
return matout
def array_impurity_static(control,imp, filename):
os.chdir(control['impurity_directory'])
dat=np.loadtxt(filename)
start_array={}
end_array={}
last_index=0
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
start_array[ii]=last_index
end_array[ii]=last_index+2*n_iio
last_index=last_index+2*n_iio
# print(start_array)
# print(end_array)
matout={}
for ii in sorted(set(control['impurity_problem_equivalence'])):
tempmat2=dat[start_array[ii]:end_array[ii]]
matout[str(ii)]=tempmat2[0::2]+tempmat2[1::2]*1j
return matout
def array_impurity_dynamic(control,imp, filename):
os.chdir(control['impurity_directory'])
dat=np.loadtxt(filename)
start_array={}
end_array={}
last_index=1
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
start_array[ii]=last_index
end_array[ii]=last_index+2*n_iio
last_index=last_index+2*n_iio
# print(start_array)
# print(end_array)
matout={}
for ii in sorted(set(control['impurity_problem_equivalence'])):
n_iio=np.amax(imp[str(abs(ii))]['impurity_matrix'])
tempmat=np.zeros((control['n_omega'],n_iio), dtype='complex')
for iomega in range(control['n_omega']):
tempmat2=dat[iomega,start_array[ii]:end_array[ii]]
tempmat[iomega,:]=tempmat2[0::2]+tempmat2[1::2]*1j
matout[str(ii)]=tempmat
return matout
def cal_projected_mean_field_diagonal(control,imp):
os.chdir(control['lowh_directory'])
hmat=read_impurity_mat_static(control,control['lowh_directory']+'/e_projected_mat.dat')
h=open('./projected_eig.dat', 'w')
for ii in sorted(set(control['impurity_problem_equivalence'])):
h_vec=imp_from_mat_to_array(hmat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
for jj in range(len(h_vec)):
h.write(str(np.real(h_vec[jj]))+' '+str(np.imag(h_vec[jj]))+' ')
h.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./projected_eig.dat', iter_string)
print('projected_eig.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def cal_e_imp_diagonal(control):
os.chdir(control['lowh_directory'])
eig=np.loadtxt('projected_eig.dat')
dc=np.loadtxt(control['dc_directory']+'/dc.dat')
f=open('e_imp.dat', 'w')
f.write(" ".join(map(str, eig-dc))+'\n')
f.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./e_imp.dat', iter_string)
print('e_imp.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def imp_from_array_to_mat(vecin,equivalence_mat):
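    """Expand a compressed vector of independent elements into a full nimp_orb x nimp_orb matrix.
    equivalence_mat holds 1-based indices into vecin (0 marks elements fixed to zero), so e.g.
    equivalence_mat = [[1, 0], [0, 2]] with vecin = [a, b] yields [[a, 0], [0, b]].
    """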
nimp_orb=len(equivalence_mat)
matout=np.zeros((nimp_orb, nimp_orb), dtype='complex')
for ii in range(nimp_orb):
for jj in range(nimp_orb):
if (equivalence_mat[ii,jj]!=0):
matout[ii,jj]=vecin[equivalence_mat[ii,jj]-1]
return matout
def imp_from_mat_to_array(matin,equivalence_mat):
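    """Inverse of imp_from_array_to_mat: average all matrix elements sharing the same 1-based label in
    equivalence_mat into a single entry of the returned vector (degeneracy-weighted mean)."""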
n_iio=np.amax(equivalence_mat)
vecout=np.zeros(n_iio, dtype='complex')
degen_vec=np.zeros(n_iio, dtype='int')
nimp_orb=len(matin)
# print(nimp_orb)
# print(equivalence_mat)
# print(type(equivalence_mat))
# print(matin)
# print(type(matin))
for ii in range(nimp_orb):
for jj in range(nimp_orb):
            if (equivalence_mat[ii,jj]!=0):
                ind=equivalence_mat[ii,jj]-1
vecout[ind]=vecout[ind]+matin[ii,jj]
degen_vec[ind]=degen_vec[ind]+1
vecout=vecout/(degen_vec*1.0)
return vecout
# def read_trans_basis(control,filename):
# trans_basis={}
# g=open(filename, 'r')
# for ii in sorted(set(control['impurity_problem_equivalence'])):
# prob_ind=con3trol['impurity_problem_equivalence'].index(ii)
# nimp_orb=len(control['impurity_wan'][prob_ind])
# transmat=np.zeros((nimp_orb,nimp_orb), dtype='complex')
# for jj in range(nimp_orb):
# transmat2=array(map(float,g.readline().split()))
# transmat[jj,:]=transmat2[0::2]+transmat2[1::2]*1j
# trans_basis[str(ii)]=transmat
# return trans_basis
# def read_impurity_vec_static(control,filename):
# imp_basis={}
# g=open(filename, 'r')
# for ii in sorted(set(control['impurity_problem_equivalence'])):
# prob_ind=control['impurity_problem_equivalence'].index(ii)
# nimp_orb=len(control['impurity_wan'][prob_ind])
# impmat=np.zeros((nimp_orb,nimp_orb), dtype='complex')
# for jj in range(nimp_orb):
# impmat2=array(map(float,g.readline().split()))
# impmat[jj,:]=impmat2[0::2]+impmat2[1::2]*1j
# imp_basis[str(ii)]=impmat
# return imp_basis
def read_impurity_mat_static(control,filename):
imp_basis={}
g=open(filename, 'r')
for ii in sorted(set(control['impurity_problem_equivalence'])):
prob_ind=control['impurity_problem_equivalence'].index(ii)
nimp_orb=len(control['impurity_wan'][prob_ind])
impmat=np.zeros((nimp_orb,nimp_orb), dtype='complex')
# for jj in range(nimp_orb):
# impmat2=array([float(x) for x in g.readline().split()])
# for kk in range(0,nimp_orb*2,2):
# impmat[jj,kk]=impmat2[kk]+impmat2[kk+1]*1j
for jj in range(nimp_orb):
impmat2=np.array(list(map(float,g.readline().split())))
impmat[jj,:]=impmat2[0::2]+impmat2[1::2]*1j
imp_basis[str(ii)]=impmat
return imp_basis
def read_impurity_mat_dynamic(control,filename):
imp_basis={}
dat=np.loadtxt(filename)
print(np.shape(dat))
start_array={}
end_array={}
last_index=1
for ii in sorted(set(control['impurity_problem_equivalence'])):
prob_ind=control['impurity_problem_equivalence'].index(ii)
nimp_orb=len(control['impurity_wan'][prob_ind])
start_array[ii]=last_index
end_array[ii]=last_index+2*nimp_orb**2
last_index=last_index+2*nimp_orb**2
# print(start_array)
# print(end_array)
for ii in sorted(set(control['impurity_problem_equivalence'])):
prob_ind=control['impurity_problem_equivalence'].index(ii)
nimp_orb=len(control['impurity_wan'][prob_ind])
dat3=np.reshape(dat[:,start_array[ii]:end_array[ii]], (control['n_omega'], 2, nimp_orb,nimp_orb), order='F')
imp_basis[str(ii)]=dat3[:,0,:,:]+dat3[:,1,:,:]*1j
return imp_basis
def cal_hyb_diagonal(control,imp):
os.chdir(control['lowh_directory'])
hyb_mat=read_impurity_mat_dynamic(control,control['lowh_directory']+'/delta_mat.dat')
# print hyb_mat
hyb_table=[]
for jj in range(control['n_omega']):
hyb_omega=[control['omega'][jj]]
for ii in sorted(set(control['impurity_problem_equivalence'])):
hyb_vec=imp_from_mat_to_array(hyb_mat[str(ii)][jj,:,:],imp[str(abs(ii))]['impurity_matrix'])
hyb_omega=hyb_omega+np.reshape(np.stack((np.real(hyb_vec), np.imag(hyb_vec)), 0), (len(hyb_vec)*2), order='F').tolist()
hyb_table.append(hyb_omega)
with open(control['lowh_directory']+'/delta.dat', 'w') as outputfile:
outputfile.write(tabulate(hyb_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./delta.dat', iter_string)
shutil.copy('./delta.dat', control['top_dir'])
print('delta.dat generation done', file=control['h_log'],flush=True)
causality=test_causality('./delta.dat')
    os.chdir(control['top_dir'])
return causality
# def cal_sig_dc_diagonal(control,imp):
# os.chdir(control['dc_directory'])
# trans_basis=read_impurity_mat_static(control,control['lowh_directory']+'/trans_basis.dat')
# sig_mat=read_impurity_mat_dynamic(control,control['dc_directory']+'/delta_mat.dat')
# h=open('./Delta.inp', 'w')
# print hyb_mat
# for jj in range(control['n_omega']):
# h.write(str(control['omega'][jj])+' ')
# for ii in sorted(set(control['impurity_problem_equivalence'])):
# hyb_mat_new=dot(dot(trans_basis[str(ii)], hyb_mat[str(ii)][jj,:,:]), conj(np.transpose(trans_basis[str(ii)])))
# hyb_vec=imp_from_mat_to_array(hyb_mat_new,imp[str(abs(ii))]['impurity_matrix'])
# for kk in range(len(hyb_vec)):
# h.write(str(np.real(hyb_vec[kk]))+' '+str(np.imag(hyb_vec[kk]))+' ')
# h.write('\n')
# h.close()
# if (control['method']=='lqsgw+dmft'):
# iter_string='_'+str(control['iter_num_impurity'])
# elif (control['method']=='lda+dmft'):
# iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
# labeling_file('./Delta.inp', iter_string)
# print('Delta.inp generation done', file=control['h_log'],flush=True)
# causality=test_causality('./Delta.inp')
# return causality
def labeling_file(filename,iter_string):
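    """Archive a file under an iteration-tagged name, e.g. labeling_file('./sig.dat', '_3') copies sig.dat to sig_3.dat."""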
dirname=os.path.abspath(os.path.dirname(filename))
filenameonly=os.path.basename(filename)
temp=filenameonly.split('.')
shutil.copy(dirname+'/'+filenameonly, dirname+"/"+'.'.join(temp[0:-1])+iter_string+'.'+temp[-1])
return None
def directory_setup(control):
if (control['method'] =='lda+dmft'):
#lattice
tempdir=control['lattice_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
if not control['hdf5']:
if len(glob.glob(tempdir+'/checkpoint'))==0 : os.mkdir(tempdir+'/checkpoint')
elif (control['method'] =='lqsgw+dmft'):
tempdir=control['coulomb_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
#wannier90 directory
tempdir=control['wannier_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
tempdir=control['dc_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
# ctqmc
tempdir=control['impurity_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
for ii in range(1,np.amax(control['impurity_problem_equivalence'])+1):
tempdir=control['impurity_directory']+'/'+str(ii)
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
tempdir=control['dc_directory']+'/'+str(ii)
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
# delta
tempdir=control['lowh_directory']
if len(glob.glob(tempdir))==0 : os.mkdir(tempdir)
return None
def check_for_files(filepath, h_log):
if len(glob.glob(filepath))==0:
        print('missing:', filepath, file=h_log,flush=True)
quit()
return None
def gaussian_broadening_linear(x, y, w1, temperature, cutoff):
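    """Smooth y(x) with a Gaussian whose width grows linearly with frequency, width = w0 + w1*x, where w0 is set by
    the temperature (8.6173303e-5 is the Boltzmann constant in eV/K). Points beyond the cutoff (plus a three-sigma
    margin), or too close to either end of the grid for a full window, are returned unchanged."""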
# broadening starts at the second matsubara points
    # debugging output (commented out to avoid flooding the log on every call)
    # print(np.shape(x))
    # print(np.shape(y))
    # print(x)
    # print(y)
w0=(1.0-3.0*w1)*np.pi*temperature*8.6173303*10**-5
width_array=w0+w1*x
cnt=0
ynew=np.zeros(len(y), dtype='complex')
for x0 in x:
if (x0>cutoff+(w0+w1*cutoff)*3.0):
ynew[cnt]=y[cnt]
else:
if ((x0>3*width_array[cnt]) and ((x[-1]-x0)>3*width_array[cnt])):
                dist=1.0/np.sqrt(2*np.pi)/width_array[cnt]*np.exp(-(x-x0)**2/2.0/width_array[cnt]**2)
ynew[cnt]=np.sum(dist*y)/np.sum(dist)
else:
ynew[cnt]=y[cnt]
cnt=cnt+1
return ynew
def solve_impurity_patrick(control):
# execute CTQMC
# chdir_string='cd '+control['top_dir']+'/impurity; '
print('-----------------------', file = sys.stdout, flush=True)
print('run CTQMC', file = sys.stdout, flush=True)
print('-----------------------', file = sys.stdout, flush=True)
print('-----------------------', file = sys.stderr, flush=True)
print('run CTQMC', file = sys.stderr, flush=True)
print('-----------------------', file = sys.stderr, flush=True)
run_string=control['mpi_prefix_impurity']+' '+control['comsuitedir']+"/CTQMC params"
cmd = run_string
print(cmd, file=control['h_log'],flush=True)
# with open('./ctqmc.out', 'w') as logfile, open('./ctqmc.err', 'w') as errfile:
# ret = subprocess.call(cmd, shell=True,stdout = logfile, stderr = errfile)
ret = subprocess.call(cmd, shell=True)
if ret != 0:
print("Error in CTQMC. Check standard error file for error message.", file=control['h_log'],flush=True)
sys.exit()
return None
def measure_impurity_patrick(control):
print('-----------------------', file = sys.stdout, flush=True)
    print('run EVALSIM', file = sys.stdout, flush=True)
print('-----------------------', file = sys.stdout, flush=True)
print('-----------------------', file = sys.stderr, flush=True)
    print('run EVALSIM', file = sys.stderr, flush=True)
print('-----------------------', file = sys.stderr, flush=True)
run_string= control['mpi_prefix_impurity']+' '+control['comsuitedir']+"/EVALSIM params"
cmd = run_string
print(cmd, file=control['h_log'],flush=True)
# with open('./evalsim.out', 'w') as logfile, open('./evalsim.err', 'w') as errfile :
# ret = subprocess.call(cmd,shell=True, stdout=logfile, stderr=errfile)
ret = subprocess.call(cmd,shell=True)
if ret != 0:
print("Error in EVALSIM. Check standard error file for error message.", file=control['h_log'],flush=True)
sys.exit()
print("measure self-energy done", file=control['h_log'],flush=True)
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
# shutil.copy("./evalsim.out", "./evalsim"+iter_string+'.log')
return None
def write_json_all(control,imp,data_array,json_name):
# assume that it is diagonal matrix
for key, value in imp.items(): # for the ordered phase this part should be fixed
json_dict={}
if (not (isinstance(imp[key], dict))):
continue
n_iio=np.amax(imp[key]['impurity_matrix'])
if (imp[key]['para']):
for kk in range(n_iio):
orb_name=str(kk+1)
json_dict[orb_name]={}
json_dict[orb_name]['beta']=imp['beta']
json_dict[orb_name]['real']=np.real(data_array[key][:,kk]).tolist()
json_dict[orb_name]['imag']=np.imag(data_array[key][:,kk]).tolist()
else:
mkey=str(-int(key))
for kk in range(n_iio):
orb_name=str(kk+1)
json_dict[orb_name]={}
json_dict[orb_name]['beta']=imp['beta']
json_dict[orb_name]['real']=np.real(data_array[key][:,kk]).tolist()
json_dict[orb_name]['imag']=np.imag(data_array[key][:,kk]).tolist()
orb_name=str(kk+1+n_iio)
json_dict[orb_name]={}
json_dict[orb_name]['beta']=imp['beta']
json_dict[orb_name]['real']=np.real(data_array[mkey][:,kk]).tolist()
json_dict[orb_name]['imag']=np.imag(data_array[mkey][:,kk]).tolist()
with open(control['impurity_directory']+'/'+key+'/'+json_name,'w') as outfile:
json.dump(json_dict, outfile,sort_keys=True, indent=4, separators=(',', ': '))
print(json_name+" written", file=control['h_log'],flush=True)
return None
def read_json(jsonfile):
Sig_temp=json.load(open(jsonfile))
n_omega=len(Sig_temp['1']['real'])
n_iio=len(Sig_temp.keys())
dat1=np.zeros((n_omega, n_iio), dtype='complex')
for key, value in Sig_temp.items():
dat1[:,int(key)-1]=np.array(Sig_temp[key]['real'])+np.array(Sig_temp[key]['imag'])*1j
return dat1
def read_function_from_jsonfile(jsonfile, dict_name):
Sig_temp=json.load(open(jsonfile))['partition'][dict_name]
n_omega=len(Sig_temp['1']["function"]['real'])
n_iio=len(Sig_temp.keys())
dat1=np.zeros((n_omega, n_iio), dtype='complex')
for key, value in Sig_temp.items():
dat1[:,int(key)-1]=np.array(Sig_temp[key]["function"]['real'])+np.array(Sig_temp[key]["function"]['imag'])*1j
return dat1
def impurity_postprocessing(control, imp, key):
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./params.obs.json',iter_string)
labeling_file('./params.meas.json',iter_string)
histo_temp=json.load(open('params.obs.json'))['partition']["expansion histogram"]
histo=np.zeros((np.shape(histo_temp)[0], 2))
histo[:,0]=np.arange(np.shape(histo_temp)[0])
histo[:,1]=histo_temp
nn=json.load(open('params.obs.json'))['partition']["scalar"]["N"][0]
ctqmc_sign=json.load(open('params.obs.json'))['partition']["sign"][0]
# histogram
firstmoment=np.sum(histo[:,0]*histo[:,1])/np.sum(histo[:,1])
secondmoment=np.sum((histo[:,0]-firstmoment)**2*histo[:,1])/np.sum(histo[:,1])
thirdmoment=np.sum((histo[:,0]-firstmoment)**3*histo[:,1])/np.sum(histo[:,1])/secondmoment**(3.0/2.0)
print('histogram information for impurity_'+imp['name'], file=control['h_log'],flush=True)
print('first moment', firstmoment, file=control['h_log'],flush=True)
print('second moment', secondmoment, file=control['h_log'],flush=True)
print('third moment', thirdmoment, file=control['h_log'],flush=True)
# previous_iter_string='_'.join(map(str,iter_string.split('_')[:-1]))+'_'+str(int(iter_string.split('_')[-1])-1)
green=read_function_from_jsonfile('./params.obs.json',"green")
sigma_bare=read_function_from_jsonfile('./params.obs.json',"self-energy")
sigma_old=array_impurity_dynamic(control,imp,control['impurity_directory']+'/sig.dat')
sigma=np.zeros(np.shape(sigma_bare), dtype='complex')
sigma_to_delta=np.zeros(np.shape(sigma_bare), dtype='complex')
n_iio=np.amax(imp[key]['impurity_matrix'])
sig_causality=1
for jj in range(n_iio):
sigma[:,jj]=gaussian_broadening_linear(control['omega'], sigma_bare[:,jj], 0.05, imp['temperature'], imp[key]['green_cutoff'])
if ((np.imag(sigma[:,jj])>0.0).any()):
sig_causality=0
sigma_to_delta[:,jj]=sigma_old[key][:,jj]
else:
sigma_to_delta[:,jj]=(sigma_old[key][:,jj])*(1.0-control['sigma_mix_ratio'])+(sigma[:,jj])*control['sigma_mix_ratio']
if (not imp[key]['para']):
for jj in range(n_iio, n_iio*2):
mkey=str(-int(key))
sigma[:,jj]=gaussian_broadening_linear(control['omega'], sigma_bare[:,jj], 0.05, imp['temperature'], imp[key]['green_cutoff'])
if ((np.imag(sigma[:,jj])>0.0).any()):
sig_causality=0
sigma_to_delta[:,jj]=sigma_old[mkey][:,jj-n_iio]
else:
sigma_to_delta[:,jj]=(sigma_old[mkey][:,jj-n_iio])*(1.0-control['sigma_mix_ratio'])+(sigma[:,jj])*control['sigma_mix_ratio']
if (imp[key]['para']):
sig_diff_ave=np.sqrt(np.mean(np.absolute((sigma_to_delta-sigma_old[key]))**2))
else:
mkey=str(-int(key))
        sig_diff_ave=np.sqrt(np.mean((np.absolute(sigma_to_delta[:,0:n_iio]-sigma_old[key]))**2+(np.absolute(sigma_to_delta[:,n_iio:(2*n_iio)]-sigma_old[mkey]))**2)/2.0)
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from oa_dev import *
from oa_filter import *
from oa_ls import *
def get_dice_scores(segmentation, ground_truth, classes):
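    """Dice coefficient 2*|intersection|/(|A|+|B|) between segmentation and ground truth for each class label in
    1..classes; as written, only the score of the last class in the loop is returned."""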
dice_scores = []
for i in range(1,classes+1):
binary_gt = (ground_truth == i).astype(np.uint8)
binary_seg = (segmentation == i).astype(np.uint8)
intersect = np.logical_and(binary_gt, binary_seg)
sum_binary_gt = np.sum(binary_gt)
sum_binary_seg = np.sum(binary_seg)
class_dice_score = np.sum(intersect)*2 / (sum_binary_gt+sum_binary_seg)
return class_dice_score
def get_outlier_fraction(pred_array, gt_array, min_outlier_error):
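    """Fraction of positions where |prediction - ground truth| exceeds min_outlier_error; zero entries are treated as missing."""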
#print("get outlier fraction")
pred_array[pred_array==0] = np.nan
gt_array[gt_array==0] = np.nan
both_nan = np.bitwise_and(np.isnan(pred_array), np.isnan(gt_array))
pred_array=pred_array[~both_nan]
gt_array=gt_array[~both_nan]
diff = pred_array - gt_array
diff = diff[~np.isnan(diff)]
diff = np.abs(diff)
outliers = diff>min_outlier_error
outlier_frac = np.sum(outliers)*1.0/len(gt_array)
#print(outlier_frac*100)
return outlier_frac
def shift_hue(img, shift):
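    """Rotate the hue channel of an RGB image by `shift`; OpenCV stores hue in [0, 180), hence the modulo."""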
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
hsv[:,:,0] = (hsv[:,:,0] + shift)%180
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return rgb
def stack_ground_truth_pred(gt, pred, scan_img, proj_img, show_imgs=False):
scan_img = scan_img.astype(np.uint16)
scan_img = scan_img *4
scan_img[scan_img>254] = 255
scan_img = scan_img.astype(np.uint8)
binary_gt = np.where(gt>0, 1, 0)
binary_pred = np.where(pred>0, 1, 0)
stacked_gt_pred = np.dstack((np.zeros_like(binary_gt), binary_pred*255, binary_gt*255))
stacked_gt_pred = stacked_gt_pred.astype(np.uint8)
stacked_gt_pred = shift_hue(stacked_gt_pred, 120)
if show_imgs:
fig,ax = plt.subplots(1,3)
ax[0].imshow(stacked_gt_pred)
ax[1].imshow(scan_img)
ax[2].imshow(proj_img)
plt.show()
return stacked_gt_pred
def plot_pred_gt(pred_array, gt_array):
plt.plot(pred_array, label="Prediction")
plt.plot(gt_array, label="Ground truth")
plt.legend()
plt.ylabel("Centre of scan line, column index")
plt.xlabel("Row index")
plt.show()
def overlap_pred_gt(binary_pred, binary_gt):
fig,ax = plt.subplots(1,3)
ax[0].imshow(binary_pred)
ax[1].imshow(binary_gt)
ax[2].imshow(np.dstack((binary_gt*255, binary_pred*255, np.zeros_like(binary_gt))))
plt.show()
def get_subpix_score(pred_array, gt_array, min_outlier_error):
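    """Mean absolute sub-pixel error between predicted and ground-truth scan-line centres, ignoring missing values
    and errors above min_outlier_error; also returns the corresponding outlier fraction."""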
SHOW_IMAGES = False
pred_array[pred_array==0] = np.nan
gt_array[gt_array==0] = np.nan
outlier_frac =get_outlier_fraction(pred_array, gt_array, min_outlier_error)
#print("Outlier frac:", outlier_frac)
#plot_pred_gt(pred_array, gt_array)
diff = np.abs(pred_array- gt_array)
diff = diff[~np.isnan(diff)]
diff = diff[diff<min_outlier_error]
if SHOW_IMAGES:
fig,ax = plt.subplots(1,2)
ax[0].plot(pred_array)
ax[0].plot(gt_array)
ax[1].plot(diff)
plt.show()
print("mean subpix error", np.mean(diff))
return np.mean(diff),outlier_frac
def save_images(dice, left_scan, right_scan, prediction, gt):
pass
def test_dataset(dataset_path, test_path):
SHOW_IMAGES = False
MIN_OUTLIER_ERROR = 5
result_dir = os.path.join("results", dataset_path)
try:
os.rmdir(result_dir)
print("Removed directory", result_dir)
except:
print("No directory named", result_dir)
gt_paths = get_named_image_from_dirs(dataset_path, "gt_scan.png")
gt_proj_mask_paths = get_named_image_from_dirs(dataset_path, "gt_proj_mask.png")
left_scans_paths = get_named_image_from_dirs(dataset_path, "filt_rgb.png")
proj_scans_paths = get_named_image_from_dirs(dataset_path, "filt_proj_rgb.png")
testset_path = os.path.join("test", test_path)
pred_paths = [os.path.join(testset_path, img) for img in os.listdir(testset_path)]
pred_paths.sort()
dice_score_list = []
subpix_score_list = []
outlier_frac_list = []
for idx in range(len(gt_paths)):
gt_path = gt_paths[idx]
pred_path = pred_paths[idx]
left_scan_path = left_scans_paths[idx]
gt_proj_path = gt_proj_mask_paths[idx]
proj_scan_path = proj_scans_paths[idx]
gt_img = cv2.imread(gt_path, 0)
gt_proj_mask = cv2.imread(gt_proj_path,0)
subpix = secdeg_momentum_subpix(gt_img)
pred_img = cv2.imread(pred_path, 0)
left_scan = cv2.cvtColor(cv2.imread(left_scan_path), cv2.COLOR_BGR2RGB)
proj_scan = cv2.cvtColor(cv2.imread(proj_scan_path), cv2.COLOR_BGR2RGB)
pred_img = np.where(pred_img>30, pred_img, 0)
gt_img = np.where(gt_proj_mask>10, gt_img, 0)
binary_gt = np.where(gt_img>5, 1, 0)
binary_pred = np.where(pred_img>0, 1, 0)
#overlap_pred_gt(binary_gt, binary_pred)
filtered=pred_img
dice_score = get_dice_scores(binary_pred, binary_gt, 2)
#print("dice:", dice_score)
stacked = stack_ground_truth_pred(gt_img, pred_img, left_scan, proj_scan)
dice_score_list.append(dice_score)
#print("dice score", dice_score)
combined_rgb = np.dstack((binary_pred*255, np.zeros_like(binary_pred), binary_gt*255))
subpix_pred = secdeg_momentum_subpix(filtered, 0.3)
subpix_score, outlier_fraction = get_subpix_score(subpix_pred, subpix, MIN_OUTLIER_ERROR)
subpix_score_list.append(subpix_score)
outlier_frac_list.append(outlier_fraction)
#outlier_fraction = get_outlier_fraction(subpix_pred, subpix, 10)
result_scan_dir = os.path.join(result_dir, "scan"+str(dice_score)+"-"+str(idx))
os.makedirs(result_scan_dir, exist_ok=True)
cv2_imwrite(os.path.join(result_scan_dir, "gt_proj_mask.png"), gt_proj_mask)
cv2_imwrite(os.path.join(result_scan_dir, "gt_scan.png"), gt_img)
cv2_imwrite(os.path.join(result_scan_dir, "pred.png"), pred_img)
cv2_imwrite(os.path.join(result_scan_dir, "left_scan.png"), left_scan)
cv2_imwrite(os.path.join(result_scan_dir, "proj_scan.png"), proj_scan)
cv2_imwrite(os.path.join(result_scan_dir, "stacked.png"), stacked)
if SHOW_IMAGES:
print("subpix", subpix.shape)
print("subpix_pred", subpix_pred.shape)
plt.imshow(combined_rgb)
plt.show()
plt.imshow(filtered)
plt.show()
subpix_score_list = np.sort(np.array(subpix_score_list))
outlier_frac_list = np.array(outlier_frac_list)
dice_scores_np = np.array(dice_score_list)
avg_dice_score = np.average(dice_scores_np)
avg_subpix_score = np.mean(subpix_score_list)
avg_outlier_frac = np.mean(outlier_frac_list)
outlier_frac_list.sort()
dice_score_list.sort()
subpix_score_list.sort()
print("Avg dice score", avg_dice_score)
print("avg subpix score", avg_subpix_score)
print("avg outlier frac", format(avg_outlier_frac*100.0, "02f"), "%")
return dice_score, subpix_score_list, dice_score_list, outlier_frac_list
if __name__ == '__main__':
specular_path = os.path.join("test-sets", "test-color-specular")
pbr_path = os.path.join("test-sets", "test-color-pbr")
blurry_path = os.path.join("test-sets", "test-color-blurry")
print("Specular")
dice_score, subpix_score_list_spec, dice_scores_spec, outliers_spec= test_dataset(specular_path, "test-spec")
print("PBR")
dice_score, subpix_score_list_pbr, dice_scores_pbr, outliers_pbr = test_dataset(pbr_path, "test-pbr")
print("Blurry")
dice_score, subpix_score_list_blurry, dice_scores_blur, outliers_blur = test_dataset(blurry_path, "test-blurry")
fig,ax =plt.subplots(1,3)
ax[0].plot(dice_scores_spec, label='Specular')
ax[0].plot(dice_scores_pbr, label='PBR')
ax[0].plot(dice_scores_blur, label='Blurry')
ax[0].set_xlabel("Sorted images by dice score")
ax[0].set_ylabel("Dice score")
ax[0].grid(True)
ax[0].legend()
ax[1].plot(subpix_score_list_spec, label='Specular')
ax[1].plot(subpix_score_list_pbr, label='PBR')
ax[1].plot(subpix_score_list_blurry, label='Blurry')
ax[1].set_xlabel("Sorted images by error")
ax[1].set_ylabel("Mean image subpixel error")
ax[1].grid(True)
ax[1].legend()
ax[2].plot(np.array(outliers_spec)*100, label='Specular')
ax[2].plot(np.array(outliers_pbr)*100, label='PBR')
    ax[2].plot(np.array(outliers_blur)*100, label='Blurry')
from os import link
import flask
from flask.globals import request
from flask import Flask, render_template
# library used for prediction
import numpy as np
import pandas as pd
import pickle
# library used for insights
import json
import plotly
import plotly.express as px
app = Flask(__name__, template_folder = 'templates')
link_active = None
# render home template
@app.route('/')
def main():
return(render_template('home.html', title = 'Home'))
# load pickle file
model = pickle.load(open('model/rf_classifier.pkl', 'rb'))
scaler = pickle.load(open('model/scaler.pkl', 'rb'))
@app.route('/form')
def form():
show_prediction = False
link_active = 'Form'
return(render_template('form.html', title = 'Form', show_prediction = show_prediction, link_active = link_active))
@app.route('/insights')
def insights():
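    # Build the Plotly figures for the insights page: bounce/exit scatter, page-value box plot,
    # and visitor-type / weekend purchase breakdowns.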
link_active = 'Insights'
df = pd.read_csv('online_shoppers_intention.csv')
df['Revenue'] = np.where(df['Revenue'] == True, 'Yes', 'No')
df.rename(columns={'Revenue':'Intention to Buy'}, inplace = True)
color_map = {'Yes': '#FFBF00', 'No': '#36454F'}
df_sorted = df.sort_values('Intention to Buy', ascending = True)
fig1 = px.scatter(
df_sorted, x = 'BounceRates', y='ExitRates',
color='Intention to Buy', color_discrete_map=color_map,
labels = {
"BounceRates": "Bounce Rates", "ExitRates" : "Exit Rates"
}
)
fig1.update_layout(legend_traceorder='reversed')
graph1JSON = json.dumps(fig1, cls=plotly.utils.PlotlyJSONEncoder)
fig2 = px.box(
df, x = 'Intention to Buy', y='PageValues', color='Intention to Buy',
color_discrete_map=color_map,
labels = {
"PageValues" : "Page Values"
}
)
fig2.update_layout(legend_traceorder='reversed')
graph2JSON = json.dumps(fig2, cls=plotly.utils.PlotlyJSONEncoder)
dist_vt = df.groupby(['VisitorType', "Intention to Buy"]).count()[["Administrative"]]
cat_group = df.groupby(['VisitorType']).count()[["Administrative"]]
dist_vt["percentage"] = dist_vt.div(cat_group, level = 'VisitorType') * 100
dist_vt.reset_index(inplace = True)
dist_vt.columns = ['VisitorType', "Intention to Buy", "count", "percentage"]
dist_vt = dist_vt.sort_values(['VisitorType', 'Intention to Buy'], ascending=True)
dist_vt['VisitorType'] = np.where(
dist_vt['VisitorType'] == 'Returning_Visitor', 'Returning Visitor',
np.where(dist_vt['VisitorType'] == 'New_Visitor', 'New Visitor', 'Other')
)
fig3 = px.bar(
dist_vt, x = 'VisitorType', y = 'count', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map,
labels = {
"VisitorType" : "Visitor Type"
}
)
fig3.update_layout(showlegend=False)
graph3JSON = json.dumps(fig3, cls=plotly.utils.PlotlyJSONEncoder)
fig4 = px.bar(
dist_vt, x = 'VisitorType', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
labels = {
"VisitorType" : "Visitor Type"
}
)
fig4.update_layout(showlegend=False)
graph4JSON = json.dumps(fig4, cls=plotly.utils.PlotlyJSONEncoder)
df['Weekend'] = np.where(df['Weekend'] == True, 'Yes', 'No')
dist_weekend = df.groupby(['Intention to Buy', "Weekend"]).count()[["Administrative"]]
cat_group2 = df.groupby(['Weekend']).count()[["Administrative"]]
dist_weekend["percentage"] = dist_weekend.div(cat_group2, level = 'Weekend') * 100
dist_weekend.reset_index(inplace = True)
dist_weekend.columns = ["Intention to Buy", 'Weekend', "count", "percentage"]
fig5 = px.bar(
dist_weekend, x = 'Weekend', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
)
fig5.update_layout(showlegend=False)
graph5JSON = json.dumps(fig5, cls=plotly.utils.PlotlyJSONEncoder)
dist_vt_weekend = df[df['VisitorType'] == 'New_Visitor'].groupby(['Intention to Buy', "Weekend"]).count()[["Administrative"]]
cat_group3 = df[df['VisitorType'] == 'New_Visitor'].groupby(['Weekend']).count()[["Administrative"]]
dist_vt_weekend["percentage"] = dist_vt_weekend.div(cat_group3, level = 'Weekend') * 100
dist_vt_weekend.reset_index(inplace = True)
dist_vt_weekend.columns = ["Intention to Buy", 'Weekend', "count", "percentage"]
fig6 = px.bar(
dist_vt_weekend, x = 'Weekend', y = 'percentage', color = 'Intention to Buy', barmode="group",
color_discrete_map=color_map, range_y = [0, 100],
)
fig6.update_layout(showlegend=False)
graph6JSON = json.dumps(fig6, cls=plotly.utils.PlotlyJSONEncoder)
return(render_template('insights.html', title = 'Insights', link_active = link_active, graph1JSON = graph1JSON, graph2JSON = graph2JSON, graph3JSON = graph3JSON, graph4JSON = graph4JSON, graph5JSON = graph5JSON, graph6JSON = graph6JSON))
@app.route('/predict', methods=['POST'])
def predict():
'''
For rendering prediction result.
'''
link_active = 'Result'
show_prediction = True
# retrieve data
Administrative = int(request.form.get('Administrative'))
Administrative_Duration = float(request.form.get('Administrative_Duration'))
ProductRelated = int(request.form.get('ProductRelated'))
ProductRelated_Duration = float(request.form.get('ProductRelated_Duration'))
BounceRates = float(request.form.get('BounceRates'))
ExitRates = float(request.form.get('ExitRates'))
PageValues = float(request.form.get('PageValues'))
Month = int(request.form.get('Month'))
SpecialDay = request.form.get('SpecialDay')
Weekend = request.form.get('Weekend')
VisitorType = request.form.get('VisitorType')
TrafficType = request.form.get('TrafficType')
OperatingSystems = request.form.get('OperatingSystems')
Browser = request.form.get('Browser')
Region = request.form.get('Region')
# transform to log
Administrative = np.log1p(Administrative)
Administrative_Duration = np.log1p(Administrative_Duration)
ProductRelated = np.log1p(ProductRelated)
ProductRelated_Duration = np.log1p(ProductRelated_Duration)
BounceRates = np.log1p(BounceRates)
    ExitRates = np.log1p(ExitRates)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 10:13:15 2019
@author: orteg
"""
import logging
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import LocalOutlierFactor
from sklearn.utils import check_random_state
from sklearn.utils.multiclass import type_of_target
from scipy.sparse import issparse
from sklearn.exceptions import NotFittedError
from imblearn.over_sampling import ADASYN
from pomegranate import NormalDistribution, NaiveBayes
import numpy as np
#from safeu.classification.TSVM import TSVM
from .spy import Spy
from .weighted_iforest import WeightedIsoForest
class PNN(BaseEstimator, ClassifierMixin):
""" Label Cleaner object
Parameters
----------
method : str or None, optional (default = removal)
Method of cleaning the noise label.
treatment_ratio : float
Threshold for either removal or relabeling noisy labels.
anomaly_detector : scikit-learn anomaly detection model or None, (default = None)
Model for identifying anomalous instances in the dataset.
base_classifier : scikit-learn classifier, (default = None)
Classifier for predicting class.
resampler : resampling method or None, (default = None)
Sampling method for imbalance class issue.
seed : int, optional ()
Attributes
----------
score_samples_ : array-like or None, optional (default = None)
Anomaly score.
modified_instances_ : array-like or None, optional (default = None)
Index of the instances that are identified as likely mislabelled by AD model.
removed_instances_ : array-like or None, optional (default = None)
Index of the removed instances by the noise-label cleaning method.
classes_ : ndarray of shape (n_classes, )
List of class labels known to the classifier.
Xt_, yt_: training set after treatment (if keep_treated=True)
Xf_, yf_: training set after resampling (if keep_final=True)
"""
def __init__(self,
method = None,
treatment_ratio = 0.10,
selftr_threshold = 0.70,
spy_ratio = 0.10,
anomaly_detector = None,
high_score_anomaly = False,
base_classifier = None,
resampler = None,
max_samples = 'auto',
n_neighbors = 5,
keep_treated = True,
keep_final = True,
random_state = None):
self.method = method
self.treatment_ratio = treatment_ratio
self.spy_ratio = spy_ratio
self.selftr_threshold = selftr_threshold
self.anomaly_detector = anomaly_detector
self.high_score_anomaly = high_score_anomaly
self.base_classifier = base_classifier
self.resampler = resampler
self.random_state = random_state
self.max_samples = max_samples
self.n_neighbors = n_neighbors
self.keep_treated = keep_treated
self.keep_final = keep_final
if (self.method not in ['selftraining', 'relabeling', 'removal',
'embayes','semiboost','selftraining',
'embayes_classifier','semiboost_classifier','selftraining_classifier',
None]):
raise ValueError('Choose an appropriate option!')
def fit(self, X, y):
""" Fit estimator.
Parameters
----------
X : Array-like of shape = [n_samples, n_features]
Input samples.
y : Array of shape = [n_samples]
Predicted classes.
Returns
-------
self : object
Fitted estimator.
"""
self.score_samples_ = None
self.Xt_, self.yt_ = None, None
self.Xf_, self.yf_ = None, None
self.modified_instances_ = None
self.removed_instances_ = None
self.score_samples_ = None
self.classes_ = None
self.ix_neg_ = None
self.ix_neg_anm_ = None
self.ix_rm_neg_anm_ = None
self.ss_base_learner = None
# Don't reconstruct these internal objects if we have already fitted before,
# as we might have used the random state
if not self._is_fitted():
self.anomaly_detector_ = self.anomaly_detector
self.base_classifier_ = self.base_classifier
self.resampler_ = self.resampler
self.random_state_ = check_random_state(self.random_state).randint(np.iinfo(np.int32).max)
      # Do we need a default anomaly detector, base classifier or resampler?
if self.anomaly_detector_ is None or self.anomaly_detector_ == 'iforest':
self.anomaly_detector_ = IsolationForest(n_estimators = 100,
max_samples = self.max_samples,
random_state=self.random_state_,
n_jobs = -1)
if self.anomaly_detector_ is None or self.anomaly_detector_ == 'lof':
self.anomaly_detector_ = LocalOutlierFactor(n_neighbors = self.n_neighbors,
n_jobs = -1)
if self.anomaly_detector_ is None or self.anomaly_detector_ == 'wiforest':
self.anomaly_detector_ = WeightedIsoForest(n_estimators = 100,
n_neighbors = self.n_neighbors,
max_samples = self.max_samples,
random_state = self.random_state_,
n_jobs = -1)
if self.anomaly_detector_ is None or self.anomaly_detector_ == 'spy':
self.anomaly_detector_ = Spy(random_state = self.random_state_)
if self.base_classifier_ is None:
self.base_classifier_ = RandomForestClassifier(n_estimators = 100, random_state=self.random_state_, n_jobs = -1)
if self.resampler_ == 'adasyn':
self.resampler_ = ADASYN(sampling_strategy=1.0, random_state = self.random_state_, n_jobs = -1)
#### CHECKS ####
# Sparsity checks
if issparse(X) or issparse(y):
logging.info('`X` or `y` are sparse, I will convert them to dense (might incur high memory usage)')
self.Xt_ = np.asarray(X).copy() if not issparse(X) else X.toarray().copy()
self.yt_ = np.asarray(y).copy() if not issparse(y) else y.toarray().copy()
self.modified_instances_ = np.array([])
    self.score_samples_ = np.array([])
'''
BMI tasks in the new structure, i.e. inheriting from manualcontrolmultitasks
'''
import numpy as np
import time, random
from riglib.experiment import traits, experiment
from features.bmi_task_features import LinearlyDecreasingAssist, LinearlyDecreasingHalfLife
import os
from riglib.bmi import clda, assist, extractor, train, goal_calculators, ppfdecoder
import riglib.bmi
import pdb
import multiprocessing as mp
import pickle
import tables
import re
from riglib.stereo_opengl import ik
import tempfile, pickle, traceback, datetime
from riglib.bmi.bmi import GaussianStateHMM, Decoder, GaussianState, BMISystem, BMILoop
from riglib.bmi.assist import Assister, SSMLFCAssister, FeedbackControllerAssist
from riglib.bmi import feedback_controllers
from riglib.stereo_opengl.window import WindowDispl2D
from riglib.stereo_opengl.primitives import Line
from riglib.bmi.state_space_models import StateSpaceEndptVel2D, StateSpaceNLinkPlanarChain
from .target_capture_task import ScreenTargetCapture
target_colors = {"blue":(0,0,1,0.5),
"yellow": (1,1,0,0.5),
"hibiscus":(0.859,0.439,0.576,0.5),
"magenta": (1,0,1,0.5),
"purple":(0.608,0.188,1,0.5),
"lightsteelblue":(0.690,0.769,0.901,0.5),
"dodgerblue": (0.118,0.565,1,0.5),
"teal":(0,0.502,0.502,0.5),
"aquamarine":(0.498,1,0.831,0.5),
"olive":(0.420,0.557,0.137,0.5),
"chiffonlemon": (0.933,0.914,0.749,0.5),
"juicyorange": (1,0.502,0,0.5),
"salmon":(1,0.549,0.384,0.5),
"wood": (0.259,0.149,0.071,0.5),
"elephant":(0.409,0.409,0.409,0.5)}
np.set_printoptions(suppress=False)
###################
####### Assisters
##################
class OFCEndpointAssister(FeedbackControllerAssist):
'''
Assister for cursor PPF control which uses linear feedback (infinite horizon LQR) to drive the cursor toward the target state
'''
def __init__(self, decoding_rate=180):
'''
Constructor for OFCEndpointAssister
Parameters
----------
decoding_rate : int
Rate that the decoder should operate, in Hz. Should be a multiple or divisor of 60 Hz
Returns
-------
OFCEndpointAssister instance
'''
        F_dict = pickle.load(open('/storage/assist_params/assist_20levels_ppf.pkl', 'rb'))
B = np.mat(np.vstack([np.zeros([3,3]), np.eye(3)*1000*1./decoding_rate, np.zeros(3)]))
fb_ctrl = feedback_controllers.MultiModalLFC(A=B, B=B, F_dict=F_dict)
super(OFCEndpointAssister, self).__init__(fb_ctrl, style='additive_cov')
self.n_assist_levels = len(F_dict)
def get_F(self, assist_level):
'''
Look up the feedback gain matrix based on the assist_level
Parameters
----------
assist_level : float
Float between 0 and 1 to indicate the level of the assist (1 being the highest)
Returns
-------
np.mat
'''
assist_level_idx = min(int(assist_level * self.n_assist_levels), self.n_assist_levels-1)
F = np.mat(self.fb_ctrl.F_dict[assist_level_idx])
return F
class SimpleEndpointAssister(Assister):
'''
Constant velocity toward the target if the cursor is outside the target. If the
cursor is inside the target, the speed becomes the distance to the center of the
target divided by 2.
'''
def __init__(self, *args, **kwargs):
        ''' Pull assist parameters (decoder bin length, assist speed, target radius) out of kwargs. '''
self.decoder_binlen = kwargs.pop('decoder_binlen', 0.1)
self.assist_speed = kwargs.pop('assist_speed', 5.)
self.target_radius = kwargs.pop('target_radius', 2.)
def calc_assisted_BMI_state(self, current_state, target_state, assist_level, mode=None, **kwargs):
        ''' Compute the assistive control input Bu pushing the cursor toward the target, weighted by assist_level. '''
Bu = None
assist_weight = 0.
if assist_level > 0:
cursor_pos = np.array(current_state[0:3,0]).ravel()
target_pos = np.array(target_state[0:3,0]).ravel()
decoder_binlen = self.decoder_binlen
speed = self.assist_speed * decoder_binlen
target_radius = self.target_radius
Bu = self.endpoint_assist_simple(cursor_pos, target_pos, decoder_binlen, speed, target_radius, assist_level)
assist_weight = assist_level
# return Bu, assist_weight
return dict(x_assist=Bu, assist_level=assist_weight)
@staticmethod
def endpoint_assist_simple(cursor_pos, target_pos, decoder_binlen=0.1, speed=0.5, target_radius=2., assist_level=0.):
'''
Estimate the next state using a constant velocity estimate moving toward the specified target
Parameters
----------
cursor_pos: np.ndarray of shape (3,)
Current position of the cursor
target_pos: np.ndarray of shape (3,)
Specified target position
decoder_binlen: float
Time between iterations of the decoder
speed: float
Speed of the machine-assisted cursor
target_radius: float
Radius of the target. When the cursor is inside the target, the machine assisted cursor speed decreases.
assist_level: float
Scalar between (0, 1) where 1 indicates full machine control and 0 indicates full neural control.
Returns
-------
x_assist : np.ndarray of shape (7, 1)
Control vector to add onto the state vector to assist control.
'''
diff_vec = target_pos - cursor_pos
dist_to_target = np.linalg.norm(diff_vec)
dir_to_target = diff_vec / (np.spacing(1) + dist_to_target)
if dist_to_target > target_radius:
assist_cursor_pos = cursor_pos + speed*dir_to_target
else:
assist_cursor_pos = cursor_pos + speed*diff_vec/2
assist_cursor_vel = (assist_cursor_pos-cursor_pos)/decoder_binlen
x_assist = np.hstack([assist_cursor_pos, assist_cursor_vel, 1])
x_assist = np.mat(x_assist.reshape(-1,1))
return x_assist
class SimpleEndpointAssisterLFC(feedback_controllers.MultiModalLFC):
'''
    Linear feedback controller for simple endpoint assist: position feedback toward the target, zero feedback while holding.
'''
def __init__(self, *args, **kwargs):
'''
        Build the LFC matrices for a 2D endpoint cursor (position/velocity/offset state) with separate 'target' and 'hold' feedback modes.
Parameters
----------
Returns
-------
'''
dt = 0.1
A = np.mat([[1., 0, 0, dt, 0, 0, 0],
[0., 1, 0, 0, dt, 0, 0],
[0., 0, 1, 0, 0, dt, 0],
[0., 0, 0, 0, 0, 0, 0],
[0., 0, 0, 0, 0, 0, 0],
[0., 0, 0, 0, 0, 0, 0],
[0., 0, 0, 0, 0, 0, 1]])
I = np.mat(np.eye(3))
B = np.vstack([0*I, I, np.zeros([1,3])])
F_target = np.hstack([I, 0*I, np.zeros([3,1])])
F_hold = np.hstack([0*I, 0*I, np.zeros([3,1])])
F_dict = dict(hold=F_hold, target=F_target)
super(SimpleEndpointAssisterLFC, self).__init__(B=B, F_dict=F_dict)
#################
##### Tasks #####
#################
class BMIControlMulti(BMILoop, LinearlyDecreasingAssist, ScreenTargetCapture):
'''
Target capture task with cursor position controlled by BMI output.
Cursor movement can be assisted toward target by setting assist_level > 0.
'''
background = (.5,.5,.5,1) # Set the screen background color to grey
reset = traits.Int(0, desc='reset the decoder state to the starting configuration')
ordered_traits = ['session_length', 'assist_level', 'assist_level_time', 'reward_time','timeout_time','timeout_penalty_time']
exclude_parent_traits = ['marker_count', 'marker_num', 'goal_cache_block']
static_states = [] # states in which the decoder is not run
hidden_traits = ['arm_hide_rate', 'arm_visible', 'hold_penalty_time', 'rand_start', 'reset', 'target_radius', 'window_size']
is_bmi_seed = False
cursor_color_adjust = traits.OptionsList(*list(target_colors.keys()), bmi3d_input_options=list(target_colors.keys()))
def __init__(self, *args, **kwargs):
super(BMIControlMulti, self).__init__(*args, **kwargs)
def init(self, *args, **kwargs):
sph = self.plant.graphics_models[0]
sph.color = target_colors[self.cursor_color_adjust]
sph.radius = self.cursor_radius
self.plant.cursor_radius = self.cursor_radius
self.plant.cursor.radius = self.cursor_radius
super(BMIControlMulti, self).init(*args, **kwargs)
def create_assister(self):
# Create the appropriate type of assister object
start_level, end_level = self.assist_level
kwargs = dict(decoder_binlen=self.decoder.binlen, target_radius=self.target_radius)
if hasattr(self, 'assist_speed'):
kwargs['assist_speed'] = self.assist_speed
if isinstance(self.decoder.ssm, StateSpaceEndptVel2D) and isinstance(self.decoder, ppfdecoder.PPFDecoder):
self.assister = OFCEndpointAssister()
elif isinstance(self.decoder.ssm, StateSpaceEndptVel2D):
self.assister = SimpleEndpointAssister(**kwargs)
## elif (self.decoder.ssm == namelist.tentacle_2D_state_space) or (self.decoder.ssm == namelist.joint_2D_state_space):
## # kin_chain = self.plant.kin_chain
## # A, B, W = self.decoder.ssm.get_ssm_matrices(update_rate=self.decoder.binlen)
## # Q = np.mat(np.diag(np.hstack([kin_chain.link_lengths, np.zeros_like(kin_chain.link_lengths), 0])))
## # R = 10000*np.mat(np.eye(B.shape[1]))
## # fb_ctrl = LQRController(A, B, Q, R)
## # self.assister = FeedbackControllerAssist(fb_ctrl, style='additive')
## self.assister = TentacleAssist(ssm=self.decoder.ssm, kin_chain=self.plant.kin_chain, update_rate=self.decoder.binlen)
else:
raise NotImplementedError("Cannot assist for this type of statespace: %r" % self.decoder.ssm)
print(self.assister)
def create_goal_calculator(self):
if isinstance(self.decoder.ssm, StateSpaceEndptVel2D):
self.goal_calculator = goal_calculators.ZeroVelocityGoal(self.decoder.ssm)
elif isinstance(self.decoder.ssm, StateSpaceNLinkPlanarChain) and self.decoder.ssm.n_links == 2:
self.goal_calculator = goal_calculators.PlanarMultiLinkJointGoal(self.decoder.ssm, self.plant.base_loc, self.plant.kin_chain, multiproc=False, init_resp=None)
elif isinstance(self.decoder.ssm, StateSpaceNLinkPlanarChain) and self.decoder.ssm.n_links == 4:
shoulder_anchor = self.plant.base_loc
chain = self.plant.kin_chain
q_start = self.plant.get_intrinsic_coordinates()
x_init = np.hstack([q_start, np.zeros_like(q_start), 1])
x_init = np.mat(x_init)
import tensorflow as tf
import numpy as np
import tensorflow.contrib as tf_contrib
from tqdm import tqdm_notebook as tqdm
weight_init = tf_contrib.layers.variance_scaling_initializer() # Kaiming init for encoder / decoder
weight_regularizer = tf_contrib.layers.l2_regularizer(scale=0.0001)
class GAIA(object):
def __init__(self, dims, batch_size, gpus=[], activation_fn=tf.nn.relu,
latent_loss='SSE', adam_eps=1.0, network_type='AE',
n_res=4, n_sample=2, style_dim=8, ch=64, n_hidden=512):
self.dims = dims
self.batch_size = batch_size
self.latent_loss = latent_loss # either 'SSE', 'VAE', or 'distance'
self.network_type = network_type
# training loss for SSE and distance
self.default_activation = activation_fn
self.adam_eps = adam_eps
self.n_res = n_res # number of residual layers
self.n_sample = n_sample # number of resamples
self.ch = ch # base number of filters
self.img_ch = dims[2] # number of channels in image
self.style_dim = style_dim
self.mlp_dim = pow(2, self.n_sample) * self.ch # default : 256
self.n_downsample = self.n_sample
self.n_upsample = self.n_sample
self.n_hidden = n_hidden # how many hidden units in generator z
self.batch_num = 0 # how many batches have been trained
self.num_gpus = len(gpus) # number of GPUs to use
if len(gpus) < 1:
self.num_gpus = 1
self.initialize_network()
def initialize_network(self):
""" Defines the network architecture
"""
# initialize graph and session
# import pdb; pdb.set_trace()
gpus = tf.config.experimental.list_physical_devices('GPU')
gpus += tf.config.experimental.list_physical_devices('XLA_GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU
try:
tf.config.experimental.set_visible_devices(gpus[0], 'XLA_GPU')
logical_gpus = tf.config.experimental.list_logical_devices('XLA_GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
# with tf.device('/device:XLA_GPU:0'):
self.graph = tf.Graph()
self.config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
self.config.gpu_options.allocator_type = 'BFC'
self.config.gpu_options.allow_growth = True
self.sess = tf.InteractiveSession(graph=self.graph, config=self.config)
# Global step needs to be defined to coordinate multi-GPU
self.global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
self.x_input = tf.placeholder(tf.float32, [self.batch_size * self.num_gpus,
np.prod(self.dims)]) # Placeholder for input data
if self.network_type == 'AE':
self.AE_initialization()
elif self.network_type == 'GAIA':
self.GAIA_initialization()
# apply the gradients with our optimizers
# Start the Session
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver() # initialize network saver
print('Network Initialized')
def GAIA_initialization(self):
""" Initialization specific to GAIA network
"""
# run the x input through the network
self.inference_GAIA(self.x_input)
self.lr_sigma_slope = tf.placeholder('float32') # slope for balancing Generator and Discriminator in GAN
self.lr_max = tf.placeholder('float32') # maximum learning rate for both networks
### RENAMING SOME PARAMETERS ###
self.x_real = self.x_input
# self.z_x = self.z_x_real
self.x_tilde = self.x_real_recon
self.latent_loss_weights = tf.placeholder(tf.float32) # Placeholder for weight of distance metric
# distance loss
self.distance_loss = distance_loss(self.x_input, self.z_gen_content_net_real)
# compute losses of the model
self.x_fake_from_real_recon_loss = tf.reduce_mean(tf.abs(self.x_real - self.x_fake_from_real_recon))
self.x_fake_from_sample_recon_loss = tf.reduce_mean(
tf.abs(self.x_fake_from_sample - self.x_fake_from_sample_recon))
self.x_fake_recon_loss = (self.x_fake_from_sample_recon_loss + self.x_fake_from_real_recon_loss) / 2.
# compute losses of the model
self.x_real_recon_loss = tf.reduce_mean(tf.abs(self.x_real - self.x_real_recon))
# squash with a sigmoid based on the learning rate
self.lr_D = sigmoid(self.x_real_recon_loss - self.x_fake_recon_loss, shift=0., mult=self.lr_sigma_slope)
self.lr_G = (tf.constant(1.0) - self.lr_D) * self.lr_max
self.lr_D = self.lr_D * self.lr_max
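# The two learning rates act as a soft switch between the networks: when the
# discriminator reconstructs real images worse than fakes, the sigmoid pushes
# lr_D toward lr_max and shrinks lr_G, and vice versa, so neither network can
# run far ahead of the other.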
self.sigma = 0.5 # balance parameter for discriminator caring more about autoencoding real, or discriminating fake
self.discrim_proportion_fake = tf.clip_by_value(
sigmoid(self.x_fake_from_sample_recon_loss * self.sigma - self.x_real_recon_loss, shift=0.,
mult=self.lr_sigma_slope),
0., 0.9) # hold the discriminator's fake proportion always at less than half
self.discrim_proportion_real = tf.constant(1.)
# add losses for generator and descriminator
# loss of Encoder/Decoder: reconstructing x_real well and x_fake poorly
self.L_d = tf.clip_by_value((
self.x_real_recon_loss * self.discrim_proportion_real - self.x_fake_recon_loss * self.discrim_proportion_fake),
-1, 1)
# hold the discriminator's fake proportion always at less than half
self.gen_proportion_sample = tf.clip_by_value(
sigmoid(self.x_fake_from_sample_recon_loss - self.x_fake_from_real_recon_loss, shift=0.,
mult=self.lr_sigma_slope),
0., 1.0)
# Generator should be balancing the reproduction
self.L_g = tf.clip_by_value((self.gen_proportion_sample * self.x_fake_from_sample_recon_loss + \
(1.0 - self.gen_proportion_sample) * self.x_fake_from_real_recon_loss) + \
self.latent_loss_weights * self.distance_loss, \
-1, 1)
# M Global is just a way to visualize the training decrease
self.m_global = self.x_real_recon_loss + self.x_fake_recon_loss
# apply optimizers
self.opt_D = tf.train.AdamOptimizer(learning_rate=self.lr_D, epsilon=self.adam_eps)
self.opt_G = tf.train.AdamOptimizer(learning_rate=self.lr_G, epsilon=self.adam_eps)
# specify loss to parameters
self.params = tf.trainable_variables()
self.D_params = [i for i in self.params if 'descriminator/' in i.name]
self.G_params = [i for i in self.params if 'generator/' in i.name]
# Calculate the gradients for the batch of data on this CIFAR tower.
self.grads_d = self.opt_D.compute_gradients(self.L_d, var_list=self.D_params)
self.grads_g = self.opt_G.compute_gradients(self.L_g, var_list=self.G_params)
#
self.train_D = self.opt_D.apply_gradients(self.grads_d, global_step=self.global_step)
self.train_G = self.opt_G.apply_gradients(self.grads_g, global_step=self.global_step)
def inference_GAIA(self, x_real):
# Create a fake X value from input from the generator (this will try to autoencode it's input)
print('Creating Generator...')
with tf.variable_scope("generator"):
with tf.variable_scope("enc"):
print('...Creating encoder in generator...')
self.gen_style_net_real, self.gen_content_net_real = self.encoder(self.x_input, discriminator=False)
self.z_gen_style_net_real = self.gen_style_net_real[-1] # z value in the generator for the real image
self.z_gen_content_net_real = self.gen_content_net_real[
-1] # z value in the generator for the real image
print('... Creating interpolations...')
# get interpolation points as sampled from a gaussian distribution centered at 50%
self.midpoint_div_1 = tf.random_normal(shape=(int(self.batch_size / 2),), mean=0.5, stddev=0.25)
self.midpoint_div_2 = tf.random_normal(shape=(int(self.batch_size / 2),), mean=0.5, stddev=0.25)
self.z_gen_style_net_sampled = get_midpoints(self.z_gen_style_net_real, self.midpoint_div_1,
self.midpoint_div_2)
self.z_gen_content_net_sampled = get_midpoints(self.z_gen_content_net_real, self.midpoint_div_1,
self.midpoint_div_2)
# run real images through the first autoencoder (the generator)
print('...Creating decoder in generator...')
with tf.variable_scope("dec"):
self.gen_dec_net_from_real = self.decoder(self.z_gen_style_net_real, self.z_gen_content_net_real,
discriminator=False) # fake generated image
self.x_fake_from_real = self.gen_dec_net_from_real[-1]
# run the sampled generator_z through the decoder of the generator
with tf.variable_scope("dec", reuse=True):
self.gen_dec_net_from_sampled = self.decoder(self.z_gen_style_net_sampled,
self.z_gen_content_net_sampled, verbose=False,
discriminator=False) # fake generated image
self.x_fake_from_sample = self.gen_dec_net_from_sampled[-1]
print('Creating Discriminator...')
with tf.variable_scope("descriminator"):
# Run the real x through the discriminator
with tf.variable_scope("enc"):
print('...Creating encoder in discriminator...')
self.disc_style_net_real, self.disc_content_net_real = self.encoder(self.x_input,
discriminator=True) # get z from the input
self.z_disc_style_net_real = self.disc_style_net_real[
-1] # style z value in the discriminator for the real image
self.z_disc_content_net_real = self.disc_content_net_real[
-1] # content z value in the discriminator for the real image
with tf.variable_scope("dec"):
print('...Creating decoder in discriminator...')
self.disc_dec_net_real = self.decoder(self.z_disc_style_net_real, self.z_disc_content_net_real,
discriminator=True) # get output from z
self.x_real_recon = self.disc_dec_net_real[-1] # reconstruction of the real image in the discriminator
# run the generated x which is autoencoding the real values through the network
with tf.variable_scope("enc", reuse=True):
self.disc_style_net_fake_from_real, self.disc_content_net_fake_from_real = self.encoder(
self.x_fake_from_real, discriminator=True) # get z from the input
self.z_disc_style_net_fake_from_real = self.disc_style_net_fake_from_real[
-1] # style z value in the discriminator for the fake-from-real image
self.z_disc_content_net_fake_from_real = self.disc_content_net_fake_from_real[
-1] # content z value in the discriminator for the fake-from-real image
with tf.variable_scope("dec", reuse=True):
self.disc_dec_net_fake_from_real = self.decoder(self.z_disc_style_net_fake_from_real,
self.z_disc_content_net_fake_from_real, verbose=False,
discriminator=True) # get output from z
self.x_fake_from_real_recon = self.disc_dec_net_fake_from_real[
-1] # reconstruction of the fake-from-real image in the discriminator
# run the interpolated (generated) x through the discriminator
with tf.variable_scope("enc", reuse=True):
self.disc_style_net_fake_from_sampled, self.disc_content_net_fake_from_sampled = self.encoder(
self.x_fake_from_sample, discriminator=True) # get z from the input
self.z_disc_style_net_fake_from_sampled = self.disc_style_net_fake_from_sampled[
-1] # style z value in the discriminator for the fake-from-sample image
self.z_disc_content_net_fake_from_sampled = self.disc_content_net_fake_from_sampled[
-1] # content z value in the discriminator for the fake-from-sample image
# run gen_x through the autoencoder, return the output
with tf.variable_scope("dec", reuse=True):
self.disc_dec_net_fake_from_sampled = self.decoder(self.z_disc_style_net_fake_from_sampled,
self.z_disc_content_net_fake_from_sampled,
verbose=False,
discriminator=True) # get output from z
self.x_fake_from_sample_recon = self.disc_dec_net_fake_from_sampled[
-1] # reconstruction of the fake-from-sample image in the discriminator
def encoder(self, X, reuse=False, scope='content_encoder', verbose=False, discriminator=False):
n_downsample = self.n_downsample
channel = self.ch
if discriminator == False:
n_res = self.n_res
else:
n_res = self.n_res # - 2
channel = channel / 2
###### Content Encoder
with tf.variable_scope("content_enc"):
content_net = [tf.reshape(X, [self.batch_size, self.dims[0], self.dims[1], self.dims[2]])]
content_net.append(relu(
conv(content_net[len(content_net) - 1], channel, kernel=7, stride=1, pad=3, pad_type='reflect',
scope='conv_0')))
for i in range(n_downsample):
content_net.append(relu(
conv(content_net[len(content_net) - 1], channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect',
scope='conv_' + str(i + 1))))
channel = channel * 2
for i in range(n_res):
content_net.append(resblock(content_net[len(content_net) - 1], channel, scope='resblock_' + str(i)))
if discriminator == False:
content_net.append(
linear(content_net[len(content_net) - 1], self.n_hidden, use_bias=True, scope='linear'))
content_shapes = [shape(i) for i in content_net]
if verbose: print('content_net shapes: ', content_shapes)
if discriminator == False:
channel = self.ch
else:
channel = self.ch / 2
###### Style Encoder
with tf.variable_scope("style_enc"):
# IN removes the original feature mean and variance that represent important style information
style_net = [tf.reshape(X, [self.batch_size, self.dims[0], self.dims[1], self.dims[2]])]
style_net.append(conv(style_net[len(style_net) - 1], channel, kernel=7, stride=1, pad=3, pad_type='reflect',
scope='conv_0'))
style_net.append(relu(style_net[len(style_net) - 1]))
for i in range(2):
style_net.append(relu(
conv(style_net[len(style_net) - 1], channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect',
scope='conv_' + str(i + 1))))
channel = channel * 2
for i in range(2):
style_net.append(relu(
conv(style_net[len(style_net) - 1], channel, kernel=4, stride=2, pad=1, pad_type='reflect',
scope='down_conv_' + str(i))))
style_net.append(adaptive_avg_pooling(style_net[len(style_net) - 1])) # global average pooling
style_net.append(conv(style_net[len(style_net) - 1], self.style_dim, kernel=1, stride=1, scope='SE_logit'))
style_shapes = [shape(i) for i in style_net]
if verbose: print('style_net shapes: ', style_shapes)
return style_net, content_net
def decoder(self, z_x_style, z_x_content, reuse=False, scope="content_decoder", verbose=False, discriminator=False):
channel = self.mlp_dim
n_upsample = self.n_upsample
if discriminator == False:
n_res = self.n_res
z_x_content = tf.reshape(linear(z_x_content, (self.dims[0] / (2 ** self.n_sample)) * (
self.dims[0] / (2 ** self.n_sample)) * (self.ch * self.n_sample ** 2)), (
self.batch_size, int(self.dims[0] / (2 ** self.n_sample)),
int(self.dims[0] / (2 ** self.n_sample)), int(self.ch * self.n_sample ** 2)))
else:
n_res = self.n_res # - 2
channel = channel / 2
dec_net = [z_x_content]
mu, sigma = self.MLP(z_x_style, discriminator=discriminator)
for i in range(n_res):
dec_net.append(
adaptive_resblock(dec_net[len(dec_net) - 1], channel, mu, sigma, scope='adaptive_resblock' + str(i)))
for i in range(n_upsample):
# # IN removes the original feature mean and variance that represent important style information
dec_net.append(up_sample(dec_net[len(dec_net) - 1], scale_factor=2))
dec_net.append(conv(dec_net[len(dec_net) - 1], channel // 2, kernel=5, stride=1, pad=2, pad_type='reflect',
scope='conv_' + str(i)))
dec_net.append(relu(layer_norm(dec_net[len(dec_net) - 1], scope='layer_norm_' + str(i))))
channel = channel // 2
dec_net.append(
conv(dec_net[len(dec_net) - 1], channels=self.img_ch, kernel=7, stride=1, pad=3, pad_type='reflect',
scope='G_logit'))
dec_net.append(tf.reshape(tf.sigmoid(dec_net[len(dec_net) - 1]),
[self.batch_size, self.dims[0] * self.dims[1] * self.dims[2]]))
dec_shapes = [shape(i) for i in dec_net]
if verbose: print('Decoder shapes: ', dec_shapes)
return dec_net
def MLP(self, style, reuse=False, scope='MLP', discriminator=False):
channel = self.mlp_dim
if discriminator:
channel = int(channel / 2)
with tf.variable_scope(scope, reuse=reuse):
x = relu(linear(style, channel, scope='linear_0'))
x = relu(linear(x, channel, scope='linear_1'))
mu = linear(x, channel, scope='mu')
sigma = linear(x, channel, scope='sigma')
mu = tf.reshape(mu, shape=[-1, 1, 1, channel])
sigma = tf.reshape(sigma, shape=[-1, 1, 1, channel])
return mu, sigma
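# mu and sigma produced here are the adaptive-instance-normalization parameters
# consumed by the adaptive_resblock calls in the decoder: the style code is
# mapped through this MLP and then shifts/scales the content features.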
def _get_tensor_by_name(self, tensor_list):
return [self.graph.get_tensor_by_name(i) for i in tensor_list]
def save_network(self, save_location, verbose=True):
""" Save the network to some location"""
self.saver.save(self.sess, ''.join([save_location]))
if verbose: print('Network Saved')
def load_network(self, load_location, verbose=True):
""" Retrieve the network from some location"""
self.saver = tf.train.import_meta_graph(load_location + '.meta')
self.saver.restore(self.sess, tf.train.latest_checkpoint('/'.join(load_location.split('/')[:-1]) + '/'))
if verbose: print('Network Loaded')
def encode(self, X, only_z=False):
"""encode input into z values and output"""
return self.sess.run((self.x_tilde, self.z_gen_style_net_real), {self.x_input: X})
def encode_x(self, x, zs_shape, zc_shape, batch_size):
nex = np.ceil(len(x) / batch_size).astype('int')
face_z = [np.zeros([nex * batch_size] + list(zs_shape)), np.zeros([nex * batch_size] + list(zc_shape))]
face_x = np.zeros([nex * batch_size] + list(np.shape(x)[1:]))
face_x[:len(x)] = x
for batch in np.arange(nex):
cur_batch = face_x[int(batch * batch_size):int((batch + 1) * batch_size)]
zs_out, zc_out = self.sess.run([self.z_gen_style_net_real, self.z_gen_content_net_real],
{self.x_input: cur_batch})
face_z[0][batch * batch_size:(batch + 1) * batch_size] = zs_out
face_z[1][batch * batch_size:(batch + 1) * batch_size] = zc_out
z_final = [face_z[0][:len(x)], face_z[1][:len(x)]]
return z_final
def decode_z(self, z, x_shape, batch_size):
nex = np.ceil(len(z[0]) / batch_size).astype('int')
face_x = np.zeros([nex * batch_size] + list(x_shape))
face_z = [np.zeros([nex * batch_size] + list(np.shape(z[0])[1:])),
np.zeros([nex * batch_size] + list(np.shape(z[1])[1:]))]
face_z[0][:len(z[0])] = z[0]
face_z[1][:len(z[1])] = z[1]
for batch in np.arange(nex):
cur_batch = [face_z[0][int(batch * batch_size):int((batch + 1) * batch_size)],
face_z[1][int(batch * batch_size):int((batch + 1) * batch_size)]]
x_out = self.sess.run(self.x_fake_from_real, {self.z_gen_style_net_real: cur_batch[0],
self.z_gen_content_net_real: cur_batch[1]})
face_x[batch * batch_size:(batch + 1) * batch_size] = x_out
faces_x_final = face_x[:len(z[0])]
return faces_x_final
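# Illustrative round trip (sketch only; `model` and `x` are placeholder names):
#   zs, zc = model.encode_x(x, zs_shape, zc_shape, model.batch_size)
#   x_hat = model.decode_z([zs, zc], x_shape=np.shape(x)[1:], batch_size=model.batch_size)
# Both helpers pad the inputs to a whole number of batches, run them through the
# generator, and trim the outputs back to len(x) examples.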
def norm(X):
return (X - np.min(X))
import keras
import numpy as np
import sys
import tensorflow as tf
import cv2
def random_crop(x,dn):
dx = np.random.randint(dn, size=1)
# Copyright (c) 2018 <NAME>
from collections import namedtuple
from PIL import Image
import numpy as np
def to_bgr(img_arr):
return img_arr[:, :, ::-1]
def convert_image(img):
arr = np.array(img)
#
# For this is how God loved the world:<br/>
# he gave his only Son, so that everyone<br/>
# who believes in him may not perish<br/>
# but may have eternal life.
#
# John 3:16
#
import numpy as np
from aRibeiro.math import *
from aRibeiro.opengl import *
#from multiprocessing import RLock
from threading import RLock
# threading.get_ident() works, or threading.current_thread().ident (or threading.currentThread().ident for Python < 2.6).
from aRibeiro.window import *
class LineRenderer:
def __init__(self, window:Window, line_width = 1.5):
self.window = window
self.line_width = line_width
self.shader = PositionColorShader(window)
self.vertex_line_pos = np.array([], dtype=np.float32)
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from .. import FeaturePipelineRuntime
class DummyTransformerRuntime(BaseEstimator, TransformerMixin):
def __init__(self, constant=1):
self.constant = constant
def fit(self, X, y=None):
pass
def transform(self, X):
return self.constant * np.ones((X.shape[0], 1))
def fit_transform(self, X, y=None, **fit_kwargs):
self.fit(X, y, **fit_kwargs)
return self.transform(X)
def test_one_schema_drop():
X_np = np.array([[0, 1], [1, 0]])
pipe = FeaturePipelineRuntime([
('dt', DummyTransformerRuntime(), [0])])
X_exp = np.ones((X_np.shape[0], 1))
X_act = pipe.fit_transform(X_np)
assert np.all(X_exp == X_act)
def test_one_schema_keep():
X_np = np.array([[0, 1], [1, 0]])
pipe = FeaturePipelineRuntime(
[('dt', DummyTransformerRuntime(), [0])],
drop=False)
X_exp = np.hstack([X_np, np.ones((X_np.shape[0], 1))])
X_act = pipe.fit_transform(X_np)
assert np.all(X_exp == X_act)
def test_many_schema_drop():
X_np = np.array([[0, 1], [1, 0]])
pipe = FeaturePipelineRuntime([
('dt_1', DummyTransformerRuntime(), [0]),
('dt_0', DummyTransformerRuntime(constant=0), [1])])
X_exp = np.hstack([np.ones((X_np.shape[0], 1)),
np.zeros((X_np.shape[0], 1))])
X_act = pipe.fit_transform(X_np)
assert np.all(X_exp == X_act)
import copy
import itertools
import json
import logging
import os
from collections import OrderedDict
import numpy as np
from PIL import Image, ImageDraw
import pycocotools.mask as mask_util
import torch
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.modeling.matcher import Matcher
from detectron2.structures import Boxes, pairwise_iou
from detectron2.utils import comm
from fibercnn.visualization.utilities import display_image
from fvcore.common.file_io import PathManager
class FiberEvaluator(DatasetEvaluator):
"""
Evaluate predicted fiber lengths and fiber widths of instances.
"""
def __init__(self, dataset_name, cfg, distributed, output_dir=None):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
cfg (CfgNode): config instance
distributed (bool): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains:
"instances_results.json" a json file containing the evaluation results.
"""
self._predictions = []
self._fiber_results = []
self._results = None
# Matcher to assign predictions to annotations
self._bbox_matcher = Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
)
self._tasks = ("fiberwidth", "fiberlength")
self._modes = ("strict", "loose")
self._distributed = distributed
self._output_dir = output_dir
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
self._metadata = MetadataCatalog.get(dataset_name)
assert hasattr(
self._metadata, "json_file"
), f"json_file was not found in MetaDataCatalog for '{dataset_name}'"
self._get_annotations()
def _get_annotations(self):
json_file = PathManager.get_local_path(self._metadata.json_file)
with open(json_file) as f:
self._annotations = json.load(f)["annotations"]
self._convert_annotation_bboxes()
def _convert_annotation_bboxes(self):
for annotation in self._annotations:
x1, y1, width, height = annotation["bbox"]
new_bbox = torch.tensor([x1, y1, x1 + width, y1 + height])
new_bbox = new_bbox.unsqueeze(0)
new_bbox = Boxes(new_bbox)
annotation["bbox"] = new_bbox
def reset(self):
self._predictions = []
self._fiber_results = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a FibeRCNN model
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a FibeRCNN model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
# TODO this is ugly
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_evaluatable_format(
instances, input["image_id"]
)
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
self._predictions.append(prediction)
def evaluate(self):
if self._distributed:
comm.synchronize()
self._predictions = comm.gather(self._predictions, dst=0)
self._predictions = list(itertools.chain(*self._predictions))
if not comm.is_main_process():
return {}
if len(self._predictions) == 0:
self._logger.warning("[FiberEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(self._predictions, f)
self._results = OrderedDict()
if "instances" in self._predictions[0]:
self._eval_predictions()
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _eval_predictions(self):
"""
Evaluate self._predictions on the given tasks.
Fill self._results with the metrics of the tasks.
"""
self._fiber_results = list(itertools.chain(*[x["instances"] for x in self._predictions]))
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
reverse_id_mapping = {
v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
}
for result in self._fiber_results:
category_id = result["category_id"]
assert (
category_id in reverse_id_mapping
), "A prediction has category_id={}, which is not available in the dataset.".format(
category_id
)
result["category_id"] = reverse_id_mapping[category_id]
self._logger.info("Evaluating predictions ...")
annotation_image_ids = set(_extract_instances_property(self._annotations, "image_id"))
for task in self._tasks:
self._logger.info(f"Task: {task}")
self._results[task] = {}
for mode in self._modes:
percentage_errors = []
for image_id in annotation_image_ids:
image_predictions = _filter_by_image_id(self._fiber_results, image_id)
if len(image_predictions) == 0:
continue
image_annotations = _filter_by_image_id(self._annotations, image_id)
matched_image_annotations, matched_labels = self._match_annotations(
image_annotations, image_predictions
)
percentage_errors.append(
_get_percentage_errors(
image_predictions, matched_image_annotations, matched_labels, task, mode
)
)
percentage_errors = np.concatenate(percentage_errors)
mean_absolute_percentage_error = np.mean(np.abs(percentage_errors))
self._results[task][f"MAPE_{mode}"] = mean_absolute_percentage_error
self._logger.info(f"MAPE_{mode}: {mean_absolute_percentage_error}")
def _match_annotations(self, image_annotations, image_predictions):
# TODO: Evaluate the number of detected instances.
prediction_boxes = Boxes.cat(_extract_instances_property(image_predictions, "bbox"))
annotation_boxes = Boxes.cat(_extract_instances_property(image_annotations, "bbox"))
match_quality_matrix = pairwise_iou(annotation_boxes, prediction_boxes)
matched_idxs, matched_labels = self._bbox_matcher(match_quality_matrix)
matched_image_annotations = [image_annotations[i] for i in matched_idxs]
return matched_image_annotations, matched_labels
def _get_percentage_errors(
image_predictions, matched_image_annotations, matched_labels, measurand, mode
):
assert mode in ["strict", "loose"], f"Unexpected mode: {mode}"
is_valid_match = np.atleast_1d(matched_labels > 0)
targets = _extract_instances_property(matched_image_annotations, measurand)
targets = np.array(targets)
predictions = _extract_instances_property(image_predictions, measurand)
predictions = np.concatenate(predictions)
predictions = predictions * matched_labels.numpy()
if mode == "loose":
predictions = predictions[is_valid_match]
targets = targets[is_valid_match]
errors = predictions - targets
percentage_errors = errors / targets * 100
return percentage_errors
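# Worked example (illustrative): a matched annotation with fiberlength 100 and a
# prediction of 90 gives error = -10 and a percentage error of -10 %; the MAPE
# logged in _eval_predictions is the mean of the absolute percentage errors.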
def _extract_instances_property(instances, property_name):
return [annotation[property_name] for annotation in instances]
def instances_to_evaluatable_format(instances, img_id):
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and takes memory
# since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
has_fiberlength = instances.has("pred_fiberlength")
if has_fiberlength:
fiberlengths = instances.pred_fiberlength
fiberlengths = np.array(fiberlengths)
#
# This file is part of minimalFE.
#
# Created by <NAME> on 04.05.21.
# Copyright (c) ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, Geo-Energy Laboratory, 2016-2021. All rights reserved.
# See the LICENSE.TXT file for more details.
#
import numpy as np
from numba import jit
# FUNCTIONS Utilities to Handle some unstructured mesh computations
@jit(nopython=True)
def project_surface_elt(xae3D):
# this function projects the coordinates of a 3D planar surface element (i.e. a triangle) into its plane
# and return
R = surface_elt_local(xae3D)
xae_proj=xae3D-xae3D[0]
for i in range(3) :
xae_proj[i] = np.dot(R,xae_proj[i])
return xae_proj[:,0:2]
@jit(nopython=True)
def surface_elt_local(xae3D):
# this function returns the local cartesian frame of a 3D planar surface element
# as a rotation matrix [tangent_1, tangent_2, normal]
# xae3D has the coordinates of the vertex (we only take the first 3 vertex assuming a triangle)
# vertex1 = xae3D[0,:], vertex2=xae3D[1,:]
tangent_1=xae3D[1]-xae3D[0]
tangent_1=tangent_1/(np.linalg.norm(tangent_1))
normal =np.cross(tangent_1,xae3D[2]-xae3D[0])
normal = normal/(np.linalg.norm(normal))
tangent_2= np.cross(normal,tangent_1)
res = np.empty((3, 3)) # Instantiating NumPy arrays via a list of NumPy arrays, or even a list of lists, is not supported by numba.njit. Instead, use np.empty and then assign values via NumPy indexing
res[0]=tangent_1
res[1]=tangent_2
res[2]=normal
return res
#myxx=np.array([[0.,0.,0.],[0.,0.,1.],[0.,1.,0.]])
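# For the commented triangle above, surface_elt_local returns the rows
# tangent_1 = (0, 0, 1), tangent_2 = (0, 1, 0), normal = (-1, 0, 0), and
# project_surface_elt maps the vertices to the in-plane coordinates
# [[0, 0], [1, 0], [0, 1]] (illustrative check of the two functions).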
# 2D segment
@jit(nopython=True)
def project_segment_elt(xae2D):
# this function projects the coordinates of a 2D planar surface element into its 1D plane
# and return
R = segment_elt_local(xae2D)
xae_proj=xae2D-xae2D[0]
for i in range(2) :
xae_proj[i] = np.dot(R,xae_proj[i])
return xae_proj[:,0]
@jit(nopython=True)
def segment_elt_local(xae2D):
# tangent vector
tangent=xae2D[1]-xae2D[0]
tangent = tangent / (np.linalg.norm(tangent))
#!/usr/bin/env python
# coding: utf-8
"""
Implementation of Deep Deterministic Policy Gradients (DDPG) network
using PyTorch.
See https://arxiv.org/abs/1509.02971 for algorithm details.
@author: <NAME> 2020 (<EMAIL>)
Project for CityLearn Competition
"""
# In[1]:
# Import Packages
from agent_DDPG import Agent
from agent_DDPG import RBC_Agent
import numpy as np
from pathlib import Path
import torch
from torch.utils.tensorboard import SummaryWriter
from citylearn import CityLearn
from algo_utils import graph_building, tabulate_table
# Extra packages for postprocessing
import time
import os
import getopt, sys
import warnings
warnings.simplefilter("ignore", UserWarning) # Ignore casting to float32 warnings
# In[2]:
# Load environment
climate_zone = 1
data_path = Path("data/Climate_Zone_"+str(climate_zone))
building_attributes = data_path / 'building_attributes.json'
weather_file = data_path / 'weather_data.csv'
solar_profile = data_path / 'solar_generation_1kW.csv'
building_state_actions = 'buildings_state_action_space.json'
#building_ids = ["Building_1","Building_2","Building_3","Building_4","Building_5","Building_6","Building_7","Building_8","Building_9"]
building_ids = ["Building_1"]
objective_function = ['ramping','1-load_factor','average_daily_peak','peak_demand','net_electricity_consumption']
env = CityLearn(data_path, building_attributes, weather_file, solar_profile, building_ids, buildings_states_actions = building_state_actions, cost_function = objective_function, central_agent = False, verbose = 0)
# Contain the lower and upper bounds of the states and actions, to be provided to the agent to normalize the variables between 0 and 1.
# Can be obtained using observations_spaces[i].low or .high
observations_spaces, actions_spaces = env.get_state_action_spaces()
# Provides information on Building type, Climate Zone, Annual DHW demand, Annual Cooling Demand, Annual Electricity Demand, Solar Capacity, and correlations among buildings
building_info = env.get_building_information()
# In[ ]:
"""
###################################
STEP 1: Set the Training Parameters
======
num_episodes (int): maximum number of training episodes
episode_scores (float): list to record the scores obtained from each episode
scores_average_window (int): the window size employed for calculating the average score (e.g. 100)
checkpoint_interval (int): Interval of the number of steps to save a checkpoint of the agents
iteration_interval (int): Interval of the number of steps to save logging parameters (e.g. loss)
"""
num_episodes=500
episode_scores = []
scores_average_window = 5
checkpoint_interval = 8760
iteration_interval = 100
rollout_interval = 50
chk_load_dir = None
# REWARD SHAPING CONSTANTS
peak_constant = 60
ramping_constant = 0.002
consumption_constant = 0.2
# Ref: https://stackabuse.com/command-line-arguments-in-python/
# Get full command-line arguments
full_cmd_arguments = sys.argv
argument_list = full_cmd_arguments[1:]
short_options = "e:c:"
long_options = ["episodes=","checkpoints="]
arguments, values = [], []
try:
arguments, values = getopt.getopt(argument_list, short_options, long_options)
for c, v in arguments:
if c in ("-e", "--episdoes"):
num_episodes = int(v)
elif c in ("-c", "--checkpoints"):
chk_load_dir = v
except getopt.error as err:
print (str(err))
sys.exit(2)
"""
#############################################
STEP 2: Determine the size of the Action and State Spaces and the Number of Agents
The observation space consists of various variables corresponding to the
building_state_action_json file. See https://github.com/intelligent-environments-lab/CityLearn
for more information about the states. Each agent receives all observations of all buildings
(communication between buildings).
Up to two continuous actions are available, corresponding to whether to charge or discharge
the cooling storage and DHW storage tanks.
"""
# Get number of agents in Environment
num_agents = env.n_buildings
print('\nNumber of Agents: ', num_agents)
# Set the size of state observations or state size
print('\nSize of State: ', observations_spaces)
"""
#############################################
STEP 3: Run the RBC Controller to extract baseline costs
"""
# Instantiating the control agent(s)
agent = RBC_Agent(actions_spaces)
state = env.reset()
done = False
rewards_list = []
while not done:
action = agent.select_action(state)
next_state, reward, done, _ = env.step(action)
state = next_state
rewards_list.append(reward)
cost_rbc = env.get_baseline_cost()
print(cost_rbc)
"""
###################################
STEP 4: Create DDPG Agents from the Agent Class in ddpg_agent.py
A DDPG agent initialized with the following parameters.
======
building_info: Dictionary with building information as described above
state_size (list): List of lists with observation spaces of all buildings selected
action_size (list): List of lists with action spaces of all buildings selected
seed (int): random seed for initializing training point (default = 0)
The individual agents are defined within the Agent call for each building
"""
agent = Agent(building_info, state_size=observations_spaces, action_size=actions_spaces, random_seed=0, load_dir=chk_load_dir)
"""
###################################
STEP 5: Run the DDPG Training Sequence
The DDPG Training Process involves the agent learning from repeated episodes of behaviour
to map states to actions the maximize rewards received via environmental interaction.
The agent training process involves the following:
(1) Reset the environment at the beginning of each episode.
(2) Obtain (observe) current state, s, of the environment at time t
(3) Perform an action, a(t), in the environment given s(t)
(4) Observe the result of the action in terms of the reward received and
the state of the environment at time t+1 (i.e., s(t+1))
(5) Update agent memory and learn from experience (i.e, agent.step)
(6) Update episode score (total reward received) and set s(t) -> s(t+1).
(7) If episode is done, break and repeat from (1), otherwise repeat from (3).
"""
# Measure the time taken for training
start_timer = time.time()
# Store the weights and scores in a new directory
parent_dir = "alg/ddpg{}/".format(time.strftime("%Y%m%d-%H%M%S")) # apprends the timedate
os.makedirs(parent_dir, exist_ok=True)
# Summary Writer setup
# Writer will output to ./runs/ directory by default
writer = SummaryWriter(log_dir=parent_dir+"tensorboard/")
print("Saving TB to {}".format(parent_dir+"tensorboard/"))
# Create the final directory
final_dir = parent_dir + "final/"
iteration_step = 0
# loop from num_episodes
for i_episode in range(1, num_episodes+1):
# reset the environment at the beginning of each episode
states = env.reset()
# set the initial episode score to zero.
agent_scores = np.zeros(num_agents)
# Run the episode training loop;
# At each loop step take an action as a function of the current state observations
# Based on the resultant environment state (next_state) and the reward received, update the Agent's Actor and Critic networks
# If environment episode is done, exit loop.
# Otherwise repeat until done == true
while True:
# determine actions for the agents from current sate, using noise for exploration
action = agent.select_action(states, add_noise=True)
#print(actions)
# send the actions to the agents in the environment and receive resultant environment information
next_states, reward, done, _ = env.step(action)
# Calculate ramping rate
consumption_last_time = reward
ramping = [abs(x - y) for x, y in zip(reward, consumption_last_time)]
# Apply Shaped Reward Function
reward = [peak_constant*(x/cost_rbc['peak_demand']) - y * ramping_constant + consumption_constant*x for x, y in zip(reward, ramping)]
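# The shaped reward combines a peak-demand term normalised by the RBC baseline,
# a ramping penalty and a raw consumption term, weighted by the constants set in
# STEP 1. Note that consumption_last_time is assigned from the current reward
# just above, so the ramping term is zero within a single step unless the
# previous step's consumption is stored across iterations.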
#Send (S, A, R, S') info to the training agent for replay buffer (memory) and network updates
agent.step(states, action, reward, next_states, done)
# Learn every rollout number of steps (if enough samples have been collected)
if iteration_step % rollout_interval ==0:
agent.rollout(reward)
# set new states to current states for determining next actions
states = next_states
# Update episode score for each agent
agent_scores += reward
if iteration_step % iteration_interval == 0:
buildings_reward_dict = {}
building_idx=1
for building in reward:
buildings_reward_dict["Building {}".format(building_idx)] = building
building_idx += 1
# Building reward
writer.add_scalars("Reward/Buildings", buildings_reward_dict, iteration_step)
agent_scores_dict = {}
agent_idx=1
for agentS in agent_scores:
agent_scores_dict["Agent {}".format(agent_idx)] = agentS
agent_idx += 1
# Agent scores
#writer.add_scalars("Scores/Agents", agent_scores_dict, iteration_step)
# Plot losses for critic and actor
if agent.critic_loss is not None:
writer.add_scalar("Losses/Critic Loss", agent.critic_loss, iteration_step)
if agent.actor_loss is not None:
writer.add_scalar("Losses/Actor Loss", agent.actor_loss, iteration_step)
# Action choices
at = np.array(agent.action_tracker)
# -*- coding: utf-8 -*-
"""
Written by:
<NAME>
- <EMAIL> OR
- <EMAIL>
Postdoctoral Fellow at:
Norwegian University of Science and Technology, NTNU
Department of Marine Technology, IMT
Marine System Dynamics and Vibration Lab, MD Lab
https://www.ntnu.edu/imt/lab/md-lab
@author: geraldod
"""
# import sys
from numpy import pi, sin, tan, radians, isscalar, mean, eye, allclose, diag, \
sqrt, zeros
from scipy import interpolate, array
from scipy.stats import hmean
from scipy.linalg import block_diag
from matplotlib.pyplot import gca
from matplotlib.patches import Rectangle
###############################################################################
def check_key(key, dic):
'''
Check whether `key` occurs as a substring of any key in `dic`.
Parameters
----------
key : string
(partial) key to look for.
dic : dict
dictionary whose keys are searched.
Returns
-------
val : float
value of the last matching entry, or 1.0 if no key matches.
'''
val = 1.0
for k, v in dic.items():
if(key in k):
val = v
return val
###############################################################################
class Material:
'''
Simple class to store some properties of materials used to manufacture
gears.
'''
def __init__(self):
self.E = 206.0e9 # [Pa], Young's modulus
self.nu = 0.3 # [-], Poisson's ratio
self.sigma_Hlim = 1500.0e6 # [Pa], Allowable contact stress number
self.rho = 7.83e3 # [kg/m**3], Density
self.S_ut = 700.0e6 # [Pa], Tensile strength
self.S_y = 490.0e6 # [Pa], Yield strength
# % [Pa], Shear modulus
self.G = (self.E/2.0)/(1.0 + self.nu)
###############################################################################
class Rack:
'''
Implements some characteristics of the standard basic rack tooth profile
for cylindrical involute gears (external or internal) for general and
heavy engineering.
References:
[1] ISO 53:1998 Cylindrical gears for general and heavy engineering
-- Standard basic rack tooth profile
[2] ISO 54:1996 Cylindrical gears for general engineering and for
heavy engineering -- Modules
written by:
<NAME>
- <EMAIL> OR
- <EMAIL>
Postdoctoral Fellow at:
Norwegian University of Science and Technology, NTNU
Department of Marine Technology, IMT
Marine System Dynamics and Vibration Lab, MD Lab
https://www.ntnu.edu/imt/lab/md-lab
'''
def __init__(self, **kwargs):
# main attributes:
# [-], Type of basic rack tooth profile:
self.type = kwargs['type'] if('type' in kwargs) else 'A'
# [mm], Module:
mm = kwargs['m'] if('m' in kwargs) else 1.0
self.m = self.module(mm)
# [deg.], Pressure angle:
self.alpha_P = kwargs['alpha_P'] if('alpha_P' in kwargs) else 20.0
# secondary attributes:
if(self.type == 'A'):
k_c_P = 0.25
k_rho_fP = 0.38
elif(self.type == 'B'):
k_c_P = 0.25
k_rho_fP = 0.30
elif(self.type == 'C'):
k_c_P = 0.25
k_rho_fP = 0.25
elif(self.type == 'D'):
k_c_P = 0.40
k_rho_fP = 0.39
else:
print('Rack type [{}] is NOT defined.'.format(self.type))
self.c_P = k_c_P*self.m # [mm], Bottom clearance
self.h_fP = (1.0 + k_c_P)*self.m # [mm], Dedendum
self.e_P = self.m/2.0 # [mm], Spacewidth
self.h_aP = self.m # [mm], Addendum
self.h_FfP = self.h_fP - self.c_P # [mm], Straight portion of the dedendum
self.h_P = self.h_aP + self.h_fP # [mm], Tooth depth
self.h_wP = self.h_P - self.c_P # [mm], Common depth of rack and tooth
self.p = pi*self.m # [mm], Pitch
self.s_P = self.e_P # [mm], Tooth thickness
self.rho_fP = k_rho_fP*self.m # [mm], Fillet radius
self.U_FP = 0.0 # [mm], Size of undercut
self.alpha_FP = 0.0 # [deg.], Angle of undercut
def __repr__(self):
'''
Return a string containing a printable representation of an object.
Returns
-------
None.
'''
val = ('Rack type: {} -\n'.format(self.type) +
'Module, m = {:7.3f} mm\n'.format(self.m) +
'Pressure angle, alpha_P = {:7.3f} deg.\n'.format(self.alpha_P) +
'Addendum, h_aP = {:7.3f} mm\n'.format(self.h_aP) +
'Dedendum, h_fP = {:7.3f} mm\n'.format(self.h_fP) +
'Tooth depth, h_P = {:7.3f} mm\n'.format(self.h_P) +
'Pitch, p = {:7.3f} mm\n'.format(self.p) +
'Tooth thickness, s_P = {:7.3f} mm\n'.format(self.s_P) +
'Fillet radius, rho_fP = {:7.3f} mm\n'.format(self.rho_fP))
return val
def save(self, filename):
pass
@staticmethod
def module(m_x, **kwargs):
'''
Returns the values of normal modules for straight and helical gears
according to ISO 54:1996 [2]. According to this standard, preference
should be given to the use of the normal modules as given in series I
and the module 6.5 in series II should be avoided.
The module is defined as the quotient between:
- the pitch, expressed in millimetres, to the number pi, or;
- the reference diameter, expressed in millimetres, by the number
of teeth.
'''
option = kwargs['option'] if('option' in kwargs) else 'calc'
method = kwargs['method'] if('method' in kwargs) else 'nearest'
m_1 = [1.000, 1.250, 1.50, 2.00, 2.50, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 12.0, 16.0, 20.0, 25.0, 32.0, 40.0, 50.0]
m_2 = [1.125, 1.375, 1.75, 2.25, 2.75, 3.5, 4.5, 5.5, 6.5, 7.0, 9.0, 11.0, 14.0, 18.0, 22.0, 28.0, 36.0, 45.0]
if(option == 'show'):
print('... to be implemented...')
elif(option == 'calc'):
# removing the value 6.5 which should be avoided according to [2]
# and inserting additional values due to (Nejad et. al., 2015) and
# (Wang et. al., 2020). See the classes NREL_5MW and DTU_10MW for
# detailed references about these works.
m_2[8] = 21.0
m_2.append(30.0)
x = sorted(m_1 + m_2)
elif(option == 'calc_1'):
x = m_1
elif(option == 'calc_2'):
x = m_2
else:
print('Option [{}] is NOT valid.'.format(option))
idx = interpolate.interp1d(x, list(range(len(x))), kind = method,
fill_value = (0, len(x) - 1))
return x[idx(m_x).astype(int)]
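# Example (illustrative): with the default 'calc' option and 'nearest'
# interpolation, Rack.module(3.7) snaps to the closest standard value in the
# merged series, i.e. 3.5.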
def max_fillet_radius(self):
'''
returns the maximum fillet radius of the basic rack according to
ISO 53:1998 [1], Sec. 5.9.
'''
if(self.alpha_P != 20.0):
print('The pressure angle is not 20.0 [deg.]')
else:
if((self.c_P <= 0.295*self.m) and (self.h_FfP == self.m)):
rho_fP_max = self.c_P/(1.0 - sin(radians(self.alpha_P)))
import unittest
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from clusterz.algs.kzmedian import (
DistributedKZMedian, BELDistributedKMedian, k_median_my, kz_median, KZMedian, KMedianWrapped
)
class MyTestCase(unittest.TestCase):
def setUp(self):
cluster = np.random.uniform(-1, 1, size=(40, 2))
self.centers_ = np.array([
[0, 30], [0, -30]
])
self.outliers_ = np.array([
[80, 0], [-80, 0]
])
# data set on a single machine
self.X_without_outliers_ = np.vstack(
[self.centers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
self.X_with_outliers_ = np.vstack(
[self.centers_,
self.outliers_,
# clusters
cluster + self.centers_[0] + np.array([5, 0]),
cluster + self.centers_[0] + np.array([-5, 0]),
cluster + self.centers_[1] + np.array([5, 0]),
cluster + self.centers_[1] + np.array([-5, 0])])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>, <EMAIL>
#!python3
"""
A module to produce a catalog of spectral energy distributions
"""
import os
import pickle
from copy import copy
from pkg_resources import resource_filename
import shutil
from astropy.io import ascii
import astropy.table as at
import astropy.units as q
import numpy as np
from bokeh.models import HoverTool, ColumnDataSource, LabelSet
from bokeh.plotting import figure, show
from bokeh.models.glyphs import Patch
from .sed import SED
from . import utilities as u
class Catalog:
"""An object to collect SED results for plotting and analysis"""
def __init__(self, name='SED Catalog', marker='circle', color='blue', verbose=True, **kwargs):
"""Initialize the Catalog object"""
# Metadata
self.verbose = verbose
self.name = name
self.marker = marker
self.color = color
self.wave_units = q.um
self.flux_units = q.erg/q.s/q.cm**2/q.AA
# List all the results columns
self.cols = ['name', 'ra', 'dec', 'age', 'age_unc', 'distance', 'distance_unc',
'parallax', 'parallax_unc', 'radius', 'radius_unc',
'spectral_type', 'spectral_type_unc', 'SpT',
'membership', 'reddening', 'fbol', 'fbol_unc', 'mbol',
'mbol_unc', 'Lbol', 'Lbol_unc', 'Lbol_sun',
'Lbol_sun_unc', 'Mbol', 'Mbol_unc', 'logg', 'logg_unc',
'mass', 'mass_unc', 'Teff', 'Teff_unc', 'SED']
# A master table of all SED results
self.results = self.make_results_table(self)
# Try to set attributes from kwargs
for k, v in kwargs.items():
setattr(self, k, v)
def __add__(self, other, name=None):
"""Add two catalogs together
Parameters
----------
other: sedkit.catalog.Catalog
The Catalog to add
Returns
-------
sedkit.catalog.Catalog
The combined catalog
"""
if not type(other) == type(self):
raise TypeError('Cannot add object of type {}'.format(type(other)))
# Make a new catalog
new_cat = Catalog(name=name or self.name)
# Combine results
new_results = at.vstack([at.Table(self.results), at.Table(other.results)])
new_cat.results = new_results
return new_cat
def add_column(self, name, data, unc=None):
"""
Add a column of data to the results table
Parameters
----------
name: str
The name of the new column
data: sequence
The data array
unc: sequence (optional)
The uncertainty array
"""
# Make sure column doesn't exist
if name in self.results.colnames:
raise ValueError("{}: Column already exists.".format(name))
# Make sure data is the right length
if len(data) != len(self.results):
raise ValueError("{} != {}: Data is not the right size for this catalog.".format(len(data), len(self.results)))
# Add the column
self.results.add_column(data, name=name)
# Add uncertainty column
if unc is not None:
# Uncertainty name
name = name + '_unc'
# Make sure column doesn't exist
if name in self.results.colnames:
raise ValueError("{}: Column already exists.".format(name))
# Make sure data is the right length
if len(unc) != len(self.results):
raise ValueError(
"{} != {}: Data is not the right size for this catalog.".format(len(unc), len(self.results)))
# Add the column
self.results.add_column(unc, name=name)
def add_SED(self, sed):
"""Add an SED to the catalog
Parameters
----------
sed: sedkit.sed.SED
The SED object to add
"""
# Overwrite duplicate names
idx = None
if sed.name in self.results['name']:
self.message("{}: Target already in catalog. Overwriting with new SED...")
idx = np.where(self.results['name'] == sed.name)[0][0]
# Turn off print statements
sed.verbose = False
# Check the units
sed.wave_units = self.wave_units
sed.flux_units = self.flux_units
# Run the SED
sed.make_sed()
# Add the values and uncertainties if applicable
new_row = {}
for col in self.cols[:-1]:
if col + '_unc' in self.cols:
if isinstance(getattr(sed, col), tuple):
val = getattr(sed, col)[0]
else:
val = None
elif col.endswith('_unc'):
if isinstance(getattr(sed, col.replace('_unc', '')), tuple):
val = getattr(sed, col.replace('_unc', ''))[1]
else:
val = None
else:
val = getattr(sed, col)
val = val.to(self.results[col.replace('_unc', '')].unit).value if hasattr(val, 'unit') else val
new_row[col] = val
# Add the SED
new_row['SED'] = sed
# Append apparent and absolute photometry
for row in sed.photometry:
# Add the column to the results table
if row['band'] not in self.results.colnames:
self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name=row['band']))
self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name=row['band'] + '_unc'))
self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name='M_' + row['band']))
self.results.add_column(at.Column([np.nan] * len(self.results), dtype=np.float16, name='M_' + row['band'] + '_unc'))
# Add the apparent magnitude
new_row[row['band']] = row['app_magnitude']
# Add the apparent uncertainty
new_row[row['band'] + '_unc'] = row['app_magnitude_unc']
# Add the absolute magnitude
new_row['M_' + row['band']] = row['abs_magnitude']
# Add the absolute uncertainty
new_row['M_' + row['band'] + '_unc'] = row['abs_magnitude_unc']
# Add the new row...
if idx is None:
self.results.add_row(new_row)
# ...or replace existing
else:
self.results[idx] = new_row
self.message("Successfully added SED '{}'".format(sed.name))
def export(self, parentdir='.', dirname=None, format='ipac', sources=True, zipped=False):
"""
Exports the results table and a directory of all SEDs
Parameters
----------
parentdir: str
The parent directory for the folder or zip file
dirname: str (optional)
The name of the exported directory or zip file, default is SED name
format: str
The format of the output results table
sources: bool
Export a directory of all source SEDs too
zipped: bool
Zip the directory
"""
# Check the parent directory
if not os.path.exists(parentdir):
raise IOError('No such target directory', parentdir)
# Check the target directory
name = self.name.replace(' ', '_')
dirname = dirname or name
dirpath = os.path.join(parentdir, dirname)
# Replace '.' and '/' in column names
final = at.Table(self.results).filled(np.nan)
for col in final.colnames:
final.rename_column(col, col.replace('.', '_').replace('/', '_'))
# Write a directory of results and all SEDs...
if sources:
# Make a directory
if not os.path.exists(dirpath):
os.system('mkdir {}'.format(dirpath))
else:
raise IOError('Directory already exists:', dirpath)
# Export the results table
resultspath = os.path.join(dirpath, '{}_results.txt'.format(name))
final.write(resultspath, format=format)
# Make a sources directory
sourcedir = os.path.join(dirpath,'sources')
os.system('mkdir {}'.format(sourcedir))
# Export all SEDs
for source in self.results['SED']:
source.export(sourcedir)
# zip if desired
if zipped:
shutil.make_archive(dirpath, 'zip', dirpath)
os.system('rm -R {}'.format(dirpath))
# ...or just write the results table
else:
resultspath = dirpath + '_results.txt'
final.write(resultspath, format=format)
def filter(self, param, value):
"""Retrieve the filtered rows
Parameters
----------
param: str
The parameter to filter by, e.g. 'Teff'
value: str, float, int, sequence
The criteria to filter by,
which can be single valued like 1400
or a range with operators [<,<=,>,>=],
e.g. (>1200,<1400), ()
Returns
-------
sedkit.sed.Catalog
The filtered catalog
"""
# Make a new catalog
cat = Catalog()
cat.results = u.filter_table(self.results, **{param: value})
return cat
def from_file(self, filepath, run_methods=['find_2MASS'], delimiter=','):
"""Generate a catalog from a file of source names and coordinates
Parameters
----------
filepath: str
The path to an ASCII file
run_methods: list
A list of methods to run
delimiter: str
The column delimiter of the ASCII file
"""
# Get the table of sources
data = ascii.read(filepath, delimiter=delimiter)
self.message("Generating SEDs for {} sources from {}".format(len(data), filepath))
# Iterate over table
for row in data:
try:
# Make the SED
s = SED(row['name'], verbose=False)
if 'ra' in row and 'dec' in row:
s.sky_coords = row['ra'] * q.deg, row['dec'] * q.deg
# Run the desired methods
s.run_methods(run_methods)
# Add it to the catalog
self.add_SED(s)
except:
self.message("Could not add SED '{}".format(row['name']))
def get_data(self, *args):
"""Fetch the data for the given columns
"""
results = []
for x in args:
# Get the data
if '-' in x:
x1, x2 = x.split('-')
if self.results[x1].unit != self.results[x2].unit:
raise TypeError('Columns must be the same units.')
xunit = self.results[x1].unit
xdata = self.results[x1] - self.results[x2]
xerror = np.sqrt(self.results['{}_unc'.format(x1)]**2 + self.results['{}_unc'.format(x2)]**2)
else:
xunit = self.results[x].unit
xdata = self.results[x]
xerror = self.results['{}_unc'.format(x)]
# Append to results
results.append([xdata, xerror, xunit])
return results
def get_SED(self, name_or_idx):
"""Retrieve the SED for the given object
Parameters
----------
idx_or_name: str, int
The name or index of the SED to get
"""
# Add the index
self.results.add_index('name')
# Get the rows
if isinstance(name_or_idx, str) and name_or_idx in self.results['name']:
return copy(self.results.loc[name_or_idx]['SED'])
elif isinstance(name_or_idx, int) and name_or_idx <= len(self.results):
return copy(self.results[name_or_idx]['SED'])
else:
self.message('Could not retrieve SED {}'.format(name_or_idx))
return
def load(self, file):
"""Load a saved Catalog"""
if os.path.isfile(file):
f = open(file, 'rb')
cat = pickle.load(f)
f.close()
self.results = cat
@staticmethod
def make_results_table(self):
"""Generate blank results table"""
results = at.QTable(names=self.cols, dtype=['O'] * len(self.cols))
results.add_index('name')
# Set the units
results['age'].unit = q.Gyr
results['age_unc'].unit = q.Gyr
results['distance'].unit = q.pc
results['distance_unc'].unit = q.pc
results['parallax'].unit = q.mas
results['parallax_unc'].unit = q.mas
results['radius'].unit = q.Rsun
results['radius_unc'].unit = q.Rsun
results['fbol'].unit = q.erg / q.s / q.cm ** 2
results['fbol_unc'].unit = q.erg / q.s / q.cm ** 2
results['Lbol'].unit = q.erg / q.s
results['Lbol_unc'].unit = q.erg / q.s
results['mass'].unit = q.Msun
results['mass_unc'].unit = q.Msun
results['Teff'].unit = q.K
results['Teff_unc'].unit = q.K
return results
def message(self, msg, pre='[sedkit]'):
"""
Only print message if verbose=True
Parameters
----------
msg: str
The message to print
pre: str
The stuff to print before
"""
if self.verbose:
if pre is None:
print(msg)
else:
print("{} {}".format(pre, msg))
def plot(self, x, y, marker=None, color=None, scale=['linear','linear'],
xlabel=None, ylabel=None, fig=None, order=None, identify=[],
id_color='red', label_points=False, draw=True, **kwargs):
"""Plot parameter x versus parameter y
Parameters
----------
x: str
The name of the x axis parameter, e.g. 'SpT'
y: str
The name of the y axis parameter, e.g. 'Teff'
marker: str (optional)
The name of the method for the desired marker
color: str (optional)
The color to use for the points
scale: sequence
The (x,y) scale for the plot
xlabel: str
The label for the x-axis
        ylabel: str
            The label for the y-axis
fig: bokeh.plotting.figure (optional)
The figure to plot on
order: int
The polynomial order to fit
identify: idx, str, sequence
Names of sources to highlight in the plot
id_color: str
The color of the identified points
label_points: bool
Print the name of the object next to the point
Returns
-------
bokeh.plotting.figure.Figure
The figure object
"""
# Grab the source and valid params
source = copy(self.source)
params = [k for k in source.column_names if not k.endswith('_unc')]
# If no uncertainty column for parameter, add it
if '{}_unc'.format(x) not in source.column_names:
_ = source.add([None] * len(source.data['name']), '{}_unc'.format(x))
if '{}_unc'.format(y) not in source.column_names:
_ = source.add([None] * len(source.data['name']), '{}_unc'.format(y))
# Check if the x parameter is a color
if '-' in x and all([i in params for i in x.split('-')]):
colordata = self.get_data(x)[0]
if len(colordata) == 3:
_ = source.add(colordata[0], x)
_ = source.add(colordata[1], '{}_unc'.format(x))
params.append(x)
# Check if the y parameter is a color
if '-' in y and all([i in params for i in y.split('-')]):
colordata = self.get_data(y)[0]
if len(colordata) == 3:
_ = source.add(colordata[0], y)
_ = source.add(colordata[1], '{}_unc'.format(y))
params.append(y)
# Check the params are in the table
if x not in params:
raise ValueError("'{}' is not a valid x parameter. Please choose from {}".format(x, params))
if y not in params:
raise ValueError("'{}' is not a valid y parameter. Please choose from {}".format(y, params))
# Make the figure
if fig is None:
# Tooltip names can't have '.' or '-'
xname = source.add(source.data[x], x.replace('.', '_').replace('-', '_'))
yname = source.add(source.data[y], y.replace('.', '_').replace('-', '_'))
# Set up hover tool
tips = [('Name', '@name'), (x, '@{}'.format(xname)), (y, '@{}'.format(yname))]
hover = HoverTool(tooltips=tips, names=['points'])
# Make the plot
TOOLS = ['pan', 'reset', 'box_zoom', 'wheel_zoom', 'save', hover]
title = '{} v {}'.format(x, y)
fig = figure(plot_width=800, plot_height=500, title=title, y_axis_type=scale[1], x_axis_type=scale[0], tools=TOOLS)
# Get marker class
size = kwargs.get('size', 8)
kwargs['size'] = size
marker = getattr(fig, marker or self.marker)
color = color or self.color
# Prep data
names = source.data['name']
xval, xerr = source.data[x], source.data['{}_unc'.format(x)]
xval[xval == None] = np.nan
xerr[xerr == None] = np.nan
yval, yerr = source.data[y], source.data['{}_unc'.format(y)]
yval[yval == None] = np.nan
yerr[yerr == None] = np.nan
# Plot nominal values
marker(x, y, source=source, color=color, fill_alpha=0.7, name='points', **kwargs)
# Identify sources
idx = [ni for ni, name in enumerate(names) if name in identify]
fig.circle(xval[idx], yval[idx], size=size + 5, color=id_color, fill_color=None, line_width=2)
# Plot y errorbars
y_err_x = [(i, i) for i in source.data[x]]
y_err_y = [(yval[n] if np.isnan(i - j) else i - j, yval[n] if np.isnan(i + j) else i + j) for n, (i, j) in enumerate(zip(yval, yerr))]
fig.multi_line(y_err_x, y_err_y, color=color)
# Plot x errorbars
x_err_y = [(i, i) for i in source.data[y]]
x_err_x = [(xval[n] if np.isnan(i - j) else i - j, xval[n] if np.isnan(i + j) else i + j) for n, (i, j) in enumerate(zip(xval, xerr))]
fig.multi_line(x_err_x, x_err_y, color=color)
# Label points
if label_points:
labels = LabelSet(x=x, y=y, text='name', level='glyph', x_offset=5, y_offset=5, source=source, render_mode='canvas')
fig.add_layout(labels)
# Fit polynomial
if isinstance(order, int):
# Only fit valid values
            idx = [n for n, (i, j) in enumerate(zip(xval, yval)) if not hasattr(i, 'mask') and not np.isnan(i) and not hasattr(j, 'mask') and not np.isnan(j)]
'''
Window Fetcher
These functions will find the dimensions of the Minecraft
window and also take screen caps.
'''
import os
import time
import numpy as np
import cv2
from PIL import Image
from mss import mss
'''
Small General Funcs
'''
def get_mouse_pos ():
'''Returns system mouse position, as [x,y] pair'''
# pos_str = ['x:123', 'y:1232', ...]
pos_str = os.popen("xdotool getmouselocation").read().split(" ")
mouse_x = int(pos_str[0][2:])
mouse_y = int(pos_str[1][2:])
return [mouse_x, mouse_y]
def get_mouse_locale (sq_size: int):
'''Takes a picture around the mouse and returns it'''
sct = mss()
mouse_pos = get_mouse_pos()
top = mouse_pos[1] - sq_size // 2
left = mouse_pos[0] - sq_size // 2
monitor = {"top": top, "left": left, "width": sq_size, "height": sq_size}
IMG = Image.frombytes("RGB", (sq_size, sq_size), sct.grab(monitor).rgb)
IMG = cv2.cvtColor(np.array(IMG), cv2.COLOR_RGB2BGR)
return IMG
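# Hedged usage sketch (added; the crop size and output path below are
# hypothetical, not part of the original script):
#
#     patch = get_mouse_locale(64)       # 64x64 BGR crop centred on the cursor
#     cv2.imwrite('patch.png', patch)    # hypothetical output path
#
# The mss "monitor" dict built above ({"top", "left", "width", "height"}, in
# screen pixels) is the same format expected by capture_area below.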
def capture_area (monitor_dict):
sct = mss()
IMG = Image.frombytes("RGB",
(monitor_dict["width"], monitor_dict["height"]),
sct.grab(monitor_dict).rgb)
    IMG = cv2.cvtColor(np.array(IMG), cv2.COLOR_RGB2BGR)
    return IMG
"""
Copyright (C) 2019-2021, Monash University, Geoscience Australia
Copyright (C) 2018, <NAME>
Bluecap is released under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The project uses third party components which may have different licenses.
Please refer to individual components for more details.
"""
import numpy as np
# IO
from IO.XML import HasChild,GetChild,GetChildren,AddChild
from IO.XML import GetAttributeString,GetAttributeValue, SetAttributeString, HasAttribute
# Functions
from Functions.Royalties import AustralianRoyalties
from .EconomicDataManager import EconomicDataManager
# Managers
class HydrogenEconomicDataManager(EconomicDataManager):
def __init__(self):
EconomicDataManager.__init__(self)
"""
Create an empty economic data manager and default variables.
"""
        self.GandAOpex = np.array([0.0])
import xarray as xr
import numpy as np
class SplitAndStandardize:
"""Class instantiation of SplitAndStandardize:
Here we will be preprocessing data for deep learning model training.
This module includes methods for training and testing data splits and standardization.
Attributes:
climate (str): The climate period to derive deep learning data for; ``current`` or ``future``.
variable (str): Variable to run script the for, which can include ``TK``, ``EV``, ``EU``, ``QVAPOR``,
``PRESS``, ``W_vert``, ``WMAX``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, or ``MASK``.
percent_split (float): Percentage of total data to assign as training data. The remaining data will be
assigned as testing data. For example, 0.6 is 60% training data, 40% testing data.
working_directory (str): The directory path to where the produced files will be saved and worked from.
threshold1 (int): The threshold for used for the chosen classification method (e.g., 75 UH25).
mask (boolean): Whether the threshold was applied within the storm patch mask or not. Defaults to ``False``.
unbalanced (boolean): Whether training data will be artificially balanced (``False``) or left unbalanced (``True``). Defaults to ``False``.
        currenttrain_futuretest (boolean): Whether to train (and standardize) with current climate data and test with future climate data. Defaults to ``False``.
Raises:
Exceptions: Checks whether correct values were input for climate, variable, and percent_split.
"""
def __init__(self, climate, variable, percent_split, working_directory, threshold1, mask=False, unbalanced=False,
currenttrain_futuretest=False, kfold_total=5, kfold_indx=None, use_kfold=False):
# assigning class attributes
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# variable name checks and string automatic assignments
if variable!='TK' and variable!='EV' and variable!='EU' and variable!='QVAPOR' and variable!='PRESS' and variable!='W_vert' \
and variable!='WMAX' and variable!='DBZ' and variable!='CTT' and variable!='UH25' and variable!='UH03' and variable!='MASK':
raise Exception("Please enter TK, EV, EU, QVAPOR, PRESS, W_vert, UH25, UH03, MAXW, CTT, DBZ, or MASK as variable.")
else:
self.variable=variable
# temperature at 1, 3, 5, and 7 km
if self.variable=="TK":
self.choice_var1="temp_sev_1"
self.choice_var3="temp_sev_3"
self.choice_var5="temp_sev_5"
self.choice_var7="temp_sev_7"
self.attrs_array=np.array(["tk_1km", "tk_3km", "tk_5km", "tk_7km"])
self.single=False
# v-wind at 1, 3, 5, and 7 km
if self.variable=="EV":
self.choice_var1="evwd_sev_1"
self.choice_var3="evwd_sev_3"
self.choice_var5="evwd_sev_5"
self.choice_var7="evwd_sev_7"
self.attrs_array=np.array(["ev_1km", "ev_3km", "ev_5km", "ev_7km"])
self.single=False
# u-wind at 1, 3, 5, and 7 km
if self.variable=="EU":
self.choice_var1="euwd_sev_1"
self.choice_var3="euwd_sev_3"
self.choice_var5="euwd_sev_5"
self.choice_var7="euwd_sev_7"
self.attrs_array=np.array(["eu_1km", "eu_3km", "eu_5km", "eu_7km"])
self.single=False
# water vapor at 1, 3, 5, and 7 km
if self.variable=="QVAPOR":
self.choice_var1="qvap_sev_1"
self.choice_var3="qvap_sev_3"
self.choice_var5="qvap_sev_5"
self.choice_var7="qvap_sev_7"
self.attrs_array=np.array(["qv_1km", "qv_3km", "qv_5km", "qv_7km"])
self.single=False
# pressure at 1, 3, 5, and 7 km
if self.variable=="PRESS":
self.choice_var1="pres_sev_1"
self.choice_var3="pres_sev_3"
self.choice_var5="pres_sev_5"
self.choice_var7="pres_sev_7"
self.attrs_array=np.array(["pr_1km", "pr_3km", "pr_5km", "pr_7km"])
self.single=False
# w-wind at 1, 3, 5, and 7 km
if self.variable=="W_vert":
self.choice_var1="wwnd_sev_1"
self.choice_var3="wwnd_sev_3"
self.choice_var5="wwnd_sev_5"
self.choice_var7="wwnd_sev_7"
self.attrs_array=np.array(["ww_1km", "ww_3km", "ww_5km", "ww_7km"])
self.single=False
# max-w
if self.variable=="WMAX":
self.choice_var1="maxw_sev_1"
self.attrs_array=np.array(["maxw"])
self.single=True
# dbz
if self.variable=="DBZ":
self.choice_var1="dbzs_sev_1"
self.attrs_array=np.array(["dbzs"])
self.single=True
# cloud top temperature
if self.variable=="CTT":
self.choice_var1="ctts_sev_1"
self.attrs_array=np.array(["ctts"])
self.single=True
# 2-5 km updraft helicity
if self.variable=="UH25":
self.choice_var1="uh25_sev_1"
self.attrs_array=np.array(["uh25"])
self.single=True
# 0-3 km updraft helicity
if self.variable=="UH03":
self.choice_var1="uh03_sev_1"
self.attrs_array=np.array(["uh03"])
self.single=True
# storm masks
if self.variable=="MASK":
self.choice_var1="mask_sev_1"
self.attrs_array=np.array(["mask"])
self.single=True
# percent splitting for train and test sets
if percent_split>=1:
raise Exception("Percent split should be a float less than 1.")
if percent_split<1:
self.percent_split=percent_split
# assign class attributes
self.working_directory=working_directory
self.threshold1=threshold1
self.unbalanced=unbalanced
self.mask=mask
# mask option string naming for files
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# boolean for training with current, testing with future, standardization
self.currenttrain_futuretest=currenttrain_futuretest
if self.currenttrain_futuretest:
if self.climate == 'current':
raise Exception("Set currenttrain_futuretest to False!")
# for k-fold cross validation
self.use_kfold=use_kfold
if self.use_kfold:
self.kfold_total=kfold_total
self.kfold_indx=kfold_indx
def variable_translate(self):
"""Variable name for the respective filenames.
Returns:
variable (str): The variable string used to save files.
Raises:
ValueError: Input variable must be from available list.
"""
var={
'EU':'EU',
'EV':'EV',
'TK':'TK',
'QVAPOR':'QVAPOR',
'WMAX':'MAXW',
'W_vert':'W',
'PRESS':'P',
'DBZ':'DBZ',
'CTT':'CTT',
'UH25':'UH25',
'UH03':'UH03',
'MASK':'MASK'
}
try:
out=var[self.variable]
return out
except:
raise ValueError("Please enter ``TK``, ``EU``, ``EV``, ``QVAPOR``, ``PRESS``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, ``W_vert``, ``WMAX``, or ``MASK`` as variable.")
def open_above_threshold(self):
"""Open and concat files for the six months of analysis (threshold exceedance).
Returns:
data (Xarray dataset): Concatenated six months of data.
"""
# opening monthly above threshold files
data_dec=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_12.nc",
parallel=True, combine='by_coords')
data_jan=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_01.nc",
parallel=True, combine='by_coords')
data_feb=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_02.nc",
parallel=True, combine='by_coords')
data_mar=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_03.nc",
parallel=True, combine='by_coords')
data_apr=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_04.nc",
parallel=True, combine='by_coords')
data_may=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_05.nc",
parallel=True, combine='by_coords')
# concatenating
data=xr.concat([data_dec, data_jan, data_feb, data_mar, data_apr, data_may], dim='patch')
# closing files (these are large files!)
data_dec=data_dec.close()
data_jan=data_jan.close()
data_feb=data_feb.close()
data_mar=data_mar.close()
data_apr=data_apr.close()
data_may=data_may.close()
return data
def open_below_threshold(self):
"""Open and concat files for six months of analysis (threshold non-exceedance).
Returns:
data (Xarray dataset): Concatenated six months of data.
"""
# opening monthly above threshold files
data_dec=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_12.nc",
parallel=True, combine='by_coords')
data_jan=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_01.nc",
parallel=True, combine='by_coords')
data_feb=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_02.nc",
parallel=True, combine='by_coords')
data_mar=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_03.nc",
parallel=True, combine='by_coords')
data_apr=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_04.nc",
parallel=True, combine='by_coords')
data_may=xr.open_mfdataset(f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_05.nc",
parallel=True, combine='by_coords')
# concatenating
data=xr.concat([data_dec, data_jan, data_feb, data_mar, data_apr, data_may], dim='patch')
# closing files (these are large files!)
data_dec=data_dec.close()
data_jan=data_jan.close()
data_feb=data_feb.close()
data_mar=data_mar.close()
data_apr=data_apr.close()
data_may=data_may.close()
return data
def grab_variables(self, data):
"""Eagerly load variable data. This function converts dask arrays into numpy arrays.
Args:
data (Xarray dataset): The original Xarray dataset containing dask arrays.
Returns:
data_1, data_2, data_3, data_4 or data_1 (numpy array(s)): Input data as numpy arrays.
"""
# if variable file contains 4 heights
if not self.single:
data_1=data[self.choice_var1].values
data_2=data[self.choice_var3].values
data_3=data[self.choice_var5].values
data_4=data[self.choice_var7].values
return data_1, data_2, data_3, data_4
# if variable file is single height
if self.single:
data_1=data[self.choice_var1].values
return data_1
def create_traintest_data(self, data_b, data_a, return_label=False):
"""This function performs balancing of above and below threshold data for training and testing data. Data is permuted
before being assigned to training and testing groups.
The training group sample size is computed using the assigned percentage (``self.percent_split``) from the above threshold population.
Then, the testing group sample size is computed using the leftover percentage (e.g., 1-``self.percent_split``) from a population
        with a similar ratio of above and below threshold storm patches (e.g., ~5% above threshold to 95% below threshold). This is done to
        artificially balance the ratio of threshold-exceeding storms to that of non-exceeding storms, to ensure that the training data set
contains sufficient examples of above threshold storm patches, given that they are rare events. The testing data set is left with
a population of storms that resembles the original data's population.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# train above (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_above=data_a[select_data]
# train below (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_below=data_b[select_data]
# test above (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[int(data_a.shape[0]*self.percent_split):]
test_above=data_a[select_data]
# generate index for test below (stratified sampling)
indx_below=int((((data_a.shape[0]*(1-self.percent_split))*data_b.shape[0])/data_a.shape[0])+(data_a.shape[0]*(1-self.percent_split)))
# test below (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[int(data_a.shape[0] * self.percent_split):indx_below]
test_below=data_b[select_data]
train_data=np.vstack([train_above, train_below])
if return_label:
train_above_label=np.ones(train_above.shape[0])
train_below_label=np.zeros(train_below.shape[0])
train_label=np.hstack([train_above_label, train_below_label])
test_data=np.vstack([test_above, test_below])
if return_label:
test_above_label=np.ones(test_above.shape[0])
test_below_label=np.zeros(test_below.shape[0])
test_label=np.hstack([test_above_label, test_below_label])
# finally, permute the data that has been merged and properly balanced
np.random.seed(10)
train_data=np.random.permutation(train_data)
np.random.seed(10)
test_data=np.random.permutation(test_data)
if not return_label:
return train_data, test_data
if return_label:
np.random.seed(10)
train_label=np.random.permutation(train_label)
np.random.seed(10)
test_label=np.random.permutation(test_label)
return train_data, test_data, train_label, test_label
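    # Worked example (added for illustration; the counts are hypothetical): with
    # percent_split = 0.6, data_a.shape[0] = 1000 and data_b.shape[0] = 19000,
    # the splits above give
    #     training set: int(1000 * 0.6) = 600 patches drawn from each array (balanced 50/50),
    #     test from data_a: the remaining 400 patches,
    #     indx_below = int(((1000 * 0.4) * 19000) / 1000 + 1000 * 0.4) = 8000,
    #     test from data_b: permutation indices [600:8000], i.e. 7400 patches,
    # so the test set ratio 400:7400 (about 1:18.5) roughly reproduces the original
    # 1000:19000 class ratio, while the training set stays artificially balanced.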
def create_traintest_unbalanced(self, data_b, data_a, return_label=False):
"""This function performs creates and permutes training and testing data.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# train above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_above=data_a[select_data]
# train below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[:int(data_b.shape[0]*self.percent_split)]
train_below=data_b[select_data]
# test above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[int(data_a.shape[0]*self.percent_split):]
test_above=data_a[select_data]
# test below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[int(data_b.shape[0]*self.percent_split):]
test_below=data_b[select_data]
train_data=np.vstack([train_above, train_below])
if return_label:
train_above_label=np.ones(train_above.shape[0])
train_below_label=np.zeros(train_below.shape[0])
train_label=np.hstack([train_above_label, train_below_label])
test_data=np.vstack([test_above, test_below])
if return_label:
test_above_label=np.ones(test_above.shape[0])
test_below_label=np.zeros(test_below.shape[0])
test_label=np.hstack([test_above_label, test_below_label])
# finally, permute the data that has been merged and properly balanced
np.random.seed(10)
train_data=np.random.permutation(train_data)
np.random.seed(10)
test_data=np.random.permutation(test_data)
if not return_label:
return train_data, test_data
if return_label:
np.random.seed(10)
train_label=np.random.permutation(train_label)
np.random.seed(10)
test_label=np.random.permutation(test_label)
return train_data, test_data, train_label, test_label
def create_traintest_unbalanced_kfold(self, data_b, data_a, return_label=False):
"""This function performs creates and permutes training and testing data for k-fold cross validation.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# helper functions for indices of k-fold cross validation
kgroups = np.arange(0, self.kfold_total, 1)
kgroups_leftover = np.delete(kgroups, self.kfold_indx)
# train above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(
data_a.shape[0]), self.kfold_total))[[kgroups_leftover[0],kgroups_leftover[1],kgroups_leftover[2],kgroups_leftover[3]]])
train_above=data_a[select_data]
# train below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.hstack(np.array(np.array_split(np.random.permutation(
data_b.shape[0]), self.kfold_total))[[kgroups_leftover[0],kgroups_leftover[1],kgroups_leftover[2],kgroups_leftover[3]]])
train_below=data_b[select_data]
# test above UH threshold (stratified sampling)
np.random.seed(0)
        select_data=np.hstack(np.array(np.array_split(np.random.permutation(data_a.shape[0]), self.kfold_total))[[self.kfold_indx]])
import numpy as np
import pandas as pd
import gensim
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.utils import class_weight
from nltk.stem import PorterStemmer
def create_embeddings_matrix(glove_file_loc, glove_file_dims, n_words, idx2word):
'''Use the GloVe pre-trained 300 dimension word vectors to create an embedding matrix for the vocab in the corpus'''
'''Specify location of the GloVe file, which can be downloaded here: https://nlp.stanford.edu/projects/glove/'''
w2v_output_file = glove_file_loc
glovemodel = gensim.models.KeyedVectors.load_word2vec_format(w2v_output_file, binary=False)
'''Pass the GloVe pretrained model weights into the vocabulary list'''
'''Create an empty matrix with the dimensions of the vocab size and the embeddings size'''
embedding_matrix = np.zeros((n_words , glove_file_dims))
missing = []
for i, w in idx2word.items():
try:
embedding_vector = glovemodel[w]
embedding_matrix[i] = embedding_vector
except:
'''Assess the size of the vocabulary unsuccessfully matched to the GloVe word vectors'''
missing.append(w)
pass
return embedding_matrix
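# Hedged usage sketch (added; the file path and toy vocabulary are assumptions,
# not part of the original script):
#
#     idx2word = {0: 'the', 1: 'cat', 2: 'sat'}          # toy index-to-word map
#     emb = create_embeddings_matrix('glove.6B.300d.w2v.txt', 300,
#                                    n_words=len(idx2word), idx2word=idx2word)
#
# Note that load_word2vec_format expects word2vec text format, so a raw GloVe
# file generally needs converting first (e.g. with gensim's glove2word2vec).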
def prepare_sequences(all_tokens_tags):
'''Function takes tokenized corpus and creates equal-length sequences of encoded words and tags'''
seq_len = int(np.ceil(np.percentile([len(s) for s in all_tokens_tags], 99.5)))
'''Pads sentences to a length matching up to 99.5 percentile of sequences length'''
X = [[word2idx[PorterStemmer().stem(word[0]).lower()] for word in sentence] for sentence in all_tokens_tags]
X = pad_sequences(maxlen=seq_len, sequences=X, padding="post",value=n_words - 1)
X_df = pd.DataFrame(X).reset_index()
y = [[tag2idx[word[2]] for word in sentence] for sentence in all_tokens_tags]
y = pad_sequences(maxlen=seq_len, sequences=y, padding="post", value=tag2idx["O"])
'''Due to imbalanced target classes, class weights are derived and returned'''
    class_weights = class_weight.compute_class_weight('balanced', np.unique(y)
from datetime import datetime, timezone
import numpy as np
import xarray as xr
import carbonplan_trace.v1.utils as utils
from carbonplan_trace.v0.data import cat
from carbonplan_trace.v1.glas_height_metrics import HEIGHT_METRICS_MAP, get_all_height_metrics
ECOREGIONS_GROUPINGS = {
'afrotropic': np.arange(1, 117),
'tropical_asia': np.concatenate(
(
np.arange(135, 142),
np.arange(143, 147),
np.arange(151, 167),
np.arange(217, 324),
np.array([148, 149, 188, 195, 618, 621, 622, 626, 627, 634, 635, 637, 638]),
),
axis=None,
),
'tropical_neotropic': np.concatenate(
(
np.arange(439, 561),
np.arange(564, 575),
np.arange(579, 585),
np.arange(587, 618),
np.arange(622, 626),
# 634 showed up in both tropical asia and here, determined to be more suitable for tropical asia
np.arange(628, 634),
np.arange(639, 642),
np.array([562, 619, 620, 636]),
),
axis=None,
),
'extratropical_neotropic': np.concatenate(
(
np.arange(575, 579),
np.array([561, 563, 585, 586]),
),
axis=None,
),
'alaska': np.concatenate(
(
np.arange(404, 412),
np.array([369, 371, 372, 375, 416, 420]),
),
axis=None,
),
'western_canada': np.concatenate(
(
np.arange(376, 382),
np.arange(412, 416),
np.array([383, 419]),
),
axis=None,
),
'eastern_canada': np.array([370, 373, 374, 382, 421]),
'conus': np.concatenate(
(
np.arange(328, 369),
np.arange(384, 404),
np.arange(422, 426),
np.array([325, 429, 430, 433, 434, 438]),
),
axis=None,
),
'mexico_north': np.array([324, 326, 327, 426, 428, 431, 432, 435, 436, 437]),
'mexico_south': np.array([427]),
'western_boreal_eurasia': np.array([691, 708, 711, 717, 729, 774, 776, 778, 780]),
'eastern_boreal_eurasia': np.concatenate(
(
np.arange(712, 717),
np.arange(718, 721),
np.arange(771, 774),
np.arange(781, 785),
np.array([710, 775, 777, 779]),
),
axis=None,
),
'palearctic_wang_2013': np.concatenate(
(
np.arange(655, 658),
np.arange(704, 708),
np.arange(721, 725),
np.arange(726, 729),
np.arange(730, 744),
np.arange(746, 758),
np.arange(759, 771),
np.array(
[
642,
643,
653,
659,
667,
669,
673,
677,
680,
681,
684,
685,
687,
690,
693,
694,
696,
697,
700,
702,
709,
]
),
),
axis=None,
),
'palearctic_takagi_2015': np.array([666, 670, 671, 682, 683, 698, 699]),
'palearctic_yavasli_2016': np.array([649, 652, 662, 688, 725, 785, 786, 789, 790, 791, 804]),
'palearctic_brovkina_2015': np.array([692]),
'palearctic_alberti_2013': np.array([689]),
'palearctic_whrc': np.array(
[644, 646, 650, 658, 660, 665, 674, 675, 678, 695, 703, 788, 794, 795, 799, 801, 802, 806]
),
'palearctic_shang_and_chazette_2014': np.array(
[645, 647, 648, 654, 661, 664, 668, 676, 679, 686]
),
'palearctic_simonson_2016': np.array([701, 758, 787, 792, 793, 796, 797, 798, 800, 803, 805]),
'palearctic_patenaude_2004': np.array([651, 663, 672]),
'palearctic_suganuma_2006': np.concatenate(
(
np.arange(807, 847),
np.array([744, 745]),
),
axis=None,
),
'australia_beets_2011': np.concatenate(
(
            np.arange(169, 176)
import numpy as np
import numpy.linalg as nla
from numpy import cos, sin
import scipy.linalg as sla
import scipy.interpolate
import scipy.optimize
from fym.core import BaseEnv, BaseSystem
from fym.utils.rot import quat2dcm, quat2angle, angle2quat
class Aircraft3Dof(BaseSystem):
g = 9.80665
rho = 1.2215
m = 8.5
S = 0.65
b = 3.44
CD0 = 0.033
CD1 = 0.017
name = 'aircraft'
def __init__(self, initial_state, wind):
super().__init__(initial_state)
self.wind = wind
self.term1 = self.rho*self.S/2/self.m
def external(self, states, controls):
state = states['aircraft']
return dict(wind=self.wind.get(state))
def deriv(self, state, t, control, external):
CL, phi = control
CD = self.CD0 + self.CD1*CL**2
raw_control = CD, CL, phi
return self._raw_deriv(state, t, raw_control, external)
def _raw_deriv(self, state, t, control, external):
x, y, z, V, gamma, psi = state.ravel()
        CD, CL, phi = control
(_, Wy, _), (_, (_, _, dWydz), _) = external['wind']
dxdt = V*np.cos(gamma)*np.cos(psi)
dydt = V*np.cos(gamma)*np.sin(psi) + Wy
dzdt = - V*np.sin(gamma)
dWydt = dWydz * dzdt
dVdt = (-self.term1*V**2*CD - self.g*np.sin(gamma)
- dWydt*np.cos(gamma)*np.sin(psi))
dgammadt = (self.term1*V*CL*np.cos(phi) - self.g*np.cos(gamma)/V
+ dWydt*np.sin(gamma)*np.sin(psi)/V)
dpsidt = (self.term1*V/np.cos(gamma)*CL*np.sin(phi)
- dWydt*np.cos(psi)/V/np.cos(gamma))
return np.vstack([dxdt, dydt, dzdt, dVdt, dgammadt, dpsidt])
class F16LinearLateral(BaseSystem):
"""
Reference:
    B. L. Stevens et al. "Aircraft Control and Simulation", 3/e, 2016
Example 5.3-1: LQR Design for F-16 Lateral Regulator
Dynamics:
x_dot = Ax + Bu
State:
x = [beta, phi, p, r, del_a, del_r, x_w]
beta, phi: [rad], p, r: [rad/s], del_a, del_r: [deg]
Control input:
u = [u_a, u_r] (aileron and rudder servo inputs, [deg])
"""
A = np.array([
[-0.322, 0.064, 0.0364, -0.9917, 0.0003, 0.0008, 0],
[0, 0, 1, 0.0037, 0, 0, 0],
[-30.6492, 0, -3.6784, 0.6646, -0.7333, 0.1315, 0],
[8.5396, 0, -0.0254, -0.4764, -0.0319, -0.062, 0],
[0, 0, 0, 0, -20.2, 0, 0],
[0, 0, 0, 0, 0, -20.2, 0],
[0, 0, 0, 57.2958, 0, 0, -1]
])
B = np.array([
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[20.2, 0],
[0, 20.2],
[0, 0]
])
C = np.array([
[0, 0, 0, 57.2958, 0, 0, -1],
[0, 0, 57.2958, 0, 0, 0, 0],
[57.2958, 0, 0, 0, 0, 0, 0],
[0, 57.2958, 0, 0, 0, 0, 0]
])
def __init__(self, initial_state=[1, 0, 0, 0, 0, 0, 0]):
super().__init__(initial_state)
def deriv(self, x, u):
return self.A.dot(x) + self.B.dot(u)
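# Hedged LQR sketch (added; the Q and R weights below are illustrative, not the
# ones used in the referenced example). With the A, B matrices above, a
# state-feedback gain for the lateral regulator can be obtained from the
# continuous-time algebraic Riccati equation, e.g.:
#
#     Q = np.diag([1, 1, 1, 1, 0, 0, 1])
#     R = np.diag([1, 1])
#     P = sla.solve_continuous_are(F16LinearLateral.A, F16LinearLateral.B, Q, R)
#     K = np.linalg.solve(R, F16LinearLateral.B.T @ P)   # control law u = -K x
#
# sla is scipy.linalg, imported at the top of this module.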
class MorphingPlane(BaseEnv):
g = 9.80665 # [m/s^2]
mass = 10 # [kg]
S = 0.84 # reference area (norminal planform area) [m^2]
# longitudinal reference length (nominal mean aerodynamic chord) [m]
cbar = 0.288
b = 3 # lateral reference length (nominal span) [m]
Tmax = 50 # maximum thrust [N]
control_limits = {
"delt": (0, 1),
"dele": np.deg2rad((-10, 10)),
"dela": (-0.5, 0.5),
"delr": (-0.5, 0.5),
"eta1": (0, 1),
"eta2": (0, 1),
}
coords = {
"eta1": np.linspace(0, 1, 3), # eta1
"eta2": np.linspace(0, 1, 3), # eta2
"dele": np.deg2rad(np.linspace(-10, 10, 3)), # dele
"alpha": np.deg2rad(np.linspace(-10, 20, 61)) # alpha
}
polycoeffs = {
"CD": [0.03802,
[-0.0023543, 0.0113488, -0.00549877, 0.0437561],
[[0.0012769, -0.00220993, 1166.938, 672.113],
[0.00188837, 0.000115637, -203.85818, -149.4225],
[-1166.928, 203.8535, 0.1956192, -115.13404],
[-672.111624, 149.417, 115.76766, 0.994464]]],
"CL": [0.12816,
[0.13625538, 0.1110242, 1.148293, 6.0995634],
[[-0.147822776, 1.064541, 243.35532, -330.0270179],
[-1.13021511, -0.009309088, 166.28991, -146.8964467],
[-243.282881, -166.2709286, 0.071258483, 4480.53564],
[328.541707, 148.945785, -4480.67456545, -0.99765511]]],
"Cm": [0.09406144,
[-0.269902, 0.24346326, -7.46727, -2.7296],
[[0.35794703, -7.433699, 647.83725, -141.0390569],
[6.8532466, -0.0510021, 542.882121, -681.325],
[-647.723162, -542.8638, 0.76322739, 2187.33517],
[135.66547, 678.941, -2186.1196, 0.98880322]]]
}
J_template = np.array([
[9323306930.82, -2622499.75, 56222833.68],
[-2622499.75, 0, 395245.59],
[56222833.68, 395245.59, 105244200037]
]) / 10**9 / 103.47649 * mass
J_yy_data = np.array([
[96180388451.54, 96468774320.55, 97352033548.31],
[96180388451.54, 96720172843.10, 98272328292.52],
[96180388451.54, 97077342563.70, 99566216309.81]
]).T / 10**9 / 103.47649 * mass
J_yy = scipy.interpolate.interp2d(
coords["eta1"], coords["eta2"], J_yy_data
)
def __init__(self, velocity, omega, quaternion, position):
self.vel = BaseSystem(velocity, name="velocity") # 3x1
self.omega = BaseSystem(omega, name="omega") # 3x1
self.quat = BaseSystem(quaternion, name="quaternion") # 4x1
self.pos = BaseSystem(position, name="position") # 3x1
def J(self, eta1, eta2):
J_temp = self.J_template
J_temp[1, 1] = self.J_yy(eta1, eta2)
return J_temp
def _aero_base(self, name, *x):
# x = [eta1, eta2, dele, alp]
a0, a1, a2 = self.polycoeffs[name]
return a0 + np.dot(a1, x) + np.sum(x * np.dot(a2, x), axis=0)
def CD(self, eta1, eta2, dele, alp):
return self._aero_base("CD", eta1, eta2, dele, alp)
def CL(self, eta1, eta2, dele, alp):
return self._aero_base("CL", eta1, eta2, dele, alp)
def Cm(self, eta1, eta2, dele, alp):
return self._aero_base("Cm", eta1, eta2, dele, alp)
def set_dot(self, x, u, eta):
v, omega, q, p = self.observe_list(x)
F, M = self.aerodyn(v, q, p, u, eta)
J = self.J(eta[0], eta[1])
# force equation
self.systems_dict["velocity"].dot = F / self.mass - np.cross(omega, v)
# moment equation
self.systems_dict["omega"].dot = (
nla.inv(J).dot(M - np.cross(omega, J.dot(omega)))
)
# kinematic equation
self.systems_dict["quaternion"].dot = 0.5 * np.append(
-omega.dot(q[1:]),
omega*q[0] - np.cross(omega, q[1:])
)
# navigation equation
self.systems_dict["position"].dot = quat2dcm(q).T.dot(v)
def state_readable(self, v=None, omega=None, q=None, p=None, preset="vel"):
VT = sla.norm(v)
alpha = np.arctan2(v[2], v[0])
beta = np.arcsin(v[1] / VT)
if preset == "vel":
return VT, alpha, beta
else:
_, theta, _ = quat2angle(q)
gamma = theta - alpha
Q = omega[1]
return {'VT': VT, 'gamma': gamma, 'alpha': alpha, 'Q': Q,
'theta': theta, 'beta': beta}
def aerocoeff(self, *args):
# *args: eta1, eta2, dele, alp
# output: CL, CD, Cm, CC, Cl, Cn
return self.CL(*args), self.CD(*args), self.Cm(*args), 0, 0, 0
def aerodyn(self, v, q, p, u, eta):
delt, dele, dela, delr = u
x_cg, z_cg = 0, 0
VT, alp, bet = self.state_readable(v=v, preset="vel")
qbar = 0.5 * get_rho(-p[2]) * VT**2
CL, CD, Cm, CC, Cl, Cn = self.aerocoeff(*eta, dele, alp)
CX = cos(alp)*cos(bet)*(-CD) - cos(alp)*sin(bet)*(-CC) - sin(alp)*(-CL)
CY = sin(bet)*(-CD) + cos(bet)*(-CC) + 0*(-CL)
CZ = cos(bet)*sin(alp)*(-CD) - sin(alp)*sin(bet)*(-CC) + cos(alp)*(-CL)
S, cbar, b, Tmax = self.S, self.cbar, self.b, self.Tmax
X_A = qbar*CX*S # aerodynamic force along body x-axis
Y_A = qbar*CY*S # aerodynamic force along body y-axis
Z_A = qbar*CZ*S # aerodynamic force along body z-axis
# Aerodynamic moment
l_A = qbar*S*b*Cl + z_cg*Y_A # w.r.t. body x-axis
m_A = qbar*S*cbar*Cm + x_cg*Z_A - z_cg*X_A # w.r.t. body y-axis
n_A = qbar*S*b*Cn - x_cg*Y_A # w.r.t. body z-axis
F_A = np.array([X_A, Y_A, Z_A]) # aerodynamic force [N]
M_A = np.array([l_A, m_A, n_A]) # aerodynamic moment [N*m]
# thruster force and moment are computed here
T = Tmax*delt # thrust [N]
X_T, Y_T, Z_T = T, 0, 0 # thruster force body axes component [N]
l_T, m_T, n_T = 0, 0, 0 # thruster moment body axes component [N*m]
# Thruster force, momentum, and gravity force
F_T = np.array([X_T, Y_T, Z_T]) # in body coordinate [N]
M_T = np.array([l_T, m_T, n_T]) # in body coordinate [N*m]
F_G = quat2dcm(q).dot(np.array([0, 0, self.mass*self.g]))
F = F_A + F_T + F_G
M = M_A + M_T
return F, M
def get_trim(self, z0={"alpha": 0.1, "delt": 0.13, "dele": 0},
fixed={"h": 300, "VT": 16, "eta": (0, 0)},
method="SLSQP", options={"disp": True, "ftol": 1e-10}):
z0 = list(z0.values())
fixed = list(fixed.values())
bounds = (
(self.coords["alpha"].min(), self.coords["alpha"].max()),
self.control_limits["delt"],
self.control_limits["dele"]
)
result = scipy.optimize.minimize(
self._trim_cost, z0, args=(fixed,),
bounds=bounds, method=method, options=options)
return self._trim_convert(result.x, fixed)
def _trim_cost(self, z, fixed):
x, u, eta = self._trim_convert(z, fixed)
self.set_dot(x, u, eta)
weight = np.diag([1, 1, 1000])
dxs = np.append(self.vel.dot[(0, 2), ], self.omega.dot[1])
return dxs.dot(weight).dot(dxs)
def _trim_convert(self, z, fixed):
h, VT, eta = fixed
alp = z[0]
        v = np.array([VT*cos(alp), 0, VT*sin(alp)])
import numpy as np
from numpy import pi as PI
from numpy.random import uniform as random
from mtuq.event import Force, MomentTensor
from mtuq.grid import Grid, UnstructuredGrid
from mtuq.util import asarray
from mtuq.util.math import open_interval as regular
from mtuq.util.math import to_mij, to_rho, semiregular_grid, to_v, to_w
def to_mt(rho, v, w, kappa, sigma, h):
""" Converts from lune parameters to MomentTensor object
"""
mt = to_mij(rho, v, w, kappa, sigma, h)
return MomentTensor(mt, convention='USE')
def FullMomentTensorGridRandom(magnitudes=[1.], npts=1000000):
""" Full moment tensor grid with randomly-spaced values
Given input parameters ``magnitudes`` (`list`) and ``npts`` (`int`),
returns an ``UnstructuredGrid`` of size `npts*len(magnitudes)`.
Moment tensors are drawn from the uniform distribution defined by
`Tape2015` (https://doi.org/10.1093/gji/ggv262)
.. rubric :: Usage
Use ``get(i)`` to return the `i`-th moment tensor as a `MomentTensor` object
Use ``get(i).as_vector()`` to return the `i`-th moment tensor as a vector
`Mrr, Mtt, Mpp, Mrp, Mrt, Mtp`
Use ``get_dict(i)`` to return the `i`-th moment tensor as dictionary
of Tape2015 parameters `rho, v, w, kappa, sigma, h`
"""
v = random(-1./3., 1./3., npts)
w = random(-3./8.*PI, 3./8.*PI, npts)
kappa = random(0., 360, npts)
sigma = random(-90., 90., npts)
h = random(0., 1., npts)
rho = list(map(to_rho, asarray(magnitudes)))
v = np.tile(v, len(magnitudes))
w = np.tile(w, len(magnitudes))
kappa = np.tile(kappa, len(magnitudes))
sigma = np.tile(sigma, len(magnitudes))
h = np.tile(h, len(magnitudes))
rho = np.repeat(rho, npts)
return UnstructuredGrid(
dims=('rho', 'v', 'w', 'kappa', 'sigma', 'h'),
coords=(rho, v, w, kappa, sigma, h),
callback=to_mt)
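# Hedged usage sketch (added; the magnitude and npts values are arbitrary),
# mirroring the usage notes in the docstring above:
#
#     grid = FullMomentTensorGridRandom(magnitudes=[4.5], npts=10000)
#     mt = grid.get(0)                 # MomentTensor object
#     vec = grid.get(0).as_vector()    # Mrr, Mtt, Mpp, Mrp, Mrt, Mtp
#     params = grid.get_dict(0)        # rho, v, w, kappa, sigma, h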
def FullMomentTensorGridSemiregular(magnitudes=[1.], npts_per_axis=20, tightness=0.8):
""" Full moment tensor grid with semiregular values
Given input parameters ``magnitudes`` (`list`) and ``npts`` (`int`),
returns a ``Grid`` of size `2*len(magnitudes)*npts_per_axis^5`.
For tightness~0, the grid will be regular in Tape2015 parameters `v, w`,
and for tightness~1, regular in Tape2012 parameters `delta, gamma`.
For intermediate values, the grid will be "semiregular" in the sense of
a linear interpolation between the above cases (in such a way that, as
`tightness` increases, the extremal grid points get closer to the boundary
of the lune).
.. rubric :: Usage
Use ``get(i)`` to return the `i`-th moment tensor as a `MomentTensor` object
Use ``get(i).as_vector()`` to return the `i`-th moment tensor as a vector
`Mrr, Mtt, Mpp, Mrp, Mrt, Mtp`
Use ``get_dict(i)`` to return the `i`-th moment tensor as dictionary
of Tape2015 parameters `rho, v, w, kappa, sigma, h`
"""
v, w = semiregular_grid(npts_per_axis, 2*npts_per_axis, tightness)
kappa = regular(0., 360, npts_per_axis)
sigma = regular(-90., 90., npts_per_axis)
h = regular(0., 1., npts_per_axis)
rho = list(map(to_rho, asarray(magnitudes)))
return Grid(
dims=('rho', 'v', 'w', 'kappa', 'sigma', 'h'),
coords=(rho, v, w, kappa, sigma, h),
callback=to_mt)
def FullMomentTensorPlottingGrid(magnitudes=[1.], npts_per_axis=11, tightness=0.0):
""" Full moment tensor regular grid to plot MT on the lune
"""
# v, w = semiregular_grid(13, 35, tightness)
v1, v2, nv = -30, 30, 13
w1, w2, nw = -1, 1, 35
dv = (v2-v1)/nv
dw = (w2-w1)/nw
v = np.arange((v1+dv/2), (v2-dv/2), dv)
    w = np.arange((w1+dw/2), (w2+dw/2), dw)
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 18 10:02:38 2011
@author: oliver.tomic
@info:
Implementation of SO-PLS algorithm.
FIXME: provide reference to paper
Changes from last version: 10.10.2012
-------------------------------------
- changed X_cumCalExplVars_indVar to X_cumCalExplVar_indVar
- changed X_cumCalExplVars to X_cumCalExplVar_indVar
- fixed computation of cumulative validated explained variance for Y
-- changed <<for indX, Xblock in enumerate(Xblocks_train_procList):>>
to <<for indX in range(len(comb_nonZeroIndexArr)):>>
around line 1543
Changes from last version: 10.01.2013
-------------------------------------
- fixed reconstruction of X blocks when standarised. Used always X_std from
very last X block instead of X_std from each particluar X block that is to
be reconstructed
- Extended plotting for more than 5 Y variables up to 15.
- Cleaned up and removed most print commands
"""
# Import necessary modules
#import matplotlib
#matplotlib.use('Agg')
import numpy as np
import numpy.linalg as nl
import plsr
import cross_val as cv
import statTools as st
import matplotlib.pyplot as plt
import pca
class SOPLSCV:
"""
GENERAL INFO
------------
    This class carries out cross validation for Sequential Orthogonalised
Partial Least Squares regression (SO-PLS).
"""
def __init__(self, Y, XblocksList, **kargs):
"""
        Do cross validation for SO-PLS. Compute RMSECV for a number of
        components provided by the user. RMSECV is then used in a Måge plot to
        decide on the optimal number of components in each X block.
"""
#==============================================================================
# Check what is provided by user for SO-PLS cross validation
#==============================================================================
# First of all, centre Y and all X blocks, such that orthogonalisation
# procedure is simplified.
self.arrY_input = Y
self.XblocksList_input = XblocksList[:]
# Check whether number of PC's that are to be computed is provided.
# If NOT, then number of PC's is set to either number of objects or
# variables of X, whichever is smaller (maxComp). If number of
# PC's IS provided, then number is checked against maxPC and set to
# maxPC if provided number too large.
self.XcompsList = []
# If number of components for X blocks are not provided
if 'Xcomps' not in kargs.keys():
for Xblock in self.XblocksList_input:
maxComp = min(np.shape(Xblock))
                # Set maximum number of components to 20
if maxComp > 20: maxComp = 20
self.XcompsList.append(maxComp)
# If number of components are provided, check whether that many
# components can be computed. If given number of components is too
# high, set number to max possible.
else:
self.XcompsList_input = kargs['Xcomps']
for ind, comp in enumerate(kargs['Xcomps']):
givenComp = kargs['Xcomps'][ind]
                # Set maximum number of components to 20
maxComp = min(np.shape(self.XblocksList_input[ind]))
if maxComp > 20: maxComp = 20
if givenComp > maxComp:
self.XcompsList.append(maxComp)
else:
self.XcompsList.append(givenComp)
# Check whether cvType is provided. If NOT, then use "loo" by default.
if 'cvType' not in kargs.keys():
self.cvType = ["loo"]
else:
self.cvType = kargs['cvType']
# Check whether standardisation of X and Y are requested by user. If
# NOT, then all X blocks and Y are only centred by default.
if 'Xstand' not in kargs.keys():
self.XstandList = []
for Xblock in self.XblocksList_input:
self.XstandList.append(False)
else:
self.XstandList = kargs['Xstand']
if 'Ystand' not in kargs.keys():
self.Ystand = False
else:
self.Ystand = kargs['Ystand']
# Check dimensionality of Y and choose between PLS1 or PLS2 thereafter
# --------------------------------------------------------------------
numYvars = np.shape(self.arrY_input)[1]
if numYvars == 1:
PLS = plsr.nipalsPLS1
else:
PLS = plsr.nipalsPLS2
#==============================================================================
# Here the main loop starts: looping over all possible combinations
# of PC's in X blocks
#==============================================================================
# Find all possible combinations of provided X components
# -------------------------------------------------------
# Construct a list that holds all possible combinations of components
# for each X block. Must use a dummy list that holds for construction
# [compsX1+1, compsX2+1] when [compsX1, compsX2] is provided. This is
# because of Python indexing starting at zero.
dummyXcomps = []
for comp in self.XcompsList:
comp = comp + 1
dummyXcomps.append(comp)
self.XcompComb = []
self.sopls_RMSECVarr = np.zeros(dummyXcomps)
for ind, item in np.ndenumerate(self.sopls_RMSECVarr):
self.XcompComb.append(ind)
# # Temporary code for computing RMSECV for Y1 and Y2
# self.sopls_RMSECVarr_Y1 = self.sopls_RMSECVarr.copy()
# self.sopls_RMSECVarr_Y2 = self.sopls_RMSECVarr.copy()
# ----- HERE STARTS THE LOOP OVER ALL PC COMBINATIONS -----
# Run SO-PLS for all possible combinations of X components given
# for each X block
self.resultsAllCombs = {}
self.PRESS = {}
self.Ypred = {}
allComputations = len(self.XcompComb)
for ind, comb in enumerate(self.XcompComb):
            print(); print()
print(comb, ind, round(float(ind+1)/allComputations*100,1))
resultsCurrentComb = {}
# Solutions for special cases where cross-validation of SO-PLS
# will not be run (e.g. zero components in all blocks, or zero in
# all except one X block. In these cases we use RMSECV directly
# from simple one X block PLSR model.
# When all ALL X blocks have zero components do nothing and
# continue with next components combination
# =========================================================
if sum(comb) == 0:
continue
# Check which X block is the first that has one or more components
# to be computed (first non-zero).
comb_nonZeroIndexArr = np.flatnonzero(np.array(comb))
position_firstNonZero = comb_nonZeroIndexArr[0]
actualComps = comb[position_firstNonZero]
# Do ordinary PLSR when only one X block has non-zero components.
# ===============================================================
if len(comb_nonZeroIndexArr) == 1:
model = PLS(self.XblocksList_input[position_firstNonZero], \
self.arrY_input, \
numPC=actualComps, \
Xstand = self.XstandList[position_firstNonZero], \
Ystand = self.Ystand, \
cvType = ["loo"])
self.sopls_RMSECVarr[comb] = model.Y_RMSECV()[-1]
# Here is where RMSCV for each individual variable is extracted
# self.sopls_RMSECVarr_Y1[comb] = model.RMSECV_indVar_arr()[:,0][-1]
# self.sopls_RMSECVarr_Y2[comb] = model.RMSECV_indVar_arr()[:,1][-1]
# print '***', self.sopls_RMSECVarr_Y1[comb], comb
# print '***', self.sopls_RMSECVarr_Y2[comb], comb
# Insert RMSECV for zero X components into SO-PLS RMSCEV array
self.sopls_RMSECVarr[self.XcompComb[0]] = model.Y_RMSECV()[0]
# self.sopls_RMSECVarr_Y1[self.XcompComb[0]] = model.RMSECV_indVar_arr()[:,0][0]
# self.sopls_RMSECVarr_Y2[self.XcompComb[0]] = model.RMSECV_indVar_arr()[:,1][0]
#
# print '***', self.sopls_RMSECVarr[self.XcompComb[0]], self.XcompComb[0]
print('Single PLS this time')
continue
# FOR ALL OTHER CASES with two or more X blocks
#==============================================
else:
                # First divide into combinations of training and test sets
numObj = np.shape(self.arrY_input)[0]
if self.cvType[0] == "loo":
print("loo")
cvComb = cv.LeaveOneOut(numObj)
elif self.cvType[0] == "lpo":
print("lpo")
cvComb = cv.LeavePOut(numObj, self.cvType[1])
elif self.cvType[0] == "lolo":
print("lolo")
cvComb = cv.LeaveOneLabelOut(self.cvType[1])
else:
print('Requested form of cross validation is not available')
pass
# Collect train and test set in dictionaries for each PC and put
# them in this list.
segCount = 0
# Generate a dictionary that holds Ypred after each X block. This
# will used later to compute mean Ypred across X blocks, which
# then is used to compute RMSEP
YpredConstrDict = {}
YpredList_cv = []
for constrInd, constrItem in enumerate(self.XblocksList_input):
YpredConstrDict[constrInd] = []
# ----- HERE STARTS THE CROSS VALIDATION LOOP -----
                # First divide into combinations of training and test sets
for train_index, test_index in cvComb:
# Define training and test set for Y
Y_train, Y_test = cv.split(train_index, test_index, \
self.arrY_input)
# -------------------------------------------------------------
# Center or standardise Y according to users choice
if self.Ystand == True:
# Standardise training set Y using mean and STD
Y_train_mean = np.average(Y_train, axis=0).reshape(1,-1)
Y_train_std = np.std(Y_train, axis=0, ddof=1).reshape(1,-1)
Y_train_proc = (Y_train - Y_train_mean) / Y_train_std
# Standardise test set Y using mean and STD from
# training set
Y_test_proc = (Y_test - Y_train_mean) / Y_train_std
else:
# Centre training set Y using mean
Y_train_mean = np.average(Y_train, axis=0).reshape(1,-1)
Y_train_proc = Y_train - Y_train_mean
Y_train_std = None
# Centre test set Y using mean and STD from
# training set
Y_test_proc = Y_test - Y_train_mean
# -------------------------------------------------------------
# Now do the same for the X blocks. Do this by iterating
# through self.XblocksList_input and take out test and
# training data
Xblocks_trainDict = {}
Xblocks_testDict = {}
Xblocks_trainList = []
Xblocks_testList = []
Xblocks_train_meanList = []
Xblocks_train_stdList = []
Xblocks_train_procList = []
Xblocks_test_procList = []
segCount = segCount + 1
# For each X block split up into training and test set for
# the cross validation segment we are in right now
for ind, item in enumerate(self.XblocksList_input):
X_train, X_test = cv.split(train_index, test_index, item)
Xblocks_trainDict[ind] = X_train
Xblocks_testDict[ind] = X_test
Xblocks_trainList.append(X_train)
Xblocks_testList.append(X_test)
# ---------------------------------------------------------
# Center or standardise X blocks according to users choice
if self.XstandList[ind] == True:
# Compute standardised X blocks using mean and STD
X_train_mean = np.average(X_train, axis=0).reshape(1,-1)
X_train_std = np.std(X_train, axis=0, ddof=1).reshape(1,-1)
X_train_proc = (X_train - X_train_mean) / X_train_std
# Append each standardised X block to X blocks training
# list
Xblocks_train_meanList.append(X_train_mean)
Xblocks_train_stdList.append(X_train_std)
Xblocks_train_procList.append(X_train_proc)
# Standardise test set of each X block using mean and
# and STD from training X blocks and append to X blocks
# test list
X_test_proc = (X_test - X_train_mean) / X_train_std
Xblocks_test_procList.append(X_test_proc)
else:
# Compute centred X blocks using mean
X_train_mean = np.average(X_train, axis=0).reshape(1,-1)
X_train_proc = X_train - X_train_mean
# Append each centred X block to X blocks training list
Xblocks_train_meanList.append(X_train_mean.reshape(1,-1))
Xblocks_train_stdList.append(None)
Xblocks_train_procList.append(X_train_proc)
# Centre test set of each X block using mean from
# X block training set and append to X blocks test list
X_test_proc = X_test - X_train_mean
Xblocks_test_procList.append(X_test_proc)
# ---------------------------------------------------------
# Put all training and test data for Y and X blocks in a
# dictionary.
segDict = {}
segDict['Y train'] = Y_train
segDict['Y test'] = Y_test
segDict['X train'] = Xblocks_trainDict
segDict['X test'] = Xblocks_testDict
segDict['proc Y train'] = Y_train_proc
segDict['Y train mean'] = Y_train_mean.reshape(1,-1)
#segDict['Y train std'] = Y_train_std
segDict['proc X train'] = Xblocks_train_procList
segDict['X train mean'] = Xblocks_train_meanList
#segDict['X train std'] = Xblocks_train_stdList
segDict['proc X test'] = Xblocks_test_procList
segDict['proc Y test'] = Y_test_proc
# Now start modelling sequential PLSR over all X blocks.
# First X block with non-zero X components will be modelled
# with ordinary PLSR. For all following X blocks with non-
# zero components, the X block must be orthogonalised with
# regard to the X scores of the prior X block. Only then the
# orthogonalised X block can be modelled against Y.
scoresList = []
cv_scoresList = []
Blist = []
orthoXblockList = []
tCQlist = []
Wlist = []
XmeanList = []
YmeanList = []
Qlist = []
Clist = []
for indMod, Xblock in enumerate(Xblocks_train_procList):
if indMod not in comb_nonZeroIndexArr:
continue
if indMod == comb_nonZeroIndexArr[0]:
# Do ordinary PLSR prior to PLRS with orth. Xblocks
model = PLS(Xblock, Y_train_proc, numPC=comb[indMod])
# Get X scores and store them in a scores list. The
# scores are needed for the orthogonlisation step of
# the next X block.
scoresList.append(model.X_scores())
# Here prediction part starts.
# First estimate X scores for test set of X blocks
arrW = model.X_loadingsWeights()
arrP = model.X_loadings()
projScoresList = []
X_test_deflationList = [Xblocks_test_procList[indMod]]
for predInd in range(comb[indMod]):
projScores = np.dot(X_test_deflationList[-1], \
arrW[:,predInd])
projScoresList.append(projScores)
deflated_Xblock_test = X_test_deflationList[-1] - \
np.dot(projScores.reshape(1,-1), \
np.transpose(arrP[:,predInd]).reshape(1,-1))
X_test_deflationList.append(deflated_Xblock_test)
# Construct array which holds projected X test scores.
T_test = np.transpose(np.array(projScoresList))
cv_scoresList.append(T_test)
# Get Y loadings and scores regression coefficients
# for computation Y pred.
arrQ = model.Y_loadings()
arrC = model.scoresRegressionCoeffs()
# Now compute Ypred for the test set of the acutal
# X block.
tCQ = np.dot(T_test, np.dot(arrC, np.transpose(arrQ)))
tCQlist.append(tCQ)
XmeanList.append(model.X_means())
YmeanList.append(model.Y_means())
Wlist.append(arrW)
Qlist.append(arrQ)
Clist.append(arrC)
else:
# Orthogonalise and run PLS and model Y. Could also
# use residuals from previous model, which would give
# the same result.
# Stack X scores horizontally from previous PLS models
# and orthogonalise next X block with regard to the
# stacked X scores. If there is only one set of scores
# then stacking is not necessary
if len(scoresList) == 1:
T = scoresList[0]
cv_T = cv_scoresList[0]
else:
T = np.hstack(scoresList)
cv_T = np.hstack(cv_scoresList)
# Orthogonalisation process
# X_orth = X - TB
B = np.dot(np.dot(nl.inv(np.dot(np.transpose(T), T)), \
np.transpose(T)), Xblock)
orth_Xblock = Xblock - np.dot(T, B)
Blist.append(B)
orthoXblockList.append(orth_Xblock)
# Run PLSR on orthogonalised X block.
model = PLS(orth_Xblock, Y_train_proc, \
numPC=comb[indMod])
scoresList.append(model.X_scores())
# Orthogonalisation of test set of X block
orth_Xblock_test = Xblocks_test_procList[indMod] - \
np.dot(cv_T, B)
# Here the prediction part starts.
# First estimate X scores for test set of X blocks
arrW = model.X_loadingsWeights()
arrP = model.X_loadings()
projScoresList = []
X_test_deflationList = [orth_Xblock_test]
for predInd in range(comb[indMod]):
projScores = np.dot(X_test_deflationList[-1], \
arrW[:,predInd])
projScoresList.append(projScores)
deflated_Xblock_test = X_test_deflationList[-1] - \
np.dot(projScores.reshape(1,-1), \
np.transpose(arrP[:,predInd]).reshape(1,-1))
X_test_deflationList.append(deflated_Xblock_test)
# Construct array which holds projected X test scores.
T_test = np.transpose(np.array(projScoresList))
cv_scoresList.append(T_test)
# Get Y loadings and scores regression coefficients
# for computation Y pred.
arrQ = model.Y_loadings()
arrC = model.scoresRegressionCoeffs()
# Now compute Ypred for the test set of the acutal
# X block.
tCQ = np.dot(T_test, np.dot(arrC, np.transpose(arrQ)))
tCQlist.append(tCQ)
XmeanList.append(model.X_means())
Wlist.append(arrW)
Qlist.append(arrQ)
Clist.append(arrC)
# Here the Ypreds for one segment are added across X blocks.
# The Ypreds are stored in a list that is later converted into
# a YpredFull that has the same dimensions (number of rows)
# as Y.
tCQsum = np.sum(tCQlist, axis=0)
if self.Ystand == True:
Ypred_cv = (tCQsum * Y_train_std) + Y_train_mean
else:
Ypred_cv = tCQsum + Y_train_mean
YpredList_cv.append(Ypred_cv)
# Now construct Ypred from cross validated Ypred of each segment
YpredFull = np.vstack(YpredList_cv)
#resultsCurrentComb['cv data'] = segDict
resultsCurrentComb['x scores'] = scoresList
resultsCurrentComb['B list'] = Blist
resultsCurrentComb['orth X block'] = orthoXblockList
resultsCurrentComb['new X scores'] = cv_scoresList
resultsCurrentComb['W list'] = Wlist
resultsCurrentComb['C list'] = Clist
resultsCurrentComb['Q list'] = Qlist
resultsCurrentComb['tCQ list'] = tCQlist
resultsCurrentComb['tCQ sum'] = tCQsum
resultsCurrentComb['Ypred_cv'] = Ypred_cv
resultsCurrentComb['YpredList_cv'] = YpredList_cv
resultsCurrentComb['YpredFull'] = YpredFull
#resultsCurrentComb['X means'] = XmeanList
#resultsCurrentComb['Y means'] = YmeanList
self.scoresList = scoresList
self.Blist = Blist
self.cv_scoresList = cv_scoresList
comb_PRESS = np.sum(np.square(self.arrY_input - YpredFull))
comb_MSEP = comb_PRESS / np.size(self.arrY_input)
comb_RMSEP = np.sqrt(comb_MSEP)
self.sopls_RMSECVarr[comb] = comb_RMSEP
self.PRESS[comb] = comb_PRESS
self.resultsAllCombs[comb] = resultsCurrentComb
# # Temporary code:
# # Computing RMSECV for Y1 and Y2
# Y1_comb_PRESS = np.sum(np.square(self.arrY_input[:,0] - sumYpred[:,0]))
# Y1_comb_MSEP = Y1_comb_PRESS / np.shape(self.arrY_input)[0]
# Y1_comb_RMSEP = np.sqrt(Y1_comb_MSEP)
# self.sopls_RMSECVarr_Y1[comb] = Y1_comb_RMSEP
#
# Y2_comb_PRESS = np.sum(np.square(self.arrY_input[:,1] - sumYpred[:,1]))
# Y2_comb_MSEP = Y2_comb_PRESS / np.shape(self.arrY_input)[0]
# Y2_comb_RMSEP = np.sqrt(Y2_comb_MSEP)
# self.sopls_RMSECVarr_Y2[comb] = Y2_comb_RMSEP
# PRESS_segment = np.sum(np.square(Y_test - sumYpred))
# comb_PRESSlist.append(PRESS_segment)
# comb_PRESS = np.sum(comb_PRESSlist)
# comb_MSEP = comb_PRESS / np.shape(self.arrY_input)[0]
# comb_RMSEP = np.sqrt(comb_MSEP)
# self.sopls_RMSECVarr[comb] = comb_RMSEP
# self.PRESS[comb] = comb_PRESS
def results(self):
resDict = {}
resDict['combRes'] = self.resultsAllCombs
return resDict
def modelSettings(self):
"""
Returns a dictionary holding most important model settings.
"""
settings = {}
settings['Y'] = self.arrY_input
settings['X blocks'] = self.XblocksList_input
settings['Xstand'] = self.XstandList
settings['Ystand'] = self.Ystand
#settings['analysed X blocks'] = self.XblocksList
#settings['analysed Y'] = self.arrY
settings['X comps'] = self.XcompsList_input
settings['used X comps'] = self.XcompsList
return settings
def RMSECV(self):
"""
Returns an array holding RMSECV of SO-PLS
"""
return self.sopls_RMSECVarr
#return [self.sopls_RMSECVarr, self.sopls_RMSECVarr_Y1,self.sopls_RMSECVarr_Y2]
def plotRMSEP(RMSEParr, *args):
"""
Input:
======
RMSEParr: type <array>: Array holding the RMSEP values returned by function
lsplsCV.
Output:
=======
A scatter plot showing RMSEP for each possible combination of components
chosen.
"""
# Check whether further arguments are provided. If not, then go for
# maximum number of components, which is the dimension of the RMSEP array.
if len(args) == 0:
print('no further parameters given')
dummyArr = np.zeros(np.shape(RMSEParr))
# Build dummyArr according to provided components and use it for iterating
# through the desired combinations. May be a multi-dimensional array.
# Example: [3,2,2] will result in plotting [2,1,1] components because of
# Python syntax counting from zero. Need therefore to add 1 to the
# submitted component numbers.
else:
newArgs = []
for item in args[0]:
newArgs.append(item + 1)
dummyArr = np.zeros(newArgs)
# This is plotting code. Plot RMSEP values in a scatter plot.
fig = plt.figure()
ax = fig.add_subplot(111)
    # Now plot the RMSEP values in a scatter plot. Save the minimum RMSEP
# for each total number of components in a dictionary named minima.
minima = {}
for ind, x in np.ndenumerate(dummyArr):
#print('++++++++++', ind, '-', x, '--', sum(ind))
x = sum(ind)
# Use RMSEP with any components as maxRMSEP. If RMSEP is larger than
# maxRMSEP for any combination of components it will not be plotted.
if x == 0:
maxRMSEP = RMSEParr[ind]
# print('MAX RMSEP:', maxRMSEP)
# print('MIN RMSEP:', np.min(RMSEParr))
if RMSEParr[ind] > maxRMSEP:
# print('............ MAX OUT at', ind, '-', x, '--', sum(ind))
continue
else:
            # Make a text label out of the tuple holding the number of components.
            # that is, from (3,0,1) make a text label '301'.
text = ''
for i in ind:
#text += str(i)
text = text + '_' + str(i)
            # Plot circles for each RMSEP value. This needs to be done, as text
# alone will not be printed/shown in matplotlib.
ax.scatter(x, RMSEParr[ind], s=10, c='w', marker='o', edgecolor='grey')
ax.text(x, RMSEParr[ind], text, fontsize=13)
# # TEMPORARY CODE:
# if RMSEParr[ind] < 2.15:
#
# if x >= 10 and x <= 16:
#
# # Plot cirles for each RMSEP value. This needs to be done, as text
# # alone will not be printed/shown in matplotlib.
# ax.scatter(x, RMSEParr[ind], s=10, c='w', marker='o', edgecolor='grey')
# ax.text(x, RMSEParr[ind], text, fontsize=13)
# Store minimum RMSEP in dictionary named minima.
#print('++++++++++', ind, '-', x, '--', sum(ind))
if x in minima.keys():
if RMSEParr[ind] < minima[x]:
minima[x] = RMSEParr[ind]
else:
minima[x] = RMSEParr[ind]
# Find maximum total number of components for iteration. Need to make a
# list holding all minimum RMSEP.
maxComp = max(minima.keys())
minimaList = []
xPosList = []
for comps in range(maxComp + 1):
try:
minimaList.append(minima[comps])
xPosList.append(comps)
except KeyError:
continue
# Plot line for lowest RMSEP for each total number of components.
ax.plot(xPosList, minimaList, 'r--')
ax.set_xlabel('# components')
ax.set_ylabel('RMSEP')
# # TEMPORARY CODE
# ax.set_xlim(9.5, 18.8)
# ax.set_ylim(1.6, 2.2)
plt.show()
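# Minimal usage sketch for plotRMSEP (hypothetical object name; `cvModel` is
# assumed to be a fitted cross-validation object exposing RMSECV() as above):
#
#     rmsecv = cvModel.RMSECV()       # array indexed by component combination
#     plotRMSEP(rmsecv)               # plot every computed combination
#     plotRMSEP(rmsecv, [5, 3])       # limit to 5 resp. 3 components per X block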
class SOPLS:
"""
GENERAL INFO
------------
    This class carries out Sequential Orthogonalised Partial Least Squares
regression (SO-PLS) for predetermined number of components in each
X block.
"""
def __init__(self, Y, XblocksList, Xcomps, **kargs):
"""
Do SO-PLS for a given number of components. Compute all important
features as Xscores, Xloadings, etc.
"""
#==============================================================================
# Check what is provided by user for SO-PLS
#==============================================================================
# Acess Y block and X blocks
self.arrY_input = Y.copy()
self.XblocksList_input = XblocksList[:]
# Get number of components for each X block
self.XcompsList_input = Xcomps[:]
# If number of components are provided, check whether that many
# components can be computed. If given number of components is too
# high, set number to max possible.
self.XcompsList = []
for ind, comp in enumerate(self.XcompsList_input):
givenComp = self.XcompsList_input[ind]
# Check each given number of X components against maximum possible
# number of X components
maxComp = min(np.shape(self.XblocksList_input[ind]))
if maxComp > 20: maxComp = 20
if givenComp > maxComp:
self.XcompsList.append(maxComp)
else:
self.XcompsList.append(givenComp)
# Check whether cvType is provided. If NOT, then use "loo" by default.
if 'cvType' not in kargs.keys():
self.cvType = ["loo"]
else:
self.cvType = kargs['cvType']
# Check whether standardisation of X and Y are requested by user. If
# NOT, then all X blocks and Y are only centred by default.
if 'Xstand' not in kargs.keys():
self.XstandList = []
for Xblock in self.XblocksList_input:
self.XstandList.append(False)
else:
self.XstandList = kargs['Xstand']
if 'Ystand' not in kargs.keys():
self.Ystand = False
else:
self.Ystand = kargs['Ystand']
# Check dimensionality of Y and choose between PLS1 or PLS2 thereafter
numYvars = np.shape(self.arrY_input)[1]
if numYvars == 1:
PLS = plsr.nipalsPLS1
else:
PLS = plsr.nipalsPLS2
#==============================================================================
# From here SO-PLS algorithm starts (CALIBRATION)
#==============================================================================
# Collect relevant computation results in lists
self.Xblocks_meanList = []
self.XscoresList = []
self.XloadingsList = []
self.XcorrLoadingsList = []
self.XcumCalExplVars_indVar = []
self.XcumCalExplVars = []
self.XpredCalList = []
self.XcumValExplVars_indVar = []
self.XcumValExplVars = []
self.XpredValList = []
self.YscoresList = []
self.YloadingsList = []
self.YcorrLoadingsList = []
self.YcumPredCalList = []
self.YcumCalExplVar = []
self.YcumPredValList = []
self.YcumValExplVar = []
# Check which X block is the first that has one or more components
# to be computed (first non-zero).
comb_nonZeroIndexArr = np.flatnonzero(np.array(self.XcompsList))
position_firstNonZero = comb_nonZeroIndexArr[0]
actualComps = self.XcompsList[position_firstNonZero]
print('Positions of non-zero:', position_firstNonZero)
print('Actual # of comps of first non-zero:', actualComps)
        # When ALL X blocks have zero components do nothing and
        # continue with the next components combination
# =========================================================
if sum(self.XcompsList) == 0:
print('nothing to do when no components are given')
# Do ordinary PLSR when only one X block has non-zero components.
# ===============================================================
elif len(comb_nonZeroIndexArr) == 1:
model = PLS(self.XblocksList_input[position_firstNonZero], \
self.arrY_input, \
numPC=actualComps, \
Xstand = self.XstandList[position_firstNonZero], \
Ystand = self.Ystand, \
cvType = self.cvType)
# Collect results for X blocks
# ----------------------------
self.Xblocks_meanList.append(model.X_means())
self.XscoresList.append(model.X_scores())
self.XloadingsList.append(model.X_loadings())
self.XcorrLoadingsList.append(model.X_corrLoadings())
self.XcumCalExplVars_indVar.append(model.X_cumCalExplVar_indVar())
self.XcumCalExplVars.append(model.X_cumCalExplVar())
self.XpredCalList.append(model.X_predCal()\
[self.XcompsList_input[position_firstNonZero]])
self.XcumValExplVars_indVar.append(model.X_cumValExplVar_indVar())
self.XcumValExplVars.append(model.X_cumValExplVar())
self.XpredValList.append(model.X_predVal()\
[self.XcompsList_input[position_firstNonZero]])
# Collect results for Y block
# ---------------------------
self.Y_mean = model.Y_means()
self.YscoresList.append(model.Y_scores())
self.YloadingsList.append(model.Y_loadings())
self.YcorrLoadingsList.append(model.Y_corrLoadings())
self.YcumPredCalList.append(model.Y_predCal()\
[self.XcompsList_input[position_firstNonZero]])
Y_cal_first = model.Y_cumCalExplVar_indVar()[0,:]
Y_cal_last = model.Y_cumCalExplVar_indVar()[-1,:]
self.YcumCalExplVar_indVar = np.vstack((Y_cal_first, Y_cal_last))
#self.YcumCalExplVar_indVar = model.Y_cumCalExplVar_indVar()
self.YcumCalExplVar.append(0)
self.YcumCalExplVar.append(model.Y_cumCalExplVar()\
[self.XcompsList_input[position_firstNonZero]])
self.YcumPredValList.append(model.Y_predVal()\
[self.XcompsList_input[position_firstNonZero]])
Y_val_first = model.Y_cumValExplVar_indVar()[0,:]
Y_val_last = model.Y_cumValExplVar_indVar()[-1,:]
self.YcumValExplVar_indVar = np.vstack((Y_val_first, Y_val_last))
#self.YcumValExplVar_indVar = model.Y_cumValExplVar_indVar()
self.YcumValExplVar.append(0)
self.YcumValExplVar.append(model.Y_cumValExplVar()\
[self.XcompsList_input[position_firstNonZero]])
# Collect other general results
# -----------------------------
self.orthoXblockList = []
self.Y_proc = model.modelSettings()['analysed Y']
self.Xblocks_procList = model.modelSettings()['analysed X']
# If X components for more than one X block are given, then run
# SO-PLS computations.
# =============================================================
else:
self.Xblocks_stdList = []
self.Xblocks_procList = []
# ----------------------------------------------------------
# Center or standardise X blocks according to user's choice
for ind, item in enumerate(self.XblocksList_input):
if self.XstandList[ind] == True:
# Compute standardised X blocks using mean and STD
X_mean = np.average(item, axis=0).reshape(1,-1)
X_std = np.std(item, axis=0, ddof=1).reshape(1,-1)
X_proc = (item - X_mean) / X_std
# Append each standardised X block to X blocks training
# list
self.Xblocks_meanList.append(X_mean)
self.Xblocks_stdList.append(X_std)
self.Xblocks_procList.append(X_proc)
else:
# Compute centred X blocks using mean
X_mean = np.average(item, axis=0).reshape(1,-1)
X_proc = item - X_mean
# Append each centred X block to X blocks training list
self.Xblocks_meanList.append(X_mean.reshape(1,-1))
self.Xblocks_stdList.append(None)
self.Xblocks_procList.append(X_proc)
# ----------------------------------------------------------
# ----------------------------------------------------------
# Center or standardise Y according to user's choice
if self.Ystand == True:
# Standardise training set Y using mean and STD
self.Y_mean = np.average(self.arrY_input, axis=0).reshape(1,-1)
self.Y_std = np.std(self.arrY_input, axis=0, ddof=1).reshape(1,-1)
self.Y_proc = (self.arrY_input - self.Y_mean) / self.Y_std
else:
# Centre training set Y using mean
self.Y_mean = np.average(self.arrY_input, axis=0).reshape(1,-1)
self.Y_proc = self.arrY_input - self.Y_mean
self.Y_std = None
# ----------------------------------------------------------
# Now start modelling sequential PLSR over all X blocks.
# First X block with non-zero X components will be modelled
# with ordinary PLSR. For all following X blocks with non-
# zero components, the X block must be orthogonalised with
# regard to the X scores of the prior X block. Only then the
# orthogonalised X block can be modelled against Y.
self.Blist = []
self.orthoXblockList = []
self.Wlist = []
self.Qlist = []
self.Clist = []
YprocPredList = []
for indMod, Xblock in enumerate(self.Xblocks_procList):
if indMod not in comb_nonZeroIndexArr:
print('NO COMPS for this BLOCK')
continue
# Do PLSR on first X block (not orthogonalised)
if indMod == comb_nonZeroIndexArr[0]:
# Do ordinary PLSR prior to PLRS with orth. Xblocks
model = PLS(Xblock, self.Y_proc, \
numPC=self.XcompsList_input[indMod], \
cvType=self.cvType)
# Get X scores and store them in a scores list. The
# scores are needed for the orthogonlisation step of
# the next X block.
self.XscoresList.append(model.X_scores())
# Collect X loadings and X correlation loadings in a list
self.XloadingsList.append(model.X_loadings())
self.XcorrLoadingsList.append(model.X_corrLoadings())
# Get calibrated explained variance in first X block
self.XcumCalExplVars_indVar.append(model.X_cumCalExplVar_indVar())
self.XcumCalExplVars.append(model.X_cumCalExplVar())
# Get X pred for calibration for chosen number of
# components for first X block
XpredCal_proc = model.X_predCal()[self.XcompsList_input[indMod]]
if self.XstandList[indMod] == True:
# XpredCal = (XpredCal_proc * X_std) + \
# self.Xblocks_meanList[indMod]
XpredCal = (XpredCal_proc * self.Xblocks_stdList[indMod]) + \
self.Xblocks_meanList[indMod]
else:
XpredCal = XpredCal_proc + self.Xblocks_meanList[indMod]
self.XpredCalList.append(XpredCal)
# Get X pred for validation for chosen number of
# components for first X block
XpredVal_proc = model.X_predVal()[self.XcompsList_input[indMod]]
if self.XstandList[indMod] == True:
# XpredVal = (XpredVal_proc * X_std) + \
# self.Xblocks_meanList[indMod]
XpredVal = (XpredVal_proc * self.Xblocks_stdList[indMod]) + \
self.Xblocks_meanList[indMod]
else:
XpredVal = XpredVal_proc + self.Xblocks_meanList[indMod]
self.XpredValList.append(XpredVal)
# Get validated explained variance in first X block
self.XcumValExplVars_indVar.append(model.X_cumValExplVar_indVar())
self.XcumValExplVars.append(model.X_cumValExplVar())
# Get Y scores, Y loadings and Y correlation loadings for
# the chosen number of components in this X block
self.YscoresList.append(model.Y_scores())
self.YloadingsList.append(model.Y_loadings())
self.YcorrLoadingsList.append(model.Y_corrLoadings())
# Get Y pred from calibration. This Y is processed and
# needs to be 'un-processed' (un-center, un-standardise)
# before being put into list.
YpredCal_proc = model.Y_predCal()[self.XcompsList_input[indMod]]
YprocPredList.append(YpredCal_proc)
if self.Ystand == True:
YpredCal = (YpredCal_proc * self.Y_std) + \
self.Y_mean
else:
YpredCal = YpredCal_proc + self.Y_mean
self.YcumPredCalList.append(YpredCal)
# Do PLSR on all other X blocks (orthogonalised)
else:
print('second or later X block')
# Orthogonalise next X block and run PLSR to model Y. Could
# also use residuals from previous model, which would give
# the same result.
# Stack X scores horizontally from previous PLS models
# and orthogonalise next X block with regard to the
# stacked X scores. If there is only one set of scores
# then stacking is not necessary
if len(self.XscoresList) == 1:
T = self.XscoresList[0]
else:
                        T = np.hstack(self.XscoresList)
# 3rd party
import numpy as np
import h5py
# Our own imports
from ..helpers import FileSystemTestCase
from deep_hipsc_tracking.tracking import persistence_data
# Constants
PERSISTENCE_ATTRS = [
'tt', 'xx', 'yy', 'time_scale',
'sm_tt', 'sm_xx', 'sm_yy',
'sm_dt', 'sm_dx', 'sm_dy',
'sm_ds', 'sm_dv', 'sm_dtheta',
'sm_unwrap_dtheta',
'pct_persistent', 'pct_quiescent',
'times', 'gap_times', 'speeds', 'distances', 'displacements',
'timeline', 'waveform', 'mask',
]
# Tests
class TestPersistenceData(FileSystemTestCase):
def assertPersistenceAttrsEqual(self, obj1, obj2):
for attr in PERSISTENCE_ATTRS:
self.assertTrue(hasattr(obj1, attr), msg='obj1 missing {}'.format(attr))
self.assertTrue(hasattr(obj2, attr), msg='obj2 missing {}'.format(attr))
val1 = getattr(obj1, attr)
val2 = getattr(obj2, attr)
msg = '"{}" mismatch: obj1.{}={} obj2.{}={}'.format(attr, attr, val1, attr, val2)
try:
if hasattr(val1, 'dtype') and hasattr(val2, 'dtype'):
np.testing.assert_almost_equal(val1, val2, err_msg=msg)
else:
self.assertEqual(val1, val2, msg=msg)
except Exception:
print(msg)
raise
def test_refuses_to_process_too_short_of_track(self):
tt = np.linspace(1, 10, 5)
xx = np.linspace(-10, 5, tt.shape[0])
yy = np.linspace(-5, 15, tt.shape[0])
time_scale = 3.0
obj = persistence_data.calc_track_persistence(
tt=tt, xx=xx, yy=yy, time_scale=time_scale,
resample_factor=2,
smooth_points=0,
interp_points=3,
min_persistence_points=6)
self.assertIsNone(obj)
obj = persistence_data.calc_track_persistence(
tt=tt, xx=xx, yy=yy, time_scale=time_scale,
resample_factor=2,
smooth_points=0,
interp_points=3,
min_persistence_points=5)
self.assertIsNotNone(obj)
def test_has_helpful_summary_attributes(self):
tt = np.linspace(1, 10, 5)
xx = np.linspace(-10, 5, tt.shape[0])
yy = np.linspace(-5, 15, tt.shape[0])
yy[3:] *= 5.0
time_scale = 3.0
space_scale = 2.0
obj = persistence_data.calc_track_persistence(
tt=tt, xx=xx, yy=yy,
time_scale=time_scale,
space_scale=space_scale,
resample_factor=2,
smooth_points=0,
interp_points=3,
min_persistence_points=1)
np.testing.assert_almost_equal(obj.duration, 27.0)
np.testing.assert_almost_equal(obj.distance, 172.0807, decimal=3)
np.testing.assert_almost_equal(obj.distance, np.sum(obj.sm_ds)*space_scale, decimal=3)
        np.testing.assert_almost_equal(obj.displacement, 169.3451, decimal=3)
import itertools
from itertools import chain, combinations
from copy import deepcopy
import math
import numpy as np
from openfermion.linalg import LinearQubitOperator
from openfermion.ops import FermionOperator, QubitOperator
import utils.cs_vqe_tools as c
import utils.qonversion_tools as qonvert
def bin_to_int(bits):
bit_string = deepcopy(bits)
if type(bit_string) == str:
bit_string = [int(b) for b in bit_string]
for index, b in enumerate(bit_string):
bit_string[index] = b * 2 ** (len(bit_string)-index-1)
return sum(bit_string)
def int_to_bin(integer, num_qubits):
if integer >= 2**num_qubits:
raise ValueError('Input integer larger than specified number of bits.')
bin_str=bin(integer)[2:]
leading_0 = ''.join(['0' for i in range(num_qubits-len(bin_str))])
return leading_0 + bin_str
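# Quick illustration of the two helpers above (hypothetical values):
#   bin_to_int('101') -> 5       since 1*2**2 + 0*2**1 + 1*2**0 = 5
#   int_to_bin(5, 4)  -> '0101'  zero-padded to the requested 4 qubits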
def A_action(molecule, num_qubits, basis_index, rot=False):
"""This will be computed programmatically from A operator in the future***
"""
B = list(itertools.product([0,1], repeat=num_qubits))
b1 = list(B[basis_index])
b2 = deepcopy(b1)
i1 = bin_to_int(b1)
if molecule == 'H2O':
b2[5] = (b2[5]+1)%2
i2 = bin_to_int(b2)
parity = b1[9]+b1[8]+b1[7]+b1[6]+b1[4]
Z_loc = b1[5]
elif molecule == 'HeH+':
if not rot:
b2[0] = (b2[0]+1)%2
b2[6] = (b2[6]+1)%2
i2 = bin_to_int(b2)
parity = 1+sum(b1)
Z_loc = b1[6]
else:
b2[6] = (b2[6]+1)%2
i2 = bin_to_int(b2)
parity = b1[1]+b1[2]+b1[3]+b1[4]+b1[5]
Z_loc = b1[6]
else:
raise ValueError('Molecule is not recognised.')
return i1, i2, parity, Z_loc
def add_eigenstate(molecule, r1, r2, index, num_qubits, theta=0, custom_amp=None, rot=False):
"""
"""
i1, i2, parity, Z_loc = A_action(molecule, num_qubits, index, rot)
amp_ratio = (1 + r2 * (-1)**Z_loc) / (r1 * (-1)**(parity))
t = np.arctan(amp_ratio)
#print(q4, t)
#print(i1, ':', np.sin(t), i2, ':', np.cos(t))
psi = [0 for i in range(2**num_qubits)]
if custom_amp is None:
psi[i1] = np.sin(t)*np.exp(1j*theta)
psi[i2] = np.cos(t)*np.exp(1j*theta)
else:
psi[i1] = custom_amp[0]
psi[i2] = custom_amp[1]
return np.array(psi)
def expectation(op, state, num_qubits):
assert(type(op)==QubitOperator)
state = np.array(state)
    conj_state = np.conjugate(state)
from numpy import pi
import numpy as np
import math
#from sympy import Matrix
import pylab
#import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#from scipy.interpolate import Rbf
import pickle
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from Functions import *
theta = 0.5
ProcessedFiles = ['PTh=0.051886.txt','PertPQh=0.043478.txt','PVh=0.0677285.txt']
h = [0.051886,0.043478,0.0677285]
Basis = [Poly1,Poly2,Poly]
T = 20
y = 0
for Pfile in ProcessedFiles:
print(Pfile)
Nodes,EdgeNodes,ElementEdges,BoundaryNodes,Orientations = ProcessedMesh(Pfile)
dt = 0.05*h[y]**2
DivMat = primdiv(ElementEdges,EdgeNodes,Nodes,Orientations)
time = np.arange(0,T,dt)
InternalNodes,NumberInternalNodes = InternalObjects(BoundaryNodes,Nodes)
ME,MV,MJ = MFDAssembly(J,Nodes,EdgeNodes,ElementEdges,Orientations) #compute the mass matrices
#ME,MV,MJ = NewAssembly(J,Basis,Nodes,EdgeNodes,ElementEdges,Orientations) #compute the mass matrices
#ME,MV,MJ = LeastSquaresAssembly(J,Basis,Nodes,EdgeNodes,ElementEdges,Orientations)
#ME,MV,MJ = PiecewiseAssembly(J,Basis,Nodes,EdgeNodes,ElementEdges,Orientations)
#print('Piecewise Matrices Assembled')
#Let us construct the required matrices
curl = primcurl(EdgeNodes,Nodes) #the primary curl
    #D = np.zeros((len(Nodes),len(Nodes))) #this matrix is explained in the pdf
D = lil_matrix((len(Nodes),len(Nodes)))
for i in InternalNodes:
D[i,i]=1
D = D.tocsr()
    Aprime = MV+theta*dt*( (np.transpose(curl)
import logging as lo
import numpy as np
import scipy.sparse as sp
import warnings as wn
from . import regions as reg
logger = lo.getLogger('pyfds')
class Field:
"""Base class for all fields."""
def __init__(self):
self.material_regions = []
self.step = 0
self.matrices_assembled = False
self.t = None
@property
def num_points(self):
"""Returns number of points in the field."""
raise NotImplementedError
def get_index(self, position):
"""Returns the index of a point at the given position.
Args:
position: Position of the requested point.
Returns:
Index of the point.
"""
raise NotImplementedError
def material_vector(self, mat_parameter):
"""Get a vector that contains the specified material parameter for every point of the
field.
Args:
mat_parameter: Material parameter of interest.
Returns:
Vector which contains the specified material parameter for each point in the field.
"""
param_found = False
mat_vector = np.zeros(self.num_points)
for mat_reg in self.material_regions:
for mat in mat_reg.materials:
if hasattr(mat, mat_parameter):
mat_vector[mat_reg.region.indices] = getattr(mat, mat_parameter)
param_found = True
if not param_found:
wn.warn('Material parameter {} not found in set materials. Returning zeros.'
.format(mat_parameter), stacklevel=2)
logger.warning(
'Material parameter {} not found in set materials. Returning zeros.'
.format(mat_parameter))
return mat_vector
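    # For example, `field.material_vector('density')` would return one value per
    # grid point, assuming the assigned materials define a `density` attribute
    # (the attribute name here is purely illustrative).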
def assemble_matrices(self):
"""Assemble the matrices and vectors required for simulation."""
raise NotImplementedError
def sim_step(self):
"""Simulate one step."""
raise NotImplementedError
def simulate(self, num_steps=None):
"""Starts the simulation.
Args:
num_steps: Number of steps to simulate (self.t.samples by default).
"""
if not num_steps:
num_steps = self.t.samples
# log progress only if simulation run in not segmented
progress_logger = ProgressLogger(num_steps)
else:
progress_logger = None
if not self.matrices_assembled:
self.assemble_matrices()
logger.info('Matrices created.')
logger.info('Starting simulation of {} steps.'.format(num_steps))
start_step = self.step
while self.step < start_step + num_steps:
self.sim_step()
if progress_logger:
progress_logger.log(self.step)
self.step += 1
logger.info('Simulation of {} steps completed.'.format(num_steps))
def get_point_region(self, position, name=''):
"""Creates a point region at the given position.
Args:
position: Position of the point region.
name: Name of the point region.
Returns:
Point region.
"""
return reg.PointRegion([self.get_index(position)], position, name=name)
def add_material_region(self, *args, **kwargs):
"""Adds a material region to the field.
Args:
See pyfds.regions.MaterialRegion constructor arguments.
"""
new_material_region = reg.MaterialRegion(*args, **kwargs)
self.material_regions.append(new_material_region)
logger.info('Material region {} added.'.format(new_material_region.region.name))
def reset(self):
"""Reset the field to all-zero but keep all boundaries to enable repeated simulation using
the same field object."""
for name in dir(self):
if type(getattr(self, name)) == FieldComponent:
getattr(self, name).values = np.zeros_like(getattr(self, name).values)
self.step = 0
class Field1D(Field):
"""Class for one-dimensional fields."""
def __init__(self, x_samples, x_delta, t_samples, t_delta, material):
"""Class constructor.
Args:
x_samples: Number of samples in x direction.
x_delta: Increment in x direction.
t_samples: Number of time samples.
t_delta: Time increment.
material: Main material of the field.
"""
super().__init__()
self.x = Dimension(x_samples, x_delta)
self.t = Dimension(t_samples, t_delta)
# add main material
self.add_material_region(self.get_line_region((0, max(self.x.vector)), name='main'),
material)
@property
def num_points(self):
"""Returns number of points in the field."""
return self.x.samples
def d_x(self, factors=None, variant='forward'):
"""Creates a sparse matrix for computing the first derivative with respect to x multiplied
by factors given for every point. Uses forward difference quotient by default.
Args:
factors: Factor for each point to be applied after derivation.
variant: Variant for the difference quotient ('forward', 'central', or 'backward').
Returns:
            Sparse matrix to calculate derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
if variant == 'forward':
return sp.dia_matrix((np.array([-factors, factors]), [0, 1]),
shape=(self.num_points, self.num_points))
elif variant == 'central':
return sp.dia_matrix((np.array([-factors / 2, factors / 2]), [-1, 1]),
shape=(self.num_points, self.num_points))
elif variant == 'backward':
return sp.dia_matrix((np.array([-factors, factors]), [-1, 0]),
shape=(self.num_points, self.num_points))
else:
raise ValueError('Unknown difference quotient variant {}.'.format(variant))
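    # Illustration (not part of the original code): with num_points == 4 and unit
    # factors, the 'forward' variant above assembles the banded matrix
    #   [[-1  1  0  0]
    #    [ 0 -1  1  0]
    #    [ 0  0 -1  1]
    #    [ 0  0  0 -1]]
    # so multiplying it by a field vector yields f[i+1] - f[i] at every interior point.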
def d_x2(self, factors=None):
"""Creates a sparse matrix for computing the second derivative with respect to x multiplied
by factors given for every point. Uses central difference quotient.
Args:
factors: Factor for each point to be applied after derivation.
Returns:
            Sparse matrix to calculate second derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
return sp.dia_matrix((np.array([factors, -2*factors, factors]), [-1, 0, 1]),
shape=(self.num_points, self.num_points))
def get_index(self, position):
"""Returns the index of a point at the given position.
Args:
position: Position of the requested point.
Returns:
Index of the point.
"""
return self.x.get_index(position)
def get_position(self, index):
"""Returns the position of a point with the given index.
Args:
index: Index of a point.
Returns:
Position of the point as x coordinate.
"""
return self.x.vector[index]
def get_line_region(self, position, name=''):
"""Creates a line region at the given position (start, end), inclusive.
Args:
position: Position of the line region (start, end), as x coordinates.
name: Name of the region.
Returns:
Line region.
"""
return reg.LineRegion([index for index in range(self.get_index(position[0]),
self.get_index(position[1]) + 1)],
position, name=name)
class Field2D(Field):
"""Class for two-dimensional fields."""
def __init__(self, x_samples, x_delta, y_samples, y_delta, t_samples, t_delta, material):
"""Class constructor.
Args:
x_samples: Number of samples in x direction.
x_delta: Increment in x direction.
y_samples: Number of samples in y direction.
y_delta: Increment in y direction.
t_samples: Number of time samples.
t_delta: Time increment.
material: Main material of the field.
"""
super().__init__()
self.x = Dimension(x_samples, x_delta)
self.y = Dimension(y_samples, y_delta)
self.t = Dimension(t_samples, t_delta)
# add main material
self.add_material_region(self.get_rect_region(
(0, 0, max(self.x.vector), max(self.y.vector)), name='main'), material)
@property
def num_points(self):
return self.x.samples * self.y.samples
def d_x(self, factors=None, variant='forward'):
"""Creates a sparse matrix for computing the first derivative with respect to x multiplied
by factors given for every point. Uses forward difference quotient by default.
Args:
factors: Factor for each point to be applied after derivation.
variant: Variant for the difference quotient ('forward', 'central', or 'backward').
Returns:
            Sparse matrix to calculate derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
if variant == 'forward':
return sp.dia_matrix((np.array([-factors, factors]), [0, 1]),
shape=(self.num_points, self.num_points))
elif variant == 'central':
return sp.dia_matrix((np.array([-factors/2, factors/2]), [-1, 1]),
shape=(self.num_points, self.num_points))
elif variant == 'backward':
return sp.dia_matrix((np.array([-factors, factors]), [-1, 0]),
shape=(self.num_points, self.num_points))
else:
raise ValueError('Unknown difference quotient variant {}.'.format(variant))
def d_y(self, factors=None, variant='forward'):
"""Creates a sparse matrix for computing the first derivative with respect to y multiplied
by factors given for every point. Uses forward difference quotient by default.
Args:
factors: Factor for each point to be applied after derivation.
variant: Variant for the difference quotient ('forward', 'central', or 'backward').
Returns:
            Sparse matrix to calculate derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
if variant == 'forward':
return sp.dia_matrix((np.array([-factors, factors]), [0, self.x.samples]),
shape=(self.num_points, self.num_points))
elif variant == 'central':
return sp.dia_matrix(
(np.array([-factors/2, factors/2]), [-self.x.samples, self.x.samples]),
shape=(self.num_points, self.num_points))
elif variant == 'backward':
return sp.dia_matrix((np.array([-factors, factors]), [-self.x.samples, 0]),
shape=(self.num_points, self.num_points))
else:
raise ValueError('Unknown difference quotient variant {}.'.format(variant))
def d_x2(self, factors=None):
"""Creates a sparse matrix for computing the second derivative with respect to x multiplied
by factors given for every point. Uses central difference quotient.
Args:
factors: Factor for each point to be applied after derivation.
Returns:
            Sparse matrix to calculate second derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
return sp.dia_matrix((np.array([factors, -2*factors, factors]), [-1, 0, 1]),
shape=(self.num_points, self.num_points))
def d_y2(self, factors=None):
"""Creates a sparse matrix for computing the second derivative with respect to y multiplied
by factors given for every point. Uses central difference quotient.
Args:
factors: Factor for each point to be applied after derivation.
Returns:
            Sparse matrix to calculate second derivatives of field components.
"""
# use ones as factors if none are specified
if factors is None:
factors = np.array(1).repeat(self.num_points)
return sp.dia_matrix((np.array([factors, -2*factors, factors]),
[-self.x.samples, 0, self.x.samples]),
shape=(self.num_points, self.num_points))
def get_index(self, position):
"""Returns the index of a point at the given position.
Args:
position: Position of the requested point.
Returns:
Index of the point.
"""
return self.x.get_index(position[0]) + self.y.get_index(position[1]) * self.x.samples
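        # e.g. with x.samples == 10, the point at x index 3 and y index 2 maps to
        # flat index 3 + 2*10 == 23 (row-major layout, x varying fastest).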
def get_position(self, index):
"""Returns the position of a point with the given index.
Args:
index: Index of a point.
Returns:
Position of the point as (x coordinate, y coordinate).
"""
return self.x.vector[index % self.x.samples], self.y.vector[int(index / self.x.samples)]
def get_line_region(self, position, name=''):
"""Creates a line region at the given position (start_x, start_y, end_x, end_y),
inclusive.
Args:
position: Position of the line region (start_x, start_y, end_x, end_y).
name: Name of the region.
Returns:
Line region.
"""
start_idx = self.get_index(position[:2])
end_idx = self.get_index(position[2:])
x_diff = start_idx % self.x.samples - end_idx % self.x.samples
y_diff = int(start_idx / self.x.samples) - int(end_idx / self.x.samples)
        num_points = max(np.abs([x_diff, y_diff]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simulate the dog-sheep shepherding environment.
Each episode requires the dog to shepherd the sheep to the goal.
"""
# suppress runtime warnings
import warnings
warnings.filterwarnings("ignore")
# ipython debugging
from IPython.terminal.debugger import set_trace as keyboard
# core modules
import gym
import random
import numpy as np
from gym import spaces
import matplotlib.pyplot as plt
class ShepherdEnv(gym.Env):
"""
Define the shepherding environment.
The environment treats the dog as the agent
and the sheep as a part of the environment.
State:
1) Position of center of mass (x,y)
2) Position of farthest sheep (x,y)
3) Position of target (x,y)
4) Position of dog (x,y)
5) Radius of sheep (r)
6) Distance to target (d)
Action:
1) Increment in position of dog (x,y)
Reward:
1) Negative of farthest sheep distance to com (d_f)
2) Negative of com distance to target (d_t)
"""
def __init__(self, continuous=False, num_sheep=25, info_mode=0,
fixed_reset=False, sparse_reward=False):
# initialize observation space
obs_low = np.array(10*[-1000.0])
obs_high = np.array(10*[1000.0])
self.observation_space = spaces.Box(low=obs_low, high=obs_high)
# setup environment type
self.continuous = continuous
# initialize action space
if self.continuous:
act_low = np.array([-np.pi])
act_high = np.array([np.pi])
self.action_space = spaces.Box(low=act_low, high=act_high)
else:
self.action_space = spaces.Discrete(8)
# limit episode length
self.max_steps = 500
# conditions to terminate
self.boundary = 400.0
self.max_radius = 100.0
self.max_distance = 400.0
# create buffer and episode variable
self.curr_step = -1
self.curr_episode = -1
# radius for sheep to be considered as collected by dog
self.dog_collect_radius = 2.0
# parameters for initialization
self.init_sheep_root = 200.0
self.init_sheep_range = 50.0
self.init_dog_distance = 60.0
# weight multipliers for sheep forces
self.com_term = 1.05
self.noise_term = 0.3
self.inertia_term = 0.5
self.repulsion_dog_term = 1.0
self.repulsion_sheep_term = 2.0
# constants used to update environment
self.delta_sheep_pose = 1.0
self.dog_repulsion_dist = 70.0
self.sheep_repulsion_dist = 2.0
# assign number of sheep
self.num_sheep = num_sheep
# flag to show simulation, false by default
self.info_mode = info_mode
self.fixed_reset = fixed_reset
# info variables
self.episode_length = 0.0
self.episode_reward = 0.0
# flag for sparse reward
self.sparse_reward = sparse_reward
# initialize plot figure
self.fig = None
def step(self, action):
"""
The dog takes a step in the environment
Parameters
----------
action : float array
Returns
-------
ob, reward, episode_over, info : tuple
observation (float array) :
observation after dog position is updated.
reward (float) :
amount of reward achieved by dog in the previous step.
episode_over (bool) :
flag that indicates if the environment is reset or not.
info (dict) :
useful information about the environment for debugging.
"""
success = False
self.curr_step += 1
self._take_action(action)
self._take_action(action)
self._take_action(action)
# initialize reward and get state
reward = 0.0
ob = self._get_state()
# give dense rewards
if not self.sparse_reward:
reward = self._get_reward()
# bad terminal conditions
if self.curr_step >= self.max_steps \
or self.target_distance >= self.max_distance \
or self.mean_radius_sheep >= self.max_radius:
self.finish = True
if self.sparse_reward:
reward = -1.0
# good terminal conditions
if self.target_distance <= 1.0:
success = True
self.finish = True
if self.sparse_reward:
reward = 1.0
# update rl parameters
self.episode_length += 1
self.episode_reward += reward
# generate info return parameter
if self.info_mode == 1 and self.finish:
info = {'r':self.episode_reward, 'l':self.episode_length,
's': success}
else:
info = {'n':self.num_sheep, 's': success}
return ob, reward, self.finish, info
def reset(self):
"""
Reset the environment and return the init state
Returns
-------
observation (float array) : initial observation after reset.
"""
# initialize gym env variables
self.finish = False
self.curr_step = -1
self.curr_episode += 1
# initialize target position
        self.target = np.random.uniform(-10.0,10.0,size=(2))
import numpy as np
alist = [1,2,3,4,5]
narray = np.array([1,2,3,4])
print(alist)
print(narray)
print(type(alist))
print(type(narray))
print(narray + narray)
print(alist + alist)
print(narray * 3)
print(alist * 4)
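# Note the different semantics: numpy arrays apply + and * element-wise
# (narray + narray -> array([2, 4, 6, 8]), narray * 3 -> array([3, 6, 9, 12])),
# while for plain lists + concatenates and * repeats the sequence.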
npmatrix1 = np.array([narray,narray,narray])
npmatrix2 = np.array([alist,alist,alist])
npmatrix3 = np.array([narray,[1,1,1,1],narray])
print(npmatrix1)
print(npmatrix2)
print(npmatrix3)
okmatrix = np.array([[1,2],[3,4]])
print(okmatrix)
print(okmatrix * 2)
print('bad matrix :')
badmatrix = np.array([[1,2],[3,4],[5,6,7]])
print(badmatrix)
print(badmatrix * 2)
result = okmatrix * 2 + 1
print('result',result)
result1 = okmatrix + okmatrix
print('result 1 :',result1)
result2 = okmatrix - okmatrix
print('result 2 :',result2)
result = okmatrix * okmatrix
print(result)
matrix3x2 = np.array([[1,2],[3,4],[5,6]])
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback control flow."""
import pytest
from mindspore import Tensor, ms_function, context
from mindspore import dtype as mstype
import numpy as np
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if():
x = Tensor(1)
if x > Tensor(7):
return x
return x * 2
res = control_flow_if()
assert res == 2
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if():
x = np.array([1, 2, 3, 4, 5])
y = x % 2
z = Tensor(y)
if (x < y).any():
z = Tensor(x)
return z
res = control_flow_if()
assert np.all(res.asnumpy() == np.array([1, 0, 1, 0, 1]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_3():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if():
x = np.array([1])
if x <= 1:
x += 1
return Tensor(x)
res = control_flow_if()
assert res == 2
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_4():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if():
x = Tensor(7).astype("int32")
y = Tensor(0).astype("int32")
z = x + y
if z > y:
y = 5 * x + Tensor(7).astype("int32")
return y
res = control_flow_if()
assert res == 42
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_else_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_else():
x = Tensor(1)
if x > Tensor(7):
return x
x += Tensor(3)
return x * 2
res = control_flow_if_else()
assert res == 8
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_single_if_else_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_else():
x = np.array([1, 2, 3, 4, 5])
y = x % 2
if (x < y).any():
z = Tensor(x)
else:
z = Tensor(y)
return z
res = control_flow_if_else()
    assert np.all(res.asnumpy() == np.array([1, 0, 1, 0, 1]))
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# ---------------------------- ADAPTIVE DUPLIFACES --------------------------- #
# ------------------------------- version 0.84 ------------------------------- #
# #
# Creates duplicates of selected mesh to active morphing the shape according #
# to target faces. #
# #
# (c) <NAME> #
# (2017) #
# #
# http://www.co-de-it.com/ #
# #
# ############################################################################ #
import bpy
from bpy.types import (
Operator,
Panel,
PropertyGroup,
)
from bpy.props import (
BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
StringProperty,
PointerProperty
)
from mathutils import Vector
import numpy as np
from math import sqrt
import random, time
import bmesh
from .utils import *
def anim_tessellate_active(self, context):
ob = context.object
props = ob.tissue_tessellate
if not props.bool_hold:
try:
props.generator.name
props.component.name
bpy.ops.object.update_tessellate()
except: pass
def anim_tessellate_object(ob):
try:
#bpy.context.view_layer.objects.active = ob
bpy.ops.object.update_tessellate()
except:
return None
#from bpy.app.handlers import persistent
#@persistent
def anim_tessellate(scene):
# store selected objects
#scene = context.scene
try: active_object = bpy.context.object
except: active_object = None
try: selected_objects = bpy.context.selected_objects
except: selected_objects = []
if bpy.context.mode in ('OBJECT', 'PAINT_WEIGHT'):
old_mode = bpy.context.mode
if old_mode == 'PAINT_WEIGHT': old_mode = 'WEIGHT_PAINT'
for ob in scene.objects:
if ob.tissue_tessellate.bool_run:
hidden = ob.hide_viewport
ob.hide_viewport = False
for o in scene.objects:
                    if not o.hide_viewport: o.select_set(False)
bpy.context.view_layer.objects.active = ob
ob.select_set(True)
try:
bpy.ops.object.update_tessellate()
except: pass
ob.hide_viewport = hidden
# restore selected objects
for o in scene.objects:
if not o.hide_viewport: o.select_set(False)
for o in selected_objects:
if not o.hide_viewport: o.select_set(True)
bpy.context.view_layer.objects.active = active_object
try: bpy.ops.object.mode_set(mode=old_mode)
except: pass
return
def set_tessellate_handler(self, context):
old_handlers = []
blender_handlers = bpy.app.handlers.frame_change_post
for h in blender_handlers:
if "anim_tessellate" in str(h):
old_handlers.append(h)
for h in old_handlers: blender_handlers.remove(h)
for o in context.scene.objects:
if o.tissue_tessellate.bool_run:
blender_handlers.append(anim_tessellate)
break
return
class tissue_tessellate_prop(PropertyGroup):
bool_hold : BoolProperty(
name="Hold Update",
description="Prevent automatic update while other properties are changed",
default=False
)
bool_run : BoolProperty(
name="Animatable Tessellation",
description="Automatically recompute the tessellation when the frame is changed. Currently is not working during Render Animation",
default = False,
update = set_tessellate_handler
)
zscale : FloatProperty(
name="Scale", default=1, soft_min=0, soft_max=10,
description="Scale factor for the component thickness",
update = anim_tessellate_active
)
scale_mode : EnumProperty(
items=(
('CONSTANT', "Constant", "Uniform thinkness"),
('ADAPTIVE', "Proportional", "Preserve component's proportions")
),
default='ADAPTIVE',
name="Z-Scale according to faces size",
update = anim_tessellate_active
)
offset : FloatProperty(
name="Surface Offset",
default=1,
min=-1,
max=1,
soft_min=-1,
soft_max=1,
description="Surface offset",
update = anim_tessellate_active
)
mode : EnumProperty(
items=(
('BOUNDS', "Bounds", "The component fits automatically the size of the target face"),
('LOCAL', "Local", "Based on Local coordinates, from 0 to 1"),
('GLOBAL', 'Global', "Based on Global coordinates, from 0 to 1")),
default='BOUNDS',
name="Component Mode",
update = anim_tessellate_active
)
rotation_mode : EnumProperty(
items=(('RANDOM', "Random", "Random faces rotation"),
('UV', "Active UV", "Rotate according to UV coordinates"),
('DEFAULT', "Default", "Default rotation")),
default='DEFAULT',
name="Component Rotation",
update = anim_tessellate_active
)
fill_mode : EnumProperty(
items=(
('QUAD', 'Quad', 'Regular quad tessellation. Uses only 3 or 4 vertices'),
('FAN', 'Fan', 'Radial tessellation for polygonal faces'),
('PATCH', 'Patch', 'Curved tessellation according to the last ' +
             'Subsurf\n(or Multires) modifiers. Works only with 4-sided ' +
'patches.\nAfter the last Subsurf (or Multires) only ' +
'deformation\nmodifiers can be used')),
default='QUAD',
name="Fill Mode",
update = anim_tessellate_active
)
combine_mode : EnumProperty(
items=(
('LAST', 'Last', 'Show only the last iteration'),
('UNUSED', 'Unused', 'Combine each iteration with the unused faces of the previous iteration. Used for branching systems'),
('ALL', 'All', 'Combine the result of all iterations')),
default='LAST',
name="Combine Mode",
update = anim_tessellate_active
)
gen_modifiers : BoolProperty(
name="Generator Modifiers",
default=False,
description="Apply Modifiers and Shape Keys to the base object",
update = anim_tessellate_active
)
com_modifiers : BoolProperty(
name="Component Modifiers",
default=False,
description="Apply Modifiers and Shape Keys to the component object",
update = anim_tessellate_active
)
merge : BoolProperty(
name="Merge",
default=False,
description="Merge vertices in adjacent duplicates",
update = anim_tessellate_active
)
merge_thres : FloatProperty(
name="Distance",
default=0.001,
soft_min=0,
soft_max=10,
description="Limit below which to merge vertices",
update = anim_tessellate_active
)
generator : PointerProperty(
type=bpy.types.Object,
name="",
description="Base object for the tessellation",
update = anim_tessellate_active
)
component : PointerProperty(
type=bpy.types.Object,
name="",
description="Component object for the tessellation",
#default="",
update = anim_tessellate_active
)
bool_random : BoolProperty(
name="Randomize",
default=False,
description="Randomize component rotation",
update = anim_tessellate_active
)
random_seed : IntProperty(
name="Seed",
default=0,
soft_min=0,
soft_max=10,
description="Random seed",
update = anim_tessellate_active
)
bool_vertex_group : BoolProperty(
name="Map Vertex Group",
default=False,
description="Transfer all Vertex Groups from Base object",
update = anim_tessellate_active
)
bool_selection : BoolProperty(
name="On selected Faces",
default=False,
description="Create Tessellation only on selected faces",
update = anim_tessellate_active
)
bool_shapekeys : BoolProperty(
name="Use Shape Keys",
default=False,
description="Transfer Component's Shape Keys. If the name of Vertex "
"Groups and Shape Keys are the same, they will be "
"automatically combined",
update = anim_tessellate_active
)
bool_smooth : BoolProperty(
name="Smooth Shading",
default=False,
description="Output faces with smooth shading rather than flat shaded",
update = anim_tessellate_active
)
bool_materials : BoolProperty(
name="Transfer Materials",
default=False,
description="Preserve component's materials",
update = anim_tessellate_active
)
bool_material_id : BoolProperty(
name="Tessellation on Material ID",
default=False,
description="Apply the component only on the selected Material",
update = anim_tessellate_active
)
material_id : IntProperty(
name="Material ID",
default=0,
min=0,
description="Material ID",
update = anim_tessellate_active
)
bool_dissolve_seams : BoolProperty(
name="Dissolve Seams",
default=False,
description="Dissolve all seam edges",
update = anim_tessellate_active
)
iterations : IntProperty(
name="Iterations",
default=1,
min=1,
soft_max=5,
description="Automatically repeat the Tessellation using the "
+ "generated geometry as new base object.\nUsefull for "
+ "for branching systems. Dangerous!",
update = anim_tessellate_active
)
bool_combine : BoolProperty(
name="Combine unused",
default=False,
description="Combine the generated geometry with unused faces",
update = anim_tessellate_active
)
bool_advanced : BoolProperty(
name="Advanced Settings",
default=False,
description="Show more settings"
)
normals_mode : EnumProperty(
items=(
('VERTS', 'Along Normals', 'Consistent direction based on vertices normal'),
('FACES', 'Individual Faces', 'Based on individual faces normal')),
default='VERTS',
name="Direction",
update = anim_tessellate_active
)
bool_multi_components : BoolProperty(
name="Multi Components",
default=False,
description="Combine different components according to materials name",
update = anim_tessellate_active
)
error_message : StringProperty(
name="Error Message",
default=""
)
warning_message : StringProperty(
name="Warning Message",
default=""
)
bounds_x : EnumProperty(
items=(
('EXTEND', 'Extend', 'Default X coordinates'),
('CLIP', 'Clip', 'Trim out of bounds in X direction'),
('CYCLIC', 'Cyclic', 'Cyclic components in X direction')),
default='EXTEND',
name="Bounds X",
update = anim_tessellate_active
)
bounds_y : EnumProperty(
items=(
('EXTEND', 'Extend', 'Default Y coordinates'),
('CLIP', 'Clip', 'Trim out of bounds in Y direction'),
('CYCLIC', 'Cyclic', 'Cyclic components in Y direction')),
default='EXTEND',
name="Bounds Y",
update = anim_tessellate_active
)
cap_faces : BoolProperty(
name="Cap Holes",
default=False,
description="Cap open edges loops",
update = anim_tessellate_active
)
open_edges_crease : FloatProperty(
name="Open Edges Crease",
default=0,
min=0,
max=1,
description="Automatically set crease for open edges",
update = anim_tessellate_active
)
def store_parameters(operator, ob):
ob.tissue_tessellate.bool_hold = True
ob.tissue_tessellate.generator = bpy.data.objects[operator.generator]
ob.tissue_tessellate.component = bpy.data.objects[operator.component]
ob.tissue_tessellate.zscale = operator.zscale
ob.tissue_tessellate.offset = operator.offset
ob.tissue_tessellate.gen_modifiers = operator.gen_modifiers
ob.tissue_tessellate.com_modifiers = operator.com_modifiers
ob.tissue_tessellate.mode = operator.mode
ob.tissue_tessellate.rotation_mode = operator.rotation_mode
ob.tissue_tessellate.merge = operator.merge
ob.tissue_tessellate.merge_thres = operator.merge_thres
ob.tissue_tessellate.scale_mode = operator.scale_mode
ob.tissue_tessellate.bool_random = operator.bool_random
ob.tissue_tessellate.random_seed = operator.random_seed
ob.tissue_tessellate.fill_mode = operator.fill_mode
ob.tissue_tessellate.bool_vertex_group = operator.bool_vertex_group
ob.tissue_tessellate.bool_selection = operator.bool_selection
ob.tissue_tessellate.bool_shapekeys = operator.bool_shapekeys
ob.tissue_tessellate.bool_smooth = operator.bool_smooth
ob.tissue_tessellate.bool_materials = operator.bool_materials
ob.tissue_tessellate.bool_material_id = operator.bool_material_id
ob.tissue_tessellate.material_id = operator.material_id
ob.tissue_tessellate.bool_dissolve_seams = operator.bool_dissolve_seams
ob.tissue_tessellate.iterations = operator.iterations
ob.tissue_tessellate.bool_advanced = operator.bool_advanced
ob.tissue_tessellate.normals_mode = operator.normals_mode
ob.tissue_tessellate.bool_combine = operator.bool_combine
ob.tissue_tessellate.bool_multi_components = operator.bool_multi_components
ob.tissue_tessellate.combine_mode = operator.combine_mode
ob.tissue_tessellate.bounds_x = operator.bounds_x
ob.tissue_tessellate.bounds_y = operator.bounds_y
ob.tissue_tessellate.cap_faces = operator.cap_faces
ob.tissue_tessellate.bool_hold = False
return ob
def tessellate_patch(_ob0, _ob1, offset, zscale, com_modifiers, mode,
scale_mode, rotation_mode, rand_seed, bool_vertex_group,
bool_selection, bool_shapekeys, bool_material_id, material_id,
bounds_x, bounds_y):
random.seed(rand_seed)
ob0 = convert_object_to_mesh(_ob0)
me0 = _ob0.data
# Check if zero faces are selected
if _ob0.type == 'MESH':
bool_cancel = True
for p in me0.polygons:
check_sel = check_mat = False
if not bool_selection or p.select: check_sel = True
if not bool_material_id or p.material_index == material_id: check_mat = True
if check_sel and check_mat:
bool_cancel = False
break
if bool_cancel:
return 0
levels = 0
sculpt_levels = 0
render_levels = 0
bool_multires = False
multires_name = ""
not_allowed = ['FLUID_SIMULATION', 'ARRAY', 'BEVEL', 'BOOLEAN', 'BUILD',
'DECIMATE', 'EDGE_SPLIT', 'MASK', 'MIRROR', 'REMESH',
'SCREW', 'SOLIDIFY', 'TRIANGULATE', 'WIREFRAME', 'SKIN',
'EXPLODE', 'PARTICLE_INSTANCE', 'PARTICLE_SYSTEM', 'SMOKE']
modifiers0 = list(_ob0.modifiers)#[m for m in ob0.modifiers]
show_modifiers = [m.show_viewport for m in _ob0.modifiers]
show_modifiers.reverse()
modifiers0.reverse()
for m in modifiers0:
visible = m.show_viewport
#m.show_viewport = False
if m.type in ('SUBSURF', 'MULTIRES') and visible:
levels = m.levels
multires_name = m.name
if m.type == 'MULTIRES':
bool_multires = True
multires_name = m.name
sculpt_levels = m.sculpt_levels
render_levels = m.render_levels
else: bool_multires = False
break
elif m.type in not_allowed:
#ob0.data = old_me0
#bpy.data.meshes.remove(me0)
return "modifiers_error"
before = _ob0.copy()
#if ob0.type == 'MESH': before.data = me0
before_mod = list(before.modifiers)
before_mod.reverse()
for m in before_mod:
if m.type in ('SUBSURF', 'MULTIRES') and m.show_viewport:
before.modifiers.remove(m)
break
else: before.modifiers.remove(m)
before_subsurf = simple_to_mesh(before)
before_bm = bmesh.new()
before_bm.from_mesh(before_subsurf)
before_bm.faces.ensure_lookup_table()
for f in before_bm.faces:
if len(f.loops) != 4:
return "topology_error"
before_bm.edges.ensure_lookup_table()
for e in before_bm.edges:
if len(e.link_faces) == 0:
return "wires_error"
before_bm.verts.ensure_lookup_table()
for v in before_bm.verts:
if len(v.link_faces) == 0:
return "verts_error"
me0 = ob0.data
verts0 = me0.vertices # Collect generator vertices
if com_modifiers or _ob1.type != 'MESH': bool_shapekeys = False
# set Shape Keys to zero
if bool_shapekeys:
try:
original_key_values = []
for sk in _ob1.data.shape_keys.key_blocks:
original_key_values.append(sk.value)
sk.value = 0
except:
bool_shapekeys = False
if not com_modifiers and not bool_shapekeys:
mod_visibility = []
for m in _ob1.modifiers:
mod_visibility.append(m.show_viewport)
m.show_viewport = False
com_modifiers = True
ob1 = convert_object_to_mesh(_ob1, com_modifiers, False)
me1 = ob1.data
if mode != 'BOUNDS':
bpy.context.object.active_shape_key_index = 0
# Bound X
if bounds_x != 'EXTEND':
if mode == 'GLOBAL':
planes_co = ((0,0,0),(1,1,1))
plane_no = (1,0,0)
if mode == 'LOCAL':
planes_co = (ob1.matrix_world @ Vector((0,0,0)), ob1.matrix_world @ Vector((1,0,0)))
plane_no = planes_co[0]-planes_co[1]
bpy.ops.object.mode_set(mode='EDIT')
for co in planes_co:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.bisect(plane_co=co, plane_no=plane_no)
bpy.ops.mesh.mark_seam()
bpy.ops.object.mode_set(mode='OBJECT')
_faces = ob1.data.polygons
if mode == 'GLOBAL':
for f in [f for f in _faces if (ob1.matrix_world @ f.center).x > 1]:
f.select = True
for f in [f for f in _faces if (ob1.matrix_world @ f.center).x < 0]:
f.select = True
else:
for f in [f for f in _faces if f.center.x > 1]:
f.select = True
for f in [f for f in _faces if f.center.x < 0]:
f.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='FACE')
if bounds_x == 'CLIP':
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
if bounds_x == 'CYCLIC':
bpy.ops.mesh.split()
bpy.ops.object.mode_set(mode='OBJECT')
# Bound Y
if bounds_y != 'EXTEND':
if mode == 'GLOBAL':
planes_co = ((0,0,0),(1,1,1))
plane_no = (0,1,0)
if mode == 'LOCAL':
planes_co = (ob1.matrix_world @ Vector((0,0,0)), ob1.matrix_world @ Vector((0,1,0)))
plane_no = planes_co[0]-planes_co[1]
bpy.ops.object.mode_set(mode='EDIT')
for co in planes_co:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.bisect(plane_co=co, plane_no=plane_no)
bpy.ops.mesh.mark_seam()
bpy.ops.object.mode_set(mode='OBJECT')
_faces = ob1.data.polygons
if mode == 'GLOBAL':
for f in [f for f in _faces if (ob1.matrix_world @ f.center).y > 1]:
f.select = True
for f in [f for f in _faces if (ob1.matrix_world @ f.center).y < 0]:
f.select = True
else:
for f in [f for f in _faces if f.center.y > 1]:
f.select = True
for f in [f for f in _faces if f.center.y < 0]:
f.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='FACE')
if bounds_y == 'CLIP':
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
if bounds_y == 'CYCLIC':
bpy.ops.mesh.split()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='OBJECT')
# Component statistics
n_verts = len(me1.vertices)
# Create empty lists
new_verts = []
new_edges = []
new_faces = []
new_verts_np = np.array(())
# Component bounding box
min_c = Vector((0, 0, 0))
max_c = Vector((0, 0, 0))
first = True
for v in me1.vertices:
vert = v.co
if vert[0] < min_c[0] or first:
min_c[0] = vert[0]
if vert[1] < min_c[1] or first:
min_c[1] = vert[1]
if vert[2] < min_c[2] or first:
min_c[2] = vert[2]
if vert[0] > max_c[0] or first:
max_c[0] = vert[0]
if vert[1] > max_c[1] or first:
max_c[1] = vert[1]
if vert[2] > max_c[2] or first:
max_c[2] = vert[2]
first = False
bb = max_c - min_c
# adaptive XY
verts1 = []
for v in me1.vertices:
if mode == 'BOUNDS':
vert = v.co - min_c # (ob1.matrix_world * v.co) - min_c
vert[0] = (vert[0] / bb[0] if bb[0] != 0 else 0.5)
vert[1] = (vert[1] / bb[1] if bb[1] != 0 else 0.5)
vert[2] = (vert[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
elif mode == 'LOCAL':
vert = v.co.xyz
vert[2] *= zscale
#vert[2] = (vert[2] - min_c[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
elif mode == 'GLOBAL':
vert = ob1.matrix_world @ v.co
vert[2] *= zscale
try:
for sk in me1.shape_keys.key_blocks:
sk.data[v.index].co = ob1.matrix_world @ sk.data[v.index].co
except: pass
#verts1.append(vert)
v.co = vert
# Bounds X, Y
if mode != 'BOUNDS':
if bounds_x == 'CYCLIC':
move_verts = []
for f in [f for f in me1.polygons if (f.center).x > 1]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.x -= 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.x -= 1
except: pass
move_verts = []
for f in [f for f in me1.polygons if (f.center).x < 0]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.x += 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.x += 1
except: pass
if bounds_y == 'CYCLIC':
move_verts = []
for f in [f for f in me1.polygons if (f.center).y > 1]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.y -= 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.y -= 1
except: pass
move_verts = []
for f in [f for f in me1.polygons if (f.center).y < 0]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.y += 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.y += 1
except: pass
verts1 = [v.co for v in me1.vertices]
patch_faces = 4**levels
sides = int(sqrt(patch_faces))
sides0 = sides-2
patch_faces0 = int((sides-2)**2)
n_patches = int(len(me0.polygons)/patch_faces)
if len(me0.polygons)%patch_faces != 0:
#ob0.data = old_me0
return "topology_error"
new_verts = []
new_edges = []
new_faces = []
for o in bpy.context.view_layer.objects: o.select_set(False)
new_patch = None
# All vertex group
if bool_vertex_group:
try:
weight = []
for vg in ob0.vertex_groups:
_weight = []
for v in me0.vertices:
try:
_weight.append(vg.weight(v.index))
except:
_weight.append(0)
weight.append(_weight)
except:
bool_vertex_group = False
# Adaptive Z
if scale_mode == 'ADAPTIVE':
if mode == 'BOUNDS': com_area = (bb[0]*bb[1])
else: com_area = 1
mult = 1/com_area*patch_faces
verts_area = []
bm = bmesh.new()
bm.from_mesh(me0)
bm.verts.ensure_lookup_table()
for v in bm.verts:
area = 0
faces = v.link_faces
for f in faces:
area += f.calc_area()
area/=len(faces)
area*=mult
verts_area.append(sqrt(area))
random.seed(rand_seed)
bool_correct = False
_faces = [[[0] for ii in range(sides)] for jj in range(sides)]
_verts = [[[0] for ii in range(sides+1)] for jj in range(sides+1)]
for i in range(n_patches):
poly = me0.polygons[i*patch_faces]
if bool_selection and not poly.select: continue
if bool_material_id and not poly.material_index == material_id: continue
bool_correct = True
new_patch = bpy.data.objects.new("patch", me1.copy())
bpy.context.collection.objects.link(new_patch)
new_patch.select_set(True)
bpy.context.view_layer.objects.active = new_patch
for area in bpy.context.screen.areas:
for space in area.spaces:
try: new_patch.local_view_set(space, True)
except: pass
# Vertex Group
if bool_vertex_group:
for vg in ob0.vertex_groups:
new_patch.vertex_groups.new(name=vg.name)
# find patch faces
faces = _faces.copy()
verts = _verts.copy()
shift1 = sides
shift2 = sides*2-1
shift3 = sides*3-2
for j in range(patch_faces):
if j < patch_faces0:
if levels == 0:
u = j%sides0
v = j//sides0
else:
u = j%sides0+1
v = j//sides0+1
elif j < patch_faces0 + shift1:
u = j-patch_faces0
v = 0
elif j < patch_faces0 + shift2:
u = sides-1
v = j-(patch_faces0 + sides)+1
elif j < patch_faces0 + shift3:
jj = j-(patch_faces0 + shift2)
u = sides-jj-2
v = sides-1
else:
jj = j-(patch_faces0 + shift3)
u = 0
v = sides-jj-2
face = me0.polygons[j+i*patch_faces]
faces[u][v] = face
verts[u][v] = verts0[face.vertices[0]]
if u == sides-1:
verts[sides][v] = verts0[face.vertices[1]]
if v == sides-1:
verts[u][sides] = verts0[face.vertices[3]]
if u == v == sides-1:
verts[sides][sides] = verts0[face.vertices[2]]
# Random rotation
if rotation_mode == 'RANDOM':
rand = random.randint(0, 3)
if rand == 1:
verts = [[verts[k][w] for w in range(sides,-1,-1)] for k in range(sides,-1,-1)]
elif rand == 2:
verts = [[verts[w][k] for w in range(sides,-1,-1)] for k in range(sides+1)]
elif rand == 3:
verts = [[verts[w][k] for w in range(sides+1)] for k in range(sides,-1,-1)]
# UV rotation
elif rotation_mode == 'UV' and ob0.type == 'MESH':
if len(ob0.data.uv_layers) > 0:
uv0 = me0.uv_layers.active.data[faces[0][0].index*4].uv
uv1 = me0.uv_layers.active.data[faces[0][-1].index*4 + 3].uv
uv2 = me0.uv_layers.active.data[faces[-1][-1].index*4 + 2].uv
uv3 = me0.uv_layers.active.data[faces[-1][0].index*4 + 1].uv
v01 = (uv0 + uv1)
v32 = (uv3 + uv2)
v0132 = v32 - v01
v0132.normalize()
v12 = (uv1 + uv2)
v03 = (uv0 + uv3)
v1203 = v03 - v12
v1203.normalize()
vertUV = []
dot1203 = v1203.x
dot0132 = v0132.x
if(abs(dot1203) < abs(dot0132)):
if (dot0132 > 0):
pass
else:
verts = [[verts[k][w] for w in range(sides,-1,-1)] for k in range(sides,-1,-1)]
else:
if(dot1203 < 0):
verts = [[verts[w][k] for w in range(sides,-1,-1)] for k in range(sides+1)]
else:
verts = [[verts[w][k] for w in range(sides+1)] for k in range(sides,-1,-1)]
step = 1/sides
for vert, patch_vert in zip(verts1, new_patch.data.vertices):
# grid coordinates
u = int(vert[0]//step)
v = int(vert[1]//step)
u1 = min(u+1, sides)
v1 = min(v+1, sides)
if mode != 'BOUNDS':
if u > sides-1:
u = sides-1
u1 = sides
if u < 0:
u = 0
u1 = 1
if v > sides-1:
v = sides-1
v1 = sides
if v < 0:
v = 0
v1 = 1
v00 = verts[u][v]
v10 = verts[u1][v]
v01 = verts[u][v1]
v11 = verts[u1][v1]
# factor coordinates
fu = (vert[0]-u*step)/step
fv = (vert[1]-v*step)/step
fw = vert.z
# interpolate Z scaling factor
fvec2d = Vector((fu,fv,0))
if scale_mode == 'ADAPTIVE':
a00 = verts_area[v00.index]
a10 = verts_area[v10.index]
a01 = verts_area[v01.index]
a11 = verts_area[v11.index]
fw*=lerp2(a00,a10,a01,a11,fvec2d)
# build factor vector
fvec = Vector((fu,fv,fw))
# interpolate vertex on patch
patch_vert.co = lerp3(v00, v10, v01, v11, fvec)
# Vertex Group
if bool_vertex_group:
for _weight, vg in zip(weight, new_patch.vertex_groups):
w00 = _weight[v00.index]
w10 = _weight[v10.index]
w01 = _weight[v01.index]
w11 = _weight[v11.index]
wuv = lerp2(w00,w10,w01,w11, fvec2d)
vg.add([patch_vert.index], wuv, "ADD")
if bool_shapekeys:
for sk in ob1.data.shape_keys.key_blocks:
source = sk.data
for sk_v, _v in zip(source, me1.vertices):
if mode == 'BOUNDS':
sk_vert = sk_v.co - min_c # (ob1.matrix_world * v.co) - min_c
sk_vert[0] = (sk_vert[0] / bb[0] if bb[0] != 0 else 0.5)
sk_vert[1] = (sk_vert[1] / bb[1] if bb[1] != 0 else 0.5)
sk_vert[2] = (sk_vert[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
elif mode == 'LOCAL':
sk_vert = sk_v.co#.xyzco
#sk_vert[2] *= zscale
#sk_vert[2] = (sk_vert[2] - min_c[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
elif mode == 'GLOBAL':
#sk_vert = ob1.matrix_world @ sk_v.co
sk_vert = sk_v.co
#sk_vert[2] *= zscale
# grid coordinates
u = int(sk_vert[0]//step)
v = int(sk_vert[1]//step)
u1 = min(u+1, sides)
v1 = min(v+1, sides)
if mode != 'BOUNDS':
if u > sides-1:
u = sides-1
u1 = sides
if u < 0:
u = 0
u1 = 1
if v > sides-1:
v = sides-1
v1 = sides
if v < 0:
v = 0
v1 = 1
v00 = verts[u][v]
v10 = verts[u1][v]
v01 = verts[u][v1]
v11 = verts[u1][v1]
# factor coordinates
fu = (sk_vert[0]-u*step)/step
fv = (sk_vert[1]-v*step)/step
fw = sk_vert.z
if scale_mode == 'ADAPTIVE':
a00 = verts_area[v00.index]
a10 = verts_area[v10.index]
a01 = verts_area[v01.index]
a11 = verts_area[v11.index]
fw*=lerp2(a00,a10,a01,a11,Vector((fu,fv,0)))
fvec = Vector((fu,fv,fw))
sk_co = lerp3(v00, v10, v01, v11, fvec)
new_patch.data.shape_keys.key_blocks[sk.name].data[_v.index].co = sk_co
#if ob0.type == 'MESH': ob0.data = old_me0
if not bool_correct: return 0
bpy.ops.object.join()
if bool_shapekeys:
# set original values and combine Shape Keys and Vertex Groups
for sk, val in zip(_ob1.data.shape_keys.key_blocks, original_key_values):
sk.value = val
new_patch.data.shape_keys.key_blocks[sk.name].value = val
if bool_vertex_group:
for sk in new_patch.data.shape_keys.key_blocks:
for vg in new_patch.vertex_groups:
if sk.name == vg.name:
sk.vertex_group = vg.name
new_name = ob0.name + "_" + ob1.name
new_patch.name = "tessellate_temp"
if bool_multires:
for m in ob0.modifiers:
if m.type == 'MULTIRES' and m.name == multires_name:
m.levels = levels
m.sculpt_levels = sculpt_levels
m.render_levels = render_levels
# restore original modifiers visibility for component object
try:
for m, vis in zip(_ob1.modifiers, mod_visibility):
m.show_viewport = vis
except: pass
bpy.data.objects.remove(before)
bpy.data.objects.remove(ob0)
bpy.data.objects.remove(ob1)
return new_patch
def tessellate_original(_ob0, _ob1, offset, zscale, gen_modifiers, com_modifiers, mode,
scale_mode, rotation_mode, rand_seed, fill_mode,
bool_vertex_group, bool_selection, bool_shapekeys,
bool_material_id, material_id, normals_mode, bounds_x, bounds_y):
if com_modifiers or _ob1.type != 'MESH': bool_shapekeys = False
random.seed(rand_seed)
if bool_shapekeys:
try:
original_key_values = []
for sk in _ob1.data.shape_keys.key_blocks:
original_key_values.append(sk.value)
sk.value = 0
except:
bool_shapekeys = False
ob0 = convert_object_to_mesh(_ob0, gen_modifiers, True)
me0 = ob0.data
ob1 = convert_object_to_mesh(_ob1, com_modifiers, True)
me1 = ob1.data
base_polygons = []
base_face_normals = []
n_faces0 = len(me0.polygons)
# Check if zero faces are selected
if (bool_selection and ob0.type == 'MESH') or bool_material_id:
for p in me0.polygons:
if (bool_selection and ob0.type == 'MESH'):
is_sel = p.select
else: is_sel = True
if bool_material_id:
is_mat = p.material_index == material_id
else: is_mat = True
if is_sel and is_mat:
base_polygons.append(p)
base_face_normals.append(p.normal)
else:
base_polygons = me0.polygons
base_face_normals = [p.normal for p in me0.polygons]
# numpy test: slower
#base_face_normals = np.zeros(n_faces0*3)
#me0.polygons.foreach_get("normal", base_face_normals)
#base_face_normals = base_face_normals.reshape((n_faces0,3))
if len(base_polygons) == 0:
return 0
if mode != 'BOUNDS':
bpy.ops.object.select_all(action='DESELECT')
for o in bpy.context.view_layer.objects: o.select_set(False)
bpy.context.view_layer.objects.active = ob1
ob1.select_set(True)
bpy.context.object.active_shape_key_index = 0
# Bound X
if bounds_x != 'EXTEND':
if mode == 'GLOBAL':
planes_co = ((0,0,0),(1,1,1))
plane_no = (1,0,0)
if mode == 'LOCAL':
planes_co = (ob1.matrix_world @ Vector((0,0,0)), ob1.matrix_world @ Vector((1,0,0)))
plane_no = planes_co[0]-planes_co[1]
bpy.ops.object.mode_set(mode='EDIT')
for co in planes_co:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.bisect(plane_co=co, plane_no=plane_no)
bpy.ops.mesh.mark_seam()
bpy.ops.object.mode_set(mode='OBJECT')
_faces = ob1.data.polygons
if mode == 'GLOBAL':
for f in [f for f in _faces if (ob1.matrix_world @ f.center).x > 1]:
f.select = True
for f in [f for f in _faces if (ob1.matrix_world @ f.center).x < 0]:
f.select = True
else:
for f in [f for f in _faces if f.center.x > 1]:
f.select = True
for f in [f for f in _faces if f.center.x < 0]:
f.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='FACE')
if bounds_x == 'CLIP':
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
if bounds_x == 'CYCLIC':
bpy.ops.mesh.split()
bpy.ops.object.mode_set(mode='OBJECT')
# Bound Y
if bounds_y != 'EXTEND':
if mode == 'GLOBAL':
planes_co = ((0,0,0),(1,1,1))
plane_no = (0,1,0)
if mode == 'LOCAL':
planes_co = (ob1.matrix_world @ Vector((0,0,0)), ob1.matrix_world @ Vector((0,1,0)))
plane_no = planes_co[0]-planes_co[1]
bpy.ops.object.mode_set(mode='EDIT')
for co in planes_co:
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.bisect(plane_co=co, plane_no=plane_no)
bpy.ops.mesh.mark_seam()
bpy.ops.object.mode_set(mode='OBJECT')
_faces = ob1.data.polygons
if mode == 'GLOBAL':
for f in [f for f in _faces if (ob1.matrix_world @ f.center).y > 1]:
f.select = True
for f in [f for f in _faces if (ob1.matrix_world @ f.center).y < 0]:
f.select = True
else:
for f in [f for f in _faces if f.center.y > 1]:
f.select = True
for f in [f for f in _faces if f.center.y < 0]:
f.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='FACE')
if bounds_y == 'CLIP':
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
if bounds_y == 'CYCLIC':
bpy.ops.mesh.split()
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='OBJECT')
#ob1 = new_ob1
me1 = ob1.data
verts0 = me0.vertices # Collect generator vertices
# Component statistics
n_verts1 = len(me1.vertices)
n_edges1 = len(me1.edges)
n_faces1 = len(me1.polygons)
# Create empty lists
new_verts = []
new_edges = []
new_faces = []
new_verts_np = np.array(())
# Component Coordinates
co1 = [0]*n_verts1*3
if mode == 'GLOBAL':
for v in me1.vertices:
v.co = ob1.matrix_world @ v.co
try:
for sk in me1.shape_keys.key_blocks:
sk.data[v.index].co = ob1.matrix_world @ sk.data[v.index].co
except: pass
if mode != 'BOUNDS':
if bounds_x == 'CYCLIC':
move_verts = []
for f in [f for f in me1.polygons if (f.center).x > 1]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.x -= 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.x -= 1
except: pass
move_verts = []
for f in [f for f in me1.polygons if (f.center).x < 0]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.x += 1
try:
_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.x += 1
except: pass
if bounds_y == 'CYCLIC':
move_verts = []
for f in [f for f in me1.polygons if (f.center).y > 1]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.y -= 1
try:
#new_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.y -= 1
except: pass
move_verts = []
for f in [f for f in me1.polygons if (f.center).y < 0]:
for v in f.vertices:
if v not in move_verts: move_verts.append(v)
for v in move_verts:
me1.vertices[v].co.y += 1
try:
#new_ob1.active_shape_key_index = 0
for sk in me1.shape_keys.key_blocks:
sk.data[v].co.y += 1
except: pass
me1.vertices.foreach_get("co", co1)
co1 = np.array(co1)
vx = co1[0::3].reshape((n_verts1,1))
vy = co1[1::3].reshape((n_verts1,1))
vz = co1[2::3].reshape((n_verts1,1))
min_c = Vector((vx.min(), vy.min(), vz.min())) # Min BB Corner
max_c = Vector((vx.max(), vy.max(), vz.max())) # Max BB Corner
bb = max_c - min_c # Bounding Box
# Component Coordinates
if mode == 'BOUNDS':
vx = (vx - min_c[0]) / bb[0] if bb[0] != 0 else 0.5
vy = (vy - min_c[1]) / bb[1] if bb[1] != 0 else 0.5
vz = ((vz - min_c[2]) + (-0.5 + offset * 0.5) * bb[2]) * zscale
else:
vz *= zscale
# Component polygons
fs1 = [[i for i in p.vertices] for p in me1.polygons]
new_faces = fs1[:]
# Component edges
es1 = np.array([[i for i in e.vertices] for e in me1.edges])
#es1 = [[i for i in e.vertices] for e in me1.edges if e.is_loose]
new_edges = es1[:]
# SHAPE KEYS
if bool_shapekeys:
basis = True #com_modifiers
vx_key = []
vy_key = []
vz_key = []
sk_np = []
for sk in ob1.data.shape_keys.key_blocks:
do_shapekeys = True
# set all keys to 0
for _sk in ob1.data.shape_keys.key_blocks: _sk.value = 0
sk.value = 1
if basis:
basis = False
continue
# Apply component modifiers
if com_modifiers:
sk_ob = convert_object_to_mesh(_ob1)
sk_data = sk_ob.data
source = sk_data.vertices
else:
source = sk.data
shapekeys = []
for v in source:
if mode == 'BOUNDS':
vert = v.co - min_c
vert[0] = vert[0] / bb[0]
vert[1] = vert[1] / bb[1]
vert[2] = (vert[2] + (-0.5 + offset * 0.5) * bb[2]) * zscale
elif mode == 'LOCAL':
vert = v.co.xyz
vert[2] *= zscale
#vert[2] = (vert[2] - min_c[2] + (-0.5 + offset * 0.5) * bb[2]) * \
# zscale
elif mode == 'GLOBAL':
vert = v.co.xyz
#vert = ob1.matrix_world @ v.co
vert[2] *= zscale
shapekeys.append(vert)
# Component vertices
key1 = np.array([v for v in shapekeys]).reshape(len(shapekeys), 3, 1)
vx_key.append(key1[:, 0])
vy_key.append(key1[:, 1])
vz_key.append(key1[:, 2])
#sk_np.append([])
# All vertex group
if bool_vertex_group:
try:
weight = []
vertex_groups = ob0.vertex_groups
for vg in vertex_groups:
_weight = []
for v in me0.vertices:
try:
_weight.append(vg.weight(v.index))
except:
_weight.append(0)
weight.append(_weight)
except:
bool_vertex_group = False
# Adaptive Z
if scale_mode == 'ADAPTIVE':
if mode == 'BOUNDS': com_area = (bb[0]*bb[1])
else: com_area = 1
if com_area == 0: mult = 1
else: mult = 1/com_area
verts_area = []
bm = bmesh.new()
bm.from_mesh(me0)
bm.verts.ensure_lookup_table()
for v in bm.verts:
area = 0
faces = v.link_faces
for f in faces:
area += f.calc_area()
try:
area/=len(faces)
area*=mult
verts_area.append(sqrt(area))
except:
verts_area.append(1)
# FAN tessellation mode
if fill_mode == 'FAN':
fan_verts = [v.co.to_tuple() for v in me0.vertices]
fan_polygons = []
fan_select = []
fan_material = []
fan_normals = []
# selected_faces = []
for p in base_polygons:
fan_center = Vector((0, 0, 0))
center_area = 0
for v in p.vertices:
fan_center += me0.vertices[v].co
if scale_mode == 'ADAPTIVE':
center_area += verts_area[v]
fan_center /= len(p.vertices)
center_area /= len(p.vertices)
last_vert = len(fan_verts)
fan_verts.append(fan_center.to_tuple())
#fan_verts.append(fan_center)
if scale_mode == 'ADAPTIVE':
verts_area.append(center_area)
# Vertex Group
if bool_vertex_group:
for w in weight:
center_weight = sum([w[i] for i in p.vertices]) / len(p.vertices)
w.append(center_weight)
for i in range(len(p.vertices)):
fan_polygons.append((p.vertices[i],
p.vertices[(i + 1) % len(p.vertices)],
last_vert, last_vert))
if bool_material_id: fan_material.append(p.material_index)
if bool_selection: fan_select.append(p.select)
if normals_mode == 'FACES':
fan_normals.append(p.normal)
fan_me = bpy.data.meshes.new('Fan.Mesh')
fan_me.from_pydata(tuple(fan_verts), [], tuple(fan_polygons))
me0 = fan_me.copy()
bpy.data.meshes.remove(fan_me)
verts0 = me0.vertices
base_polygons = me0.polygons
if normals_mode == 'FACES': base_face_normals = fan_normals
count = 0 # necessary for UV calculation
# TESSELLATION
j = 0
jj = -1
bool_correct = False
# optimization test
n_faces = len(base_polygons)
_vs0 = [0]*n_faces
_nvs0 = [0]*n_faces
_sz = [0]*n_faces
_w0 = [[0]*n_faces]*len(ob0.vertex_groups)
    np_faces = [np.array(p) for p in fs1]
""" Softmax Cross-Entropy Loss Layer """
import numpy as np
# a small number to prevent dividing by zero, maybe useful for you
EPS = 1e-11
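# e.g. np.log(p + EPS) or x / (denom + EPS) stays finite even when p or denom is 0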
class SoftmaxCrossEntropyLossLayer():
def __init__(self):
self.acc = 0.
self.loss = np.zeros(1, dtype='f')
def forward(self, logit, gt):
"""
Inputs: (minibatch)
- logit: forward results from the last FCLayer, shape(batch_size, 10)
- gt: the ground truth label, shape(batch_size, 10)
"""
############################################################################
# TODO: Put your code here
        # Calculate the average accuracy and loss over the minibatch, and
        # store them in self.acc and self.loss respectively.
        # Only return self.loss; self.acc will be used in solver.py.
self.logit = logit
self.gt = gt
        self.output = np.transpose(np.exp(np.transpose(logit)) / np.sum(np.exp(logit), axis=1))  # softmax
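        # NOTE: the lines below are a hedged sketch of the remaining forward
        # pass (an assumption, not part of the original file); they assume gt
        # is one-hot encoded.
        self.loss = -np.sum(gt * np.log(self.output + EPS)) / logit.shape[0]
        self.acc = np.mean(np.argmax(self.output, axis=1) == np.argmax(gt, axis=1))
        ############################################################################
        return self.loss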
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 29 14:32:15 2019
@author: <NAME>
"""
import tflearn
import tensorflow as tf
import pandas as pd
import numpy as np
"""
X = [
[0, 0],
[0, 1],
[1, 0],
[1, 1]
]
Y = [
[0], # Desired output for inputs 0, 0
[1], # Desired output for inputs 0, 1
[1], # Desired output for inputs 1, 0
[0] # Desired output for inputs 1, 1
]
"""
#==========
df = pd.read_csv('pee.csv')
inp = df.to_numpy()
final_inp = inp[:, 0:4]
output = inp[:, 4]
final_output=[]
for i in output:
if i=="Cancer":
final_output.append([1,0,0,0])
if i=="Diabeties":
final_output.append([0,1,0,0])
if i=="Stomach":
final_output.append([0,0,1,0])
if i=="Heart":
final_output.append([0,0,0,1])
final_output = np.array(final_output)
#=======
weights = tflearn.initializations.uniform(minval = -1, maxval = 1)
tf.reset_default_graph()
# Input layer
net = tflearn.input_data(
shape = [None, 4],
name = 'my_input'
)
# Hidden layers
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
# Output layer
net = tflearn.fully_connected(net, 4,
activation = 'softmax',
name = 'my_output'
)
net = tflearn.regression(net)
model = tflearn.DNN(net)
try:
model.load('xor.tflearn')
except:
model = tflearn.DNN(net)
model.fit(final_inp, final_output, 250)
results = model.predict([[40,40,70,30]])
results_index = np.argmax(results)
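# Hedged follow-up (not in the original script): map the argmax index back to
# the label order used when building final_output above.
disease_labels = ["Cancer", "Diabeties", "Stomach", "Heart"]
print("Predicted:", disease_labels[results_index])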
from __future__ import print_function, division
import numpy as np
from .astroTimeLegacy import premat, daycnv, precess, helio_jd
from .idlMod import idlMod
from PyAstronomy.pyaC import pyaErrors as PE
import six
import six.moves as smo
def baryvel(dje, deq):
"""
Calculate helio- and barycentric velocity.
.. note:: The "JPL" option present in IDL is not provided here.
Parameters
----------
dje : float
Julian ephemeris date
deq : float
Epoch of mean equinox of helio- and barycentric velocity output.
If `deq` is zero, `deq` is assumed to be equal to `dje`.
Returns
-------
dvelh : array
Heliocentric velocity vector [km/s].
dvelb : array
Barycentric velocity vector [km/s].
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
pro baryvel, dje, deq, dvelh, dvelb, JPL = JPL
NAME:
BARYVEL
PURPOSE:
Calculates heliocentric and barycentric velocity components of Earth.
EXPLANATION:
BARYVEL takes into account the Earth-Moon motion, and is useful for
radial velocity work to an accuracy of ~1 m/s.
CALLING SEQUENCE:
BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
INPUTS:
DJE - (scalar) Julian ephemeris date.
DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq=0
then deq is assumed to be equal to dje.
OUTPUTS:
DVELH: (vector(3)) heliocentric velocity component. in km/s
DVELB: (vector(3)) barycentric velocity component. in km/s
The 3-vectors DVELH and DVELB are given in a right-handed coordinate
system with the +X axis toward the Vernal Equinox, and +Z axis
toward the celestial pole.
OPTIONAL KEYWORD SET:
JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
to compute the Earth velocity using the full JPL ephemeris.
The JPL ephemeris FITS file JPLEPH.405 must exist in either the
current directory, or in the directory specified by the
environment variable ASTRO_DATA. Alternatively, the JPL keyword
can be set to the full path and name of the ephemeris file.
A copy of the JPL ephemeris FITS file is available in
http://idlastro.gsfc.nasa.gov/ftp/data/
PROCEDURES CALLED:
Function PREMAT() -- computes precession matrix
JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
NOTES:
Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
Stumpf claimed an accuracy of 42 cm/s for the velocity. A
comparison with the JPL FORTRAN planetary ephemeris program PLEPH
found agreement to within about 65 cm/s between 1986 and 1994
If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
given in the ICRS system; otherwise in the FK4 system.
EXAMPLE:
Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
using both the original Stumpf algorithm and the JPL ephemeris
IDL> jdcnv, 1994, 2, 15, 0, jd ;==> JD = 2449398.5
IDL> baryvel, jd, 2000, vh, vb ;Original algorithm
==> vh = [-17.07243, -22.81121, -9.889315] ;Heliocentric km/s
==> vb = [-17.08083, -22.80471, -9.886582] ;Barycentric km/s
IDL> baryvel, jd, 2000, vh, vb, /jpl ;JPL ephemeris
==> vh = [-17.07236, -22.81126, -9.889419] ;Heliocentric km/s
==> vb = [-17.08083, -22.80484, -9.886409] ;Barycentric km/s
IDL> ra = ten(19,50,46.77)*15/!RADEG ;RA in radians
IDL> dec = ten(08,52,3.5)/!RADEG ;Dec in radians
IDL> v = vb[0]*cos(dec)*cos(ra) + $ ;Project velocity toward star
vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
REVISION HISTORY:
<NAME>, U.C. Berkeley Translated BARVEL.FOR to IDL.
<NAME>, Cleaned up program sent by <NAME> (SfSU) June 1994
Converted to IDL V5.0 <NAME> September 1997
Added /JPL keyword <NAME> July 2001
Documentation update W. Landsman Dec 2005
"""
# Define constants
dc2pi = 2 * np.pi
cc2pi = 2 * np.pi
dc1 = 1.0
dcto = 2415020.0
dcjul = 36525.0 # days in Julian year
dcbes = 0.313
dctrop = 365.24219572 # days in tropical year (...572 insig)
dc1900 = 1900.0
AU = 1.4959787e8
# Constants dcfel(i,k) of fast changing elements.
dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6, 6.2565836e00, 6.2830194572674e02, -2.6180e-6, 4.7199666e00, 8.3997091449254e03, -1.9780e-5, 1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6, 4.6524223e00, 2.1354275911213e01, 5.6797e-6, 4.2620486e00, 7.5025342197656e00, 5.5317e-6, 1.4740694e00, 3.8377331909193e00, 5.6093e-6]
dcfel = np.resize(dcfel, (8, 3))
# constants dceps and ccsel(i,k) of slowly changing elements.
dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8]
ccsel = [1.675104e-2, -4.179579e-5, -1.260516e-7, 2.220221e-1, 2.809917e-2, 1.852532e-5, 1.589963e00, 3.418075e-2, 1.430200e-5, 2.994089e00, 2.590824e-2, 4.155840e-6, 8.155457e-1, 2.486352e-2, 6.836840e-6, 1.735614e00, 1.763719e-2, 6.370440e-6, 1.968564e00, 1.524020e-2, -2.517152e-6, 1.282417e00, 8.703393e-3, 2.289292e-5, 2.280820e00,
1.918010e-2, 4.484520e-6, 4.833473e-2, 1.641773e-4, -4.654200e-7, 5.589232e-2, -3.455092e-4, -7.388560e-7, 4.634443e-2, -2.658234e-5, 7.757000e-8, 8.997041e-3, 6.329728e-6, -1.939256e-9, 2.284178e-2, -9.941590e-5, 6.787400e-8, 4.350267e-2, -6.839749e-5, -2.714956e-7, 1.348204e-2, 1.091504e-5, 6.903760e-7, 3.106570e-2, -1.665665e-4, -1.590188e-7]
ccsel = np.resize(ccsel, (17, 3))
# Constants of the arguments of the short-period perturbations.
dcargs = [5.0974222e0, -7.8604195454652e2, 3.9584962e0, -5.7533848094674e2, 1.6338070e0, -1.1506769618935e3, 2.5487111e0, -3.9302097727326e2, 4.9255514e0, -5.8849265665348e2, 1.3363463e0, -5.5076098609303e2, 1.6072053e0, -5.2237501616674e2, 1.3629480e0, -
1.1790629318198e3, 5.5657014e0, -1.0977134971135e3, 5.0708205e0, -1.5774000881978e2, 3.9318944e0, 5.2963464780000e1, 4.8989497e0, 3.9809289073258e1, 1.3097446e0, 7.7540959633708e1, 3.5147141e0, 7.9618578146517e1, 3.5413158e0, -5.4868336758022e2]
dcargs = np.resize(dcargs, (15, 2))
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = \
[-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7, -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7, 6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7, 1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7, 9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7, 7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7, -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7, -3.228859e-6,
1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7, 3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7, 8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8, -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0, -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0, 3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0, 2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0, -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0]
ccamps = np.resize(ccamps, (15, 5))
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = [1.289600e-6, 5.550147e-1, 2.076942e00, 3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00, 9.793240e-7, 5.508259e00, 1.559103e01]
ccsec = np.resize(ccsec, (4, 3))
# Sidereal rates.
dcsld = 1.990987e-7 # sidereal rate in longitude
ccsgd = 1.990969e-7 # sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = [5.1679830e0, 8.3286911095275e3, 5.4913150e0, -
7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]
dcargm = np.resize(dcargm, (3, 2))
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = [1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7, -2.223581e-2, 5.083103e-8,
1.002548e-2, -2.291823e-8, 1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]
ccampm = np.resize(ccampm, (3, 4))
# ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
ccpamv = [8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
# Values of all elements for the instant(aneous?) dje.
temp = idlMod(np.dot(dcfel, tvec), dc2pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0] # old fortran equivalence
deps = idlMod(np.sum(tvec * dceps), dc2pi)
sorbel = idlMod(np.dot(ccsel, tvec), dc2pi)
e = sorbel[0] # old fortran equivalence
# Secular perturbations in longitude.
dummy = np.cos(2.0)
sn = np.sin(idlMod(np.dot(ccsec[::, 1:3], tvec[0:2]), cc2pi))
# Periodic perturbations of the emb (earth-moon barycenter).
pertl = np.sum(ccsec[::, 0] * sn) + (dt * ccsec3 * sn[2])
pertld = 0.0
pertr = 0.0
pertrd = 0.0
for k in smo.range(15):
a = idlMod((dcargs[k, 0] + dt * dcargs[k, 1]), dc2pi)
cosa = np.cos(a)
        sina = np.sin(a)
'''
Created on Feb 1, 2017
@author: <NAME>
Purpose:
Perform density clustering on gaussian mixture
'''
from fdc import FDC
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import normalized_mutual_info_score as nmi
from fdc import plotting
import pickle
import numpy as np
from matplotlib import pyplot as plt
n_true_center = 15
np.random.seed(0)
print("------> Example with %i true cluster centers <-------"%n_true_center)
X, y = make_blobs(10000, 2, n_true_center) # Generating random gaussian mixture
X = StandardScaler().fit_transform(X) # always normalize your data :)
# set eta=0.0 if you have excellent density profile fit (lots of data say)
model = FDC(eta = 0.01)#, atol=0.0001, rtol=0.0001)
model.fit(X) # performing the clustering
x = np.linspace(-0.5, 0.6, 200)
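# Hedged visual check (not in the original script): scatter the generated blobs
# coloured by their true assignment; everything used here is defined above.
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=2)
plt.title("Gaussian mixture with %i true centers" % n_true_center)
plt.show()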
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
    # Then we compute the shear field analytically from those halos (gamma_t = kappa for an SIS).
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
    # The point is the variance, which is still calculated ok, but I would rather
    # have had something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
gggp.process(catp, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Finally a set (with all patches) using the GGGCrossCorrelation class.
gggc = treecorr.GGGCrossCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
print('CrossCorrelation:')
gggc.process(catp, catp, catp)
for g in gggc._all:
print(g.ntri.ravel())
print(g.gam0.ravel())
print(g.vargam0.ravel())
np.testing.assert_allclose(g.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam0, ggg.vargam0, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam1, ggg.vargam1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam2, ggg.vargam2, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam3, ggg.vargam3, rtol=0.05 * tol_factor)
fc = lambda gggc: np.concatenate([
[np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)]
for g in gggc._all])
print('jackknife:')
cov = gggc.estimate_cov('jackknife', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggc.estimate_cov('sample', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggc.estimate_cov('marked_bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggc.estimate_cov('bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.3*tol_factor)
# Without func, don't check the accuracy, but make sure it returns something of the right shape.
cov = gggc.estimate_cov('jackknife')
assert cov.shape == (48, 48)
@timer
def test_nnn_jk():
# Test jackknife and other covariance estimates for nnn correlations.
if __name__ == '__main__':
# This setup takes about 1200 sec to run.
nhalo = 300
nsource = 2000
npatch = 16
source_factor = 50
rand_factor = 3
tol_factor = 1
elif False:
# This setup takes about 250 sec to run.
nhalo = 200
nsource = 1000
npatch = 16
source_factor = 50
rand_factor = 2
tol_factor = 2
else:
# This setup takes about 44 sec to run.
nhalo = 100
nsource = 500
npatch = 8
source_factor = 30
rand_factor = 1
tol_factor = 3
file_name = 'data/test_nnn_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
rng = np.random.RandomState()
nruns = 1000
all_nnns = []
all_nnnc = []
t0 = time.time()
for run in range(nruns):
t2 = time.time()
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng)
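# Draw source positions with probability proportional to k**3, so the point density is a
# strongly (non-linearly) biased tracer of the underlying k field.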
p = k**3
p /= np.sum(p)
ns = rng.poisson(nsource)
select = rng.choice(range(len(x)), size=ns, replace=False, p=p)
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k))
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rrr.process(rand_cat)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s, _ = ddd.calculateZeta(rrr)
zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd)
print('simple: ',zeta_s.ravel())
print('compensated: ',zeta_c.ravel())
all_nnns.append(zeta_s.ravel())
all_nnnc.append(zeta_c.ravel())
t3 = time.time()
print('time: ',round(t3-t2),round((t3-t0)/60),round((t3-t0)*(nruns/(run+1)-1)/60))
mean_nnns = np.mean(all_nnns, axis=0)
var_nnns = np.var(all_nnns, axis=0)
mean_nnnc = np.mean(all_nnnc, axis=0)
var_nnnc = np.var(all_nnnc, axis=0)
np.savez(file_name, mean_nnns=mean_nnns, var_nnns=var_nnns,
mean_nnnc=mean_nnnc, var_nnnc=var_nnnc)
data = np.load(file_name)
mean_nnns = data['mean_nnns']
var_nnns = data['var_nnns']
mean_nnnc = data['mean_nnnc']
var_nnnc = data['var_nnnc']
print('mean simple = ',mean_nnns)
print('var simple = ',var_nnns)
print('mean compensated = ',mean_nnnc)
print('var compensated = ',var_nnnc)
# Make a random catalog with rand_factor times as many sources, uniformly distributed.
rng = np.random.RandomState(1234)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
t0 = time.time()
rrr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
print('RRR:',rrr.tot)
print(rrr.ntri.ravel())
# Make the data catalog
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng=rng)
print('mean k = ',np.mean(k))
print('min,max = ',np.min(k),np.max(k))
p = k**3
p /= np.sum(p)
select = rng.choice(range(len(x)), size=nsource, replace=False, p=p)
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr)
zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd)
print('DDD:',ddd.tot)
print(ddd.ntri.ravel())
print('simple: ')
print(zeta_s1.ravel())
print(var_zeta_s1.ravel())
print('DRR:',drr.tot)
print(drr.ntri.ravel())
print('RDD:',rdd.tot)
print(rdd.ntri.ravel())
print('compensated: ')
print(zeta_c1.ravel())
print(var_zeta_c1.ravel())
# Make the patches with a large random catalog to make sure the patches are uniform area.
big_rx = rng.uniform(0,1000, 100*nsource)
big_ry = rng.uniform(0,1000, 100*nsource)
big_catp = treecorr.Catalog(x=big_rx, y=big_ry, npatch=npatch, rng=rng)
patch_centers = big_catp.patch_centers
# Do the same thing with patches on D, but not yet on R.
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rddp = dddp.copy()
drrp = dddp.copy()
catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers)
print('Patch\tNtot')
for p in catp.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
print('with patches on D:')
dddp.process(catp)
rddp.process(rand_cat, catp)
drrp.process(catp, rand_cat)
# Need to run calculateZeta to get patch-based covariance
with assert_raises(RuntimeError):
dddp.estimate_cov('jackknife')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print('simple: ')
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
# Check the _calculate_xi_from_pairs function. Using all pairs, should get total xi.
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
# None of these are very good without the random catalog also using patches.
# I think this is basically just that the approximations used for estimating the area_frac
# to figure out the appropriate altered RRR counts aren't accurate enough when the total
# counts are as low as this.  I think (hope) it should be semi-ok when N is much larger,
# but this is probably saying that for 3pt, using patches for R is even more important than
# for 2pt.
# Of course, it could also be that this is telling me I still have a bug somewhere that I
# haven't managed to find...
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.3*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor)
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp)
print('compensated: ')
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=3.8*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
# Now with the random also using patches
# These are a lot better than the above tests. But still not nearly as good as we were able
# to get in 2pt. I'm pretty sure this is just due to the fact that we need to have much
# smaller catalogs to make it feasible to run this in a reasonable amount of time. I don't
# think this is a sign of any bug in the code.
print('with patched random catalog:')
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=patch_centers)
rrrp = rrr.copy()
rrrp.process(rand_catp)
drrp.process(catp, rand_catp)
rddp.process(rand_catp, catp)
print('simple: ')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.7*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.0*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('compensated: ')
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp)
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
# I haven't implemented calculateZeta for the NNNCrossCorrelation class, because I'm not
# actually sure what the right thing to do here is for calculating a single zeta vector.
# Do we do a different one for each of the 6 permutations? Or one overall one?
# So rather than just do something, I'll wait until someone has a coherent use case where
# they want this and can explain exactly what the right thing to compute is.
# So to just exercise the machinery with NNNCrossCorrelation, I'm using a func parameter
# to compute something equivalent to the simple zeta calculation.
dddc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rrrc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
print('CrossCorrelation:')
dddc.process(catp, catp, catp)
rrrc.process(rand_catp, rand_catp, rand_catp)
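# cc_zeta sums the 6 permutation sub-correlations of each NNNCrossCorrelation into a single
# effective NNNCorrelation (via _sum) for both data and randoms, then applies the simple
# zeta estimator.  This is just to exercise the machinery, not an official API.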
def cc_zeta(corrs):
d, r = corrs
d1 = d.n1n2n3.copy()
d1._sum(d._all)
r1 = r.n1n2n3.copy()
r1._sum(r._all)
zeta, _ = d1.calculateZeta(r1)
return zeta.ravel()
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
# Repeat with a 1-2 cross-correlation
print('CrossCorrelation 1-2:')
dddc.process(catp, catp)
rrrc.process(rand_catp, rand_catp)
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
if __name__ == '__main__':
nhalo = 100
ngal = 500
npatch = 16
rand_factor = 5
else:
nhalo = 100
ngal = 30
npatch = 16
rand_factor = 2
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(ngal, nhalo, rng)
rx = rng.uniform(0,1000, rand_factor*ngal)
ry = rng.uniform(0,1000, rand_factor*ngal)
rand_cat_nopatch = treecorr.Catalog(x=rx, y=ry)
rand_cat = treecorr.Catalog(x=rx, y=ry, npatch=npatch, rng=rng)
patch_centers = rand_cat.patch_centers
cat_nopatch = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch_centers=patch_centers)
print('cat patches = ',np.unique(cat.patch))
print('len = ',cat.nobj, cat.ntot)
assert cat.nobj == ngal
print('Patch\tNtot')
for p in cat.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
# Start with KKK, since relatively simple.
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat_nopatch)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
kkk.process(cat)
np.testing.assert_allclose(kkk.zeta, kkk1.zeta)
kkk_zeta_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat1)
print('zeta = ',kkk1.zeta.ravel())
kkk_zeta_list.append(kkk1.zeta.ravel())
kkk_zeta_list = np.array(kkk_zeta_list)
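# Direct jackknife covariance from the npatch leave-one-out vectors:
# C = (npatch-1)/npatch * sum_i (v_i - vbar)(v_i - vbar)^T,
# which is np.cov(..., bias=True) scaled by (npatch-1).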
cov = np.cov(kkk_zeta_list.T, bias=True) * (len(kkk_zeta_list)-1)
varzeta = np.diagonal(cov)
print('KKK: treecorr jackknife varzeta = ',kkk.varzeta.ravel())
print('KKK: direct jackknife varzeta = ',varzeta)
np.testing.assert_allclose(kkk.varzeta.ravel(), varzeta)
# Now GGG
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat_nopatch)
ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
ggg.process(cat)
np.testing.assert_allclose(ggg.gam0, ggg1.gam0)
np.testing.assert_allclose(ggg.gam1, ggg1.gam1)
np.testing.assert_allclose(ggg.gam2, ggg1.gam2)
np.testing.assert_allclose(ggg.gam3, ggg1.gam3)
ggg_gam0_list = []
ggg_gam1_list = []
ggg_gam2_list = []
ggg_gam3_list = []
ggg_map3_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat1)
ggg_gam0_list.append(ggg1.gam0.ravel())
ggg_gam1_list.append(ggg1.gam1.ravel())
ggg_gam2_list.append(ggg1.gam2.ravel())
ggg_gam3_list.append(ggg1.gam3.ravel())
ggg_map3_list.append(ggg1.calculateMap3()[0])
ggg_gam0_list = np.array(ggg_gam0_list)
vargam0 = np.diagonal(np.cov(ggg_gam0_list.T, bias=True)) * (len(ggg_gam0_list)-1)
print('GGG: treecorr jackknife vargam0 = ',ggg.vargam0.ravel())
print('GGG: direct jackknife vargam0 = ',vargam0)
np.testing.assert_allclose(ggg.vargam0.ravel(), vargam0)
ggg_gam1_list = np.array(ggg_gam1_list)
vargam1 = np.diagonal(np.cov(ggg_gam1_list.T, bias=True)) * (len(ggg_gam1_list)-1)
print('GGG: treecorr jackknife vargam1 = ',ggg.vargam1.ravel())
print('GGG: direct jackknife vargam1 = ',vargam1)
np.testing.assert_allclose(ggg.vargam1.ravel(), vargam1)
ggg_gam2_list = np.array(ggg_gam2_list)
vargam2 = np.diagonal(np.cov(ggg_gam2_list.T, bias=True)) * (len(ggg_gam2_list)-1)
print('GGG: treecorr jackknife vargam2 = ',ggg.vargam2.ravel())
print('GGG: direct jackknife vargam2 = ',vargam2)
np.testing.assert_allclose(ggg.vargam2.ravel(), vargam2)
ggg_gam3_list = np.array(ggg_gam3_list)
vargam3 = np.diagonal(np.cov(ggg_gam3_list.T, bias=True)) * (len(ggg_gam3_list)-1)
print('GGG: treecorr jackknife vargam3 = ',ggg.vargam3.ravel())
print('GGG: direct jackknife vargam3 = ',vargam3)
np.testing.assert_allclose(ggg.vargam3.ravel(), vargam3)
ggg_map3_list = np.array(ggg_map3_list)
varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1)
covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife',
lambda corrs: corrs[0].calculateMap3()[0])
print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3))
print('GGG: direct jackknife varmap3 = ',varmap3)
np.testing.assert_allclose(np.diagonal(covmap3), varmap3)
# Finally NNN, where we need to use randoms. Both simple and compensated.
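# The simple estimator uses only the DDD and RRR counts; the compensated one also uses the
# DRR and RDD cross terms, roughly the 3pt analogue of the Landy-Szalay estimator in 2pt.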
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
drr = ddd.copy()
rdd = ddd.copy()
rrr = ddd.copy()
ddd.process(cat)
drr.process(cat, rand_cat)
rdd.process(rand_cat, cat)
rrr.process(rand_cat)
zeta1_list = []
zeta2_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
rand_cat1 = treecorr.Catalog(x=rand_cat.x[rand_cat.patch != i],
y=rand_cat.y[rand_cat.patch != i])
ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
drr1 = ddd1.copy()
rdd1 = ddd1.copy()
rrr1 = ddd1.copy()
ddd1.process(cat1)
drr1.process(cat1, rand_cat1)
rdd1.process(rand_cat1, cat1)
rrr1.process(rand_cat1)
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel())
zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel())
print('simple')
zeta1_list = np.array(zeta1_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr)
varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta1)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta1)
print('compensated')
print(zeta2_list)
zeta2_list = np.array(zeta2_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)
varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta2)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta2)
# Can't do patch calculation with different numbers of patches in rrr, drr, rdd.
rand_cat3 = treecorr.Catalog(x=rx, y=ry, npatch=3)
cat3 = treecorr.Catalog(x=x, y=y, patch_centers=rand_cat3.patch_centers)
rrr3 = rrr.copy()
drr3 = drr.copy()
rdd3 = rdd.copy()
rrr3.process(rand_cat3)
drr3.process(cat3, rand_cat3)
rdd3.process(rand_cat3, cat3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3, drr, rdd)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd)
@timer
def test_finalize_false():
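# Processing the pieces with initialize=False/finalize=False should accumulate exactly the
# same triangles as processing the full concatenated catalog in a single call.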
nsource = 80
nhalo = 100
npatch = 16
# Make three independent data sets
rng = np.random.RandomState(8675309)
x_1, y_1, g1_1, g2_1, k_1 = generate_shear_field(nsource, nhalo, rng)
x_2, y_2, g1_2, g2_2, k_2 = generate_shear_field(nsource, nhalo, rng)
x_3, y_3, g1_3, g2_3, k_3 = generate_shear_field(nsource, nhalo, rng)
# Make a single catalog with all three together
cat = treecorr.Catalog(x=np.concatenate([x_1, x_2, x_3]),
y=np.concatenate([y_1, y_2, y_3]),
g1=np.concatenate([g1_1, g1_2, g1_3]),
g2=np.concatenate([g2_1, g2_2, g2_3]),
k=np.concatenate([k_1, k_2, k_3]),
npatch=npatch)
# Now the three separately, using the same patch centers
cat1 = treecorr.Catalog(x=x_1, y=y_1, g1=g1_1, g2=g2_1, k=k_1, patch_centers=cat.patch_centers)
cat2 = treecorr.Catalog(x=x_2, y=y_2, g1=g1_2, g2=g2_2, k=k_2, patch_centers=cat.patch_centers)
cat3 = treecorr.Catalog(x=x_3, y=y_3, g1=g1_3, g2=g2_3, k=k_3, patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat1.patch, cat.patch[0:nsource])
np.testing.assert_array_equal(cat2.patch, cat.patch[nsource:2*nsource])
np.testing.assert_array_equal(cat3.patch, cat.patch[2*nsource:3*nsource])
# KKK auto
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk1.process(cat)
kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk2.process(cat1, initialize=True, finalize=False)
kkk2.process(cat2, initialize=False, finalize=False)
kkk2.process(cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat2, cat1, initialize=False, finalize=False)
kkk2.process(cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat1, initialize=False, finalize=False)
kkk2.process(cat3, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross12
cat23 = treecorr.Catalog(x=np.concatenate([x_2, x_3]),
y=np.concatenate([y_2, y_3]),
g1=np.concatenate([g1_2, g1_3]),
g2=np.concatenate([g2_2, g2_3]),
k=np.concatenate([k_2, k_3]),
patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource])
kkk1.process(cat1, cat23)
kkk2.process(cat1, cat2, initialize=True, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross12
kkkc1 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc1.process(cat1, cat23)
kkkc2 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc2.process(cat1, cat2, initialize=True, finalize=False)
kkkc2.process(cat1, cat3, initialize=False, finalize=False)
kkkc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross
kkk1.process(cat, cat2, cat3)
kkk2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkk2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross
kkkc1.process(cat, cat2, cat3)
kkkc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkkc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkkc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# GGG auto
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg1.process(cat)
ggg2 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg2.process(cat1, initialize=True, finalize=False)
ggg2.process(cat2, initialize=False, finalize=False)
ggg2.process(cat3, initialize=False, finalize=False)
ggg2.process(cat1, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat3, initialize=False, finalize=False)
ggg2.process(cat2, cat1, initialize=False, finalize=False)
ggg2.process(cat2, cat3, initialize=False, finalize=False)
ggg2.process(cat3, cat1, initialize=False, finalize=False)
ggg2.process(cat3, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGG cross12
ggg1.process(cat1, cat23)
ggg2.process(cat1, cat2, initialize=True, finalize=False)
ggg2.process(cat1, cat3, initialize=False, finalize=False)
ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGGCross cross12
gggc1 = treecorr.GGGCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
gggc1.process(cat1, cat23)
gggc2 = treecorr.GGGCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
gggc2.process(cat1, cat2, initialize=True, finalize=False)
gggc2.process(cat1, cat3, initialize=False, finalize=False)
gggc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1']:
ggg1 = getattr(gggc1, perm)
ggg2 = getattr(gggc2, perm)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGG cross
ggg1.process(cat, cat2, cat3)
ggg2.process(cat1, cat2, cat3, initialize=True, finalize=False)
ggg2.process(cat2, cat2, cat3, initialize=False, finalize=False)
ggg2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGGCross cross
gggc1.process(cat, cat2, cat3)
gggc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
gggc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
gggc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1']:
ggg1 = getattr(gggc1, perm)
ggg2 = getattr(gggc2, perm)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# NNN auto
nnn1 = treecorr.NNNCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
nnn1.process(cat)
nnn2 = treecorr.NNNCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
nnn2.process(cat1, initialize=True, finalize=False)
nnn2.process(cat2, initialize=False, finalize=False)
nnn2.process(cat3, initialize=False, finalize=False)
nnn2.process(cat1, cat2, initialize=False, finalize=False)
nnn2.process(cat1, cat3, initialize=False, finalize=False)
nnn2.process(cat2, cat1, initialize=False, finalize=False)
nnn2.process(cat2, cat3, initialize=False, finalize=False)
nnn2.process(cat3, cat1, initialize=False, finalize=False)
nnn2.process(cat3, cat2, initialize=False, finalize=False)
nnn2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(nnn1.ntri, nnn2.ntri)
np.testing.assert_allclose(nnn1.weight, nnn2.weight)
np.testing.assert_allclose(nnn1.meand1, nnn2.meand1)
np.testing.assert_allclose(nnn1.meand2, nnn2.meand2)
np.testing.assert_allclose(nnn1.meand3, nnn2.meand3)
# NNN cross12
nnn1.process(cat1, cat23)
nnn2.process(cat1, cat2, initialize=True, finalize=False)
nnn2.process(cat1, cat3, initialize=False, finalize=False)
nnn2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(nnn1.ntri, nnn2.ntri)
np.testing.assert_allclose(nnn1.weight, nnn2.weight)
np.testing.assert_allclose(nnn1.meand1, nnn2.meand1)
np.testing.assert_allclose(nnn1.meand2, nnn2.meand2)
np.testing.assert_allclose(nnn1.meand3, nnn2.meand3)
# NNNCross cross12
nnnc1 = treecorr.NNNCrossCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
nnnc1.process(cat1, cat23)
nnnc2 = treecorr.NNNCrossCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
nnnc2.process(cat1, cat2, initialize=True, finalize=False)
nnnc2.process(cat1, cat3, initialize=False, finalize=False)
nnnc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
nnn1 = getattr(nnnc1, perm)
nnn2 = getattr(nnnc2, perm)
np.testing.assert_allclose(nnn1.ntri, nnn2.ntri)
np.testing.assert_allclose(nnn1.weight, nnn2.weight)
np.testing.assert_allclose(nnn1.meand1, nnn2.meand1)
np.testing.assert_allclose(nnn1.meand2, nnn2.meand2)
np.testing.assert_allclose(nnn1.meand3, nnn2.meand3)
# NNN cross
nnn1.process(cat, cat2, cat3)
nnn2.process(cat1, cat2, cat3, initialize=True, finalize=False)
nnn2.process(cat2, cat2, cat3, initialize=False, finalize=False)
nnn2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(nnn1.ntri, nnn2.ntri)
np.testing.assert_allclose(nnn1.weight, nnn2.weight)
np.testing.assert_allclose(nnn1.meand1, nnn2.meand1)
np.testing.assert_allclose(nnn1.meand2, nnn2.meand2)
np.testing.assert_allclose(nnn1.meand3, nnn2.meand3)
# NNNCross cross
nnnc1.process(cat, cat2, cat3)
nnnc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
nnnc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
nnnc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']:
nnn1 = getattr(nnnc1, perm)
nnn2 = getattr(nnnc2, perm)
np.testing.assert_allclose(nnn1.ntri, nnn2.ntri)
np.testing.assert_allclose(nnn1.weight, nnn2.weight)
np.testing.assert_allclose(nnn1.meand1, nnn2.meand1)
np.testing.assert_allclose(nnn1.meand2, nnn2.meand2)
np.testing.assert_allclose(nnn1.meand3, nnn2.meand3)
@timer
def test_lowmem():
# Test using patches to keep the memory usage lower.
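# With save_patch_dir set, each patch is written to disk and can be reloaded on demand, so
# low_mem=True should only need to hold a few patches in memory at any one time.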
if __name__ == '__main__':
nsource = 10000
nhalo = 100
npatch = 4
himem = 7.e5
lomem = 8.e4
else:
nsource = 1000
nhalo = 100
npatch = 4
himem = 1.3e5
lomem = 8.e4
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(nsource, nhalo, rng)
file_name = os.path.join('output','test_lowmem_3pt.fits')
orig_cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, npatch=npatch)
patch_centers = orig_cat.patch_centers
orig_cat.write(file_name)
del orig_cat
try:
import guppy
hp = guppy.hpy()
hp.setrelheap()
except Exception:
hp = None
full_cat = treecorr.Catalog(file_name,
x_col='x', y_col='y', g1_col='g1', g2_col='g2', k_col='k',
patch_centers=patch_centers)
kkk = treecorr.KKKCorrelation(nbins=1, min_sep=280., max_sep=300.,
min_u=0.95, max_u=1.0, nubins=1,
min_v=0., max_v=0.05, nvbins=1)
t0 = time.time()
s0 = hp.heap().size if hp else 0
kkk.process(full_cat)
t1 = time.time()
s1 = hp.heap().size if hp else 2*himem
print('regular: ',s1, t1-t0, s1-s0)
assert s1-s0 > himem # This version uses a lot of memory.
ntri1 = kkk.ntri
zeta1 = kkk.zeta
full_cat.unload()
kkk.clear()
# Remake with save_patch_dir.
clear_save('test_lowmem_3pt_%03d.fits', npatch)
save_cat = treecorr.Catalog(file_name,
x_col='x', y_col='y', g1_col='g1', g2_col='g2', k_col='k',
patch_centers=patch_centers, save_patch_dir='output')
t0 = time.time()
s0 = hp.heap().size if hp else 0
kkk.process(save_cat, low_mem=True, finalize=False)
t1 = time.time()
s1 = hp.heap().size if hp else 0
print('lomem 1: ',s1, t1-t0, s1-s0)
assert s1-s0 < lomem # This version uses a lot less memory
ntri2 = kkk.ntri
zeta2 = kkk.zeta
print('ntri1 = ',ntri1)
print('zeta1 = ',zeta1)
np.testing.assert_array_equal(ntri2, ntri1)
"""Plots classifier ambiguity against compactness.
<NAME> <<EMAIL>>
Research School of Astronomy and Astrophysics
The Australian National University
2017
"""
import logging
import astropy.io.ascii
import astropy.io.fits
import astropy.visualization
import astropy.visualization.wcsaxes
import astropy.wcs
import matplotlib.pyplot as plt
import numpy
import scipy.special
from scipy.spatial import KDTree
import examples_all
import examples_incorrect
import pipeline
def get_predictions(swire_tree, swire_coords, swire_names, swire_test_sets, atlas_coords, predictor_name, radius=1 / 60):
import pdb
predictions_ = pipeline.unserialise_predictions(pipeline.WORKING_DIR + predictor_name + '_predictions', [0, 1, 2, 3], ['RGZ & Norris'])
for predictions in predictions_:
nearby = swire_tree.query_ball_point(atlas_coords, radius) # all-SWIRE indices
nearby_bool = numpy.zeros((swire_test_sets.shape[0],), dtype=bool)
nearby_bool[nearby] = True
set_ = swire_test_sets[:, pipeline.SET_NAMES['RGZ'], predictions.quadrant] # all-SWIRE indices, mask
if not nearby_bool[set_].any():
# Wrong quadrant.
continue
# pdb.set_trace()
nearby_predictions = predictions.probabilities[nearby_bool[set_]] # quadrant + dataset indices
nearby_coords = swire_coords[nearby_bool & set_]
nearby_names = swire_names[nearby_bool & set_]
try:
assert len(nearby_coords) == len(nearby_predictions)
except AssertionError:
pdb.set_trace()
raise
return list(zip(nearby_names, nearby_predictions))
def main(classifier='CNN', labeller='Norris'):
# Load SWIRE stuff.
swire_names, swire_coords, swire_features = pipeline.generate_swire_features(overwrite=False)
swire_labels = pipeline.generate_swire_labels(swire_names, swire_coords, overwrite=False)
_, (_, swire_test_sets) = pipeline.generate_data_sets(swire_coords, overwrite=False)
swire_tree = KDTree(swire_coords)
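# KD-tree over the SWIRE positions for fast fixed-radius neighbour lookups around each ATLAS component.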
swire_name_to_index = {n: i for i, n in enumerate(swire_names)}
atlas_names = []
atlas_compactnesses = []
atlas_coords = []
atlas_norris_swire = []
table = astropy.io.ascii.read(pipeline.TABLE_PATH)
for row in table:
name = row['Component Name (Franzen)']
if not name:
continue
if not row['Component Zooniverse ID (RGZ)']:
continue
compactness = pipeline.compactness(row)
atlas_names.append(name)
atlas_compactnesses.append(compactness)
atlas_coords.append((row['Component RA (Franzen)'], row['Component DEC (Franzen)']))
atlas_norris_swire.append(row['Source SWIRE (Norris)'])
ys = []
xs_entropy = []
xs_margin = []
no_groundtruth = []
correct = []
for name, compactness, coords, swire in zip(atlas_names, atlas_compactnesses, atlas_coords, atlas_norris_swire):
predictor_name = '{}_{}'.format(classifier, labeller)
predictions = get_predictions(swire_tree, swire_coords, numpy.array(swire_names), swire_test_sets, coords, predictor_name)
if not predictions:
print('No predictions for {}'.format(name))
continue
chosen_swire = predictions[numpy.argmax([p for _, p in predictions])][0]
predictions = [p for _, p in predictions]
predictions_softmax = [numpy.exp(p) / sum(numpy.exp(p) for p in predictions) for p in predictions]
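# Two ambiguity measures: the entropy of the softmax-normalised scores, and one minus the
# margin between the two highest raw scores.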
if len(predictions_softmax) == 1:
entropy_ambiguity = 0
margin_ambiguity = 0
else:
entropy_ambiguity = -sum(p * numpy.log(p) for p in predictions_softmax if p)
predictions.sort()
margin_ambiguity = 1 - (predictions[-1] - predictions[-2])
ys.append(compactness)
xs_entropy.append(entropy_ambiguity)
xs_margin.append(margin_ambiguity)
no_groundtruth.append(not swire or not swire.startswith('SWIRE'))
correct.append(swire == chosen_swire)
ys = numpy.array(ys)
xs_margin = numpy.array(xs_margin)
import cyscs as scs
import pytest
import cyscs.examples as ex
import numpy as np
def test_cache():
data, cone = ex.many_iter_ecp()
work = scs.Workspace(data, cone)
sol = work.solve()
def test_settings():
expected_keys = set(['normalize', 'use_indirect', 'scale', 'verbose',
'eps', 'cg_rate', 'max_iters', 'alpha', 'rho_x'])
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert 'warm_start' not in work.settings
assert set(work.settings.keys()) == expected_keys
work.solve()
assert 'warm_start' not in work.settings
assert set(work.settings.keys()) == expected_keys
def test_fixed_settings():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
expected_fixed = set(['normalize', 'use_indirect', 'scale', 'rho_x'])
assert set(work.fixed.keys()) == expected_fixed
with pytest.raises(Exception):
work.settings['rho_x'] = 3.14159
# should raise an exception because we changed a fixed setting
work.solve()
def test_data_keys():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert 'A' not in work.data
assert set(work.data.keys()) == set(['b','c'])
def test_A():
data, cone, true_x = ex.simple_socp()
work = scs.Workspace(data, cone)
# corrupt the original data (but SCS should have made an internal copy, so this is ok)
data['A'][:] = 3
sol = work.solve(eps=1e-6)
assert np.allclose(sol['x'], true_x)
# now, solving on corrupted data shouldn't work
work = scs.Workspace(data, cone)
sol = work.solve(eps=1e-6)
assert not np.allclose(sol['x'], true_x)
def test_settings_change():
data, cone, _ = ex.simple_socp()
work = scs.Workspace(data, cone)
assert work.settings['eps'] == 1e-3
work.solve(eps=1e-6)
assert work.settings['eps'] == 1e-6
def test_warm_start():
# if warm-starting, the input warm-start vector should not be modified
data, cone, true_x = ex.simple_socp()
work = scs.Workspace(data, cone)
sol = work.solve(eps=1e-2)
assert np.linalg.norm(sol['x'] - true_x) > 1e-3
sol2 = work.solve(warm_start=sol, eps=1e-9)
assert np.linalg.norm(sol2['x'] - true_x) <= 1e-9
assert np.linalg.norm(sol['x'] - sol2['x'])
import gym
from gym.utils import seeding
import numpy as np
from gym import spaces
from os import path
import sys
def return_classes():
unwanted = ["EzPickle", "circleShape", "contactListener",
"edgeShape", "fixtureDef", "polygonShape",
"revoluteJointDef", "FrictionDetector", "ContactDetector", "Car"]
current_module = sys.modules[__name__]
class_names = []
for key in dir(current_module):
if key in unwanted: continue
if isinstance(getattr(current_module, key), type):
class_names.append(key)
return class_names
DEBUG = False
class Pendulum(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, reward=None):
self.max_speed = 8
self.max_torque = 2.
self.dt = .05
self.g = 10.0
self.m = 1.
self.l = 1.
self.reward_fn = reward
self.viewer = None
high = np.array([1., 1., self.max_speed], dtype=np.float32)
self.action_space = spaces.Box(
low=-self.max_torque,
high=self.max_torque, shape=(1,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=-high,
high=high,
dtype=np.float32
)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
th, thdot = self.state # th := theta
g = self.g
m = self.m
l = self.l
dt = self.dt
u = np.clip(u, -self.max_torque, self.max_torque)[0]
self.last_u = u # for rendering
costs = self.reward_fn.calculateReward(th, thdot, u)
newthdot = thdot + (-3 * g / (2 * l) * np.sin(th + np.pi) + 3. / (m * l ** 2) * u) * dt
newth = th + newthdot * dt
newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
if DEBUG: print("Inside step 1")
self.state = np.array([newth, newthdot])
return self._get_obs(), -costs, False, {}
def reset(self):
high = np.array([np.pi, 1])
        if DEBUG: print("Reset 1")
        self.state = self.np_random.uniform(low=-high, high=high)
        if DEBUG: print("Reset 2")
        self.last_u = None
        if DEBUG: print(self.state)
return self._get_obs()
def _get_obs(self):
theta, thetadot = self.state
        if DEBUG: print("Get obs 1")
return np.array([np.cos(theta), np.sin(theta), thetadot])
def render(self, mode='human'):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1, .2)
rod.set_color(.8, .3, .3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
if DEBUG: print("Inside render 1")
try:
fname = path.join(path.dirname(__file__), "../assets/clockwise.png")
if DEBUG: print("Inside render 2")
self.img = rendering.Image(fname, 1., 1.)
except Exception as e:
print(e)
if DEBUG: print("Inside render 3")
self.imgtrans = rendering.Transform()
if DEBUG: print("Inside render 4")
self.img.add_attr(self.imgtrans)
if DEBUG: print("Inside render 5")
self.viewer.add_onetime(self.img)
if DEBUG: print("Inside render 6")
self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
if DEBUG: print("Inside render 7")
if self.last_u:
if DEBUG: print("Inside render 8")
self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)
if DEBUG: print("Inside render 9")
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
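# Illustrative sketch of a reward object compatible with the Pendulum class
# above: step() calls reward_fn.calculateReward(th, thdot, u) and negates the
# result, so this class returns a cost. The quadratic cost mirrors the standard
# gym pendulum objective; the class and helper names below are made up here.
class PendulumCostSketch:
    def calculateReward(self, th, thdot, u):
        # penalize deviation from upright, angular speed, and control effort
        return self._angle_normalize(th) ** 2 + 0.1 * thdot ** 2 + 0.001 * (u ** 2)

    @staticmethod
    def _angle_normalize(x):
        # wrap an angle into [-pi, pi)
        return ((x + np.pi) % (2 * np.pi)) - np.pi
# usage (illustrative): env = Pendulum(reward=PendulumCostSketch())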
"""
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, goal_velocity=0, reward=None):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.goal_velocity = goal_velocity
self.force = 0.001
self.gravity = 0.0025
self.reward_fn = reward
self.low = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(
self.low, self.high, dtype=np.float32
)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position == self.min_position and velocity < 0):
velocity = 0
done = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = self.reward_fn.calculateReward()
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs) * .45 + .55
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width / world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(
rendering.Transform(translation=(carwidth / 4, clearance))
)
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(
rendering.Transform(translation=(-carwidth / 4, clearance))
)
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position-self.min_position) * scale
flagy1 = self._height(self.goal_position) * scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon(
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]
)
flag.set_color(.8, .8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(
(pos-self.min_position) * scale, self._height(pos) * scale
)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def get_keys_to_action(self):
# Control with left and right arrow keys.
return {(): 1, (276,): 0, (275,): 2, (275, 276): 1}
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
class Continuous_MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, goal_velocity=0, reward=None):
self.min_action = -1.0
self.max_action = 1.0
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.45 # was 0.5 in gym, 0.45 in <NAME>'s version
self.goal_velocity = goal_velocity
self.power = 0.0015
self.reward_fn = reward
self.low_state = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high_state = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.viewer = None
self.action_space = spaces.Box(
low=self.min_action,
high=self.max_action,
shape=(1,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state,
high=self.high_state,
dtype=np.float32
)
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
position = self.state[0]
velocity = self.state[1]
force = min(max(action[0], self.min_action), self.max_action)
velocity += force * self.power - 0.0025 * math.cos(3 * position)
if (velocity > self.max_speed): velocity = self.max_speed
if (velocity < -self.max_speed): velocity = -self.max_speed
position += velocity
if (position > self.max_position): position = self.max_position
if (position < self.min_position): position = self.min_position
if (position == self.min_position and velocity < 0): velocity = 0
# Convert a possible numpy bool to a Python bool.
done = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = self.reward_fn.calculateReward(done, action[0])
# reward = 0
# if done:
# reward = 100.0
# reward -= math.pow(action[0], 2) * 0.1
self.state = np.array([position, velocity])
return self.state, reward, done, {}
def reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs)*.45+.55
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width/world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs-self.min_position)*scale, ys*scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(
rendering.Transform(translation=(carwidth / 4, clearance))
)
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(
rendering.Transform(translation=(-carwidth / 4, clearance))
)
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position-self.min_position)*scale
flagy1 = self._height(self.goal_position)*scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon(
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]
)
flag.set_color(.8, .8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(
(pos-self.min_position) * scale, self._height(pos) * scale
)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along
a frictionless track. The pendulum starts upright, and the goal is to
prevent it from falling over by increasing and reducing the cart's
velocity.
Source:
This environment corresponds to the version of the cart-pole problem
described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -0.418 rad (-24 deg) 0.418 rad (24 deg)
3 Pole Angular Velocity -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
        Note: The amount by which the velocity is reduced or increased is not
        fixed; it depends on the angle the pole is pointing. This is because
        the center of gravity of the pole increases the amount of energy needed
        to move the cart underneath it.
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees.
Cart Position is more than 2.4 (center of the cart reaches the edge of
the display).
Episode length is greater than 200.
Solved Requirements:
Considered solved when the average return is greater than or equal to
195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self, reward=None):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5 # actually half the pole's length
self.polemass_length = (self.masspole * self.length)
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
self.reward_fn = reward
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array([self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max],
dtype=np.float32)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
err_msg = "%r (%s) invalid" % (action, type(action))
assert self.action_space.contains(action), err_msg
x, x_dot, theta, theta_dot = self.state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (force + self.polemass_length * theta_dot ** 2 * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (self.length * (4.0 / 3.0 - self.masspole * costheta ** 2 / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x, x_dot, theta, theta_dot)
done = bool(
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
# if not done:
# reward = 1.0
# elif self.steps_beyond_done is None:
# # Pole just fell!
# self.steps_beyond_done = 0
# reward = 1.0
# else:
# if self.steps_beyond_done == 0:
# logger.warn(
# "You are calling 'step()' even though this "
# "environment has already returned done = True. You "
# "should always call 'reset()' once you receive 'done = "
# "True' -- any further steps are undefined behavior."
# )
# self.steps_beyond_done += 1
# reward = 0.0
reward = self.reward_fn.calculateReward(done, self, logger)
return np.array(self.state), reward, done, {}
def reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold * 2
scale = screen_width/world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
pole.set_color(.8, .6, .4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5, .5, .8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0, carty), (screen_width, carty))
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None:
return None
# Edit the pole polygon vertex
pole = self._pole_geom
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole.v = [(l, b), (l, t), (r, t), (r, b)]
x = self.state
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
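# Illustrative sketch (not part of the original module): a reward object
# matching the calculateReward(done, env, logger) call in CartPoleEnv.step. It
# reproduces the commented-out default reward logic above via
# env.steps_beyond_done, followed by a short random rollout showing how these
# reward-injected environments can be driven.
class CartPoleRewardSketch:
    def calculateReward(self, done, env, logger):
        if not done:
            return 1.0
        if env.steps_beyond_done is None:
            # pole just fell
            env.steps_beyond_done = 0
            return 1.0
        if env.steps_beyond_done == 0:
            logger.warn("step() called after done=True; call reset() first.")
        env.steps_beyond_done += 1
        return 0.0


def _cartpole_rollout_sketch(num_steps=200):
    env = CartPoleEnv(reward=CartPoleRewardSketch())
    env.reset()
    total = 0.0
    for _ in range(num_steps):
        _, reward, done, _ = env.step(env.action_space.sample())
        total += reward
        if done:
            env.reset()
    env.close()
    return total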
"""classic Acrobot task"""
import numpy as np
from numpy import sin, cos, pi
from gym import core, spaces
from gym.utils import seeding
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "BSD 3-Clause"
__author__ = "<NAME> <<EMAIL>>"
# SOURCE:
# https://github.com/rlpy/rlpy/blob/master/rlpy/Domains/Acrobot.py
class AcrobotEnv(core.Env):
"""
Acrobot is a 2-link pendulum with only the second joint actuated.
Initially, both links point downwards. The goal is to swing the
end-effector at a height at least the length of one link above the base.
Both links can swing freely and can pass by each other, i.e., they don't
collide when they have the same angle.
**STATE:**
The state consists of the sin() and cos() of the two rotational joint
angles and the joint angular velocities :
[cos(theta1) sin(theta1) cos(theta2) sin(theta2) thetaDot1 thetaDot2].
For the first link, an angle of 0 corresponds to the link pointing downwards.
The angle of the second link is relative to the angle of the first link.
An angle of 0 corresponds to having the same angle between the two links.
A state of [1, 0, 1, 0, ..., ...] means that both links point downwards.
**ACTIONS:**
The action is either applying +1, 0 or -1 torque on the joint between
the two pendulum links.
.. note::
The dynamics equations were missing some terms in the NIPS paper which
are present in the book. R. Sutton confirmed in personal correspondence
that the experimental results shown in the paper and the book were
generated with the equations shown in the book.
However, there is the option to run the domain with the paper equations
by setting book_or_nips = 'nips'
**REFERENCE:**
.. seealso::
R. Sutton: Generalization in Reinforcement Learning:
Successful Examples Using Sparse Coarse Coding (NIPS 1996)
.. seealso::
R. Sutton and <NAME>:
Reinforcement learning: An introduction.
Cambridge: MIT press, 1998.
.. warning::
This version of the domain uses the Runge-Kutta method for integrating
the system dynamics and is more realistic, but also considerably harder
than the original version which employs Euler integration,
see the AcrobotLegacy class.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 15
}
dt = .2
LINK_LENGTH_1 = 1. # [m]
LINK_LENGTH_2 = 1. # [m]
LINK_MASS_1 = 1. #: [kg] mass of link 1
LINK_MASS_2 = 1. #: [kg] mass of link 2
LINK_COM_POS_1 = 0.5 #: [m] position of the center of mass of link 1
LINK_COM_POS_2 = 0.5 #: [m] position of the center of mass of link 2
LINK_MOI = 1. #: moments of inertia for both links
MAX_VEL_1 = 4 * pi
MAX_VEL_2 = 9 * pi
AVAIL_TORQUE = [-1., 0., +1]
torque_noise_max = 0.
#: use dynamics equations from the nips paper or the book
book_or_nips = "book"
action_arrow = None
domain_fig = None
actions_num = 3
def __init__(self, reward=None):
self.viewer = None
high = np.array([1.0, 1.0, 1.0, 1.0, self.MAX_VEL_1, self.MAX_VEL_2], dtype=np.float32)
low = -high
self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
self.action_space = spaces.Discrete(3)
self.state = None
self.reward_fn = reward
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.state = self.np_random.uniform(low=-0.1, high=0.1, size=(4,))
return self._get_ob()
def step(self, a):
s = self.state
torque = self.AVAIL_TORQUE[a]
# Add noise to the force action
if self.torque_noise_max > 0:
torque += self.np_random.uniform(-self.torque_noise_max, self.torque_noise_max)
# Now, augment the state with our force action so it can be passed to
# _dsdt
s_augmented = np.append(s, torque)
ns = rk4(self._dsdt, s_augmented, [0, self.dt])
# only care about final timestep of integration returned by integrator
ns = ns[-1]
ns = ns[:4] # omit action
# ODEINT IS TOO SLOW!
# ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])
# self.s_continuous = ns_continuous[-1] # We only care about the state
# at the ''final timestep'', self.dt
ns[0] = wrap(ns[0], -pi, pi)
ns[1] = wrap(ns[1], -pi, pi)
ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)
ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = ns
terminal = self._terminal()
# reward = -1. if not terminal else 0.
reward = self.reward_fn.calculateReward(terminal)
return (self._get_ob(), reward, terminal, {})
def _get_ob(self):
s = self.state
return np.array([cos(s[0]), sin(s[0]), cos(s[1]), sin(s[1]), s[2], s[3]])
def _terminal(self):
s = self.state
return bool(-cos(s[0]) - cos(s[1] + s[0]) > 1.)
def _dsdt(self, s_augmented, t):
m1 = self.LINK_MASS_1
m2 = self.LINK_MASS_2
l1 = self.LINK_LENGTH_1
lc1 = self.LINK_COM_POS_1
lc2 = self.LINK_COM_POS_2
I1 = self.LINK_MOI
I2 = self.LINK_MOI
g = 9.8
a = s_augmented[-1]
s = s_augmented[:-1]
theta1 = s[0]
theta2 = s[1]
dtheta1 = s[2]
dtheta2 = s[3]
d1 = m1 * lc1 ** 2 + m2 * \
(l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(theta2)) + I1 + I2
d2 = m2 * (lc2 ** 2 + l1 * lc2 * cos(theta2)) + I2
phi2 = m2 * lc2 * g * cos(theta1 + theta2 - pi / 2.)
phi1 = - m2 * l1 * lc2 * dtheta2 ** 2 * sin(theta2) \
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * sin(theta2) \
+ (m1 * lc1 + m2 * l1) * g * cos(theta1 - pi / 2) + phi2
if self.book_or_nips == "nips":
# the following line is consistent with the description in the
# paper
ddtheta2 = (a + d2 / d1 * phi1 - phi2) / \
(m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
else:
# the following line is consistent with the java implementation and the
# book
ddtheta2 = (a + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1 ** 2 * sin(theta2) - phi2) \
/ (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
ddtheta1 = -(d2 * ddtheta2 + phi1) / d1
return (dtheta1, dtheta2, ddtheta1, ddtheta2, 0.)
def render(self, mode='human'):
from gym.envs.classic_control import rendering
s = self.state
if self.viewer is None:
self.viewer = rendering.Viewer(500,500)
bound = self.LINK_LENGTH_1 + self.LINK_LENGTH_2 + 0.2 # 2.2 for default
self.viewer.set_bounds(-bound,bound,-bound,bound)
if s is None: return None
p1 = [-self.LINK_LENGTH_1 *
cos(s[0]), self.LINK_LENGTH_1 * sin(s[0])]
p2 = [p1[0] - self.LINK_LENGTH_2 * cos(s[0] + s[1]),
p1[1] + self.LINK_LENGTH_2 * sin(s[0] + s[1])]
        xys = np.array([[0,0], p1, p2])
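# Illustrative sketch of the module-level helpers that AcrobotEnv.step relies
# on (wrap, bound, rk4); these approximate the standard gym/RLPy
# implementations and are reproduced here only as a reference sketch.
def wrap(x, m, M):
    """Wrap x into the interval [m, M] by adding or subtracting its width."""
    diff = M - m
    while x > M:
        x = x - diff
    while x < m:
        x = x + diff
    return x


def bound(x, m, M=None):
    """Clip x to [m, M]; if M is None, m is treated as the pair (lo, hi)."""
    if M is None:
        m, M = m
    return min(max(x, m), M)


def rk4(derivs, y0, t):
    """Integrate an ODE with 4th-order Runge-Kutta over the time grid t."""
    yout = np.zeros((len(t), len(y0)))
    yout[0] = y0
    for i in range(len(t) - 1):
        this_t = t[i]
        dt = t[i + 1] - this_t
        dt2 = dt / 2.0
        y0 = yout[i]
        k1 = np.asarray(derivs(y0, this_t))
        k2 = np.asarray(derivs(y0 + dt2 * k1, this_t + dt2))
        k3 = np.asarray(derivs(y0 + dt2 * k2, this_t + dt2))
        k4 = np.asarray(derivs(y0 + dt * k3, this_t + dt))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout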
# -*- coding: utf-8 -*-
"""
Created on Tue 26 March 18:44:45 2020
@author: Mnemosyne
Plots of the vocal learning model results
"""
import os
import time
import glob
import pickle
import numpy as np
import matplotlib
import librosa
from matplotlib import rcParams, cm, colors
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import Axes3D
import scipy.io.wavfile as wav
csfont = {'fontname':'Times New Roman'}
from songbird_data_analysis import Song_functions
def magnitude(v):
"""
:param v = (x,y,z): 3D cartesian coordinates - vector
:return m: magnitude (Euclidian norm in this case)
"""
m = np.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
return m
def polar_coord(v):
"""
:param v = (x,y,z): 3D cartesian coordinates - vector
:return r,phi, theta: polar coordinates
"""
r = np.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
phi = np.arctan(v[1]/v[0])
theta = np.arctan(np.sqrt(v[0]**2 + v[1]**2)/v[2])
return r, phi, theta
def arctan_coord(v):
"""
:param v: 3D cartesian coordinates - vector
    :return x_new, y_new: 2D vector with x_new = arctan(v0/v1) and y_new = arctan(v0/v2)
"""
x_new = np.arctan(v[0]/v[1])
y_new = np.arctan(v[0]/v[2])
return x_new, y_new
def arctan_distance(v,w):
"""
:param v, w: vectors of the same size
    :return: "angular" distance component by component - vector
"""
d = np.zeros((np.size(v),))
for i in range(0, np.size(v)):
d[i] = np.arctan(v[i] - w[i])
return d
def create_sphere(cx,cy,cz, r, resolution=360):
'''
create sphere with center (cx, cy, cz) and radius r
'''
phi = np.linspace(0, 2*np.pi, 2*resolution)
theta = np.linspace(0, np.pi, resolution)
theta, phi = np.meshgrid(theta, phi)
r_xy = r*np.sin(theta)
x = cx + np.cos(phi) * r_xy
y = cy + np.sin(phi) * r_xy
z = cz + r * np.cos(theta)
return np.stack([x,y,z])
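# Illustrative usage sketch (not called by the analysis functions below):
# render the array returned by create_sphere with the 3D toolkit imported
# above; the function and file names here are made up for the example.
def _plot_sphere_sketch(radius=1.0, outfile='sphere_sketch.png'):
    sphere = create_sphere(0.0, 0.0, 0.0, radius, resolution=60)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(sphere[0], sphere[1], sphere[2], alpha=0.3)
    fig.savefig(outfile)
    plt.close(fig)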
def plot_auditory_activation(args):
"""
Plot the results of the different auditory activation functions (results from the test function)
"""
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for sim_counter in range(0, args.N_sim):
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
softmax_sum_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_sum_expl_' + str(sim_counter) + '.npy')
softmax_mean_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_mean_expl_' + str(sim_counter) + '.npy')
raw_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_raw_score_expl_' + str(sim_counter) + '.npy')
raw_mean_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_mean_expl_' + str(sim_counter) + '.npy')
mean_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_mean_norm_expl_' + str(sim_counter) + '.npy')
logistic_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_logistic_expl_' + str(sim_counter) + '.npy')
tanh_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_tanh_expl_' + str(sim_counter) + '.npy')
minmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_minmax_expl_' + str(sim_counter) + '.npy')
sign_minmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_sign_minmax_expl_' + str(sim_counter) + '.npy')
sign_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_sign_expl_' + str(sim_counter) + '.npy')
square_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_square_expl_' + str(sim_counter) + '.npy')
arctg_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_arctg_expl_' + str(sim_counter) + '.npy')
scaling_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_scaling_expl' + str(sim_counter) + '.npy', allow_pickle=True)
scaling_softmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_scaling_softmax_expl' + str(sim_counter) + '.npy', allow_pickle=True)
softmax_MAX_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_MAX_expl' + str(sim_counter) + '.npy', allow_pickle=True)
max_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_max_expl' + str(sim_counter) + '.npy', allow_pickle=True)
max_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_max_norm_expl' + str(sim_counter) + '.npy', allow_pickle=True)
p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_p95_expl' + str(sim_counter) + '.npy', allow_pickle=True)
for i in range(0, np.shape(raw_score_expl)[0]):
for j in range(0, len(classes)):
if p95_expl[i,j] > 1:
p95_expl[i,j] = 1
# Time vector
x_time = np.linspace(0, np.shape(raw_score_expl)[0], np.shape(raw_score_expl)[0])
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_score_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('MinMax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_raw_score_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(p95_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-0.1, 1)
ax[i, j].set_ylim(0, 1500)
ax[i, j].set_xlabel('p95 score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_p95_expl_pw' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(max_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Max score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_max_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(max_norm_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Max norm score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_max_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(scaling_softmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Scaling softmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_scaling_softmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_MAX_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Softmax MAX score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_softmax_MAX_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(scaling_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-0.1, 1)
ax[i, j].set_ylim(0, 1500)
ax[i, j].set_xlabel('Scaling score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_scaling_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(arctg_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Arctg score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_arctg_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(square_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Square root score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_square_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sign_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Sign score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_sign_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sign_minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i,j].set_ylim(0,800)
ax[i, j].set_xlabel('Sign minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_sign_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(logistic_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Logistic score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_logistic_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(tanh_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Tanh score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_tanh_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_mean_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_raw_mean_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(mean_norm_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_mean_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_sum_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Soft-max', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_softmax_sum_expl' + str(
sim_counter) + '.' + args.format)
plt.close('all')
for b in range(0, np.size(args.beta)):
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_mean_expl[b][:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[
cl] + '_softmax_mean_expl_beta_' + str(args.beta[b]) + '_' + str(
sim_counter) + '.' + args.format)
print('Done')
def plot_sensory(args):
"""
    Plots of the results obtained from the learning model (VLM function).
"""
# Colors
color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
p95_mean = np.zeros((len(args.learning_rate), args.n_points + 1, len(classes)))
for lr in range(0, len(args.learning_rate)):
print(args.learning_rate[lr])
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
p95_all_sim = []
for sim_counter in range(0, args.N_sim):
p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_sim_' + str(sim_counter) + '.npy')
p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_expl_' + str(sim_counter) + '.npy')
# Focus on 200 time steps
p95_focus = p95[0:200, :]
# Remove focus (every N points up to 200 points) - CHECK PLOT
p95_begin = p95[0:200, :]
p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
p95_jump[0:14, :] = p95_begin[0::15, :]
p95_jump[14::, :] = p95[200::, :]
# All sim vector
p95_all_sim.append(p95_jump)
# Time vector
x_time = np.linspace(0, args.MAX_trial, np.shape(p95_jump)[0])
x_time_expl = np.linspace(0, np.shape(p95_expl)[0], np.shape(p95_expl)[0])
x_time_focus = np.linspace(0, np.shape(p95_focus)[0], np.shape(p95_focus)[0])
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_focus, p95_focus[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, np.shape(p95_focus)[0])
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_FOCUS_sim' + str(
sim_counter) + '.' + args.format)
W_p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_W_p95_sim_' + str(sim_counter) + '.npy')[0:args.MAX_trial, :, :]
# Plot the evolution of the synaptic weights over trials
if np.size(args.T_names) == len(classes):
fig, ax = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
for k in range(0, args.wavegan_latent_dim):
ax[i, j].plot(x_time_expl, W_p95[:, k, 4 * i + j], color[k])
ax[i, j].set_ylabel('Weights', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i,j].set_ylim(-1,1)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + 'Synaptic_weights_evolution_p95' + str(sim_counter) + '.' + args.format)
# Plot activation of the exploration
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, p95_expl[:, 4 * i + j], 'b')
#ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_expl' + str(
sim_counter) + '.' + args.format)
# Plot activation during learning
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial-1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_sim' + str(
sim_counter) + '.' + args.format)
# [TODO] add comment here when I try this option
if args.example == True:
if sim_counter == 1:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
for lr in range(0, len(args.learning_rate)):
ax.plot(x_time, p95_all_sim[sim_counter][:, 14], 'b')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_xlim(0, args.MAX_trial)
ax.set_xlabel('Time (in number of time steps)', fontsize=15)
ax.set_ylabel('Activation', fontsize=15)
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_R' + '.' + args.format)
plt.close('all')
# Average over multiple simulations
p95_mean_sim = np.mean(p95_all_sim, axis=0)
p95_mean[lr, :, :] = p95_mean_sim
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for sim_counter in range(0, args.N_sim):
for i in range(0, 4):
for j in range(0, 4):
#ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], c=color[sim_counter], alpha=.7)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_sim_ALL' + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for sim_counter in range(0, args.N_sim):
for i in range(0, 4):
for j in range(0, 4):
#ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
ax[i, j].plot(x_time, p95_mean_sim[:, 4 * i + j], c=color[sim_counter], alpha=.7)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_MEAN' + '.' + args.format)
# Comparison between different learning rates
cfr_lr = ['10e-1', '10e-2']
fig, ax = plt.subplots(4, 4, figsize=(12, 7))
for lr in range(0, len(args.learning_rate)):
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, p95_mean[lr,:, 4 * i + j], c=color[lr], alpha=.7, label=cfr_lr[lr])
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[0, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, 0].set_ylabel('Average A', fontsize=8)
ax[0, 0].legend(fontsize=5)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + '_p95_MEAN_all' + '.' + args.format)
np.save(args.data_dir + '/' + 'p95_MEAN_lr_' + str(args.wavegan_latent_dim) + '.npy' ,p95_mean)
plt.close('all')
print('Done')
def cfr_dim13(p95_MEAN, colors, ld, args):
"""
:param p95_MEAN: list of the arrays containing the data (one per latent space condition, two values each - one per learning rate condition)
    :return: figure with the comparison (one per learning rate condition)
"""
x_time = np.linspace(0, args.MAX_trial, 201)
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for lr in range(0, len(args.learning_rate)):
fig, ax = plt.subplots(4, 4, figsize=(12, 7))
for i in range(0, 4):
for j in range(0, 4):
for l in range(0, len(p95_MEAN)):
ax[i, j].plot(x_time, p95_MEAN[l][lr,:, 4 * i + j], c=colors[l], alpha=.7, label=str(ld[l]))
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[0, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, 0].set_ylabel('Average A', fontsize=8)
ax[0, 0].legend(fontsize=5)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + '_p95_MEAN_lr_' + str(args.learning_rate[lr]) + '.' + args.format)
plt.close('all')
print('Done')
def plot_sensory_test(args):
# Colors
color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for sim_counter in range(0, args.N_sim):
cfr_class_A_all = []
cfr_class_A_expl_all = []
cfr_class_raw_all = []
cfr_class_expl_all = []
conv = []
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
cfr_class_A = []
cfr_class_A_expl = []
cfr_class_raw = []
cfr_class_expl = []
mean_spectrogram_env = []
T = []
for lr in range(0, len(args.learning_rate)):
print(args.learning_rate[lr])
sensory_gen = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_sim_' + str(sim_counter) + '.npy')
sensory_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_' + str(sim_counter) + '.npy')
sensory_expl_all = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_all_' + str(sim_counter) + '.npy')
raw_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_sim_' + str(sim_counter) + '.npy')
max_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_sim_' + str(sim_counter) + '.npy')
max_norm = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_sim_' + str(sim_counter) + '.npy')
max_scaling = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_sim_' + str(sim_counter) + '.npy')
raw_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_expl_' + str(sim_counter) + '.npy')
max_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_score_expl_' + str(sim_counter) + '.npy')
max_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_expl_' + str(sim_counter) + '.npy')
max_scaling_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_expl_' + str(sim_counter) + '.npy')
#cfr_class_A.append(sensory_gen)
#cfr_class_A_expl.append(sensory_expl)
cfr_class_raw.append(raw_score)
cfr_class_expl.append(raw_score_expl)
# Time vector
x_time = np.linspace(0, args.MAX_trial, np.shape(raw_score)[0])
                x_time_expl = np.linspace(0, np.shape(raw_score_expl)[0], np.shape(raw_score_expl)[0])
import numpy as np
from astropy.io import ascii
import types
import scipy.interpolate as spi
import astropy.io.fits as pf
import matplotlib.pyplot as plt
import pdb
def read_table(name, delimiter='\t', comment='#', fmt=None, ds=1):
'''
Reads ascii tables and converts them cleanly into numpy arrays.
'''
if fmt is not None:
datanp = ascii.read(name, guess=False, delimiter=delimiter, \
comment=comment, header_start=0, \
data_start=ds, format=fmt)
else:
datanp = ascii.read(name, guess=False, delimiter=delimiter, \
comment=comment, header_start=0, \
data_start=ds)
return datanp
def angsep(ra1deg, dec1deg, ra2deg, dec2deg, angle=False):
'''
Determine separation in degrees between celestial objects.
ra1deg, dec1deg - primary point(s); can be arrays
ra2deg, dec2deg - secondary point(s); can be arrays or scalars
angle - if True, it will calculate angle E of N.
All arguments are in decimal degrees.
Returns distance in arcdegrees, angles between -180 and 180 degrees.
'''
ra1rad = ra1deg * np.pi / 180
dec1rad = dec1deg * np.pi / 180
ra2rad = ra2deg * np.pi / 180
dec2rad = dec2deg * np.pi / 180
# calculate scalar product for determination of angular separation
x = np.cos(ra1rad) * np.cos(dec1rad) * np.cos(ra2rad) * np.cos(dec2rad)
y = np.sin(ra1rad) * np.cos(dec1rad) * np.sin(ra2rad) * np.cos(dec2rad)
z = np.sin(dec1rad) * np.sin(dec2rad)
rad = np.arccos(x + y + z) # Sometimes gives warnings when coords match
# use Pythagoras approximation if rad < 1 arcsec
sep = np.choose(rad<0.000004848, (np.sqrt((np.cos(dec1rad) * (ra1rad-ra2rad))**2 \
+ (dec1rad - dec2rad)**2), rad))
# Angular separation
sep = sep * 180 / np.pi
if angle:
deltaDEC = dec1rad - dec2rad
deltaRA = ra1rad - ra2rad
angledeg = np.arctan2(-deltaRA, -deltaDEC) * 180 / np.pi
return sep, angledeg
else:
return sep
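# Illustrative usage sketch: separation (in arcdegrees) between two points one
# arcminute apart in declination, and array inputs with the position-angle
# option; the coordinate values below are made up for the example.
def _angsep_example():
    sep = angsep(150.0, 2.0, 150.0, 2.0 + 1.0 / 60.0)
    seps, angles = angsep(np.array([150.0, 150.1]), np.array([2.0, 2.0]),
                          150.0, 2.0, angle=True)
    return sep, seps, angles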
def deg2sex(ras, decs):
''' Converts RA and DEC from decimal to sexagesimal. Returns string.
Arguments:
ras - string(s) of RA in degrees
decs - string(s) of DEC in degrees
'''
from astropy import units as u
from astropy.coordinates import SkyCoord
if type(ras) == list or type(ras) == np.ndarray:
new_coords = []
for irow in range(0,len(ras)):
c = SkyCoord(float(ras[irow]), float(decs[irow]), \
frame='icrs', unit='deg')
new_coords.append(c.to_string('hmsdms'))
else:
c = SkyCoord(float(ras), float(decs), frame='icrs', unit='deg')
new_coords = c.to_string('hmsdms')
return new_coords
def sex2deg(ras, decs):
''' Converts RA and DEC from sexagesimal to decimal.
Arguments:
ras - string(s) of RA in sexagesimal degrees (HH MM SS.SS)
decs - string(s) of DEC in sexagesimal degrees (+-DD MM SS.SS)
'''
if type(ras) == list or type(ras) == np.ndarray:
new_ras = []
new_decs = []
for irow in range(0,len(ras)):
parts_ra = ras[irow].rsplit(' ')
if len(parts_ra) == 1:
parts_ra = ras[irow].rsplit(':')
parts_dec = decs[irow].rsplit(' ')
if len(parts_dec) == 1:
                parts_dec = decs[irow].rsplit(':')
ra_deg = float(parts_ra[0]) * 15. + float(parts_ra[1]) / 4. + float(parts_ra[2]) / 240.
dec_deg = float(parts_dec[0]) + float(parts_dec[1]) / 60. + float(parts_dec[2]) / 3600.
new_ras.append(ra_deg)
new_decs.append(dec_deg)
new_ras = np.array(new_ras)
new_decs = np.array(new_decs)
return new_ras, new_decs
else:
parts_ra = ras.rsplit(' ')
if len(parts_ra) == 1:
parts_ra = ras.rsplit(':')
parts_dec = decs.rsplit(' ')
if len(parts_dec) == 1:
parts_dec = decs.rsplit(':')
ra_deg = float(parts_ra[0]) * 15. + float(parts_ra[1]) / 4. + float(parts_ra[2]) / 240.
dec_deg = float(parts_dec[0]) + float(parts_dec[1]) / 60. + float(parts_dec[2]) / 3600.
return ra_deg, dec_deg
def matchsorted(ra, dec, ra1, dec1, tol, angle=False, closest=True):
''' Find closest ra,dec within tol to a target in an ra-sorted list of ra,dec.
Arguments:
ra - Right Ascension decimal degrees (numpy sorted in ascending order)
dec - Declination decimal degrees (numpy array)
ra1 - RA to match (scalar, decimal degrees)
dec1 - Dec to match (scalar, decimal degrees)
tol - Matching tolerance in arcseconds.
angle - Boolean, whether to return angle formed by matched sources.
closest - Boolean, whether to return only the closest match.
Returns:
ibest - index of the (best) match(es) within tol; -1 if no match within tol
sep - separation (defaults to tol if no match within tol)
angle - angle (defaults to 0 if no match within tol)
'''
tol = tol / 3600.
if isinstance(tol, float):
# Case for one tolerance radius for all objects
i1 = np.searchsorted(ra, ra1 - tol) - 5
i2 = np.searchsorted(ra, ra1 + tol) + 5
else:
# Case for one tolerance radius for each object
i1 = np.searchsorted(ra + tol, ra1) - 5
i2 = np.searchsorted(ra - tol, ra1) + 5
if i1 < 0:
i1 = 0
if angle:
sep, ang = angsep(ra[i1:i2], dec[i1:i2], ra1, dec1, angle=angle)
else:
sep = angsep(ra[i1:i2], dec[i1:i2], ra1, dec1, angle=angle)
if isinstance(tol, float):
imatches = np.where(sep < tol)[0]
else:
imatches = np.where(sep < tol[i1:i2])[0]
if len(imatches) == 0:
if angle:
return [-1], [tol * 3600.], [0]
else:
return [-1], [tol * 3600.]
ibest = np.argmin(sep[imatches])
#indices = np.argsort(sep)
#if sep[indices[0]] > tol:
# if angle:
# return -1, tol * 3600., 0
# else:
# return -1, tol * 3600.
#ibest = indices[0] + i1
#imult = indices[np.where(sep[indices] < tol)[0]] + i1
#imult = np.where(sep < tol)[0]
if angle:
if closest:
return [imatches[ibest] + i1], [sep[imatches][ibest] * 3600.], \
[ang[imatches[ibest]]]
else:
return imatches + i1, sep[imatches] * 3600., ang[imatches]
else:
if closest:
return [imatches[ibest] + i1], [sep[imatches][ibest] * 3600.]
else:
return imatches + i1, sep[imatches] * 3600.
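# Illustrative usage sketch: match a single target against a small RA-sorted
# catalogue with a 2 arcsecond tolerance; the coordinates below are made up.
def _matchsorted_example():
    ra = np.array([10.0000, 10.0005, 10.2000])   # must be sorted in ascending RA
    dec = np.array([-5.0000, -5.0001, -5.1000])
    ibest, sep = matchsorted(ra, dec, 10.0004, -5.0001, tol=2.0)
    return ibest, sep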
def smooth(x,window_len=11,window='hanning'):
"""
smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
    (with the window size) at both ends so that transient parts are minimized
    in the beginning and end of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
    t = np.linspace(-2, 2, 50)
    x = np.sin(t) + np.random.randn(len(t)) * 0.1
    y = smooth(x)
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len - 1:0:-1],x,x[-1:-window_len:-1]]
if window == 'flat': #moving average
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len) # look up the window function by name instead of using eval
y = np.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len / 2 - 1):-int(window_len / 2)]
def mean_comb(spectra, weights=None, mask=None, robust=None, forcesimple=False, extremes=False, renormalize=False):
'''
(by <NAME>)
Combine spectra using a (weighted) mean. The output is a python list with mask wavelength in position 0, mean flux in position 1, and variance in position 2. If flux uncertainties are given, then mean is a weighted mean, and variance is the "variance of the mean" (|sigma| :sub:`mean` :sup:`2`). If no flux uncertainties are given, then mean is a straight mean (<x>), and variance is the square of the standard error of the mean (|sigma| :sup:`2`/n). If no mask is given, the wavelength array of the first spectrum will be used as mask.
This function mimics IDL mc_meancomb (by <NAME>), with some restrictions.
*spectra*
Python list of spectra, where each spectrum is an array having wavelength in position 0, flux in position 1, and optional uncertainties in position 2.
*weights*
List of weights corresponding to each spectrum (must add up to one). If None, all spectra are weighted equally. Note: explicit weights currently only work when exactly two spectra are given.
*mask*
Array of wavelengths to be used as mask for all spectra. If none, then the wavelength array of the first spectrum is used as mask.
*robust*
Float, the sigma threshold to throw bad flux data points out. If none given, then all flux data points will be used.
*forcesimple*
Boolean, whether to calculate a straight mean and variance even if weights are available.
*extremes*
Boolean, whether to include the min and max flux values at each masked pixel.
*renormalize*
Boolean, whether to re-normalize the spectra against the calculated combined spectrum, in which case the re-normalized spectra will also be returned in a list, with masked values.
'''
# Check inputs
try:
spectra[0]
except TypeError:
print('Spectra invalid.')
return
if mask is not None:
try:
mask[0]
except TypeError:
print('Mask invalid.')
return
if robust is not None:
try:
float(robust)
except TypeError:
print('Robust invalid.')
return
# 1. Generate mask using the first spectrum given
if mask is None:
# Use x-axis (i.e. wl) values of first spectrum as mask for all others
wl_mask = spectra[0][0]
else:
wl_mask = mask
numPoints = len(wl_mask)
numSpec = len(spectra)
# 2. Check if uncertainties were given
uncsGiven = True
if forcesimple:
uncsGiven = False
for spec in spectra:
if uncsGiven:
try:
uncs = spec[2]
except IndexError:
uncsGiven = False
continue
nanIdx = np.where(np.isfinite(uncs))
if len(nanIdx[0]) == 0:
uncsGiven = False
# 3D-array that will hold interpolated spectra
# (it omits wavelength dimension, since all spectra have the same one)
if uncsGiven:
dims = 2
else:
dims = 1
ip_spectra = np.zeros((numPoints, dims, numSpec)) * np.nan
# 3. Interpolate spectra using mask
for spIdx, spec in enumerate(spectra):
wl = spec[0]
fluxRaw= spec[1]
if uncsGiven:
unc = spec[2]
# Eliminate outliers if requested
if robust is not None:
flux = clean_outliers(fluxRaw, robust)
else:
flux = fluxRaw
if spIdx == 0:
# No need to interpolate first spectrum
flux_new = flux
if uncsGiven:
unc_new = unc
else:
ip_func_flux = spi.interp1d(wl, flux, bounds_error=False)
flux_new = ip_func_flux(wl_mask.tolist())
if uncsGiven:
ip_func_unc = spi.interp1d(wl, unc, bounds_error=False)
unc_new = ip_func_unc(wl_mask.tolist())
ip_spectra[:,0,spIdx] = flux_new
if uncsGiven:
ip_spectra[:,1,spIdx] = unc_new
# 4. Calculate mean and variance of flux values
if weights is None:
wgts = np.ones(len(spectra))
else:
wgts = weights
if uncsGiven:
mvarraw = 1. / np.nansum(1. / (wgts * ip_spectra[:,1,:]), axis=1) # 1/Sum(1/sigma_i^2)
wmean = np.nansum(wgts * ip_spectra[:,0,:] / ip_spectra[:,1,:], axis=1) # Sum(x_i/sigma_i^2)
mean = wmean * mvarraw
mvar = mvarraw
# Correct weighted sample variance for small sample
#meantile = np.tile(mean, (numSpec,1)).T
#V1 = 1 / mvarraw
#V2 = np.nansum(ip_spectra[:,1,:]**2, axis=1)
#mvar = V1 / (V1**2 - V2) * \
# np.nansum((ip_spectra[:,0,:] - meantile)**2 / ip_spectra[:,1,:], axis=1)
else:
mvar = np.nanstd(ip_spectra[:,0,:], axis=1) ** 2 # straight sample variance of the fluxes (not divided by numSpec)
mean = np.nanmean(ip_spectra[:,0,:], axis=1)
# 5. Calculate extreme flux values if requested
if extremes:
minF = np.nanmin(ip_spectra[:,0,:], axis=1)
maxF = np.nanmax(ip_spectra[:,0,:], axis=1)
# 5. Create the combined spectrum
if extremes:
specComb = [wl_mask, mean, mvar, minF, maxF]
else:
specComb = [wl_mask, mean, mvar]
# 6. Re-normalize spectra to calculated combined spectrum, if requested
if renormalize:
renorm_spectra = []
for ispec in range(0, numSpec):
tmpflux = ip_spectra[:,0,ispec]
renormfac = np.median(tmpflux / mean) # mean is the flux of the combined spectrum
if uncsGiven:
tmpunc = ip_spectra[:,1,ispec]
renorm_spectra.append([wl_mask, tmpflux / renormfac, tmpunc / renormfac])
else:
renorm_spectra.append([wl_mask, tmpflux / renormfac])
return specComb, renorm_spectra
else:
return specComb
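# Hedged usage sketch for mean_comb (illustrative only; wl*, flux*, unc* are assumed to be
# numpy arrays for two spectra sampled on similar wavelength grids):
# spec1 = [wl1, flux1, unc1]
# spec2 = [wl2, flux2, unc2]
# combined = mean_comb([spec1, spec2])
# wl_c, flux_c, var_c = combined[0], combined[1], combined[2]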
def norm_spec(specData, limits, flag=False):
'''
(by <NAME>)
Normalize a spectrum using a band (i.e. a portion) of the spectrum specified by *limits*.
*specData*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and (optional) error values in position 2. More than one spectrum can be provided simultaneously, in which case *specData* shall be a list of lists.
*limits*
Python list with lower limit in position 0 and upper limit in position 1. If more than one spectrum provided, these limits will be applied to all spectra.
*flag*
Boolean, whether to warn if the normalization limits had to be shrunk because they fall outside the spectrum. If set to *True*, *norm_spec* returns the normalized spectra AND a boolean flag.
'''
# Convert specData to list or spectra if it consists only of one
if len(specData) <= 3 and len(specData[0]) > 10:
specData = [specData]
# Initialize objects
finalData = [None] * len(specData)
# Check that given limits are reasonable
if limits[0] >= limits[1]:
print('norm_spec: the Min and Max values specified are not reasonable.')
return None
# Re-define normalizing band (specified in limits) for each spectrum in case
# the limits fall outside of the spectrum range
all_lims = [None] * len(specData)
flagged = False
for spIdx, spData in enumerate(specData):
smallest = limits[0]
largest = limits[1]
if spData is None:
continue
tmpNans = np.where(np.isfinite(spData[1]))
if len(tmpNans[0]) != 0:
if spData[0][tmpNans[0][0]] > smallest:
smallest = spData[0][tmpNans[0][0]]
flagged = True
if spData[0][tmpNans[0][-1]] < largest:
largest = spData[0][tmpNans[0][-1]]
flagged = True
all_lims[spIdx] = [smallest, largest]
lims = [smallest, largest]
# Loop through each spectral data set
for spIdx, spData in enumerate(specData):
# 1) Skip if data is missing
if spData is None:
continue
# 2) Determine if spectra come with error values
if len(spData) == 3:
errors = True
else:
errors = False
# 3) Determine minimum wavelength value for band
smallIdx = np.where(spData[0] < all_lims[spIdx][0])
# If lower limit < all values in spectrum wavelength points, then
# make band's minimum value = first data point in spectrum
try:
smallIdx[0]
except IndexError:
minIdx = 0
smallIdx = [None]
# If lower limit > all values in spectrum wavelength points, then
# no band can be selected
if smallIdx != [None]:
if len(smallIdx[0]) == len(spData[0]):
print('norm_spec: the wavelength data for object is outside limits.' )
continue
else:
minIdx = smallIdx[0][-1] + 1
# 4) Determine maximum wavelength value for band
largeIdx = np.where(spData[0] > all_lims[spIdx][1])
# If upper limit > all values in spectrum wavelength points, then
# make band's maximum value = last data point in spectrum
try:
largeIdx[0]
except IndexError:
maxIdx = len(spData[0])
largeIdx = [None]
# If upper limit < all values in spectrum wavelength points, then
# no band can be selected
if largeIdx != [None]:
if len(largeIdx[0]) == len(spData[0]):
print('norm_spec: the wavelength data for object is outside limits.')
continue
else:
maxIdx = largeIdx[0][0]
# 5) Check for consistency in the computed band limits
if maxIdx - minIdx < 2:
print('norm_spec: The Min and Max values specified yield no band.')
continue
# 6) Select flux band from spectrum
fluxSelect = spData[1][minIdx:maxIdx]
fluxSelect = np.array(fluxSelect)
# 7) Select error value band from spectrum
if errors is True:
errorSelect = spData[2][minIdx:maxIdx]
errorSelect = np.array(errorSelect)
# 8) Normalize spectrum using arithmetic mean
notNans = np.where(np.isfinite(fluxSelect))
avgFlux = np.mean(fluxSelect[notNans])
finalFlux = spData[1] / avgFlux
finalData[spIdx] = [spData[0], finalFlux]
if errors is True:
#notNans = np.where(np.isfinite(errorSelect))
#avgError = np.mean(errorSelect[notNans])
finalErrors = spData[2] / avgFlux
finalData[spIdx] = [spData[0], finalFlux, finalErrors]
if flag:
return finalData, flagged
else:
return finalData
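# Hedged usage sketch for norm_spec (the band limits are illustrative and must be in the same
# wavelength units as the spectrum):
# normalized = norm_spec([wl, flux, unc], limits=[1.27, 1.32])
# wl_n, flux_n, unc_n = normalized[0]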
def read_spec(specFiles, errors=True, atomicron=False, negtonan=False, plot=False, linear=False, templ=False, verbose=True, header=False):
'''
(by <NAME>, <NAME>)
Read spectral data from fits or ascii files. It returns a list of numpy arrays with wavelength in position 0, flux in position 1 and error values (if requested) in position 2. More than one file name can be provided simultaneously.
**Limitations**: Due to a lack of set framework for ascii file headers, this function assumes ascii files to have wavelength in column 1, flux in column 2, and (optional) error in column 3. Ascii spectra are assumed to be linear, so the kwarg *linear* is disabled for ascii files. Fits files that have multiple spectral orders will not be interpreted correctly with this function.
*specFiles*
String with fits file name (with full path); it can also be a python list of file names.
*errors*
Boolean, whether to return error values for the flux data; return nans if unavailable.
*atomicron*
Boolean, if wavelength units are in Angstrom, whether to convert them to microns.
*negtonan*
Boolean, whether to set negative flux values equal to zero.
*plot*
Boolean, whether to plot the spectral data, including error bars when available.
*linear*
Boolean, whether to return spectrum only if it is linear. If it cannot verify linearity, it will assume linearity.
*templ*
Boolean, whether data to extract is of a template spectrum, which means it includes avg flux, flux variance, min and max flux at each wavelength.
*verbose*
Boolean, whether to print warning messages.
*header*
Boolean, whether to also return the fits file header.
'''
# 1. Convert specFiles into a list type if it is only one file name
if isinstance(specFiles, str):
specFiles = [specFiles,]
try:
specFiles[0]
except TypeError:
print('File name(s) in invalid format.')
return
# 2. Initialize array to store spectra
specData = [None] * len(specFiles)
# 3. Loop through each file name:
for spFileIdx,spFile in enumerate(specFiles):
if spFile is None: continue
# 3.1 Determine the type of file it is
isFits = False
ext = spFile[-4:].lower()
if ext == 'fits' or ext == '.fit':
isFits = True
# 3.2. Get data from file
if isFits:
isSDSS = False
isLAMOST = False
try:
# Determine table index to extract the data
tmpHead = pf.getheader(spFile, ext=0)
# Telescope exceptions
try:
tmptelescope = tmpHead['TELESCOP'].upper()
except KeyError:
tmptelescope = ''
if tmptelescope.find('SDSS') != -1:
isSDSS = True
tmpext = 1
if tmptelescope.find('LAMOST') != -1:
isLAMOST = True
if not isSDSS:
if tmpHead['NAXIS'] == 0:
try:
if tmpHead['NAXIS1'] < 100:
tmpext = 2
else:
tmpext = 1
except KeyError:
tmpext = 1
else:
tmpext = 0
fitsData = pf.getdata(spFile, ext=tmpext)
except IOError:
print('Could not open ' + str(spFile) + '.')
continue
# Re-shape SDSS data array to make it compatible with the rest of this code
if isSDSS:
fitsData = np.array(fitsData.tolist()).T
# Now determine the table index to extract header info with wavelength solution
tmpHead = pf.getheader(spFile, ext=tmpext)
if isSDSS:
fitsHeader = pf.getheader(spFile, ext=0)
else:
fitsHeader = tmpHead.copy()
# Assume ascii file otherwise
else:
try:
aData = ascii.read(spFile)
specData[spFileIdx] = [aData[0].tonumpy(), aData[1].tonumpy()]
if len(aData) >= 3 and errors:
specData[spFileIdx].append(aData[2].tonumpy())
except IOError:
print('Could not open ' + str(spFile) + '.')
continue
# 3.3. Check if data in fits file is linear
if isFits:
KEY_TYPE = ['CTYPE1']
setType = set(KEY_TYPE).intersection(set(fitsHeader.keys()))
if len(setType) == 0:
if verbose:
print('Data in ' + spFile + ' assumed to be linear.')
isLinear = True
else:
valType = fitsHeader[setType.pop()]
if valType.strip().upper() == 'LINEAR':
isLinear = True
else:
isLinear = False
if linear and not isLinear:
if verbose:
print('Data in ' + spFile + ' is not linear.')
return
# 3.4. Get wl, flux & error data from fits file
# (returns wl in pos. 0, flux in pos. 1, error values in pos. 2)
# (If template spec: min flux in pos. 3, max flux in pos. 4)
if isFits:
specData[spFileIdx] = __get_spec(fitsData, fitsHeader, spFile, errors, \
templ=templ, verb=verbose)
if specData[spFileIdx] is None:
continue
# Generate wl axis when needed
if specData[spFileIdx][0] is None:
specData[spFileIdx][0] = __create_waxis(fitsHeader, \
len(specData[spFileIdx][1]), spFile, \
verb=verbose)
# If no wl axis generated, then clear out all retrieved data for object
if specData[spFileIdx][0] is None:
specData[spFileIdx] = None
continue
# 3.5. Convert units in wl-axis from Angstrom into microns if desired
if atomicron:
if specData[spFileIdx][0][-1] > 8000:
specData[spFileIdx][0] = specData[spFileIdx][0] / 10000
# 3.6. Set negative flux values equal to zero (next step sets them to nans)
if negtonan:
negIdx = np.where(specData[spFileIdx][1] < 0)
if len(negIdx[0]) > 0:
specData[spFileIdx][1][negIdx] = 0
if verbose:
print('%i negative data points found in %s.' \
% (len(negIdx[0]), spFile))
# 3.7. Set zero flux values as nans (do this always)
zeros = np.where(specData[spFileIdx][1] == 0)
if len(zeros[0]) > 0:
specData[spFileIdx][1][zeros] = np.nan
# 4. Plot the spectra if desired
if plot:
plot_spec(specData, ploterrors=True)
# 5. Clear up memory
fitsData = ''
if header:
return specData, fitsHeader
else:
return specData
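# Hedged usage sketch for read_spec (the file name below is a placeholder, not a real file):
# spectra = read_spec(['target_spectrum.fits'], errors=True, atomicron=True, verbose=False)
# wl, flux, unc = spectra[0]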
def snr(spec, rng=None):
'''
(by <NAME>)
Calculate signal-to-noise in a spectrum.
*spec*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and error values in position 2. It can also be a list of spectra. If no errors available, then it calculates SNR based on this: http://www.stecf.org/software/ASTROsoft/DER_SNR/der_snr.py.
*rng*
list, indicating in wavelength space the range of interest. If None, it computes signal-to-noise for the whole spectrum.
'''
# Convert spec into a list type if it is only one spectrum
if len(spec[0]) > 3:
spec = [spec,]
snr = np.array([np.nan] * len(spec))
for js,s in enumerate(spec):
i = np.where((s[1] != 0.0) & (np.isfinite(s[1])))[0]
flux = np.array(s[1][i])
wl = np.array(s[0][i])
try:
e_flux = np.array(s[2][i])
i = np.where(np.isfinite(e_flux))[0]
if len(i) > 0:
errors = True
else:
errors = False
except IndexError:
errors = False
if errors:
if rng is None:
snr[js] = np.median(flux / e_flux)
else:
if rng[0] >= rng[1]:
print('Wavelength range incorrect.')
return
else:
i = np.where((wl > rng[0]) & (wl < rng[1]))[0]
if len(i) == 0:
print('No flux data within specified range.')
return
else:
snr[js] = np.median(flux[i] / e_flux[i])
else:
if rng is None:
n = len(flux)
flx = flux.copy()
else:
if rng[0] >= rng[1]:
print('Wavelength range incorrect.')
return
else:
i = np.where((wl > rng[0]) & (wl < rng[1]))[0]
n = len(i)
flx = flux[i]
if n < 4:
print('At least 4 flux data points are needed for this calculation.')
return
else:
signal = np.median(flx)
noise = 0.6052697 * np.median(np.abs(2.0 * flx[2:n-2] - flx[0:n-4] - flx[4:n]))
snr[js] = signal / noise
return snr
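# Hedged usage sketch for snr (the wavelength range is illustrative, in the same units as wl):
# snr_vals = snr([wl, flux, unc], rng=[1.2, 1.3])
# print('median S/N in band: %.1f' % snr_vals[0])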
def plot_spec(specData, ploterrors=False):
'''
(by <NAME>)
Plot a spectrum. If more than one spectrum is provided simultaneously, it will plot all spectra on top of one another.
This is a quick and dirty tool to visualize a set of spectra. It is not meant to be a paper-ready format. You can use it, however, as a starting point.
*specData*
Spectrum as a Python list with wavelength in position 0, flux in position 1, and (optional) error values in position 2. More than one spectrum can be provided simultaneously, in which case *specData* shall be a list of lists.
*ploterrors*
Boolean, whether to include flux error bars when available. This will work only if all spectra have error values.
'''
# Check that there is data to plot
allNone = True
for spData in specData:
if spData is not None:
allNone = False
break
if allNone:
return
# Fix specData list dimensions when necessary
if len(specData) == 2 or len(specData) == 3:
if len(specData[0]) > 3:
specData = [specData]
# Initialize figure
plt.close()
fig = plt.figure(1)
fig.clf()
# Set plot titles
TITLE = 'SPECTRAL DATA'
X_LABEL = 'Wavelength'
Y_LABEL = 'Flux'
# Initialize plot within figure
subPlot = fig.add_subplot(1,1,1)
subPlot.set_title(TITLE)
subPlot.set_xlabel(X_LABEL)
subPlot.set_ylabel(Y_LABEL)
# Check if all spectra have error values
errorsOK = True
for spData in specData:
if len(spData) != 3:
errorsOK = False
# Plot spectra
for spData in specData:
if spData is not None:
if errorsOK and ploterrors:
subPlot.errorbar(spData[0], spData[1], spData[2], \
capsize=2, drawstyle='steps-mid')
else:
subPlot.plot(spData[0], spData[1], drawstyle='steps-mid')
return fig
def edit_header(fitsfiles, keyword, val, hdu=0):
"""
Edit a card on the fits file header using the parameters provided.
Args:
----------
fitsfile - String, the full path of the fits file; if only a filename is provided, it will look for the file in the current directory. It can also be a python list of names.
keyword - String, the name of the keyword to edit.
val - String, the value that the keyword will have.
hdu - Int, the index of the hdu to be edited.
Returns:
----------
- None.
"""
import datetime
# Convert fitsfiles into a list type if it is only one file name
if isinstance(fitsfiles, str):
fitsfiles = [fitsfiles,]
for fitsfl in fitsfiles:
# Read fits file data
FitsHDU = pf.open(fitsfl, 'update')
try:
tmp = FitsHDU[hdu].data.shape
except IndexError:
print('hdu index does not exist for ' + fitsfl)
print('Skipping this file.')
continue
try:
tmp = FitsHDU[hdu].header[keyword]
except KeyError:
print('Keyword does not exist for ' + fitsfl)
print('Skipping this file.')
continue
# Replace keyword value with new one
FitsHDU[hdu].header[keyword] = val
today = datetime.datetime.now().strftime('%Y-%m-%d')
origcomment = FitsHDU[hdu].header.comments[keyword]
FitsHDU[hdu].header.comments[keyword] = origcomment + ' ---Updated on ' + today + ' by antools.py.'
FitsHDU.flush()
return
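# Hedged usage sketch for edit_header (placeholder file name, keyword, and value):
# edit_header('target_image.fits', 'OBJECT', 'NewTargetName', hdu=0)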
def crop_fits(fitsfile, xsize, ysize, croploc='center', suffix=None):
"""
Crop a fits image using the parameters provided. If file has more than one image, it only considers the first one.
Args:
----------
fitsfile - String, the full path of the fits file; if only a filename is provided, it will look for the file in the current directory.
xsize - Int, the desired X size (columns) in pixels.
ysize - Int, the desired Y size (rows) in pixels.
croploc - ['center'(default), 'upper right', 'upper left', 'lower left', 'lower right'], set location around which to crop image. If 'center', then it crops image centered in the image center. If 'upper right', then it crops image to size [xsize,ysize] anchored in the upper right corner. And so on...
suffix - String, suffix to add to new fits file. If it is None, then the original fits file is overwritten with the new one.
Returns:
----------
- the new fits HDU, including the original header information.
- It also saves a copy of the newly created fits file in the same folder as the original file, with an added suffix to its name, if "suffix" is specified.
"""
import os
# Get file path, if provided, and filename
filepath = fitsfile.rsplit('/',1)[0]
if filepath == fitsfile:
filepath = ''
filename = fitsfile.rsplit('.',1)[0]
else:
filepath = filepath + '/'
filename = fitsfile.rsplit('/',1)[1].rsplit('.',1)[0]
# Read fits file data
FitsHDU = pf.open(fitsfile)
Im = FitsHDU[0].data
FitsHeader = FitsHDU[0].header
xsizeorig = FitsHeader['NAXIS1']
ysizeorig = FitsHeader['NAXIS2']
# Determine pixel limits for cropping
if croploc == 'center':
center = [int(xsizeorig/2), int(ysizeorig/2)]
xstart = center[0] - int(xsize/2) + 1
xstop = center[0] + int(xsize/2) + 1
ystart = center[1] - int(ysize/2)
ystop = center[1] + int(ysize/2)
elif croploc == 'upper right':
xstart = xsizeorig - xsize + 1
xstop = xsizeorig + 1
ystart = ysizeorig - ysize
ystop = ysizeorig + 1
elif croploc == 'upper left':
xstart = 1
xstop = xsize + 1
ystart = ysizeorig - ysize + 1
ystop = ysizeorig + 1
elif croploc == 'lower left':
xstart = 1
xstop = xsize + 1
ystart = 1
ystop = ysize + 1
elif croploc == 'lower right':
xstart = xsizeorig - xsize + 1
xstop = xsizeorig + 1
ystart = 1
ystop = ysize + 1
else:
print('croploc not recognized.')
return None
# Check that cropping dimensions are OK
if any((xstart<1, xstop<1, ystart<1,ystop<1)):
print('xsize/ysize dimensions are too large.')
return None
if any((xstart>xsizeorig+1, xstop>xsizeorig+1)):
print('xsize dimensions are too large.')
return None
if any((ystart>ysizeorig+1, ystop>ysizeorig+1)):
print('ysize dimensions are too large.')
return None
#Crop the image
Im = Im[ystart:ystop, xstart-1:xstop]
FitsHDU[0].data=Im
#Write it to a new file
if suffix is not None:
suffix = '_' + suffix
else:
suffix = ''
OutFile = filepath + filename + suffix + '.fits'
if os.path.exists(OutFile) : os.remove(OutFile)
FitsHDU.writeto(OutFile)
return FitsHDU
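# Hedged usage sketch for crop_fits (placeholder file name; crops a 256x256 cutout around the
# image center and writes target_image_crop.fits next to the original):
# cropped_hdu = crop_fits('target_image.fits', 256, 256, croploc='center', suffix='crop')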
def __create_waxis(fitsHeader, lenData, fileName, verb=True):
# Function used by read_spec only
# (by Alejo)
# Generates a wavelength (wl) axis using header data from fits file.
# Define candidate header keyword names for the wavelength solution
KEY_MIN = ['COEFF0','CRVAL1'] # Min wl
KEY_DELT = ['COEFF1','CDELT1','CD1_1'] # Delta of wl
KEY_OFF = ['LTV1'] # Offset in wl to subsection start
# Find key names for minimum wl, delta, and wl offset in fits header
setMin = set(KEY_MIN).intersection(set(fitsHeader.keys()))
setDelt = set(KEY_DELT).intersection(set(fitsHeader.keys()))
setOff = set(KEY_OFF).intersection(set(fitsHeader.keys()))
# Get the values for minimum wl, delta, and wl offset, and generate axis
if len(setMin) >= 1 and len (setDelt) >= 1:
nameMin = setMin.pop()
valMin = fitsHeader[nameMin]
nameDelt = setDelt.pop()
valDelt = fitsHeader[nameDelt]
if len(setOff) == 0:
valOff = 0
else:
nameOff = setOff.pop()
valOff = fitsHeader[nameOff]
# generate wl axis
if nameMin == 'COEFF0':
wAxis = 10 ** (np.arange(lenData) * valDelt + valMin)
else:
wAxis = (np.arange(lenData) * valDelt) + valMin - (valOff * valDelt)
else:
wAxis = None
if verb:
print('Could not re-create wavelength axis for ' + fileName + '.')
return wAxis
def __get_spec(fitsData, fitsHeader, fileName, errorVals, templ=False, verb=True):
# Function used by read_spec only
# (by Alejo)
# Interprets spectral data from fits file.
# Returns wavelength (wl) data in pos. 0, flux data in pos. 1, and if requested, error values in pos. 2.
# If templ, also returns min flux in pos. 3 and max flux in pos. 4
if templ:
validData = [None] * 5
elif errorVals:
validData = [None] * 3
else:
validData = [None] * 2
# Identify number of data sets in fits file
dimNum = len(fitsData)
fluxIdx = None
waveIdx = None
sigmaIdx = None
isSDSS = False
try:
if fitsHeader['TELESCOP'].upper().find('LAMOST') != -1:
isLAMOST = True
else:
isLAMOST = False
except KeyError:
isLAMOST = False
# Identify data sets in fits file
if dimNum == 1:
fluxIdx = 0
elif dimNum == 2:
if len(fitsData[0]) == 1:
sampleData = fitsData[0][0][20]
else:
sampleData = fitsData[0][20]
if sampleData < 0.0001:
# 0-flux, 1-unknown
fluxIdx = 0
else:
waveIdx = 0
fluxIdx = 1
elif dimNum == 3:
waveIdx = 0
fluxIdx = 1
sigmaIdx = 2
elif dimNum == 4:
# 0-flux clean, 1-flux raw, 2-background, 3-sigma clean
fluxIdx = 0
sigmaIdx = 3
elif dimNum == 5:
if templ:
# 0-wl, 1-avg flux, 2-flux variance, 3-min flux, 4-max flux
waveIdx = 0
fluxIdx = 1
sigmaIdx = 2
minIdx = 3
maxIdx = 4
else:
if isLAMOST:
# 0-flux, 1-inv.var, 2-wl, 3-andmask, 4-ormask
fluxIdx = 0
sigmaIdx = 1 # This column is actually 1/sigma^2
waveIdx = 2
else:
# 0-flux, 1-continuum subtracted flux, 2-sigma, 3-mask array, 4-unknown
fluxIdx = 0
sigmaIdx = 2
elif dimNum == 8:
# SDSS spectra
fluxIdx = 0
waveIdx = 1 # This column is actually log10(wl)
sigmaIdx = 2 # This column is actually 1/sigma^2
isSDSS = True
elif dimNum > 10:
# Implies that only one data set in fits file: flux
fluxIdx = -1
if np.isscalar(fitsData[0]):
fluxIdx = -1
elif len(fitsData[0]) == 2:
# Data comes in a xxxx by 2 matrix (ascii origin)
tmpWave = []
tmpFlux = []
for pair in fitsData:
tmpWave.append(pair[0])
tmpFlux.append(pair[1])
fitsData = [tmpWave,tmpFlux]
fitsData = np.array(fitsData)
import numpy
import logging
from numpy.core import dot, array
from scipy import stats
from .. import Exceptions
from ..misc import Math
from .. import Utilities
class Context(object):
def __init__(self): raise Exceptions.ReportableException("Tried to instantiate abstract Joint Analysis context")
def get_genes(self): raise Exceptions.NotImplemented("Context: get_genes")
def get_n_genes(self): raise Exceptions.NotImplemented("Context: get_n_genes")
def get_metaxcan_zscores(self, gene): raise Exceptions.NotImplemented("Context: get_metaxcan_zscores")
def get_model_matrix(self, gene, tissues): raise Exceptions.NotImplemented("Context: get_model_matrix")
def get_cutoff(self, matrix): raise Exceptions.NotImplemented("Context: get_cutoff")
def get_gene_name(self, gene): raise Exceptions.NotImplemented("Context: get_gene_name")
def check(self): raise Exceptions.NotImplemented("Context: check")
class ContextMixin(object):
def __init__(self):
self.metaxcan_results_manager = None
self.matrix_manager = None
self.cutoff = None
self.gene_names = None
self.trimmed_ensemble_id = None
def get_metaxcan_zscores(self, gene):
if self.trimmed_ensemble_id and "." in gene:
gene = gene.split(".")[0]
results = self.metaxcan_results_manager.results_for_gene(gene)
return results
def get_model_matrix(self, gene, tissues):
return self.matrix_manager.get(gene, tissues)
def get_cutoff(self, matrix):
return self.cutoff(matrix)
def get_gene_name(self, gene):
return self.gene_names[gene]
def get_trimmed_ensemble_id(self):
return self.trimmed_ensemble_id
def _process_genes(self, genes):
if self.trimmed_ensemble_id:
g = {t.gene.split(".")[0]: t.gene_name for t in genes.itertuples()}
else:
g = {t.gene: t.gene_name for t in genes.itertuples()}
return g
class CalculationStatus(object):
OK=0
NO_DATA=-1
NO_METAXCAN_RESULTS=-2
NO_PRODUCT=-3
INSUFFICIENT_NUMERICAL_RESOLUTION = -4
SINGULAR_COVARIANCE = -5
INVERSE_ERROR = -6
COMPLEX_COVARIANCE = -7
INADEQUATE_INVERSE = -8
def joint_analysis(context, gene):
g, g_n, pvalue, n, n_indep, p_i_best, t_i_best, p_i_worst, t_i_worst, eigen_max, eigen_min, eigen_min_kept, z_min, z_max, z_mean, z_sd, tmi, status \
= None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, CalculationStatus.NO_DATA
g = gene.split(".")[0] if context.get_trimmed_ensemble_id() else gene
g_n = context.get_gene_name(g)
####################################################################################################################
zscores, tissue_labels = context.get_metaxcan_zscores(gene)
if not zscores or len(zscores) == 0:
status = CalculationStatus.NO_METAXCAN_RESULTS
return g, g_n, pvalue, n, n_indep, p_i_best, t_i_best, p_i_worst, t_i_worst, eigen_max, eigen_min, eigen_min_kept, z_min, z_max, z_mean, z_sd, tmi, status
n = len(zscores)
z_min = numpy.min(zscores)
z_max = numpy.max(zscores)
import copy
import logging.config
import os
import pickle
# for Logging handling
import sys
import time
import numpy as np
from numpy.linalg import LinAlgError
from scipy.optimize import minimize
import model
logger = logging.getLogger(__name__)
def nonzero_indices(a):
"""Get an index with non-zero element.
Parameters
----------
a : numpy.ndarray
array
Returns
-------
np.nonzero() : numpy.ndarray
Index with non-zero element
"""
return (np.nonzero(a)[0])
def create_directory(dir_name):
"""create directory
Parameters
----------
dir_name : str(file path)
create directory name
Returns
-------
None
"""
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
else:
pass
def calc_diff(C_pre, C_pos, t_pre, t_pos, rss_pre, rss_pos):
"""calculate difference
Parameters
----------
C_pre : dict
control points before the update
C_pos : dict
control points after the update
t_pre : numpy.ndarray
parameters before the update
t_pos : numpy.ndarray
parameters after the update
rss_pre : float
residual sum of squares (RSS) before the update
rss_pos : float
residual sum of squares (RSS) after the update
Returns
-------
np.abs(diff) : float
absolute change in the RSS
"""
if t_pre.shape[1] > t_pos.shape[1]:
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
else:
t_pre = np.c_[t_pre, 1 - np.sum(t_pre, axis=1)]
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
ratio_sum = 0
for key in C_pre:
ratio_sum += np.linalg.norm(C_pre[key] - C_pos[key]) / np.linalg.norm(
C_pre[key])
diff = rss_pre - rss_pos
logger.debug("{} {} {}".format(rss_pre, rss_pos, diff))
return (np.abs(diff))
def calc_gd_igd(dd1, dd2):
"""Calculate gd and igd.
Parameters
----------
dd1 : numpy.ndarray
estimated bezier simplex sample
dd2 : numpy.ndarray
validation data
Returns
-------
gd : float
Generational Distance
igd : float
Inverted Generational Distance
"""
gd = 0
igd = 0
for i in range(dd2.shape[0]):
d2 = dd2[i, :]
tmp = dd1 - d2
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
gd += v
for i in range(dd1.shape[0]):
d1 = dd1[i, :]
tmp = dd2 - d1
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
igd += v
return (gd / dd2.shape[0], igd / dd1.shape[0])
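# Hedged usage sketch for calc_gd_igd with tiny made-up point sets (dd1 = estimated Bezier
# simplex sample, dd2 = validation data; both are (N, dimSpace) arrays):
# dd1 = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])
# dd2 = np.array([[0.1, 0.9], [0.9, 0.1]])
# gd, igd = calc_gd_igd(dd1, dd2)
# logger.debug("GD={} IGD={}".format(gd, igd))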
class BorgesPastvaTrainer:
"""Polynomial Regression Trainer.
Attributes
----------
dimSpace : int
dimension of the ambient (objective) space
dimSimplex : int
dimension of the simplex
degree : int
degree of the Bezier simplex (sets the number of control points)
"""
def __init__(self, dimSpace, degree, dimSimplex):
"""Borges Pastva Trainer initialize.
Parameters
----------
dimSpace : int
dimension of the ambient (objective) space
degree : int
degree of the Bezier simplex
dimSimplex : int
dimension of the simplex
Returns
----------
None
"""
self.dimSpace = dimSpace # dimension of the ambient space
self.dimSimplex = dimSimplex # dimension of the bezier simplex
self.degree = degree # degree of the bezier simplex (sets the number of control points)
self.bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
def initialize_control_point(self, data):
"""Initialize control point.
Parameters
----------
data : list
test data
Returns
----------
C : dict
control point
"""
bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
C = bezier_simplex.initialize_control_point(data)
return (C)
def gradient(self, c, t):
"""Calculate gradient.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
g : dict
gradient vectors, keyed by the simplex direction index (d,)
"""
g = {}
x = {}
for d in range(self.dimSimplex - 1):
x[d] = np.zeros(self.dimSpace)
for d in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[d][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d, d1=None)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for d in x:
g[(d, )] = x[d]
return (g)
def hessian(self, c, t):
"""Calculate hessian.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
h : dict
hessian matrix
"""
h = {}
x = {}
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
x[(d1, d2)] = np.zeros(self.dimSpace)
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[(d1, d2)][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d1, d1=d2)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for (d1, d2) in x:
h[(d1, d2)] = x[(d1, d2)]
return (h)
def initialize_parameter(self, c, data):
"""Initialize parameter.
Parameters
----------
c : dict
control point
data : numpy.ndarray
sample points
Returns
----------
tt_ : numpy.ndarray
nearest parameter of each sample points
xx_ : numpy.ndarray
nearest points on the current bezier simplex
"""
tt, xx = self.bezier_simplex.meshgrid(c)
tt_ = np.empty([data.shape[0], self.dimSimplex])
xx_ = np.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
a = data[i, :]
tmp = xx - a
norm = np.linalg.norm(tmp, axis=1)
amin = np.argmin(norm)
tt_[i, :] = tt[amin, :]
xx_[i, :] = xx[amin, :]
return (tt_, xx_)
def inner_product(self, c, t, x):
"""Inner product.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
f : numpy.ndarray
inner products between the gradient directions and the residual (b(t) - x)
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
f = np.array(np.zeros(self.dimSimplex - 1))
for d in range(self.dimSimplex - 1):
f[d] = sum(g[(d, )][i] * (b[i] - x[i])
for i in range(self.dimSpace))
return (f)
def inner_product_jaccobian(self, c, t, x):
"""Inner product(jaccobian).
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
j : numpy.ndarray
Jacobian matrix
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
h = self.hessian(c, t)
j = np.zeros([self.dimSimplex - 1, self.dimSimplex - 1])
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
j[d1, d2] = sum(h[(d1, d2)][i] * (b[i] - x[i]) +
g[(d1, )][i] * g[(d2, )][i]
for i in range(self.dimSpace))
return (j)
def newton_method(self, c, t_init, x, newton_itr=20, tolerance=10**(-5)):
"""Newton method.
Parameters
----------
c : dict
control point
t_init : list
parameter
x : numpy.ndarray
point
newton_itr : int
iterate value
tolerance : int
tolerance
Returns
----------
t_k : numpy.ndarray
output point
"""
t_k = copy.deepcopy(t_init)
for k in range(newton_itr):
f = self.inner_product(c, t_k, x)
if np.linalg.norm(f) > tolerance:
j = self.inner_product_jaccobian(c, t_k, x)
# for Logging handling
try:
d = np.linalg.solve(j, f)
except LinAlgError as e:
logger.critical("{0}".format(e))
logger.critical("The arguments are shown below")
logger.critical(j)
logger.critical(f)
sys.exit()
t_k = t_k - d
else:
break
return (t_k)
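# Hedged usage sketch for newton_method (illustrative: `trainer` is a BorgesPastvaTrainer,
# `c` the current control-point dict, and `x` one data point as a 1-D numpy array):
# t_init, _ = trainer.initialize_parameter(c, x.reshape(1, -1))
# t0 = t_init[0][0:trainer.dimSimplex - 1] # drop the dependent last barycentric coordinate
# t_star = trainer.newton_method(c, t0, x)
# t_star = trainer.projection_onto_simplex(t_star) # keep the refined parameter on the simplex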
def projection_onto_simplex(self, t):
"""Projection onto simplex.
Parameters
----------
t : list
parameter
Returns
----------
res : numpy.ndarray
parameter
"""
if np.min(t) >= 0 and np.sum(t) <= 1:
return (t)
else:
tmp = np.append(t, 1 - np.sum(t))
def l2norm(x):
return (np.linalg.norm(x - tmp))
cons = []
for i in range(self.dimSimplex):
cons = cons + [{'type': 'ineq', 'fun': lambda x, i=i: x[i]}] # bind i at definition time; a plain lambda would capture the loop variable late
cons = cons + [{'type': 'eq', 'fun': lambda x: -np.sum(x) + 1}]
import string
import numpy as np
import argparse
import cv2
from queue import PriorityQueue
from Obstacle import Circle, ClosedFigure
from Point import Point
from Arena import Arena
def correct_theta(theta: float) -> float:
'''
Wrap the angle to the range [-pi, pi].
'''
while(theta > np.pi):
theta = theta - 2.0 * np.pi
while(theta < -np.pi):
theta = theta + 2.0 * np.pi
return theta
def degrees(theta: float) -> float:
'''
Convert angle to degrees from radians.
'''
return (theta * 180 / np.pi)
def radians(theta: float) -> float:
'''
Convert angle to radians from degrees.
'''
return (theta / 180 * np.pi)
class Graphv2:
class Node():
def __init__(self, state: tuple, cost: float, index: int, parent_index: int) -> None:
self.state = state
self.cost = cost
self.index = index
self.parent_index = parent_index
def __init__(self, start_state: tuple, goal_state: tuple, occupancy_grid: np.ndarray,
clearance: int, threshold: float, step_size: float, steer_limit: float,
steer_step: float, make_video: bool) -> None:
self.start_state = start_state
self.goal_state = goal_state
self.grid = occupancy_grid
self.clearance = clearance
self.threshold = threshold
self.step_size = step_size
self.steer_limit = radians(steer_limit)
self.steer_step = radians(steer_step)
self.dup_thresh = 0.5
if self.traversable(start_state, clearance):
print(f'The start state is not valid!')
exit()
if(self.traversable(goal_state, clearance)):
print(f'The goal state is not valid!')
exit()
self.cindex = 0
self.start_node = self.Node(self.start_state, 0, self.cindex, None)
self.goal_node = self.Node(self.goal_state, 0, -1, None)
# Generate steering angles
self.steering_angles = np.linspace(-self.steer_limit, self.steer_limit, int((self.steer_limit * 2 // self.steer_step) + 1))
# Open and close lists
self.olist = dict()
self.clist = dict()
# Map to mark visited nodes.
self.vmap_size = [float(self.grid.shape[0])/self.dup_thresh, float(self.grid.shape[1])/self.dup_thresh, 360.0/degrees(self.steer_step)]
self.v_map = np.array(np.ones(list(map(int, self.vmap_size))) * np.inf)
# Tracking variables for plotting path.
self.final_node = None
self.path = None
self.make_video = make_video
self.iters = 0
self.num_nodes = 0
self.total_cost = 0.0
self.grid_for_video = None
if(self.make_video):
self.video = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc('F','M','P','4'), 24, (self.grid.shape[0], self.grid.shape[1]))
def traversable(self, state: np.ndarray, clearance: int) -> bool:
'''
Check whether a state is blocked: returns True if it is out of bounds or within the clearance of an obstacle (i.e., NOT traversable), and False if it is free.
'''
points_x, points_y = np.ogrid[max(0, int(state[0]) - clearance): min(self.grid.shape[0], int(state[0]) + clearance), max(0, int(state[1]) - clearance): min(self.grid.shape[1], int(state[1]) + clearance)]
# Checking if the point is in bounds of the arena
if (state[0] < 10) or (state[0] > self.grid.shape[0] - 10) or (state[1] < 10) or (state[1] > self.grid.shape[1] - 10):
return True
# Checking if the point is inside or outside the obstacle
elif(not self.grid[int(state[0]),int(state[1])]):
return True
elif(len(np.where(self.grid[points_x, points_y] == 0)[0])):
return True
else:
return False
def check_visited(self, node: Node) -> bool:
'''
Check to see if the state is already visited.
'''
x = int((round(node.state[0] * 2) / 2) / self.dup_thresh)
y = int((round(node.state[1] * 2) / 2) / self.dup_thresh)
theta = round(degrees(node.state[2]))
if(theta < 0.0):
theta += 360
theta = int(theta / 30)
if(self.v_map[x][y][theta] != np.inf):
return True
else:
return False
def mark_visited(self, node: Node) -> None:
'''
Mark the state as visited.
'''
x = int((round(node.state[0] * 2) / 2) / self.dup_thresh)
y = int((round(node.state[1] * 2) / 2) / self.dup_thresh)
theta = round(degrees(node.state[2]))
if(theta < 0.0):
theta += 360
theta = int(theta / 30)
self.v_map[x][y][theta] = node.cost
def distance(self, state_1: tuple, state_2: tuple) -> float:
'''
Euclidean distance between two states.
'''
return (np.sqrt(sum((np.asarray(state_1) - np.asarray(state_2)) ** 2)))
def next_state(self, state: tuple, steering_angle: int) -> tuple:
'''
Get the next state by steering.
'''
x = state[0] + (self.step_size * np.cos(steering_angle))
y = state[1] + (self.step_size * np.sin(steering_angle))
theta = correct_theta(state[2] + steering_angle)
return (x, y, theta)
def find_path(self) -> bool:
'''
Find the path between self.start_state and self.goal_state and set all variables accordingly.
Returns true if path found, false otherwise.
'''
pq = PriorityQueue()
pq.put((self.start_node.cost, self.start_node.state))
self.olist[self.start_node.state] = (self.start_node.index, self.start_node)
if(self.make_video):
occupancy_grid = np.uint8(np.copy(self.grid))
occupancy_grid = cv2.cvtColor(np.flip(np.uint8(occupancy_grid).transpose(), axis=0), cv2.COLOR_GRAY2BGR)
cv2.circle(occupancy_grid, (self.start_state[0], self.grid.shape[1] - self.start_state[1]), 2, (0, 255, 0), 2)
cv2.circle(occupancy_grid, (self.goal_state[0], self.grid.shape[1] - self.goal_state[1]), 2, (0, 0, 255), 2)
self.video.write(np.uint8(occupancy_grid))
while(not pq.empty()):
self.iters += 1
current_node = self.olist[pq.get()[1]][1]
self.mark_visited(current_node)
self.clist[current_node.state] = (current_node.index, current_node)
del self.olist[current_node.state]
if(self.make_video and self.iters % 500 == 0):
print(f'Current iteration: {self.iters}')
try:
closed_list_ = dict(self.clist.values())
parent_node = closed_list_[current_node.parent_index]
start = (int(parent_node.state[0]), (self.grid.shape[1] - 1) - int(parent_node.state[1]))
end = (int(current_node.state[0]), (self.grid.shape[1] - 1) - int(current_node.state[1]))
cv2.line(occupancy_grid, start, end, (255,0,0), 1)
if(self.iters % 500 == 0):
self.video.write(np.uint8(occupancy_grid))
except Exception as e:
print(e)
if(self.distance(current_node.state[:2], self.goal_state[:2]) <= self.threshold):
print(f'Reached destination! Iterations: {self.iters}')
self.final_node = current_node
if(self.make_video):
self.grid_for_video = occupancy_grid
self.total_cost = current_node.cost
return True
for steering_angle in self.steering_angles:
new_state = self.next_state(current_node.state, steering_angle)
new_index = self.cindex + 1
self.cindex = new_index
new_cost = current_node.cost + self.step_size
if(not self.traversable(new_state, self.clearance)):
new_node = self.Node(new_state, new_cost, new_index, current_node.index)
if(self.check_visited(new_node)):
self.cindex -= 1
continue
if(new_state in self.clist):
self.cindex -= 1
continue
if(new_state not in self.olist):
self.olist[new_state] = (new_node.index, new_node)
pq.put((new_node.cost + self.distance(new_state[:2], self.goal_state[:2]), new_node.state))
else:
if(self.olist[new_state][1].cost > new_node.cost):
self.olist[new_state] = (new_node.index, new_node)
else:
self.cindex -= 1
self.num_nodes += 1
else:
self.cindex -= 1
pass
print(f'Goal node not reachable with given conditions!')
return False
def backtrack_path(self) -> np.ndarray:
'''
Backtrack and find the actual path.
'''
current_node = self.final_node
self.path = list()
traversed_nodes = dict(self.clist.values())
while(current_node.index != 0):
self.path.append(current_node.state)
current_node = traversed_nodes[current_node.parent_index]
self.path.append(self.start_node.state)
self.path.reverse()
print(f'The length of the path is: {len(self.path)}')
self.path = np.array(self.path)
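# Hedged usage sketch for Graphv2 (illustrative: a small, fully free occupancy grid where
# nonzero cells are traversable; states are (x, y, heading in radians); steer limits in degrees):
# grid = np.ones((200, 200), dtype=np.uint8)
# planner = Graphv2(start_state=(20, 20, 0.0), goal_state=(180, 180, 0.0), occupancy_grid=grid,
#                   clearance=5, threshold=5.0, step_size=5.0, steer_limit=30.0, steer_step=10.0,
#                   make_video=False)
# if planner.find_path():
#     planner.backtrack_path()
#     path = planner.path # states from start to goal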
import h5py
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates
import pyresample as pr
from scipy.spatial import cKDTree
from pyproj import Proj
from scipy.interpolate import interp1d
import scipy
import pandas as pd
import netCDF4
def apr3tocit(apr3filename,fl,sphere_size,psd_filename_2ds,psd_filename_HVPS,query_k = 1,plotson=False,QC=False,slimfast=True,cit_aver=False,cit_aver2=False,
attenuation_correct=False,O2H2O={},per_for_atten = 50,
return_indices=False,BB=True,bbguess=500,
cal_adj_bool = False,cal_adj=0,
cloudtop=True,rollfix=True):
"""
=================
This function finds either the closest gate or averages over a number of gates (query_k) nearest to
the citation aircraft in the radar volume of the APR3. It can return a dict of the original full length
arrays and the matched arrays.
=====
Vars:
=====
apr3filename = str, filename of the apr hdf file
fl = awot object, the citation awot object
sphere_size = int, maximum distance allowed in the kdTree search
psd_filename_2ds = str, filename of the processed 2DS file
psd_filename_HVPS = str, filename of the processed HVPS3 file
query_k = int, number of gates considered in the average (if 1, use closest)
plotson = boolean, will create some premade plots that describe the matched data
QC = boolean, will apply a simple QC method: eliminates any gate within 0.5 km to the surface and the outliers
(plus/minus 1.5IQR)
slimfast = boolean, will not save original data. Cuts down on output file size by only outputting the matched data and the citation data.
cit_aver = boolean, averages the citation data variables using a 5 second moving average (there is overlap)
cit_aver2 = boolean, averages the citation data variables using a 5 second discrete average (there is NO overlap)
O2H2O = dict, data from sounding to correct for attenuation from O2 and H2O vapor
attenuation_correct = boolean, corrects for attenuation using LWC prof and Sounding. Uses 50th percentile of LWC Prof
per_for_atten = int, the percentile for the supercooled liquid water profile used in the attenuation correction.
return_indices = boolean, returns the indices of the matched gates in 1d coords
BB = boolean, mask gates from the BB and lower. Masks data using the BB_alt algorithm
bbguess = int, give your first guess of where the Bright Band is to assist the BB_alt algorithm
cal_adj_bool = bool, turn on calibration adjustment or not.
cal_adj = array, array of the adjustment needed for correct calibration between frequencies. [ka_adj, w_adj]
cloudtop = bool, eliminates sensitivity issues with the Ku-band data (~ < 10 dBZ) by masking out the cloudtop noise using a Gaussian filter
rollfix = bool, turn on or off the masking of data where the plane is rolling more than 10 degrees (the 10-degree threshold can be changed).
=================
"""
#get citation times (datetimes)
cit_time = fl['time']['data']
#Eliminate BB?
if BB:
#Get rid of anything below the melting level + 250 m
apr = apr3read(apr3filename)
#there are two methods to this. One is more conservative (using mean Ku) the other more intense with LDR Ku
#apr = BB_alt(apr,bbguess) #old
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
###new BB tech 2/27/18 RJC
print('Removing BB and below')
apr = mask_surf(apr)
apr['ldr'] = np.ma.masked_where(apr['Ku'].mask,apr['ldr'])
#find bb profs
bb = precip_echo_filt3D(apr['ldr'],thresh=7)
ind1 = np.where(bb[12,:] == 1) #BB profiles based on LDR
top_a = find_bb(apr,ind1)
bb_long = extend_bb(ind1,apr['timedates'][12,:],top_a)
apr['Ku'][:,:,:] = np.ma.masked_where(apr['alt_gate'][:,:,:] <= bb_long,apr['Ku'][:,:,:])
apr['Ka'] = np.ma.masked_where(apr['Ku'].mask,apr['Ka'])
apr['W'] = np.ma.masked_where(apr['Ku'].mask,apr['W'])
###
#correct for attenuation using SLW and Ku
if attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor3(apr,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
elif attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor2(apr3filename,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
else:
apr = apr3read(apr3filename)
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
if cal_adj_bool:
print('adding calibration means...')
# These values come from the analysis performed by 3 research groups: NASA JPL, the University of Leicester, and the University of Illinois. Techniques use sigma_0 of the ocean surface, comparison of frequencies at low Z, and numerical simulations of particles. (error/uncertainty: +- 0.5 dB)
apr['Ku'] = apr['Ku'] + 0.8
apr['Ka'] = apr['Ka'] + 1
#Whh is the only one with a time varient calibration adjustment
apr['W'] = apr['W'] + cal_adj
#While calibrating the data, radar artifacts showed up when the roll of the aircraft was > 10 degrees.
if rollfix:
roll = apr['roll']
roll3d = np.zeros(apr['Ku'].shape)
for i in np.arange(0,apr['Ku'].shape[1]):
for j in np.arange(0,apr['Ku'].shape[2]):
roll3d[:,i,j] = roll[i,j]
apr['Ku'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ku'])
apr['Ka'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ka'])
apr['W'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['W'])
#Get APR3 times (datetimes)
time_dates = apr['timedates'][:,:]
#fix a few radar files where w-band disapears
if time_dates[12,0] >= datetime.datetime(2015,12,18,6,58):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,18,7,6),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
if time_dates[12,0] >= datetime.datetime(2015,12,1,23,43,48) and time_dates[12,0] <=datetime.datetime(2015,12,1,23,43,49):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,2,0,1,40),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
#Check if radar file is large enought to use (50 gates is arbitrary)
if time_dates[12,:].shape[0] < 50:
print('Limited radar gates in time')
#return
#
#Load PSD
dtime_psd,ND,dD,midpoints = PSD_load(psd_filename_2ds,psd_filename_HVPS,day = time_dates[0,0].day,month=time_dates[0,0].month)
#
#Make ND a masked array (i.e. get rid of nans from loading it in)
ind = np.isnan(ND)
ND = np.ma.masked_where(ind,ND)
#for plotting routine
fontsize=14
#
#Varibles needed for the kdtree
leafsize = 16
query_eps = 0
query_p=2
query_distance_upper_bound = sphere_size
query_n_jobs =1
Barnes = True
K_d = sphere_size
#
#Pre-Determine arrays
Ku_gate = np.ma.array([])
Ka_gate = np.ma.array([])
W_gate = np.ma.array([])
DFR_gate = np.ma.array([])
DFR2_gate = np.ma.array([])
DFR3_gate = np.ma.array([])
lon_c = np.ma.array([])
lat_c = np.ma.array([])
alt_c = np.ma.array([])
t_c = np.ma.array([])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# In[37]:
# Spline3_Coef pseudocode p.271
def Spline3_Coef(n, t, y, z):
h = np.zeros(n-1)
b = np.zeros(n-1)
u = np.zeros(n-1)
v = np.zeros(n-1)
for i in range(0, n-1):
h[i] = t[i+1] - t[i]
b[i] = (y[i+1] - y[i])/h[i]
u[1] = 2*(h[0] + h[1])
v[1] = 6*(b[1] - b[0])
for i in range(2, n-1):
u[i] = 2*(h[i] + h[i-1]) - (h[i-1]**2/u[i-1])
v[i] = 6*(b[i] - b[i-1]) - (h[i-1]*v[i-1]/u[i-1])
z[n] = 0
for i in range(n-2, 0, -1):
z[i] = (v[i] - h[i]*z[i+1])/u[i]
z[0] = 0
return z
# In[38]:
# Spline3_Eval pseudocode p.271
def Spline3_Eval(n, t, y, z, x):
for i in range(n-2, -1, -1):
if x - t[i] >= 0:
break
h = t[i+1] - t[i]
tmp = z[i]/2 + (x - t[i])*(z[i+1] - z[i])/(6*h)
tmp = -(h/6)*(z[i+1] + 2*z[i]) + (y[i+1] - y[i])/h + (x - t[i])*tmp
return y[i] + (x-t[i])*tmp
# In[39]:
X = np.zeros(20)
Y = np.zeros(20)
X = [0.0, 0.6, 1.5, 1.7, 1.9, 2.1, 2.3, 2.6, 2.8, 3.0, 3.6, 4.7, 5.2, 5.7, 5.8, 6.0, 6.4, 6.9, 7.6, 8.0]
Y = [-0.8, -0.34, 0.59, 0.59, 0.23, 0.1, 0.28, 1.03, 1.5, 1.44, 0.74, -0.82, -1.27, -0.92, -0.92, -1.04, -0.79, -0.06, 1.0, 0.0]
Z = np.zeros(21)
Spline3_Coef(20, X, Y, Z)
XS = np.arange(0, 8, 0.01)
YS = np.zeros(len(XS))
for i in range(0, len(XS)):
YS[i] = Spline3_Eval(19, X, Y, Z, XS[i])
# 6.2 Figure 5.8
# Format plot figure
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(111)
# Add f(x) line and points used by Simpson approximation
ax1.scatter(X,Y, label = 'y = S(x)')
ax1.plot(XS,YS, label = 'y = S(x)')
#ax1.scatter(XP,PP)
# Set title of plot
plt.title('Natural cubic spline S(x) through the 20 data points')
# Give x axis label
plt.xlabel('x')
plt.ylabel('f(x)')
# Format the plot
plt.legend(loc='upper right')
plt.grid(True, which='both')
plt.axhline(y=0)
plt.show()
# In[41]:
# Test_Spline3 pseudocode p.272
n = 9
a = 0
b = 1.6875
h = (b - a)/n
t = np.zeros(n+1)
y = np.zeros(n+1)
for i in range(0, n + 1): # fill all n+1 knots, i = 0..n
t[i] = a + i*h
y[i] = np.sin(t[i])
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import unet
import numpy as np
import dataset
from PIL import Image
import nibabel as nib
from scipy import ndimage
import os
import csv
import json
from tqdm import tqdm
import collections
from evaluation import evaluate
import argparse
import csv
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def bbox2_3D(img):
r = np.any(img, axis=(1, 2))
c = np.any(img, axis=(0, 2))
z = np.any(img, axis=(0, 1))
rmin, rmax = np.where(r)[0][[0, -1]]
cmin, cmax = np.where(c)[0][[0, -1]]
zmin, zmax = np.where(z)[0][[0, -1]]
return [rmin, rmax, cmin, cmax, zmin, zmax]
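# Hedged usage sketch for bbox2_3D on a toy binary mask:
# mask = np.zeros((10, 10, 10), dtype=np.uint8)
# mask[2:5, 3:7, 4:6] = 1
# rmin, rmax, cmin, cmax, zmin, zmax = bbox2_3D(mask) # -> 2, 4, 3, 6, 4, 5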
def keep_largest(predictions):
predictions_labels, num_features = ndimage.measurements.label(predictions)
unique, counts = np.unique(predictions_labels, return_counts=True)
counts_dict = dict(zip(unique, counts))
#
counts_dict = sorted(counts_dict.items(), key=lambda kv: kv[1], reverse = True)
counts_dict = collections.OrderedDict(counts_dict)
for i, lbl in enumerate(counts_dict):
if i < 2:
continue
predictions[predictions_labels == lbl] = 0
return predictions
class TumorSegmenter:
def __init__(self, device, weights, show_plots=False):
num_class = 1
self.model = unet.UNet(num_class, num_class).to(device)
self.model.load_state_dict(torch.load(weights, map_location=torch.device(device)))
self.model.eval()
self.show_plots = show_plots
def segment(self, volume, liver_mask):
trans = transforms.Compose([
# transforms.Resize((256, 256), interpolation=Image.LINEAR),
transforms.ToTensor()])
trans2 = transforms.Compose([
transforms.ToPILImage()])
# transforms.Resize((512, 512), interpolation=Image.NEAREST)])
input = volume.get_fdata().astype(np.float32)
# liver_mask = liver_segmentation.get_fdata().astype(np.uint8)
liver_mask_cp = ndimage.binary_dilation(liver_mask).astype(np.uint8)
for i in range(5):
liver_mask_cp = ndimage.binary_dilation(liver_mask_cp)
input[liver_mask_cp == 0] = 0
bbox = bbox2_3D(liver_mask_cp)
# input = np.clip(input, -100, 200)
input = np.rot90(input)
#
#Script for making histograms from PMT data
#integ_off and integ_window have to be adjusted for each dataset
#PMT(ch*_/extension/, channel, create new, folder name)
#
import numpy as np
import isee_sipm
import math
import ROOT
import sys
import os
s = 1 # (s)
ms = 1e-3 * s # (ms)
us = 1e-6 * s # (us)
ns = 1e-9 * s # (ns)
V = 1 # (V)
mV = 1e-3 * V # (mV)
class PMT:
def __init__(self, dataset, ch, create, name = 'hists', integ_off = 433 * ns):
self.nominal_period = 500*ns # approximate period
self.integ_off = integ_off
self.name = name
self.mean = 0
self.dset = dataset
self.channel = ch
self.meanerror = 0
self.fname = ''
self.hist = ROOT.TH1D()
self.nhist = ROOT.TH1D()
if create != 0:
self.hist_wave(dataset, ch)
if create == 0:
self.OpenHist()
self.Ppeak = 0 #Pedestal mean
self.Opeak = 0 #One PE mean
self.Ppeak_err = 0
self.Opeak_err = 0
def hist_wave(self, dataset, ch):
fch = 'ch%d_%s.txt.gz'%(ch, dataset)
self.fname = fch
wave_ch1 = isee_sipm.gzip2waveforms(fch)
nwaveforms = len(wave_ch1)
dV = wave_ch1[0].get_dv() # quantized delta V in (V)
dT = wave_ch1[0].get_dt() # sampling period 0.5 ns for 2 GHz sampling
##
npt = wave_ch1[0].get_n() # sampling points per waveform
eps = 0.001 * ns
n_per = int(float(npt)*dT/self.nominal_period)
npt_per = int(self.nominal_period/dT)
integ_off_pt = int((self.integ_off + eps) / dT) # integration offset from trigger, taken from __init__
noise_integ_off = self.integ_off - 100 * ns # integration offset from trigger
noise_integ_off_pt = int((noise_integ_off + eps) / dT)
integ_window = 20 * ns # peak width
integ_window_pt = int((integ_window + eps)/dT)
bin_width = dV * dT / mV / ns
## print('bin width = %f'%bin_width)
h_spec1 = ROOT.TH1D('signal', ';Integral Value (ns #times mV);Entries',
3500, -250.5 * bin_width , 3249.5 * bin_width)
h_spec2 = ROOT.TH1D('noise', ';Integral Value (ns #times mV);Entries',
3500, -250.5 * bin_width , 3249.5 * bin_width)
## print(n_per)
##
##
for i in range(0, nwaveforms):
for ievt in range(0, n_per-1):
t = wave_ch1[i].xarray[ievt*npt_per: (ievt+1)*npt_per] - wave_ch1[i].xarray[ievt*npt_per]
## V = wave_ch1[i].yarray[ievt*npt_per: (ievt+1)*npt_per]
Vmax = np.amax(wave_ch1[i].yarray[ievt*npt_per + integ_off_pt - integ_window_pt: ievt*npt_per + integ_off_pt])
Vmin = np.amin(wave_ch1[i].yarray[ievt*npt_per + integ_off_pt - integ_window_pt: ievt*npt_per + integ_off_pt])
# -*- coding: utf-8 -*-
"""
Created on Mon 11 January 2022
Modified by <EMAIL> on 21/10/2021
@author: <NAME>
@contact: <EMAIL>
@license: /
"""
import ROOT
from ROOT import RooFit
from ROOT import RooRealVar, RooArgSet, RooArgList, RooDataHist
from ROOT import RooGenericPdf, RooUniform, RooGaussian, RooGaussModel, RooDecay, RooFormulaVar
from ROOT import RooAddPdf, RooMCStudy
from ROOT import Math as RootMath
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy as sc
from scipy.signal import find_peaks
from scipy.optimize import least_squares
from scipy import stats
import os
import math
import time
import sys
import re
from process import ProcessorBase
def custom_colors():
# Color definition from coolors.co
colors = {
# https://coolors.co/d1495b-edae49-abe188-1a6fdf-0c0a3e
'Fall_rgb': {'red':'#D1495B', 'orange':'#EDAE49', 'green':'#ABE188','blue':'#1A6FDF','purple':'#0C0A3E'},
'Jo-s_favs': {'black': "#000000", 'grey': "#C0C0C0", 'purple': "#C177DA", 'blue': "#00A2FF",
'red': "#FF2D55", 'orange': "#FFCC00", 'green': "#61D935", 'lightblue': "#6FF1E9",}
}
return colors
class AME():
'''
Base handling AME related stuff
Params:
path_to_ame: path to ame mass file
ame_version: version of AME. Defaults to 2020
'''
def __init__(self, path_to_ame, ame_version = 'ame20'):
self.path_to_ame = path_to_ame
self.ame_version = ame_version
# Init masses dataframe
if self.ame_version == 'ame20':
self.ame = pd.read_fwf(self.path_to_ame, #usecols=(2,3,4,6,9,10,11,12,18,21,20),
names=['1', 'N-Z', 'N', 'Z', 'A', 'Unnamed: 5', 'element', 'O', 'Unnamed: 8',
'mass_excess', 'mass_excess_err', 'ebinding', 'nan1', 'ebinding_err', 'nan2', 'ET',
'beta_decay_energy', 'beta_decay_energy_err', 'nan18', 'atomic_mass_raw', 'nan20',
'atomic_mass_comma', 'atomic_mass_err'],
widths=(1,3,5,5,5,1,3,4,1,14,12,13,1,10,1,2,13,11,1,3,1,13,12),
header=28,
index_col=False)
else:
print(f"(error in AME.__init__): Wrong version parsed. Only 'ame20' available.")
def get_value(self, isotope="1H", value="mass", error=False):
'''
Returns value for given isotope
Params:
isotope: isotope
value: value to fetch
error: returns error on value
'''
# Split isotope string into mass number and element string
split_string = re.split("(\\d+)",isotope)
fetched_value = 0
# Iterate through every isotope in the chain of isotopes passed (to handle molecules)
for i in range(1, len(split_string)-1, 2):
A = int(split_string[i])
X = split_string[i+1]
if value == 'mass' or value == 'mass_excess':
# Get dataframe index of isotope
idx = -1
idx_list = self.ame.index[(self.ame["A"]==A) & (self.ame["element"]==X)].tolist()
if len(idx_list) < 1:
print(f"(Error in get_value for A={A}, X={X}): Can't find isotope with given values.")
return
elif len(idx_list) > 1:
print(f"(Error in get_value for A={A}, X={X}): Found multiple isotopes with given values.")
return
else:
idx = idx_list[0]
#
if not error:
# First convert the fetched dataframe entries to str, remove the hashtag, convert to float
if value == 'mass':
try:
raw = float(str(self.ame.iloc[idx]['atomic_mass_raw']).strip('#'))
comma = float(str(self.ame.iloc[idx]['atomic_mass_comma']).strip('#'))/1e6
except(Exception, TypeError) as err:
print(f"(TypeError in get_value for A={A}, X={X}): {err}")
return
fetched_value += raw+comma
elif value == 'mass_excess':
try:
fetched_value += float(str(self.ame.iloc[idx]['mass_excess']).strip('#'))
except(Exception, TypeError) as err:
print(f"(TypeError in get_value for A={A}, X={X}): {err}")
return
else:
try:
if value == 'mass':
data = float(str(self.ame.iloc[idx]['atomic_mass_err']).strip('#'))
elif value == 'mass_excess':
data = float(str(self.ame.iloc[idx]['mass_excess_err']).strip('#'))
except(Exception, TypeError) as err:
print(f"(TypeError in get_value for A={A}, X={X}): {err}")
return
fetched_value += data**2
else:
print(f"(Error in get_value: value={value} unknown.")
#
if not error:
return fetched_value
else:
return math.sqrt(fetched_value)
class FitToDict:
'''
Class for reading fit files created by the fit functions. Stores fit in .fit dictionary
Initialization:
Parameters
- file_path: path to fit file
'''
def __init__(self, file_path, verbose = 0):
self.fit = {}
self.line_nb = 0
self.res_table_line = 0
self.fit_val_line = 0
self.file_path = file_path
# Read file
self.fit = self.__read(verbose)
def __initialize(self, verbose = 0):
'''
PRIVATE: Init file, read the meta data into dict and save where the results table and fit values table start
'''
# open the file
file = open(self.file_path)
for line in file.readlines():
# Increment line counter
self.line_nb += 1
# get rid of the newline
line = line[:-1]
try:
# this will break if you have whitespace on the "blank" lines
if line:
# skip comment lines
if line[0] == '#': continue
# this assumes everything starts on the first column
if line[0] == '[':
# strip the brackets
section = line[1:-1]
# create a new section if it doesn't already exist
if not section in self.fit:
self.fit[section] = {}
# Save where which section is
if section == 'RESULTS-TABLE':
self.res_table_line = self.line_nb
if section == 'FIT-VALUES':
self.fit_val_line = self.line_nb
else:
# split on first the equal sign
(key, val) = line.split('=', 1)
# create the attribute as a list if it doesn't
# exist under the current section, this will
# break if there's no section set yet
if not key in self.fit[section]:
self.fit[section][key] = val
# append the new value to the list
#sections[section][key].append(val)
except Exception as e:
if verbose > 0:
print(str(e) + "line:" +line)
def __read_tables(self, verbose = 0):
'''
Use pandas to read the tables
'''
#
if 'RESULTS-TABLE' in self.fit:
if 'FIT-VALUES' in self.fit:
n_footer = self.line_nb - self.fit_val_line + 1
else:
n_footer = 0
if verbose > 0: print(f"res_table_line: {self.res_table_line}\nn_footer: {n_footer}")
self.fit['RESULTS-TABLE'] = pd.read_csv(self.file_path, header=self.res_table_line, delimiter=' ',
skipfooter=n_footer, engine='python')
if 'FIT-VALUES' in self.fit:
if verbose > 0: print(f"fit_val_line: {self.fit_val_line}")
self.fit['FIT-VALUES'] = pd.read_csv(self.file_path, header=self.fit_val_line, delimiter=' ')
def __read(self, verbose = 0):
'''
Function for reading a fit into a dict
Parameters:
- file_path: Path to file
Return:
- Dictionary with meta data, fit results, and fit values for plotting
'''
#
self.__initialize()
# read results table
self.__read_tables(verbose)
#
return self.fit
def get_val(self, key, value = None):
'''
Returns value either from meta-data dictionary or from the data frames
Parameters:
- key: name of value to be fetched
'''
#
if key in self.fit['META-DATA']:
return self.fit['META-DATA'][key]
#
elif key in self.fit['RESULTS-TABLE']['var'].to_numpy():
if not value:
print("Key {key} specified, but no value (e.g. in df for key 'mu0', value could be 'value'")
return
else:
if value not in self.fit['RESULTS-TABLE'].columns:
print("Value {value} not in dataframe")
return
return(float( self.fit['RESULTS-TABLE'][value][self.fit['RESULTS-TABLE']['var']==key] ))
#
else:
print(f"Key {key} does not exist in the fit dictionary.")
return
class MRToFUtils(AME):
'''
Utility class for performing mass extraction from MR-ToF MS data using the C_{ToF} approach.
Params:
path_to_ame: path to ame mass file
ame_version: version of AME. Defaults to 2020
Inheritance:
AME: Inherits functionalities from AME
'''
def __init__(self, path_to_ame, ame_version = 'ame20'):
# Init base class
AME.__init__(self, path_to_ame = path_to_ame, ame_version = ame_version)
#
# Store init parameters
#
# Store fundamental constants
self.c = 299792458 # m/s
self.e = 1.602176634e-19 # C
self.u = 931494.10242 # keV/c**2
self.u_err = 000.00000028 # MeV/c**2
# Store some prominent masses
self.m_p = self.get_value('1H', 'mass')
self.m_n = self.get_value('1n', 'mass')
self.m_39K = self.get_value('39K', 'mass')
self.m_39K_err = self.get_value('39K', 'mass', error=True)
self.m_85Rb = self.get_value('85Rb', 'mass')
self.m_85Rb_err = self.get_value('85Rb', 'mass', error=True)
self.m_133Cs = self.get_value('133Cs', 'mass')
self.m_133Cs_err = self.get_value('133Cs', 'mass', error=True)
# Calculation functions
def calc_weighted_average(self, x, s):
###
### Takes array of values (x) plus array of errors (s) and returns weighted average
###
if len(x) != len(s):
print("Arrays must have same length")
return 0
#
sum_avg = 0
sum_w = 0
for i in range(len(x)):
w = s[i]**(-2)
sum_w += w
sum_avg += w * x[i]
#
return sum_avg/sum_w
def calc_weighted_averge_err(self, s):
###
### Takes array s of individual errors
###
sum_w = 0
for i in range(len(s)):
sum_w += s[i]**(-2)
#
return math.sqrt(1/sum_w)
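# Worked example (illustrative, not in the original module): for x = [10.0, 12.0]
# with errors s = [1.0, 2.0] the weights are w = [1.0, 0.25], so
# calc_weighted_average returns (1.0*10.0 + 0.25*12.0) / 1.25 = 10.4 and
# calc_weighted_averge_err returns sqrt(1/1.25) = 0.894.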
def calc_red_chi_square(self, x, s):
###
### Calculates reduced chi square for array of values (x) and array of errors (s)
###
if len(x) != len(s):
print("Arrays must have same length")
return 0
#
weighted_average = self.calc_weighted_average(x, s)
#
chi_square = 0
for i in range(len(x)):
chi_square += (x[i]-weighted_average)**2 / s[i]**2
#
return chi_square / (len(x)-1)
def calc_C_ToF(self,t,t1,t2):
###
### Calculation of C_ToF based on Franks 2013 Nature paper https://www.nature.com/articles/nature12226
###
return (2*t-t1-t2)/(2*(t1-t2))
def calc_C_ToF_err(self, t, t_err, t1, t1_err, t2, t2_err):
###
### Calculation of C_ToF error based on Franks 2013 Nature paper https://www.nature.com/articles/nature12226
###
del_t = 1 / (t1-t2)
del_t1 = (-t+t2)/(t1-t2)**2
del_t2 = (t-t1)/(t1-t2)**2
#
return math.sqrt( (del_t*t_err)**2 + (del_t1*t1_err)**2 + (del_t2*t2_err)**2 )
def calc_sqrt_m(self, C_tof, m1, m2):
###
### Calculation of the sqrt of the mass of the species of interest
###
Sigma_ref = math.sqrt(m1) + math.sqrt(m2)
Delta_ref = math.sqrt(m1) - math.sqrt(m2)
#
return C_tof * Delta_ref + Sigma_ref/2
def calc_sqrt_m_err(self, C_tof, C_tof_err, m1, m1_err, m2, m2_err):
###
### Calculation of the err on the sqrt of the mass
###
del_C_tof = math.sqrt(m1) - math.sqrt(m2)
del_m1 = C_tof + 1/2
del_m2 = - C_tof + 1/2
#
sqrt_m1_err = 1/2 * m1**(-1/2) * m1_err
sqrt_m2_err = 1/2 * m2**(-1/2) * m2_err
#
return math.sqrt( (del_C_tof * C_tof_err)**2 + ( del_m1 * sqrt_m1_err )**2 + ( del_m2 * sqrt_m2_err )**2 )
def calc_m_err_alternative(self, sqrt_m, sqrt_m_err):
###
### Calculation of the mass error using the error on the sqrt of the mass
###
return 2 * sqrt_m * sqrt_m_err
def calc_m_err(self, C_tof, C_tof_err, m1, m1_err, m2, m2_err):
###
### Calculation of the mass error using error propagation
###
delta_ref = math.sqrt(m1) - math.sqrt(m2)
sigma_ref = math.sqrt(m1) + math.sqrt(m2)
#
del_C_tof = 2 * C_tof * delta_ref**2 + delta_ref * sigma_ref
del_m1 = C_tof**2 * (1-m1**(-1/2)) + C_tof + 1/4 * (1+m1**(-1/2))
del_m2 = C_tof**2 * (1-m2**(-1/2)) + C_tof + 1/4 * (1+m2**(-1/2))
#
return math.sqrt( (del_C_tof*C_tof_err)**2 + (del_m1 * m1_err)**2 + (del_m2 * m2_err)**2 )
# PLOT FUNCTIONS
def simple_error_plt(self, y, y_err, x_labels,
data_legend_label = "ISOLTRAP", x_label='', y_label='', title='',
ref_value=None, ref_err=None, ref_legend_label='AME20 Error'):
'''
Simple scatter plot with y error bars.
Parameters:
- y: y-values
- y_err: y-errors
- x_labels: array of strings to be used as x-labels
- x_label: x-axis labeling
- y_label: y-axis labeling
- title: plot title
'''
colors = custom_colors()
mpl.rc('text', usetex=False)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5*1.5, 4.5))
x = np.arange(0,len(x_labels),1)
ax.errorbar(x, y, y_err,
fmt='o', color=colors['Jo-s_favs']['black'], zorder = 2,
label=data_legend_label,
fillstyle='full', mfc="black", linewidth=2, ms =10
)
ax.set_ylabel(y_label, size=18) #fontweight='bold')
ax.set_xlabel(x_label, size=18) #fontweight='bold')
ax.tick_params(direction='out')
#
ax_t = ax.secondary_xaxis('top')
ax_t.set_xticks(x)
# ax_r = ax.secondary_yaxis('right')
# # ax.set_xlabel("Mercury Isotopes", size=14, fontweight='bold')
ax_t.tick_params(axis='x', direction='in', labeltop=False)
# ax_r.tick_params(axis='y', direction='in', labelright=False)
if ref_value is not None and ref_err is not None:
# Error band
ax.fill_between(x, ref_value-ref_err, ref_value+ref_err, facecolor='0.5', alpha=0.5,
label = ref_legend_label)
# plt.axhspan(ref_value-ref_err, ref_value+ref_err, facecolor='0.5', alpha=0.5)
handles, labels = ax.get_legend_handles_labels()
# handles = [h[0] for h in handles] # Remove errorbars from legend
plt.legend()#handles, labels, fontsize=12, frameon=False, ncol=1, loc="upper left")
plt.xticks(x, x_labels, size = 10, rotation = 0)
# plt.tight_layout()
plt.title(title, fontsize=20)
# plt.savefig("./Mercury_Masses_Comparison.pdf", dpi=300)
plt.show()
class MRToFIsotope(MRToFUtils):
'''
Class for handling and storing mass data
Params:
- isotope, ref1, ref2: strings of isotopes to be used
- n_revs: number of revs
- path_to_ame: path to ame mass file
- ame_version: version of AME. Defaults to 2020
Inheritance:
AME: Inherits functionalities from AME
MRToFUtils: Inherits functionalities for calculating masses
'''
def __init__(self, isotope, ref1, ref2, n_revs, path_to_ame, ame_version = 'ame20'):
# Init base class
MRToFUtils.__init__(self, path_to_ame = path_to_ame, ame_version = ame_version)
#
self.isotope = isotope
self.A = int(re.split("(\\d+)",isotope)[1])
self.ref1 = ref1
self.ref2 = ref2
self.n_revs = n_revs
#
self.m_ref1 = self.get_value(self.ref1)
self.m_ref1_err = self.get_value(self.ref1,error=True)
self.m_ref2 = self.get_value(self.ref2)
self.m_ref2_err = self.get_value(self.ref2,error=True)
#
self.m_isotope_AME = self.get_value(isotope)
self.m_isotope_AME_err = self.get_value(isotope,error=True)
def calc_mass(self, file_isotope, file_ref1, file_ref2,
centroid = 'mu0',
print_results = False):
'''
Calculates mass and mass error from fit files in form of FitToDict objects passed to method
- file_isotope, file_ref1, file_ref2: path to fit files to be used
- centroid: time-of-flight centroid to be used to calculate mass ['mu0', 'numerical_mean']
'''
#
self.file_isotope = file_isotope
self.file_ref1 = file_ref1
self.file_ref2 = file_ref2
self.centroid = centroid
#
self.isotope_fit = FitToDict(file_isotope)
self.isotope_gs_t = float(self.isotope_fit.get_val(centroid, 'value'))
self.isotope_gs_t_err = float(self.isotope_fit.get_val('mu0', 'error'))
#
self.ref1_fit = FitToDict(file_ref1)
self.ref1_t = float(self.ref1_fit.get_val(centroid, 'value'))
self.ref1_t_err = float(self.ref1_fit.get_val('mu0', 'error'))
#
self.ref2_fit = FitToDict(file_ref2)
self.ref2_t = float(self.ref2_fit.get_val(centroid, 'value'))
self.ref2_t_err = float(self.ref2_fit.get_val('mu0', 'error'))
#
self.C_tof = self.calc_C_ToF(self.isotope_gs_t, self.ref1_t, self.ref2_t)
self.C_tof_err = self.calc_C_ToF_err(t=self.isotope_gs_t, t_err=self.isotope_gs_t_err,
t1=self.ref1_t, t1_err=self.ref1_t_err,
t2=self.ref2_t, t2_err=self.ref2_t_err)
#
self.m_isotope = self.calc_sqrt_m(self.C_tof, self.m_ref1, self.m_ref2)**2
self.m_isotope_err = self.calc_m_err(self.C_tof, self.C_tof_err,
self.m_ref1, self.m_ref1_err/self.u ,
self.m_ref2, self.m_ref2_err/self.u)
self.me_isotope = (self.m_isotope-self.A) * self.u # [keV]
self.me_isotope_err = self.m_isotope_err * self.u # [keV]
#
if print_results:
print(f"Result for {self.isotope}:\n\
- Mass Excess ISOLTRAP: {self.me_isotope:.1f}({self.me_isotope_err:.1f})keV\n\
- Mass Excess {self.ame_version}: {(self.m_isotope_AME-self.A)*self.u:.1f}({self.m_isotope_AME_err:.1f})keV\n\
- Mass Difference ISOLTRAP-{self.ame_version}: {abs(self.me_isotope)-abs((self.m_isotope_AME-self.A)*self.u):.1f}keV"
)
def store_result(self, results_file, overwrite = False, tags=""):
'''
Appends results from calc_mass in a results file. Creates new file if file does not exist
Parameters:
- results_file: .csv file to store results in
- overwrite: checks if entry with isotope, n_revs, and tag exists, and overwrites that entry
- tags: custom string to add to entry in results_file
'''
self.tags = tags
# Create row to append
d = {
'A' : [self.A],
'isotope': [self.isotope],
'n_revs': [self.n_revs],
'tags': [self.tags],
'ref1': [self.ref1],
'ref2': [self.ref2],
'm_isotope': [self.m_isotope],
'm_isotope_err': [self.m_isotope_err],
'me_isotope': [self.me_isotope],
'me_isotope_err': [self.me_isotope_err],
'C_tof': [self.C_tof],
'C_tof_err': [self.C_tof_err],
'm_ref1': [self.m_ref1],
'm_ref1_err': [self.m_ref1_err],
'm_ref2': [self.m_ref2],
'm_ref2_err': [self.m_ref2_err],
'm_isotope_AME': [self.m_isotope_AME],
'm_isotope_AME_err': [self.m_isotope_AME_err],
'file_isotope': [self.file_isotope],
'file_ref1': [self.file_ref1],
'file_ref2': [self.file_ref2],
}
# Load file if exists or create new file
if not os.path.isfile(results_file):
print(f"'{results_file}' does not exist, will be created...")
df = pd.DataFrame.from_dict(data=d)
else:
df = pd.read_csv(results_file)
# Check if entry in file exists
line_exists = False
idx_list = df.index[(df["isotope"]==self.isotope) & (df["n_revs"]==self.n_revs) & (df["tags"]==self.tags)].tolist()
if len(idx_list) != 0:
line_exists = True
#
if line_exists and not overwrite:
print(f"Entry with tags '{self.tags}' and isotope '{self.isotope}' already exists. To overwrite, set overwrite flag.")
return
elif line_exists and overwrite:
print(f"Entry with tags '{self.tags}' and isotope '{self.isotope}' already exists. Will be overwritten.")
for idx in idx_list:
for key in d:
df.loc[idx,key] = d[key]
else:
print(f"Appending to '{results_file}'...")
df2 = pd.DataFrame.from_dict(data=d)
df = df.append(df2, ignore_index=True)
#
df.to_csv(results_file, index=False)
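# Illustrative usage sketch (not part of the original module; all file names
# below are hypothetical placeholders):
#   iso = MRToFIsotope('101In', ref1='85Rb', ref2='133Cs', n_revs=1000,
#                      path_to_ame='mass20.txt')
#   iso.calc_mass('101In.fit', '85Rb.fit', '133Cs.fit', print_results=True)
#   iso.store_result('results.csv', overwrite=False, tags='test')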
class Peaks:
"""
Wrapper class for finding peaks in an MR-ToF MS spectrum
dataframe containing the converted .lst content
"""
# df_file:
def __init__(self, df_file):
self.file = df_file
self.n_peaks = 0
self.bins = 500
self.peak_threshold = 0.002
self.peak_min_distance = 5
self.peak_min_height = 10
self.peak_width_inbins = (3,100)
self.peak_prominence = None
self.peak_wlen = None
def get_binning(self, bins=10):
"""
Adapts binning to multiples of 0.8ns, assuming that 0.8ns was used to take data (the common case)
"""
# Get min and max tof from data frame
minn = self.file.tof.min()
maxx = self.file.tof.max()
return round((maxx-minn)/0.8/bins)
def find_peaks(self, bins=10, peak_threshold = 0.002, peak_min_distance = 5, peak_min_height = 10, peak_width_inbins = (3,100),
peak_prominence = None, peak_wlen = None):
"""
Arguments:
- bins: Rebinning for faster peak finding
- peak_threshold:
- peak_min_distance:
- ...
"""
#
self.pos = []
self.std = []
self.left_bases = []
self.right_bases = []
self.bins = bins
# faster binning for projections than histograms -> necessary in order to automatically find peaks
x_proj_for_pfind = self.file.tof.value_counts(bins=self.get_binning(self.bins)).sort_index()
y_proj_for_pfind = self.file.sweep.value_counts(bins=self.get_binning(self.bins)).sort_index()
# Do the peak finding
self.x_proj_peaks, self.peaks_info = sc.signal.find_peaks(x_proj_for_pfind, threshold=peak_threshold,
distance=peak_min_distance,
height=peak_min_height,
width=peak_width_inbins,
prominence=peak_prominence,
wlen=peak_wlen)
# Calculate some additional meta data for the found peaks
self.n_peaks = len(self.x_proj_peaks)
if self.n_peaks == 0:
print(f"(Peaks.find_peaks): No peaks with current settings found, try different ones.")
return
self.highest_peak = self.peaks_info['peak_heights'].argmax()
# variables to store earliest left_base and latest right_base for constraining plot range
self.earliest_left_base = 1e15
self.earliest_peak_idx = 1e15
self.latest_right_base = 0
self.latest_peak_idx = 0
for i in range(self.n_peaks):
# get the peak bases ranges from the peak finder
left = x_proj_for_pfind.index.mid[self.peaks_info['left_bases'][i]]
right = x_proj_for_pfind.index.mid[self.peaks_info['right_bases'][i]]
self.left_bases.append(left)
self.right_bases.append(right)
#calculate the median (more accurate due to asym. tails) of the data in the peak ranges
#better value for the actual peak center than the simple highest point in peak
peak_pos = self.file.tof[(self.file.tof < right) &
(self.file.tof > left)].median()
peak_std = self.file.tof[(self.file.tof < right) &
(self.file.tof > left)].std()
# estimate the mean and sigma from a Gaussian with exponential tail (accounting for asym. peaks)
try:
peak_fit = stats.exponnorm.fit(self.file.tof[(self.file.tof < peak_pos+peak_std) &
(self.file.tof > peak_pos-peak_std)],
loc=peak_pos)
# update peak position if a fit was possible
peak_pos = peak_fit[1]
except:
pass
self.pos.append(peak_pos)
self.std.append(peak_std)
# assign earliest and latest bases
if (left < self.earliest_left_base and not math.isnan(left) and not math.isnan(peak_std)):
self.earliest_left_base = left
self.earliest_peak_idx = i
if (right > self.latest_right_base and not math.isnan(right) and not math.isnan(peak_std)):
self.latest_right_base = right
self.latest_peak_idx = i
def plot(self, bins = 10, lines = True, focus=False, log=False, silent = False, save = False, path_to_file = "peaks"):
'''
Plot 1D Histogram with found peaks.
Parameters:
- bins: Number of bins to be rebinned. Default=10
- lines: Draws lines where peaks are found. Default=True
- focus: if True, sets xlimits to first and last found peak
- log: if True, sets logscale on y-axis
- silent: if True, shows plot on canvas
- save: if True, uses path_to_file to save plot as .pdf
- path_to_file: path to save .pdf in
'''
#
plt.rcParams["figure.figsize"] = (10,6)
#
if self.n_peaks == 0:
print("Not peaks, no plots :)")
return 0
#
xdata = self.file.tof
n, xe = np.histogram(xdata, bins=self.get_binning(bins))
cx = 0.5 * (xe[1:] + xe[:-1])
dx = np.diff(xe)
plt.errorbar(cx, n, n ** 0.5, fmt="ok", zorder=1)
plt.plot(xdata, np.zeros_like(xdata)-5, "|", alpha=0.1, label = "ToF Data", zorder = 3)
#
if log:
plt.yscale('log')
#
if lines:
for i in range(self.n_peaks):
plt.axvline(self.pos[i], c='r', linewidth=1, zorder=3)
xm = np.linspace(xe[0], xe[-1], num=1000)
plt.legend();
# plt.xlim(peaks.pos[0]-300, peaks.pos[0]+300)
# Zoom in on found peaks
if focus:
plt.xlim(self.earliest_left_base-200, self.latest_right_base+200)
# Add axis labels
plt.xlabel(f'Time-of-Flight [ns]', fontsize=20)
plt.ylabel(f'Counts per bin', fontsize=20)
if not silent:
plt.show()
# plt.clf()
#
if save:
plt.savefig(path_to_file+".pdf", dpi=300)
plt.clf()
def plot2d(self, bins=500, focus=-1, log=False):
"""
Plot 2D Histogram with found peaks.
"""
# plt.rcParams["figure.figsize"] = (10,4)
tof = self.file.tof
sweep = self.file.sweep
# Create plot canvas
fig, ((ax_x, blank),(ax_0, ax_y)) = plt.subplots(2,2,sharex='col',sharey='row', figsize=(9,9),
gridspec_kw={'height_ratios':[1,4],
'width_ratios':[4,1],
'hspace': 0.05,
'wspace':0.05})
# faster binning for projections than histograms -> necessary in order to automatically find peaks
x_proj = self.file.tof.value_counts(bins=500).sort_index()
y_proj = self.file.sweep.value_counts(bins=500).sort_index()
# main plotting
self.file.plot(x='tof', y='sweep', style='o', alpha=0.15, ms=2, ax=ax_0, label='unbinned data')
ax_x.semilogy(x_proj.index.mid.to_numpy(), x_proj.to_numpy())
ax_y.plot(y_proj.to_numpy(), y_proj.index.mid.to_numpy())
# plt.plot(tof, sweep, 'o', alpha=0.15, ms=2, label='unbinned data')
for i in range(self.n_peaks):
ax_0.axvline(self.pos[i], c='r', linewidth=1, zorder=3)
ax_x.axvline(self.pos[i], c='r', linewidth=1, zorder=3)
if focus != -1:
plt.xlim(self.pos[focus]-300, self.pos[focus]+300)
#
ax_0.set_xlabel(f'Time-of-Flight [ns]', fontsize=20)
ax_0.set_ylabel(f'Rolling sweep number', fontsize=20)
ax_x.set_ylabel('# / 0.8 ns', fontsize=20)
ax_y.set_xlabel('# / 10 sw.', fontsize=20)
ax_y.xaxis.set_ticks_position('top')
ax_y.xaxis.set_label_position('top')
#
plt.show()
class softCool(Peaks, ProcessorBase):
"""
Class for performing software cooling on 2D MR-ToF MS Data
df_file: dataframe containing the converted .lst content
Inherits functionality from the peak finder
"""
def __init__(self, file_list):
"""
Class for performing software cooling on 2D MR-ToF MS Data
Parameters:
file_list: list of .csv-files with dataframe containing the converted .lst content
Inherits functionality from the peak finder
"""
ProcessorBase.__init__(self)
# Inherits
# self.files = []
# self.data = {}
# self.pars = {}
# self.df_dict = {}
# self.step = 0
self.files = file_list
# Read data
for f in self.files:
self.df_dict[f] = pd.read_csv(f)
#
self.corr_factors = []
self.chunk_size = 10
self.post_cool = False
self.tof = 0
self.tof_cut_left = 0
self.tof_cut_right = 0
self.weighted_average_tof = 0
def __prepare_files(self, tof, tof_cut_left=300, tof_cut_right=300, initial_align = True):
"""
"""
#
if initial_align:
self.__initial_align(tof, tof_cut_left, tof_cut_right)
#
# Sum all files
self.file = self.add_all(to_csv=False)
#
self.coolfile = self.file.copy(deep=True) # copy for storing the cooled spectrum
def __initial_align(self, tof, tof_cut_left=300, tof_cut_right=300):
"""
Parameters:
- tof: approximate time-of-flight around which the individual files are aligned
- tof_cut_left, tof_cut_right: window below/above tof used for the alignment
"""
#
self.tof = tof
self.tof_cut_left = tof_cut_left
self.tof_cut_right = tof_cut_right
weights = []
averages = []
weighted_average_tof = 0
for f in self.df_dict:
tof_cut = self.df_dict[f][(self.df_dict[f].tof > self.tof-self.tof_cut_left) & (self.df_dict[f].tof < self.tof+self.tof_cut_right)]
averages.append(np.mean(tof_cut.tof))
import numpy as np, regreg.api as rr
import nose.tools as nt
def test_path():
'''
this test looks at the paths of three different parameterizations
of the same problem
'''
X = np.random.standard_normal((100,5))
Z = np.zeros((100,10))
Y = np.random.standard_normal(100)
U = np.random.standard_normal((100,2))
betaX = np.array([3,4,5,0,0])
betaU = np.array([10,-5])
Y += np.dot(X, betaX) + np.dot(U, betaU)
Z[:,5:] = -X
Z[:,:5] = X
Z2 = np.zeros((100,8))
Z2[:,:3] = X[:,:3]
Z2[:,3:6] = -X[:,:3]
Z2[:,6:] = -X[:,3:]
lasso1 = rr.lasso.squared_error(X,Y, nstep=23)
lasso2 = rr.lasso.squared_error(Z,Y, penalty_structure=[rr.POSITIVE_PART]*10, nstep=23)
sol1 = lasso1.main(inner_tol=1.e-12)
beta1 = sol1['beta'].todense()
sol2 = lasso2.main(inner_tol=1.e-12)
beta2 = sol2['beta'].todense()
beta2[1:6] = beta2[1:6] - beta2[6:11]
beta2 = beta2[:6]
lasso3 = rr.lasso.squared_error(Z2,Y, penalty_structure=[rr.POSITIVE_PART]*6 + [rr.L1_PENALTY]*2, nstep=23)
sol3 = lasso3.main(inner_tol=1.e-12)
beta3 = sol3['beta'].todense()
beta3[1:4] = beta3[1:4] - beta3[4:7]
beta3[4:6] = - beta3[7:9]
beta3 = beta3[:6]
np.testing.assert_allclose(beta1, beta2)
np.testing.assert_allclose(beta2, beta3)
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import levy
import argparse
import tqdm
def initialize():
parser = argparse.ArgumentParser(description='Determine the mean first passage time for a particle performing a'
'random walk.')
parser.add_argument('-n', '--nwalks', default=1000, type=int, help='Number of random walk trajectories to generate')
parser.add_argument('-L', '--length', default=10, type=float, help='Length of domain in which particle travels')
parser.add_argument('-x0', '--x0', default=5, type=float, help='Initial particle position within domain')
parser.add_argument('-sigma', '--sigma', default=0.5, type=float, help='Standard deviation of hop length per unit'
'time. This effectively determines the diffusion constant.')
parser.add_argument('-dt', '--time_step', default=0.01, type=float, help='Time between draws from hop distribution.'
' The smaller, the better.')
# plotting
parser.add_argument('-maxt', '--maxt', default=100, type=float, help='Maximum passage time to include in plots.')
parser.add_argument('-savename', '--savename', help='Name of saved plots. cdf and pdf will be added to end of '
'names of saved plots.')
return parser
class CDF:
def __init__(self, data):
""" Generate an emperical cumulative distribution function from data
:param data: x-values of data in no particular order
:type data: list
"""
self.xs = np.array(sorted(data))
self.N = float(len(self.xs))
self.ys = np.arange(1, self.N + 1) / self.N
def cdf(self, x):
""" Callable cumulative emperical distribution function
:param x: array of x-values at which to evaluate cumulative emperical distribution function
:return:
"""
if type(x) is np.float64:
x = np.array([x])
ndx = [np.argmin(np.abs(self.xs - x[i])) for i in range(x.size)]
return self.ys[ndx]
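# Worked example (illustrative, not in the original file): CDF([3.0, 1.0, 2.0])
# stores xs = [1.0, 2.0, 3.0] and ys = [1/3, 2/3, 1.0]; cdf(np.array([2.2]))
# returns the y-value of the nearest stored x (here 2.0), i.e. array([0.66666667]).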
class MFPT:
def __init__(self, L, x0):
""" Release a particle at x0 and let it perform a random walk until it is absorbed at 0 or L
:param L: length
:param x0: initial particle position
:type L: float
:type x0: float
"""
# system geometry and release point
self.L = L
self.x0 = x0
# Initializing variables for usage later
self.passage_times = None
self.dt = None
self.sigma = None
self.trajectories = []
def simulate_passage_time(self, nwalks, sigma, dt, nplot_random=0, nt=1):
"""
:param nwalks: number of trajectories to simulate
:param sigma: width of hop distribution. Related to diffusion constant.
:param dt: time step
:param nplot_random: number of random trajectories to plot. If 0, a plot won't be made
:type nwalks: int
:type sigma: float
:type dt: float
:type nplot_random: int
"""
self.passage_times = np.zeros(nwalks)
self.dt = dt
self.sigma = sigma
jump_sigma = np.sqrt(dt) * sigma
for w in tqdm.tqdm(range(self.passage_times.size), unit='walks'):
walk = [self.x0]
while 0 < walk[-1] < self.L:
walk.append(walk[-1] + jump_sigma * levy.random(2, 0, mu=0)) # faster than scipy
self.passage_times[w] = (len(walk) - 1) * self.dt # subtract 1 so time zero isn't counted
self.trajectories.append(walk)
if nplot_random > 0:
# plot random trajectories
ndx = np.random.choice(nwalks, size=nplot_random)
for i in ndx:
plt.plot(self.dt*np.arange(int(self.passage_times[i] / self.dt) + 1), self.trajectories[i])
plt.show()
def plot_cdf(self, max_t=200, show=True, savename=None):
""" Plot the analtyical versus empirical cumulative distribution function of first passage times
:param max_t: Largest value of time to show
:param show: show the plot once it's made
:param savename: if not None, save the figure by this name
:type max_t: float
:type show: bool
:type savename: NoneType or str
"""
plt.figure()
time = self.dt*np.arange(1, int(max_t / self.dt)).astype(float)
plt.plot(time, self._analytical_passage_time_cdf(time), lw=2, label='analytical')
plt.plot(time, self._empirical_cdf(time), lw=2, label='empirical')
# formatting
plt.legend(fontsize=14)
plt.xlabel('Time', fontsize=14)
plt.ylabel('Cumulative Density', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
if savename is not None:
plt.savefig('%s_cdf.pdf' % savename)
if show:
plt.show()
def plot_passage_time_distribution(self, bins=50, max_t=100, show=True, savename=None):
""" Plot the distribution of first passage times against the analytical PDF
:param bins: number of bins to use to discretize empirical data
:param max_t: maxmimum passage time to plot
:param show: show the plot when done
:param savename: if not None, save the figure by this name
:type bins: int
:type max_t: float
:type show: bool
:type savename: NoneType or str
"""
plt.figure() # create new figure
# get PDF by taking derivative of CDF
time = self.dt * np.arange(1, int(max_t / self.dt)).astype(float)
cdf = self._analytical_passage_time_cdf(time)
time = time[cdf > 0] # there is a giant drop near zero
cdf = cdf[cdf > 0]
dx = time[1] - time[0]
deriv = np.diff(cdf) / dx
plt.plot(time[:-1], deriv, '--', label='Approximate Analytical PDF', lw=2, color='black')
# histogram empirical measurements
heights, edges = np.histogram(self.passage_times, bins=bins, range=(0, max_t), density=True)
heights = heights / cdf[-1] # normalize by area under theoretical curve up to max_t
bin_width = edges[1] - edges[0]
bin_centers = [i + bin_width/2 for i in edges[:-1]]
plt.bar(bin_centers, heights, bin_width, label='Observations')
print('Empirical mean first passage time: %.2f' % self.passage_times.mean())
print('Approximate analytical mean first passage time: %.2f' % np.average(time[:-1], weights=deriv))
plt.legend(fontsize=14)
plt.xlabel('Time', fontsize=14)
plt.ylabel('Probability', fontsize=14)
plt.tick_params(labelsize=14)
plt.tight_layout()
if savename is not None:
plt.savefig('%s_pdf.pdf' % savename)
if show:
plt.show()
def _analytical_passage_time_cdf(self, time, nterms=100):
""" Equation 6 from : https://journals.aps.org/pre/pdf/10.1103/PhysRevE.73.046104
"""
f = np.zeros_like(time)
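# Illustrative driver sketch (not part of the original file): wires the argparse
# defaults defined in initialize() to the MFPT class above; plotting is omitted
# here because it relies on the full analytical CDF.
if __name__ == '__main__':
    args = initialize().parse_args()
    walker = MFPT(args.length, args.x0)
    walker.simulate_passage_time(args.nwalks, args.sigma, args.time_step)
    print('Empirical mean first passage time: %.2f' % walker.passage_times.mean())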
#!python
"""This module provides functions to handle Bruker data.
It primarily implements the TimsTOF class, that acts as an in-memory container
for Bruker data accession and storage.
"""
# builtin
import os
import sys
import contextlib
import logging
# external
import numpy as np
import pandas as pd
import h5py
# local
import alphatims
import alphatims.utils
if sys.platform[:5] == "win32":
BRUKER_DLL_FILE_NAME = os.path.join(
alphatims.utils.EXT_PATH,
"timsdata.dll"
)
elif sys.platform[:5] == "linux":
BRUKER_DLL_FILE_NAME = os.path.join(
alphatims.utils.EXT_PATH,
"timsdata.so"
)
else:
logging.warning(
"WARNING: "
"No Bruker libraries are available for this operating system. "
"Mobility and m/z values need to be estimated. "
"While this estimation often returns acceptable results with errors "
"< 0.02 Th, huge errors (e.g. offsets of 6 Th) have already been "
"observed for some samples!"
)
logging.info("")
BRUKER_DLL_FILE_NAME = ""
def init_bruker_dll(bruker_dll_file_name: str = BRUKER_DLL_FILE_NAME):
"""Open a bruker.dll in Python.
Five functions are defined for this dll:
- tims_open: [c_char_p, c_uint32] -> c_uint64
- tims_close: [c_char_p, c_uint32] -> c_uint64
- tims_read_scans_v2: [c_uint64, c_int64, c_uint32, c_uint32, c_void_p, c_uint32] -> c_uint32
- tims_index_to_mz: [c_uint64, c_int64, POINTER(c_double), POINTER(c_double), c_uint32] -> None
- tims_scannum_to_oneoverk0: Same as "tims_index_to_mz"
Parameters
----------
bruker_dll_file_name : str
The absolute path to the timsdata.dll.
Default is alphatims.utils.BRUKER_DLL_FILE_NAME.
Returns
-------
: ctypes.cdll
The Bruker dll library.
"""
import ctypes
bruker_dll = ctypes.cdll.LoadLibrary(
os.path.realpath(bruker_dll_file_name)
)
bruker_dll.tims_open.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
bruker_dll.tims_open.restype = ctypes.c_uint64
bruker_dll.tims_close.argtypes = [ctypes.c_uint64]
bruker_dll.tims_close.restype = None
bruker_dll.tims_read_scans_v2.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.c_uint32,
ctypes.c_void_p,
ctypes.c_uint32
]
bruker_dll.tims_read_scans_v2.restype = ctypes.c_uint32
bruker_dll.tims_index_to_mz.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32
]
bruker_dll.tims_index_to_mz.restype = ctypes.c_uint32
bruker_dll.tims_scannum_to_oneoverk0.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32
]
bruker_dll.tims_scannum_to_oneoverk0.restype = ctypes.c_uint32
bruker_dll.tims_set_num_threads.argtypes = [ctypes.c_uint64]
bruker_dll.tims_set_num_threads.restype = None
bruker_dll.tims_set_num_threads(alphatims.utils.MAX_THREADS)
# multiple threads is equally fast as just 1 for io?
# bruker_dll.tims_set_num_threads(1)
return bruker_dll
@contextlib.contextmanager
def open_bruker_d_folder(
bruker_d_folder_name: str,
bruker_dll_file_name=BRUKER_DLL_FILE_NAME,
) -> tuple:
"""A context manager for a bruker dll connection to a .d folder.
Parameters
----------
bruker_d_folder_name : str
The name of a Bruker .d folder.
bruker_dll_file_name : str, ctypes.cdll
The path to Bruker' timsdata.dll library.
Alternatively, the library itself can be passed as argument.
Default is alphatims.utils.BRUKER_DLL_FILE_NAME,
which in itself is dependent on the OS.
Returns
-------
: tuple (ctypes.cdll, int).
The opened bruker dll and identifier of the .d folder.
"""
try:
if isinstance(bruker_dll_file_name, str):
bruker_dll = init_bruker_dll(bruker_dll_file_name)
logging.info(f"Opening handle for {bruker_d_folder_name}")
bruker_d_folder_handle = bruker_dll.tims_open(
bruker_d_folder_name.encode('utf-8'),
0
)
yield bruker_dll, bruker_d_folder_handle
finally:
logging.info(f"Closing handle for {bruker_d_folder_name}")
bruker_dll.tims_close(bruker_d_folder_handle)
def read_bruker_sql(
bruker_d_folder_name: str,
add_zeroth_frame: bool = True,
drop_polarity: bool = True,
convert_polarity_to_int: bool = True,
) -> tuple:
"""Read metadata, (fragment) frames and precursors from a Bruker .d folder.
Parameters
----------
bruker_d_folder_name : str
The name of a Bruker .d folder.
add_zeroth_frame : bool
Bruker uses 1-indexing for frames.
If True, a zeroth frame is added without any TOF detections to
make Python simulate this 1-indexing.
If False, frames are 0-indexed.
Default is True.
drop_polarity : bool
The polarity column of the frames table contains "+" or "-" and
is not numerical.
If True, the polarity column is dropped from the frames table.
this ensures a fully numerical pd.DataFrame.
If False, this column is kept, resulting in a pd.DataFrame with
dtype=object.
Default is True.
convert_polarity_to_int : bool
Convert the polarity to int (-1 or +1).
This allows to keep it in numerical form.
This is ignored if the polarity is dropped.
Default is True.
Returns
-------
: tuple
(str, dict, pd.DataFrame, pd.DataFrame, pd.DataFrame).
The acquisition_mode, global_meta_data, frames, fragment_frames
and precursors.
For diaPASEF, precursors is None.
"""
import sqlite3
logging.info(f"Reading frame metadata for {bruker_d_folder_name}")
with sqlite3.connect(
os.path.join(bruker_d_folder_name, "analysis.tdf")
) as sql_database_connection:
global_meta_data = pd.read_sql_query(
"SELECT * from GlobalMetaData",
sql_database_connection
)
frames = pd.read_sql_query(
"SELECT * FROM Frames",
sql_database_connection
)
if 9 in frames.MsMsType.values:
acquisition_mode = "diaPASEF"
fragment_frames = pd.read_sql_query(
"SELECT * FROM DiaFrameMsMsInfo",
sql_database_connection
)
fragment_frame_groups = pd.read_sql_query(
"SELECT * from DiaFrameMsMsWindows",
sql_database_connection
)
fragment_frames = fragment_frames.merge(
fragment_frame_groups,
how="left"
)
fragment_frames.rename(
columns={"WindowGroup": "Precursor"},
inplace=True
)
precursors = None
elif 8 in frames.MsMsType.values:
acquisition_mode = "ddaPASEF"
fragment_frames = pd.read_sql_query(
"SELECT * from PasefFrameMsMsInfo",
sql_database_connection
)
precursors = pd.read_sql_query(
"SELECT * from Precursors",
sql_database_connection
)
else:
acquisition_mode = "noPASEF"
fragment_frames = pd.DataFrame(
{
"Frame": np.array([0]),
"ScanNumBegin": np.array([0]),
"ScanNumEnd": np.array([0]),
"IsolationWidth": np.array([0]),
"IsolationMz": np.array([0]),
"Precursor": np.array([0]),
}
)
precursors = None
# raise ValueError("Scan mode is not ddaPASEF or diaPASEF")
if add_zeroth_frame:
frames = pd.concat(
[
pd.DataFrame(frames.iloc[0]).T,
frames,
],
ignore_index=True
)
frames.Id[0] = 0
frames.Time[0] = 0
frames.MaxIntensity[0] = 0
frames.SummedIntensities[0] = 0
frames.NumPeaks[0] = 0
polarity_col = frames["Polarity"].copy()
frames = pd.DataFrame(
{
col: pd.to_numeric(
frames[col]
) for col in frames if col != "Polarity"
}
)
if not drop_polarity:
if convert_polarity_to_int:
frames['Polarity'] = polarity_col.apply(
lambda x: 1 if x == "+" else -1
).astype(np.int8)
else:
frames['Polarity'] = polarity_col
return (
acquisition_mode,
global_meta_data,
frames,
fragment_frames,
precursors
)
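# Illustrative usage sketch (hypothetical .d folder path, not part of the
# original module):
#   (acquisition_mode, global_meta_data, frames,
#    fragment_frames, precursors) = read_bruker_sql("/data/sample.d")
#   print(acquisition_mode, frames.shape[0])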
@alphatims.utils.njit(nogil=True)
def parse_decompressed_bruker_binary_type2(decompressed_bytes: bytes) -> tuple:
"""Parse a Bruker binary frame buffer into scans, tofs and intensities.
Parameters
----------
decompressed_bytes : bytes
A Bruker frame binary buffer that is already decompressed with pyzstd.
Returns
-------
: tuple (np.uint32[:], np.uint32[:], np.uint32[:]).
The scan_indices, tof_indices and intensities present in this binary
array
"""
temp = np.frombuffer(decompressed_bytes, dtype=np.uint8)
buffer = np.frombuffer(temp.reshape(4, -1).T.flatten(), dtype=np.uint32)
scan_count = buffer[0]
scan_indices = buffer[:scan_count].copy() // 2
scan_indices[0] = 0
tof_indices = buffer[scan_count::2].copy()
index = 0
for size in scan_indices:
current_sum = 0
for i in range(size):
current_sum += tof_indices[index]
tof_indices[index] = current_sum
index += 1
intensities = buffer[scan_count + 1::2]
last_scan = len(intensities) - np.sum(scan_indices[1:])
scan_indices[:-1] = scan_indices[1:]
scan_indices[-1] = last_scan
return scan_indices, tof_indices, intensities
@alphatims.utils.njit(nogil=True)
def parse_decompressed_bruker_binary_type1(
decompressed_bytes: bytes,
scan_indices_: np.ndarray,
tof_indices_: np.ndarray,
intensities_: np.ndarray,
scan_start: int,
scan_index: int,
) -> int:
"""Parse a Bruker binary scan buffer into tofs and intensities.
Parameters
----------
decompressed_bytes : bytes
A Bruker scan binary buffer that is already decompressed with lzf.
scan_indices_ : np.ndarray
The scan_indices_ buffer array.
tof_indices_ : np.ndarray
The tof_indices_ buffer array.
intensities_ : np.ndarray
The intensities_ buffer array.
scan_start : int
The offset where to start new tof_indices and intensity_values.
scan_index : int
The scan index.
Returns
-------
: int
The number of peaks in this scan.
"""
buffer = np.frombuffer(decompressed_bytes, dtype=np.int32)
tof_index = 0
previous_was_intensity = True
current_index = scan_start
for value in buffer:
if value >= 0:
if previous_was_intensity:
tof_index += 1
tof_indices_[current_index] = tof_index
intensities_[current_index] = value
previous_was_intensity = True
current_index += 1
else:
tof_index -= value
previous_was_intensity = False
scan_size = current_index - scan_start
scan_indices_[scan_index] = scan_size
return scan_size
def process_frame(
frame_id: int,
tdf_bin_file_name: str,
tims_offset_values: np.ndarray,
scan_indptr: np.ndarray,
intensities: np.ndarray,
tof_indices: np.ndarray,
frame_indptr: np.ndarray,
max_scan_count: int,
compression_type: int,
max_peaks_per_scan: int,
) -> None:
"""Read and parse a frame directly from a Bruker .d.analysis.tdf_bin.
Parameters
----------
frame_id : int
The frame number that should be processed.
Note that this is interpreted as 1-indexed instead of 0-indexed,
so that it is compatible with Bruker.
tdf_bin_file_name : str
The full file name of the SQL database "analysis.tdf_bin" in a Bruker
.d folder.
tims_offset_values : np.int64[:]
The offsets that indicate the starting indices of each frame in the
binary.
These are contained in the "TimsId" column of the frames table in
"analysis.tdf_bin".
scan_indptr : np.int64[:]
A buffer containing zeros that can store the cumulative number of
detections per scan.
The size should be equal to max_scan_count * len(frames) + 1.
A dummy 0-indexed frame is required to be present for len(frames).
The last + 1 allows to explicitly interpret the end of a scan as
the start of a subsequent scan.
intensities : np.uint16[:]
A buffer that can store the intensities of all detections.
It's size can be determined by summing the "NumPeaks" column from
the frames table in "analysis.tdf_bin".
tof_indices : np.uint32[:]
A buffer that can store the tof indices of all detections.
It's size can be determined by summing the "NumPeaks" column from
the frames table in "analysis.tdf_bin".
frame_indptr : np.int64[:]
The cumulative sum of the number of detections per frame.
The size should be equal to len(frames) + 1.
A dummy 0-indexed frame is required to be present for len(frames).
The last + 1 allows to explicitly interpret the end of a frame as
the start of a subsequent frame.
max_scan_count : int
The maximum number of scans a single frame can have.
compression_type : int
The compression type. This must be either 1 or 2.
Should be retrieved from the global metadata.
max_peaks_per_scan : int
The maximum number of peaks per scan.
Should be retrieved from the global metadata.
"""
with open(tdf_bin_file_name, "rb") as infile:
frame_start = frame_indptr[frame_id]
frame_end = frame_indptr[frame_id + 1]
if frame_start != frame_end:
offset = tims_offset_values[frame_id]
infile.seek(offset)
bin_size = int.from_bytes(infile.read(4), "little")
scan_count = int.from_bytes(infile.read(4), "little")
max_peak_count = min(
max_peaks_per_scan,
frame_end - frame_start
)
if compression_type == 1:
import lzf
compression_offset = 8 + (scan_count + 1) * 4
scan_offsets = np.frombuffer(
infile.read((scan_count + 1) * 4),
dtype=np.int32
) - compression_offset
compressed_data = infile.read(bin_size - compression_offset)
scan_indices_ = np.zeros(scan_count, dtype=np.int64)
tof_indices_ = np.empty(
frame_end - frame_start,
dtype=np.uint32
)
intensities_ = np.empty(
frame_end - frame_start,
dtype=np.uint16
)
scan_start = 0
for scan_index in range(scan_count):
start = scan_offsets[scan_index]
end = scan_offsets[scan_index + 1]
if start == end:
continue
decompressed_bytes = lzf.decompress(
compressed_data[start: end],
max_peak_count * 4 * 2
)
scan_start += parse_decompressed_bruker_binary_type1(
decompressed_bytes,
scan_indices_,
tof_indices_,
intensities_,
scan_start,
scan_index,
)
elif compression_type == 2:
import pyzstd
compressed_data = infile.read(bin_size - 8)
decompressed_bytes = pyzstd.decompress(compressed_data)
(
scan_indices_,
tof_indices_,
intensities_
) = parse_decompressed_bruker_binary_type2(decompressed_bytes)
else:
raise ValueError("TimsCompressionType is not 1 or 2.")
scan_start = frame_id * max_scan_count
scan_end = scan_start + scan_count
scan_indptr[scan_start: scan_end] = scan_indices_
tof_indices[frame_start: frame_end] = tof_indices_
intensities[frame_start: frame_end] = intensities_
def read_bruker_binary(
frames: np.ndarray,
bruker_d_folder_name: str,
compression_type: int,
max_peaks_per_scan: int,
) -> tuple:
"""Read all data from an "analysis.tdf_bin" of a Bruker .d folder.
Parameters
----------
frames : pd.DataFrame
The frames from the "analysis.tdf" SQL database of a Bruker .d folder.
These can be acquired with e.g. alphatims.bruker.read_bruker_sql.
bruker_d_folder_name : str
The full path to a Bruker .d folder.
compression_type : int
The compression type. This must be either 1 or 2.
max_peaks_per_scan : int
The maximum number of peaks per scan.
Should be retrieved from the global metadata.
Returns
-------
: tuple (np.int64[:], np.uint32[:], np.uint16[:]).
The scan_indptr, tof_indices and intensities.
"""
frame_indptr = np.empty(frames.shape[0] + 1, dtype=np.int64)
frame_indptr[0] = 0
frame_indptr[1:] = np.cumsum(frames.NumPeaks.values)
max_scan_count = frames.NumScans.max() + 1
scan_count = max_scan_count * frames.shape[0]
scan_indptr = np.zeros(scan_count + 1, dtype=np.int64)
intensities = np.empty(frame_indptr[-1], dtype=np.uint16)
tof_indices = np.empty(frame_indptr[-1], dtype=np.uint32)
tdf_bin_file_name = os.path.join(bruker_d_folder_name, "analysis.tdf_bin")
tims_offset_values = frames.TimsId.values
logging.info(
f"Reading {frame_indptr.size - 2:,} frames with "
f"{frame_indptr[-1]:,} detector events for {bruker_d_folder_name}"
)
if compression_type == 1:
process_frame_func = alphatims.utils.threadpool(
process_frame,
thread_count=1
)
else:
process_frame_func = alphatims.utils.threadpool(process_frame)
process_frame_func(
range(1, len(frames)),
tdf_bin_file_name,
tims_offset_values,
scan_indptr,
intensities,
tof_indices,
frame_indptr,
max_scan_count,
compression_type,
max_peaks_per_scan,
)
scan_indptr[1:] = np.cumsum(scan_indptr[:-1])
scan_indptr[0] = 0
return scan_indptr, tof_indices, intensities
class TimsTOF(object):
"""A class that stores Bruker TimsTOF data in memory for fast access.
Data can be read directly from a Bruker .d folder.
All OS's are supported,
but reading mz_values and mobility_values from a .d folder
requires Windows or Linux due to availability of Bruker libraries.
On MacOS, they are estimated based on metadata,
but these values are not guaranteed to be correct.
Often they fall within 0.02 Th, but errors up to 6 Th have already
been observed!
A TimsTOF object can also be exported to HDF for subsequent access.
This file format is portable to all OS's.
As such, initial reading on Windows with correct mz_values and
mobility_values can be done and the resulting HDF file can
safely be read on MacOS.
This HDF file also provides improved accession times for subsequent use.
After reading, data can be accessed with traditional Python slices.
As TimsTOF data is 5-dimensional, the data can be sliced in 5 dimensions
as well. These dimensions follows the design of the TimsTOF Pro:
1 LC: rt_values, frame_indices
The first dimension allows to slice retention_time values
or frames indices. These values and indices
have a one-to-one relationship.
2 TIMS: mobility_values, scan_indices
The second dimension allows to slice mobility values or
scan indices (i.e. a single push).
These values and indices have a one-to-one relationship.
3 QUAD: quad_mz_values, precursor_indices
The third dimension focusses on the quadrupole and indirectly
on the collision cell. It allows to slice lower and upper
quadrupole mz values (e.g. the m/z of
unfragmented ions / precursors). If set to -1, the quadrupole and
collision cell are assumed to be inactive, i.e. precursor ions
are detected instead of fragments.
Equally, this dimension allows to slice precursor indices.
Precursor index 0 defaults to all precursors (i.e. quad mz values
equal to -1). In DDA, precursor indices larger than 0 point
to ddaPASEF MSMS spectra.
In DIA, precursor indices larger than 0 point to windows,
i.e. all scans in a frame with equal quadrupole and collision
settings that is repeated once per full cycle.
Note that these values do not have a one-to-one relationship.
4 TOF: mz_values, tof_indices
The fourth dimension allows to slice (fragment) mz_values
or tof indices. Note that the quadrupole dimension determines
if precursors are detected or fragments.
These values and indices have a one-to-one relationship.
5 DETECTOR: intensity_values
The fifth dimension allows to slice intensity values.
Note that all dimensions except for the detector have both
(float) values and (integer) indices.
For each dimension, slices can be provided in several different ways:
- int:
A single int can be used to select a single index.
If used in the fifth dimension, it still allows to select
intensity_values
- float:
A single float can be used to select a single value.
As the values arrays are discrete, the smallest index with a value
equal to or larger than this value is actually selected.
For intensity_value slicing, the exact value is used.
- slice:
A Python slice with start, stop and step can be provided.
Start and stop values can independently be set to int or float.
If a float is provided, it is converted to an int as previously
described.
The step always needs to be provided as an int.
Since there is no one-to-one relation from values to indices for
QUAD and DETECTOR, the step value is ignored in these cases and
only start and stop can be used.
**IMPORTANT NOTE:** negative start, step and stop integers are not
supported!
- iterable:
An iterable with (mixed) floats and ints can also be provided,
in a similar fashion as Numpy's fancy indexing.
**IMPORTANT NOTE:** The resulting integers after float->int
conversion need to be sorted in ascending order!
- np.ndarray:
Multiple slicing is supported by providing either a
np.int64[:, 3] array, where each row is assumed to be a
(start, stop, step) tuple or np.float64[:, 2] where each row
is assumed to be a (start, stop) tuple.
**IMPORTANT NOTE:** These arrays need to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(precursor_slices[:, :2].ravel()) >= 0)
= True).
Alternatively, a dictionary can be used to define filters for each
dimension (see examples).
The result of such slicing is a pd.DataFrame with the following columns:
- raw_indices
- frame_indices
- scan_indices
- precursor_indices
- tof_indices
- rt_values
- mobility_values
- quad_low_mz_values
- quad_high_mz_values
- mz_values
- intensity_values
Instead of returning a pd.DataFrame, raw indices can be returned by
setting the last slice element to "raw".
Examples
--------
>>> data[:100.0]
# Return all datapoints with rt_values < 100.0 seconds
>>> data[:, 450]
# Return all datapoints with scan_index = 450
>>> data[:, :, 700.: 710.]
# Return all datapoints with 700.0 <= quad_mz_values < 710.0
>>> data[:, :, :, 621.9: 191000]
# Return all datapoints with 621.9 <= mz_values and
# tof_indices < 191000
>>> data[[1, 8, 10], :, 0, 621.9: np.inf]
# Return all datapoints from frames 1, 8 and 10, which are unfragmented
# (precursor_index = 0) and with 621.9 <= mz_values < np.inf
>>> data[:, :, 999]
# Return all datapoints from precursor 999
# (for diaPASEF this is a traditional MSMS spectrum)
>>> scan_slices = np.array([[10, 20, 1], [100, 200, 10]])
>>> data[:, scan_slices, :, :, :]
# Return all datapoints with scan_indices in range(10, 20) or
# range(100, 200, 10)
>>> df = data[
... {
... "frame_indices": [1, 191],
... "scan_indices": slice(300, 800, 10),
... "mz_values": slice(None, 400.5),
... "intensity_values": 50,
... }
... ]
# Slice by using a dictionary
>>> data[:, :, 999, "raw"]
# Return the raw indices of datapoints from precursor 999
"""
@property
def sample_name(self):
""": str : The sample name of this TimsTOF object."""
file_name = os.path.basename(self.bruker_d_folder_name)
return '.'.join(file_name.split('.')[:-1])
@property
def directory(self):
""": str : The directory of this TimsTOF object."""
return os.path.dirname(self.bruker_d_folder_name)
@property
def is_compressed(self):
""": bool : HDF array is compressed or not."""
return self._compressed
@property
def version(self):
""": str : AlphaTims version used to create this TimsTOF object."""
return self._version
@property
def acquisition_mode(self):
""": str : The acquisition mode."""
return self._acquisition_mode
@property
def meta_data(self):
""": dict : The metadata for the acquisition."""
return self._meta_data
@property
def rt_values(self):
""": np.ndarray : np.float64[:] : The rt values."""
return self._rt_values
@property
def mobility_values(self):
""": np.ndarray : np.float64[:] : The mobility values."""
return self._mobility_values
@property
def mz_values(self):
""": np.ndarray : np.float64[:] : The mz values."""
if self._use_calibrated_mz_values_as_default:
return self._calibrated_mz_values
else:
return self._mz_values
@property
def calibrated_mz_values(self):
""": np.ndarray : np.float64[:] : The global calibrated mz values."""
return self._calibrated_mz_values
@property
def quad_mz_values(self):
""": np.ndarray : np.float64[:, 2] : The (low, high) quad mz values."""
return self._quad_mz_values
@property
def intensity_values(self):
""": np.ndarray : np.uint16[:] : The intensity values."""
return self._intensity_values
@property
def frame_max_index(self):
""": int : The maximum frame index."""
return self._frame_max_index
@property
def scan_max_index(self):
""": int : The maximum scan index."""
return self._scan_max_index
@property
def tof_max_index(self):
""": int : The maximum tof index."""
return self._tof_max_index
@property
def precursor_max_index(self):
""": int : The maximum precursor index."""
return self._precursor_max_index
@property
def mz_min_value(self):
""": float : The minimum mz value."""
return self.mz_values[0]
@property
def mz_max_value(self):
""": float : The maximum mz value."""
return self.mz_values[-1]
@property
def calibrated_mz_min_value(self):
""": float : The minimum calibrated mz value."""
return self.calibrated_mz_values[0]
@property
def calibrated_mz_max_value(self):
""": float : The maximum calibrated mz value."""
return self.calibrated_mz_values[-1]
@property
def rt_max_value(self):
""": float : The maximum rt value."""
return self.rt_values[-1]
@property
def quad_mz_min_value(self):
""": float : The minimum quad mz value."""
return self._quad_min_mz_value
@property
def quad_mz_max_value(self):
""": float : The maximum quad mz value."""
return self._quad_max_mz_value
@property
def mobility_min_value(self):
""": float : The minimum mobility value."""
return self._mobility_min_value
@property
def mobility_max_value(self):
""": float : The maximum mobility value."""
return self._mobility_max_value
@property
def intensity_min_value(self):
""": float : The minimum intensity value."""
return self._intensity_min_value
@property
def intensity_max_value(self):
""": float : The maximum intensity value."""
return self._intensity_max_value
@property
def frames(self):
""": pd.DataFrame : The frames table of the analysis.tdf SQL."""
return self._frames
@property
def fragment_frames(self):
""": pd.DataFrame : The fragment frames table."""
return self._fragment_frames
@property
def precursors(self):
""": pd.DataFrame : The precursor table."""
return self._precursors
@property
def tof_indices(self):
""": np.ndarray : np.uint32[:] : The tof indices."""
return self._tof_indices
@property
def push_indptr(self):
""": np.ndarray : np.int64[:] : The tof indptr."""
return self._push_indptr
@property
def quad_indptr(self):
""": np.ndarray : np.int64[:] : The quad indptr (tof_indices)."""
return self._quad_indptr
@property
def raw_quad_indptr(self):
""": np.ndarray : np.int64[:] : The raw quad indptr (push indices)."""
return self._raw_quad_indptr
@property
def precursor_indices(self):
""": np.ndarray : np.int64[:] : The precursor indices."""
return self._precursor_indices
@property
def dia_precursor_cycle(self):
""": np.ndarray : np.int64[:] : The precursor indices of a DIA cycle."""
return self._dia_precursor_cycle
@property
def dia_mz_cycle(self):
""": np.ndarray : np.float64[:, 2] : The mz_values of a DIA cycle."""
return self._dia_mz_cycle
@property
def zeroth_frame(self):
""": bool : A blank zeroth frame is present so frames are 1-indexed."""
return self._zeroth_frame
@property
def max_accumulation_time(self):
""": float : The maximum accumulation time of all frames."""
return self._max_accumulation_time
@property
def accumulation_times(self):
""": np.ndarray : The accumulation times of all frames."""
return self._accumulation_times
@property
def intensity_corrections(self):
""": np.ndarray : The intensity_correction per frame."""
return self._intensity_corrections
def __init__(
self,
bruker_d_folder_name: str,
*,
mz_estimation_from_frame: int = 1,
mobility_estimation_from_frame: int = 1,
slice_as_dataframe: bool = True,
use_calibrated_mz_values_as_default: int = 0,
use_hdf_if_available: bool = False,
mmap_detector_events: bool = None,
drop_polarity: bool = True,
convert_polarity_to_int: bool = True,
):
"""Create a Bruker TimsTOF object that contains all data in-memory.
Parameters
----------
bruker_d_folder_name : str
The full file name to a Bruker .d folder.
Alternatively, the full file name of an already exported .hdf
can be provided as well.
mz_estimation_from_frame : int
If larger than 0, mz_values from this frame are read as
default mz_values with the Bruker library.
If 0, mz_values are being estimated with the metadata
based on "MzAcqRangeLower" and "MzAcqRangeUpper".
IMPORTANT NOTE: MacOS defaults to 0, as no Bruker library
is available.
Default is 1.
mobility_estimation_from_frame : int
If larger than 0, mobility_values from this frame are read as
default mobility_values with the Bruker library.
If 0, mobility_values are being estimated with the metadata
based on "OneOverK0AcqRangeLower" and "OneOverK0AcqRangeUpper".
IMPORTANT NOTE: MacOS defaults to 0, as no Bruker library
is available.
Default is 1.
slice_as_dataframe : bool
If True, slicing returns a pd.DataFrame by default.
If False, slicing provides a np.int64[:] with raw indices.
This value can also be modified after creation.
Default is True.
        use_calibrated_mz_values_as_default : int
If not 0, the mz_values are overwritten with global
calibrated_mz_values.
If 1, calibration at the MS1 level is performed.
If 2, calibration at the MS2 level is performed.
Default is 0.
use_hdf_if_available : bool
If an HDF file is available, use this instead of the
.d folder.
Default is False.
mmap_detector_events : bool
Do not save the intensity_values and tof_indices in memory,
but use an mmap instead. If no .hdf file is available to use for
mmapping, one will be created automatically.
Default is False for .d folders and True for .hdf files.
drop_polarity : bool
The polarity column of the frames table contains "+" or "-" and
is not numerical.
            If True, the polarity column is dropped from the frames table,
            ensuring a fully numerical pd.DataFrame.
If False, this column is kept, resulting in a pd.DataFrame with
dtype=object.
Default is True.
convert_polarity_to_int : bool
Convert the polarity to int (-1 or +1).
            This allows the polarity to be kept in numerical form.
This is ignored if the polarity is dropped.
Default is True.
"""
logging.info(f"Importing data from {bruker_d_folder_name}")
if (mmap_detector_events is None) and bruker_d_folder_name.endswith(".hdf"):
mmap_detector_events = True
if bruker_d_folder_name.endswith(".d"):
bruker_hdf_file_name = f"{bruker_d_folder_name[:-2]}.hdf"
hdf_file_exists = os.path.exists(bruker_hdf_file_name)
if use_hdf_if_available and hdf_file_exists:
self._import_data_from_hdf_file(
bruker_hdf_file_name,
mmap_detector_events,
)
self.bruker_hdf_file_name = bruker_hdf_file_name
else:
self.bruker_d_folder_name = os.path.abspath(
bruker_d_folder_name
)
if mmap_detector_events:
raise IOError(
f"Can only use mmapping from .hdf files. "
f"Either use the .hdf file as input directly, "
"or use the use_hdf_if_available option."
)
self._import_data_from_d_folder(
bruker_d_folder_name,
mz_estimation_from_frame,
mobility_estimation_from_frame,
drop_polarity,
convert_polarity_to_int,
)
elif bruker_d_folder_name.endswith(".hdf"):
self._import_data_from_hdf_file(
bruker_d_folder_name,
mmap_detector_events,
)
self.bruker_hdf_file_name = bruker_d_folder_name
else:
raise NotImplementedError(
"WARNING: file extension not understood"
)
if not hasattr(self, "version"):
self._version = "none"
if self.version != alphatims.__version__:
logging.info(
"WARNING: "
f"AlphaTims version {self.version} was used to initialize "
f"{bruker_d_folder_name}, while the current version of "
f"AlphaTims is {alphatims.__version__}."
)
self.slice_as_dataframe = slice_as_dataframe
self.use_calibrated_mz_values_as_default(
use_calibrated_mz_values_as_default
)
# Precompile
self[0, "raw"]
logging.info(f"Succesfully imported data from {bruker_d_folder_name}")
def __len__(self):
return len(self.intensity_values)
def __hash__(self):
return hash(self.bruker_d_folder_name)
def _import_data_from_d_folder(
self,
bruker_d_folder_name: str,
mz_estimation_from_frame: int,
mobility_estimation_from_frame: int,
drop_polarity: bool = True,
convert_polarity_to_int: bool = True,
):
self._version = alphatims.__version__
self._zeroth_frame = True
(
self._acquisition_mode,
global_meta_data,
self._frames,
self._fragment_frames,
self._precursors,
) = read_bruker_sql(
bruker_d_folder_name,
self._zeroth_frame,
drop_polarity,
convert_polarity_to_int,
)
self._meta_data = dict(
zip(global_meta_data.Key, global_meta_data.Value)
)
(
self._push_indptr,
self._tof_indices,
self._intensity_values,
) = read_bruker_binary(
self.frames,
bruker_d_folder_name,
int(self._meta_data["TimsCompressionType"]),
int(self._meta_data["MaxNumPeaksPerScan"]),
)
logging.info(f"Indexing {bruker_d_folder_name}...")
self._use_calibrated_mz_values_as_default = False
self._frame_max_index = self.frames.shape[0]
self._scan_max_index = int(self.frames.NumScans.max()) + 1
self._tof_max_index = int(self.meta_data["DigitizerNumSamples"]) + 1
self._rt_values = self.frames.Time.values.astype(np.float64)
self._mobility_min_value = float(
self.meta_data["OneOverK0AcqRangeLower"]
)
self._mobility_max_value = float(
self.meta_data["OneOverK0AcqRangeUpper"]
)
self._accumulation_times = self.frames.AccumulationTime.values.astype(
np.float64
)
self._max_accumulation_time = np.max(self._accumulation_times)
self._intensity_corrections = self._max_accumulation_time / self._accumulation_times
bruker_dll_available = BRUKER_DLL_FILE_NAME != ""
if (mobility_estimation_from_frame != 0) and bruker_dll_available:
import ctypes
with alphatims.bruker.open_bruker_d_folder(
bruker_d_folder_name
) as (bruker_dll, bruker_d_folder_handle):
logging.info(
f"Fetching mobility values from {bruker_d_folder_name}"
)
indices = np.arange(self.scan_max_index).astype(np.float64)
self._mobility_values = np.empty_like(indices)
bruker_dll.tims_scannum_to_oneoverk0(
bruker_d_folder_handle,
mobility_estimation_from_frame,
indices.ctypes.data_as(
ctypes.POINTER(ctypes.c_double)
),
self.mobility_values.ctypes.data_as(
ctypes.POINTER(ctypes.c_double)
),
self.scan_max_index
)
else:
if (mobility_estimation_from_frame != 0):
logging.info(
"Bruker DLL not available, estimating mobility values"
)
self._mobility_values = self.mobility_max_value - (
self.mobility_max_value - self.mobility_min_value
) / self.scan_max_index * np.arange(self.scan_max_index)
mz_min_value = float(self.meta_data["MzAcqRangeLower"])
mz_max_value = float(self.meta_data["MzAcqRangeUpper"])
tof_intercept = np.sqrt(mz_min_value)
tof_slope = (
np.sqrt(mz_max_value) - tof_intercept
) / self.tof_max_index
if (mz_estimation_from_frame != 0) and bruker_dll_available:
import ctypes
with alphatims.bruker.open_bruker_d_folder(
bruker_d_folder_name
) as (bruker_dll, bruker_d_folder_handle):
logging.info(
f"Fetching mz values from {bruker_d_folder_name}"
)
indices = np.arange(self.tof_max_index).astype(np.float64)
self._mz_values = np.empty_like(indices)
bruker_dll.tims_index_to_mz(
bruker_d_folder_handle,
mz_estimation_from_frame,
indices.ctypes.data_as(
ctypes.POINTER(ctypes.c_double)
),
self._mz_values.ctypes.data_as(
ctypes.POINTER(ctypes.c_double)
),
self.tof_max_index
)
else:
if (mz_estimation_from_frame != 0):
logging.info(
"Bruker DLL not available, estimating mz values"
)
self._mz_values = (
tof_intercept + tof_slope * np.arange(self.tof_max_index)
)**2
self._parse_quad_indptr()
self._intensity_min_value = int(np.min(self.intensity_values))
self._intensity_max_value = int(np.max(self.intensity_values))
def save_as_hdf(
self,
directory: str,
file_name: str,
overwrite: bool = False,
compress: bool = False,
return_as_bytes_io: bool = False,
):
"""Save the TimsTOF object as an hdf file.
Parameters
----------
directory : str
The directory where to save the HDF file.
Ignored if return_as_bytes_io == True.
file_name : str
The file name of the HDF file.
Ignored if return_as_bytes_io == True.
overwrite : bool
If True, an existing file is truncated.
If False, the existing file is appended to only if the original
group, array or property does not exist yet.
Default is False.
compress : bool
If True, compression is used.
            This roughly halves file sizes (on-disk),
            at the cost of 3-6 times longer access times.
            See also alphatims.utils.create_hdf_group_from_dict.
            If False, no compression is used.
            Default is False.
return_as_bytes_io
If True, the HDF file is only created in memory and returned
as a bytes stream.
If False, the file is written to disk.
Default is False.
Returns
-------
str, io.BytesIO
The full file name or a bytes stream containing the HDF file.
"""
import io
if overwrite:
hdf_mode = "w"
else:
hdf_mode = "a"
if return_as_bytes_io:
full_file_name = io.BytesIO()
else:
full_file_name = os.path.join(
directory,
file_name
)
logging.info(
f"Writing TimsTOF data to {full_file_name}."
)
self._compressed = compress
with h5py.File(full_file_name, hdf_mode) as hdf_root:
# hdf_root.swmr_mode = True
alphatims.utils.create_hdf_group_from_dict(
hdf_root.create_group("raw"),
self.__dict__,
overwrite=overwrite,
compress=compress,
)
if return_as_bytes_io:
full_file_name.seek(0)
else:
logging.info(
f"Succesfully wrote TimsTOF data to {full_file_name}."
)
return full_file_name
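    # A hedged round-trip sketch (directory and file name are illustrative assumptions):
    #
    #     hdf_name = data.save_as_hdf(directory="/tmp", file_name="sample.hdf")
    #     data2 = TimsTOF(hdf_name)  # .hdf input is detected by extension in __init__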
def _import_data_from_hdf_file(
self,
bruker_d_folder_name: str,
mmap_detector_events: bool = False,
):
with h5py.File(bruker_d_folder_name, "r") as hdf_root:
mmap_arrays = []
if mmap_detector_events:
mmap_arrays.append("/raw/_tof_indices")
mmap_arrays.append("/raw/_intensity_values")
self.__dict__ = alphatims.utils.create_dict_from_hdf_group(
hdf_root["raw"],
mmap_arrays,
bruker_d_folder_name,
)
def convert_from_indices(
self,
raw_indices=None,
*,
frame_indices=None,
quad_indices=None,
scan_indices=None,
tof_indices=None,
return_raw_indices: bool = False,
return_frame_indices: bool = False,
return_scan_indices: bool = False,
return_quad_indices: bool = False,
return_tof_indices: bool = False,
return_precursor_indices: bool = False,
return_rt_values: bool = False,
return_rt_values_min: bool = False,
return_mobility_values: bool = False,
return_quad_mz_values: bool = False,
return_push_indices: bool = False,
return_mz_values: bool = False,
return_intensity_values: bool = False,
return_corrected_intensity_values: bool = False,
raw_indices_sorted: bool = True,
) -> dict:
"""Convert selected indices to a dict.
Parameters
----------
raw_indices : np.int64[:], None
The raw indices for which coordinates need to be retrieved.
frame_indices : np.int64[:], None
The frame indices for which coordinates need to be retrieved.
quad_indices : np.int64[:], None
The quad indices for which coordinates need to be retrieved.
scan_indices : np.int64[:], None
The scan indices for which coordinates need to be retrieved.
tof_indices : np.int64[:], None
The tof indices for which coordinates need to be retrieved.
return_raw_indices : bool
If True, include "raw_indices" in the dict.
Default is False.
return_frame_indices : bool
If True, include "frame_indices" in the dict.
Default is False.
return_scan_indices : bool
If True, include "scan_indices" in the dict.
Default is False.
return_quad_indices : bool
If True, include "quad_indices" in the dict.
Default is False.
return_tof_indices : bool
If True, include "tof_indices" in the dict.
Default is False.
return_precursor_indices : bool
If True, include "precursor_indices" in the dict.
Default is False.
return_rt_values : bool
If True, include "rt_values" in the dict.
Default is False.
return_rt_values_min : bool
If True, include "rt_values_min" in the dict.
Default is False.
return_mobility_values : bool
If True, include "mobility_values" in the dict.
Default is False.
return_quad_mz_values : bool
If True, include "quad_low_mz_values" and
"quad_high_mz_values" in the dict.
Default is False.
return_push_indices : bool
If True, include "push_indices" in the dict.
Default is False.
return_mz_values : bool
If True, include "mz_values" in the dict.
Default is False.
return_intensity_values : bool
If True, include "intensity_values" in the dict.
Default is False.
return_corrected_intensity_values : bool
If True, include "corrected_intensity_values" in the dict.
Default is False.
raw_indices_sorted : bool
If True, raw_indices are assumed to be sorted,
resulting in a faster conversion.
Default is True.
Returns
-------
dict
A dict with all requested columns.
"""
result = {}
if (raw_indices is not None) and any(
[
return_frame_indices,
return_scan_indices,
return_quad_indices,
return_rt_values,
return_rt_values_min,
return_mobility_values,
return_quad_mz_values,
return_precursor_indices,
return_push_indices,
return_corrected_intensity_values,
]
):
if raw_indices_sorted:
push_indices = indptr_lookup(
self.push_indptr,
raw_indices,
)
else:
push_indices = np.searchsorted(
self.push_indptr,
raw_indices,
"right"
) - 1
if (
any(
[
return_frame_indices,
return_rt_values,
return_rt_values_min,
return_corrected_intensity_values,
]
)
) and (
frame_indices is None
):
frame_indices = push_indices // self.scan_max_index
if (return_scan_indices or return_mobility_values) and (
scan_indices is None
):
scan_indices = push_indices % self.scan_max_index
if any(
[
return_quad_indices,
return_quad_mz_values,
return_precursor_indices
]
) and (
quad_indices is None
):
if raw_indices_sorted:
quad_indices = indptr_lookup(
self.quad_indptr,
raw_indices,
)
else:
quad_indices = np.searchsorted(
self.quad_indptr,
raw_indices,
"right"
) - 1
if (return_tof_indices or return_mz_values) and (tof_indices is None):
tof_indices = self.tof_indices[raw_indices]
if return_raw_indices:
result["raw_indices"] = raw_indices
if return_frame_indices:
result["frame_indices"] = frame_indices
if return_scan_indices:
result["scan_indices"] = scan_indices
if return_quad_indices:
result["quad_indices"] = quad_indices
if return_precursor_indices:
result["precursor_indices"] = self.precursor_indices[quad_indices]
if return_push_indices:
result["push_indices"] = push_indices
if return_tof_indices:
result["tof_indices"] = tof_indices
if return_rt_values:
result["rt_values"] = self.rt_values[frame_indices]
if return_rt_values_min:
if "rt_values" in result:
result['rt_values_min'] = result["rt_values"] / 60
else:
result['rt_values_min'] = self.rt_values[frame_indices] / 60
if return_mobility_values:
result["mobility_values"] = self.mobility_values[scan_indices]
if return_quad_mz_values:
selected_quad_values = self.quad_mz_values[quad_indices]
low_mz_values = selected_quad_values[:, 0]
high_mz_values = selected_quad_values[:, 1]
result["quad_low_mz_values"] = low_mz_values
result["quad_high_mz_values"] = high_mz_values
if return_mz_values:
result["mz_values"] = self.mz_values[tof_indices]
if return_intensity_values:
result["intensity_values"] = self.intensity_values[raw_indices]
if return_corrected_intensity_values:
result["corrected_intensity_values"] = (
self.intensity_values[raw_indices] * self.intensity_corrections[frame_indices]
).astype(np.uint32)
return result
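    # A hedged sketch of mapping raw indices back to coordinates ("data" denotes an
    # already constructed TimsTOF object):
    #
    #     raw = data[100, "raw"]  # raw indices of all detector events in frame 100
    #     coords = data.convert_from_indices(
    #         raw,
    #         return_rt_values=True,
    #         return_mobility_values=True,
    #         return_mz_values=True,
    #     )  # dict with "rt_values", "mobility_values" and "mz_values" arrays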
def convert_to_indices(
self,
values: np.ndarray,
*,
return_frame_indices: bool = False,
return_scan_indices: bool = False,
return_tof_indices: bool = False,
side: str = "left",
return_type: str = "",
):
"""Convert selected values to an array in the requested dimension.
Parameters
----------
values : float, np.float64[...], iterable
The raw values for which indices need to be retrieved.
return_frame_indices : bool
If True, convert the values to "frame_indices".
Default is False.
return_scan_indices : bool
If True, convert the values to "scan_indices".
Default is False.
return_tof_indices : bool
If True, convert the values to "tof_indices".
Default is False.
side : str
If there is an exact match between the values and reference array,
which index should be chosen. See also np.searchsorted.
Options are "left" or "right".
Default is "left".
return_type : str
Alternative way to define the return type.
Options are "frame_indices", "scan_indices" or "tof_indices".
Default is "".
Returns
-------
np.int64[...], int
            An array with the same shape as values (or the iterable), or an
            int corresponding to the requested value.
Raises
------
PrecursorFloatError
When trying to convert a quad float other than np.inf or -np.inf
to precursor index.
"""
if return_frame_indices:
return_type = "frame_indices"
elif return_scan_indices:
return_type = "scan_indices"
elif return_tof_indices:
return_type = "tof_indices"
if return_type == "frame_indices":
return np.searchsorted(self.rt_values, values, side)
elif return_type == "scan_indices":
return self.scan_max_index - np.searchsorted(
self.mobility_values[::-1],
values,
"left" if side == "right" else "right"
)
elif return_type == "tof_indices":
return np.searchsorted(self.mz_values, values, side)
elif return_type == "precursor_indices":
try:
if values not in [-np.inf, np.inf]:
raise PrecursorFloatError(
"Can not convert values to precursor_indices"
)
except ValueError:
raise PrecursorFloatError(
"Can not convert values to precursor_indices"
)
if values == -np.inf:
return 0
elif values == np.inf:
return self.precursor_max_index
else:
raise KeyError(f"return_type '{return_type}' is invalid")
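    # A hedged sketch of value-to-index conversion (the numeric values are illustrative):
    #
    #     frame_index = data.convert_to_indices(300.0, return_frame_indices=True)  # rt in seconds
    #     scan_index = data.convert_to_indices(1.2, return_scan_indices=True)      # mobility (1/K0)
    #     tof_index = data.convert_to_indices(733.5, return_tof_indices=True)      # mz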
def __getitem__(self, keys):
if not isinstance(keys, tuple):
keys = tuple([keys])
if isinstance(keys[-1], str):
if keys[-1] == "df":
as_dataframe = True
elif keys[-1] == "raw":
as_dataframe = False
else:
raise ValueError(f"Cannot use {keys[-1]} as a key")
keys = keys[:-1]
else:
as_dataframe = self.slice_as_dataframe
parsed_keys = parse_keys(self, keys)
raw_indices = filter_indices(
frame_slices=parsed_keys["frame_indices"],
scan_slices=parsed_keys["scan_indices"],
precursor_slices=parsed_keys["precursor_indices"],
tof_slices=parsed_keys["tof_indices"],
quad_slices=parsed_keys["quad_values"],
intensity_slices=parsed_keys["intensity_values"],
frame_max_index=self.frame_max_index,
scan_max_index=self.scan_max_index,
push_indptr=self.push_indptr,
precursor_indices=self.precursor_indices,
quad_mz_values=self.quad_mz_values,
quad_indptr=self.quad_indptr,
tof_indices=self.tof_indices,
intensities=self.intensity_values
)
if as_dataframe:
return self.as_dataframe(raw_indices)
else:
return raw_indices
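    # A hedged sketch of the dict-style key form that parse_keys accepts (values are
    # illustrative):
    #
    #     selection = data[
    #         {"rt_values": slice(100.0, 200.0), "mz_values": slice(500.0, 510.0)},
    #         "df",
    #     ]  # pd.DataFrame of events with 100 <= rt < 200 s and 500 <= mz < 510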
def estimate_strike_count(
self,
frame_slices: np.ndarray,
scan_slices: np.ndarray,
precursor_slices: np.ndarray,
tof_slices: np.ndarray,
quad_slices: np.ndarray,
) -> int:
"""Estimate the number of detector events, given a set of slices.
Parameters
----------
frame_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(frame_slices[:, :2].ravel()) >= 0) = True).
scan_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(scan_slices[:, :2].ravel()) >= 0) = True).
precursor_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(precursor_slices[:, :2].ravel()) >= 0)
= True).
tof_slices : np.int64[:, 3]
Each row of the array is assumed to be a (start, stop, step) tuple.
This array is assumed to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(tof_slices[:, :2].ravel()) >= 0) = True).
quad_slices : np.float64[:, 2]
Each row of the array is assumed to be (lower_mz, upper_mz) tuple.
This array is assumed to be sorted,
disjunct and strictly increasing
(i.e. np.all(np.diff(quad_slices.ravel()) >= 0) = True).
Returns
-------
int
The estimated number of detector events given these slices.
"""
frame_count = 0
for frame_start, frame_end, frame_stop in frame_slices:
frame_count += len(range(frame_start, frame_end, frame_stop))
scan_count = 0
for scan_start, scan_end, scan_stop in scan_slices:
scan_count += len(range(scan_start, scan_end, scan_stop))
tof_count = 0
for tof_start, tof_end, tof_stop in tof_slices:
tof_count += len(range(tof_start, tof_end, tof_stop))
precursor_count = 0
precursor_index_included = False
for precursor_start, precursor_end, precursor_stop in precursor_slices:
precursor_count += len(
range(precursor_start, precursor_end, precursor_stop)
)
if 0 in range(precursor_start, precursor_end, precursor_stop):
precursor_index_included = True
quad_count = 0
precursor_quad_included = False
for quad_start, quad_end in quad_slices:
if quad_start < 0:
precursor_quad_included = True
if quad_start < self.quad_mz_min_value:
quad_start = self.quad_mz_min_value
if quad_end > self.quad_mz_max_value:
quad_end = self.quad_mz_max_value
if quad_start < quad_end:
quad_count += quad_end - quad_start
estimated_count = len(self)
estimated_count *= frame_count / self.frame_max_index
estimated_count *= scan_count / self.scan_max_index
estimated_count *= tof_count / self.tof_max_index
fragment_multiplier = 0.5 * min(
precursor_count / (self.precursor_max_index),
quad_count / (
self.quad_mz_max_value - self.quad_mz_min_value
)
)
if fragment_multiplier < 0:
fragment_multiplier = 0
if precursor_index_included and precursor_quad_included:
fragment_multiplier += 0.5
estimated_count *= fragment_multiplier
return int(estimated_count)
def bin_intensities(self, indices: np.ndarray, axis: tuple):
"""Sum and project the intensities of the indices along 1 or 2 axis.
Parameters
----------
indices : np.int64[:]
The selected indices whose coordinates need to be summed along
the selected axis.
axis : tuple
Must be length 1 or 2 and can only contain the elements
"rt_values", "mobility_values" and "mz_values".
Returns
-------
np.float64[:], np.float64[:, 2]
            An array or heatmap that expresses the summed intensity along
the selected axis.
"""
intensities = self.intensity_values[indices].astype(np.float64)
max_index = {
"rt_values": self.frame_max_index,
"mobility_values": self.scan_max_index,
"mz_values": self.tof_max_index,
}
parsed_indices = self.convert_from_indices(
indices,
return_frame_indices="rt_values" in axis,
return_scan_indices="mobility_values" in axis,
return_tof_indices="mz_values" in axis,
)
binned_intensities = np.zeros(tuple([max_index[ax] for ax in axis]))
parse_dict = {
"rt_values": "frame_indices",
"mobility_values": "scan_indices",
"mz_values": "tof_indices",
}
add_intensity_to_bin(
range(indices.size),
intensities,
tuple(
[
parsed_indices[parse_dict[ax]] for ax in axis
]
),
binned_intensities
)
return binned_intensities
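    # A hedged sketch of an extracted ion chromatogram (XIC): project the intensities
    # of a selected mz window onto the rt axis (the window is illustrative):
    #
    #     raw = data[:, :, 0, 733.5:734.5, "raw"]
    #     xic = data.bin_intensities(raw, axis=("rt_values",))  # np.float64[frame_max_index]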
def as_dataframe(
self,
indices: np.ndarray,
*,
raw_indices: bool = True,
frame_indices: bool = True,
scan_indices: bool = True,
quad_indices: bool = False,
tof_indices: bool = True,
precursor_indices: bool = True,
rt_values: bool = True,
rt_values_min: bool = True,
mobility_values: bool = True,
quad_mz_values: bool = True,
push_indices: bool = True,
mz_values: bool = True,
intensity_values: bool = True,
corrected_intensity_values: bool = True,
raw_indices_sorted: bool = True,
):
"""Convert raw indices to a pd.DataFrame.
Parameters
----------
indices : np.int64[:]
The raw indices for which coordinates need to be retrieved.
raw_indices : bool
If True, include "raw_indices" in the dataframe.
Default is True.
frame_indices : bool
If True, include "frame_indices" in the dataframe.
Default is True.
scan_indices : bool
If True, include "scan_indices" in the dataframe.
Default is True.
quad_indices : bool
If True, include "quad_indices" in the dataframe.
Default is False.
tof_indices : bool
If True, include "tof_indices" in the dataframe.
Default is True.
precursor_indices : bool
If True, include "precursor_indices" in the dataframe.
Default is True.
rt_values : bool
If True, include "rt_values" in the dataframe.
Default is True.
rt_values_min : bool
If True, include "rt_values_min" in the dataframe.
Default is True.
mobility_values : bool
If True, include "mobility_values" in the dataframe.
Default is True.
quad_mz_values : bool
If True, include "quad_low_mz_values" and
"quad_high_mz_values" in the dict.
Default is True.
push_indices : bool
If True, include "push_indices" in the dataframe.
Default is True.
mz_values : bool
If True, include "mz_values" in the dataframe.
Default is True.
intensity_values : bool
If True, include "intensity_values" in the dataframe.
Default is True.
corrected_intensity_values : bool
If True, include "corrected_intensity_values" in the dataframe.
Default is True.
raw_indices_sorted : bool
If True, raw_indices are assumed to be sorted,
resulting in a faster conversion.
Default is True.
Returns
-------
pd.DataFrame
A dataframe with all requested columns.
"""
return pd.DataFrame(
self.convert_from_indices(
indices,
return_raw_indices=raw_indices,
return_frame_indices=frame_indices,
return_scan_indices=scan_indices,
return_quad_indices=quad_indices,
return_precursor_indices=precursor_indices,
return_tof_indices=tof_indices,
return_rt_values=rt_values,
return_rt_values_min=rt_values_min,
return_mobility_values=mobility_values,
return_quad_mz_values=quad_mz_values,
return_push_indices=push_indices,
return_mz_values=mz_values,
return_intensity_values=intensity_values,
return_corrected_intensity_values=corrected_intensity_values,
raw_indices_sorted=raw_indices_sorted,
)
)
def _parse_quad_indptr(self) -> None:
logging.info("Indexing quadrupole dimension")
frame_ids = self.fragment_frames.Frame.values + 1
scan_begins = self.fragment_frames.ScanNumBegin.values
scan_ends = self.fragment_frames.ScanNumEnd.values
isolation_mzs = self.fragment_frames.IsolationMz.values
isolation_widths = self.fragment_frames.IsolationWidth.values
precursors = self.fragment_frames.Precursor.values
if (precursors[0] is None):
if self.zeroth_frame:
frame_groups = self.frames.MsMsType.values[1:]
else:
frame_groups = self.frames.MsMsType.values
precursor_frames = np.flatnonzero(frame_groups == 0)
group_sizes = np.diff(precursor_frames)
group_size = group_sizes[0]
if np.any(group_sizes != group_size):
raise ValueError("Sample type not understood")
precursors = (1 + frame_ids - frame_ids[0]) % group_size
if self.zeroth_frame:
precursors[0] = 0
self.fragment_frames.Precursor = precursors
self._acquisition_mode = "diaPASEF"
scan_max_index = self.scan_max_index
frame_max_index = self.frame_max_index
quad_indptr = [0]
quad_low_values = []
quad_high_values = []
precursor_indices = []
high = -1
for (
frame_id,
scan_begin,
scan_end,
isolation_mz,
isolation_width,
precursor
) in zip(
frame_ids - 1,
scan_begins,
scan_ends,
isolation_mzs,
isolation_widths / 2,
precursors
):
low = frame_id * scan_max_index + scan_begin
# TODO: CHECK?
# if low < high:
# print(frame_id, low, frame_id * scan_max_index + scan_end, high, low - high)
if low != high:
quad_indptr.append(low)
quad_low_values.append(-1)
quad_high_values.append(-1)
precursor_indices.append(0)
high = frame_id * scan_max_index + scan_end
quad_indptr.append(high)
quad_low_values.append(isolation_mz - isolation_width)
quad_high_values.append(isolation_mz + isolation_width)
precursor_indices.append(precursor)
quad_max_index = scan_max_index * frame_max_index
if high < quad_max_index:
quad_indptr.append(quad_max_index)
quad_low_values.append(-1)
quad_high_values.append(-1)
precursor_indices.append(0)
self._quad_mz_values = np.stack([quad_low_values, quad_high_values]).T
self._precursor_indices = np.array(precursor_indices)
self._raw_quad_indptr = np.array(quad_indptr)
self._quad_indptr = self.push_indptr[self._raw_quad_indptr]
self._quad_max_mz_value = np.max(self.quad_mz_values[:, 1])
self._quad_min_mz_value = np.min(
self.quad_mz_values[
self.quad_mz_values[:, 0] >= 0,
0
]
)
self._precursor_max_index = int(np.max(self.precursor_indices)) + 1
if self._acquisition_mode == "diaPASEF":
offset = int(self.zeroth_frame)
cycle_index = np.searchsorted(
self.raw_quad_indptr,
(self.scan_max_index) * (self.precursor_max_index + offset),
"r"
) + 1
repeats = np.diff(self.raw_quad_indptr[: cycle_index])
if self.zeroth_frame:
repeats[0] -= self.scan_max_index
cycle_length = self.scan_max_index * self.precursor_max_index
repeat_length = np.sum(repeats)
if repeat_length != cycle_length:
repeats[-1] -= repeat_length - cycle_length
self._dia_mz_cycle = np.empty((cycle_length, 2))
self._dia_mz_cycle[:, 0] = np.repeat(
self.quad_mz_values[: cycle_index - 1, 0],
repeats
)
self._dia_mz_cycle[:, 1] = np.repeat(
self.quad_mz_values[: cycle_index - 1, 1],
repeats
)
self._dia_precursor_cycle = np.repeat(
self.precursor_indices[: cycle_index - 1],
repeats
)
else:
self._dia_mz_cycle = np.empty((0, 2))
self._dia_precursor_cycle = np.empty(0, dtype=np.int64)
def index_precursors(
self,
centroiding_window: int = 0,
keep_n_most_abundant_peaks: int = -1,
) -> tuple:
"""Retrieve all MS2 spectra acquired with DDA.
IMPORTANT NOTE: This function is intended for DDA samples.
        While it in theory works for DIA samples too, this probably has little
value.
Parameters
----------
centroiding_window : int
The centroiding window to use.
If 0, no centroiding is performed.
Default is 0.
keep_n_most_abundant_peaks : int
Keep the n most abundant peaks.
If -1, all peaks are retained.
Default is -1.
Returns
-------
: tuple (np.int64[:], np.uint32[:], np.uint32[:])
The spectrum_indptr array, spectrum_tof_indices array and
spectrum_intensity_values array.
"""
precursor_order = np.argsort(self.precursor_indices)
precursor_offsets = np.empty(
self.precursor_max_index + 1,
dtype=np.int64
)
precursor_offsets[0] = 0
precursor_offsets[1:-1] = np.flatnonzero(
np.diff(self.precursor_indices[precursor_order]) > 0) + 1
precursor_offsets[-1] = len(precursor_order)
offset = precursor_offsets[1]
offsets = precursor_order[offset:]
        counts = np.empty(len(offsets) + 1, dtype=np.int64)
counts[0] = 0
counts[1:] = np.cumsum(
self.quad_indptr[offsets + 1] - self.quad_indptr[offsets]
)
spectrum_indptr = np.empty(
self.precursor_max_index + 1,
dtype=np.int64
)
spectrum_indptr[1:] = counts[
precursor_offsets[1:] - precursor_offsets[1]
]
spectrum_indptr[0] = 0
spectrum_counts = np.zeros_like(spectrum_indptr)
spectrum_tof_indices = np.empty(spectrum_indptr[-1], dtype=np.uint32)
spectrum_intensity_values = np.empty(
len(spectrum_tof_indices),
dtype=np.float64
)
set_precursor(
range(1, self.precursor_max_index),
precursor_order,
precursor_offsets,
self.quad_indptr,
self.tof_indices,
self.intensity_values,
spectrum_tof_indices,
spectrum_intensity_values,
spectrum_indptr,
spectrum_counts,
)
if centroiding_window > 0:
centroid_spectra(
range(1, self.precursor_max_index),
spectrum_indptr,
spectrum_counts,
spectrum_tof_indices,
spectrum_intensity_values,
centroiding_window,
)
if keep_n_most_abundant_peaks > -1:
filter_spectra_by_abundant_peaks(
range(1, self.precursor_max_index),
spectrum_indptr,
spectrum_counts,
spectrum_tof_indices,
spectrum_intensity_values,
keep_n_most_abundant_peaks,
)
new_spectrum_indptr = np.empty_like(spectrum_counts)
new_spectrum_indptr[1:] = np.cumsum(spectrum_counts[:-1])
new_spectrum_indptr[0] = 0
trimmed_spectrum_tof_indices = np.empty(
new_spectrum_indptr[-1],
dtype=np.uint32
)
trimmed_spectrum_intensity_values = np.empty(
len(trimmed_spectrum_tof_indices),
dtype=np.float64
)
trim_spectra(
range(1, self.precursor_max_index),
spectrum_tof_indices,
spectrum_intensity_values,
spectrum_indptr,
trimmed_spectrum_tof_indices,
trimmed_spectrum_intensity_values,
new_spectrum_indptr,
)
return (
new_spectrum_indptr,
trimmed_spectrum_tof_indices,
trimmed_spectrum_intensity_values
)
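    # A hedged sketch of reading one DDA MS2 spectrum from the returned arrays
    # (precursor index 1 is illustrative):
    #
    #     indptr, tof_idx, intensities = data.index_precursors(centroiding_window=5)
    #     start, end = indptr[1], indptr[2]
    #     spectrum_mzs = data.mz_values[tof_idx[start:end]]
    #     spectrum_ints = intensities[start:end]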
def save_as_mgf(
self,
directory: str,
file_name: str,
overwrite: bool = False,
centroiding_window: int = 5,
keep_n_most_abundant_peaks: int = -1,
):
"""Save profile spectra from this TimsTOF object as an mgf file.
Parameters
----------
directory : str
The directory where to save the mgf file.
file_name : str
The file name of the mgf file.
overwrite : bool
If True, an existing file is truncated.
If False, nothing happens if a file already exists.
Default is False.
centroiding_window : int
The centroiding window to use.
If 0, no centroiding is performed.
Default is 5.
keep_n_most_abundant_peaks : int
Keep the n most abundant peaks.
If -1, all peaks are retained.
Default is -1.
Returns
-------
str
The full file name of the mgf file.
"""
full_file_name = os.path.join(
directory,
file_name
)
if self.acquisition_mode != "ddaPASEF":
logging.info(
f"File {self.bruker_d_folder_name} is not "
"a ddaPASEF file, nothing to do."
)
return full_file_name
if os.path.exists(full_file_name):
if not overwrite:
logging.info(
f"File {full_file_name} already exists, nothing to do."
)
return full_file_name
logging.info(f"Indexing spectra of {self.bruker_d_folder_name}...")
(
spectrum_indptr,
spectrum_tof_indices,
spectrum_intensity_values,
) = self.index_precursors(
centroiding_window=centroiding_window,
keep_n_most_abundant_peaks=keep_n_most_abundant_peaks,
)
mono_mzs = self.precursors.MonoisotopicMz.values
average_mzs = self.precursors.AverageMz.values
charges = self.precursors.Charge.values
charges[np.flatnonzero(np.isnan(charges))] = 0
charges = charges.astype(np.int64)
rtinseconds = self.rt_values[self.precursors.Parent.values]
intensities = self.precursors.Intensity.values
mobilities = self.mobility_values[
self.precursors.ScanNumber.values.astype(np.int64)
]
with open(full_file_name, "w") as infile:
logging.info(f"Exporting profile spectra to {full_file_name}...")
for index in alphatims.utils.progress_callback(
range(1, self.precursor_max_index)
):
start = spectrum_indptr[index]
end = spectrum_indptr[index + 1]
title = (
f"index: {index}, "
f"intensity: {intensities[index - 1]:.1f}, "
f"mobility: {mobilities[index - 1]:.3f}, "
f"average_mz: {average_mzs[index - 1]:.3f}"
)
infile.write("BEGIN IONS\n")
infile.write(f'TITLE="{title}"\n')
infile.write(f"PEPMASS={mono_mzs[index - 1]:.6f}\n")
infile.write(f"CHARGE={charges[index - 1]}\n")
infile.write(f"RTINSECONDS={rtinseconds[index - 1]:.2f}\n")
for mz, intensity in zip(
self.mz_values[spectrum_tof_indices[start: end]],
spectrum_intensity_values[start: end],
):
infile.write(f"{mz:.6f} {intensity}\n")
infile.write("END IONS\n")
logging.info(
f"Succesfully wrote {self.precursor_max_index - 1:,} "
f"spectra to {full_file_name}."
)
return full_file_name
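    # A hedged sketch (directory and file name are illustrative; non-ddaPASEF data is
    # skipped as shown above):
    #
    #     data.save_as_mgf(directory="/tmp", file_name="sample.mgf", overwrite=True)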
def calculate_global_calibrated_mz_values(
self,
calibrant1: tuple = (922.009798, 1.1895, slice(0, 1)),
calibrant2: tuple = (1221.990637, 1.3820, slice(0, 1)),
mz_tolerance: float = 10, # in Th
mobility_tolerance: float = 0.1, # in 1/k0,
) -> None:
"""Calculate global calibrated_mz_values based on two calibrant ions.
Parameters
----------
calibrant1 : tuple
            The first calibrant ion.
            This is a tuple with (mz, mobility, precursor_slice) float values.
            Default is (922.009798, 1.1895, slice(0, 1)).
        calibrant2 : tuple
            The second calibrant ion.
            This is a tuple with (mz, mobility, precursor_slice) float values.
Default is (1221.990637, 1.3820, slice(0, 1)).
mz_tolerance : float
The tolerance window (in Th) with respect to the
uncalibrated mz_values. If this is too large,
the calibrant ion might not be the most intense ion anymore.
If this is too small, the calibrant ion might not be contained.
Default is 10.
mobility_tolerance : float
The tolerance window with respect to the
uncalibrated mobility_values. If this is too large,
the calibrant ion might not be the most intense ion anymore.
If this is too small, the calibrant ion might not be contained.
Default is 0.1.
"""
logging.info("Calculating global calibrated mz values...")
if calibrant1[0] > calibrant2[0]:
calibrant1, calibrant2 = calibrant2, calibrant1
calibrant1_lower_mz = calibrant1[0] - mz_tolerance
calibrant1_upper_mz = calibrant1[0] + mz_tolerance
calibrant1_lower_mobility = calibrant1[1] - mobility_tolerance
calibrant1_upper_mobility = calibrant1[1] + mobility_tolerance
calibrant1_tof = np.argmax(
np.bincount(
self.tof_indices[
self[
:,
calibrant1_lower_mobility: calibrant1_upper_mobility,
calibrant1[2],
calibrant1_lower_mz: calibrant1_upper_mz,
"raw"
]
]
)
)
calibrant2_lower_mz = calibrant2[0] - mz_tolerance
calibrant2_upper_mz = calibrant2[0] + mz_tolerance
calibrant2_lower_mobility = calibrant2[1] - mobility_tolerance
calibrant2_upper_mobility = calibrant2[1] + mobility_tolerance
calibrant2_tof = np.argmax(
np.bincount(
self.tof_indices[
self[
:,
calibrant2_lower_mobility: calibrant2_upper_mobility,
calibrant2[2],
calibrant2_lower_mz: calibrant2_upper_mz,
"raw"
]
]
)
)
tof_slope = (
np.sqrt(calibrant2[0]) - np.sqrt(calibrant1[0])
) / (calibrant2_tof - calibrant1_tof)
tof_intercept = np.sqrt(calibrant1[0]) - tof_slope * calibrant1_tof
self._calibrated_mz_values = (
tof_intercept + tof_slope * np.arange(self.tof_max_index)
)**2
ppms = 10**6 * (
self._mz_values - self._calibrated_mz_values
) / self._mz_values
logging.info(
"Global calibration of mz values yielded differences between "
f"{np.min(ppms):.2f} and {np.max(ppms):.2f} ppm."
)
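    # A hedged sketch using the default calibrant masses and tolerances:
    #
    #     data.calculate_global_calibrated_mz_values()
    #     data.use_calibrated_mz_values_as_default(1)  # prefer MS1-calibrated mz values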
def use_calibrated_mz_values_as_default(
self,
use_calibrated_mz_values: int
) -> None:
"""Override the default mz_values with the global calibrated_mz_values.
Calibrated_mz_values will be calculated if they do not exist yet.
Parameters
----------
use_calibrated_mz_values : int
If not 0, the mz_values are overwritten with global
calibrated_mz_values.
If 1, calibration at the MS1 level is performed.
If 2, calibration at the MS2 level is performed.
"""
if use_calibrated_mz_values != 0:
if not hasattr(self, "_calibrated_mz_values"):
if use_calibrated_mz_values == 1:
ms_level = 0
if use_calibrated_mz_values == 2:
ms_level = slice(1, None)
self.calculate_global_calibrated_mz_values(
calibrant1=(922.009798, 1.1895, ms_level),
calibrant2=(1221.990637, 1.3820, ms_level),
mz_tolerance=1
)
self._use_calibrated_mz_values_as_default = use_calibrated_mz_values
class PrecursorFloatError(TypeError):
"""Used to indicate that a precursor value is not an int but a float."""
pass
@alphatims.utils.pjit(
signature_or_function="void(i8,i8[:],i8[:],i8[:],u4[:],u2[:],u4[:],f8[:],i8[:],i8[:])"
)
def set_precursor(
precursor_index: int,
offset_order: np.ndarray,
precursor_offsets: np.ndarray,
quad_indptr: np.ndarray,
tof_indices: np.ndarray,
intensities: np.ndarray,
spectrum_tof_indices: np.ndarray,
spectrum_intensity_values: np.ndarray,
spectrum_indptr: np.ndarray,
spectrum_counts: np.ndarray,
) -> None:
"""Sum the intensities of all pushes belonging to a single precursor.
IMPORTANT NOTE: This function is decorated with alphatims.utils.pjit.
The first argument is thus expected to be provided as an iterable
containing ints instead of a single int.
Parameters
----------
precursor_index : int
The precursor index indicating which MS2 spectrum to determine.
offset_order : np.int64[:]
The order of self.precursor_indices, obtained with np.argsort.
precursor_offsets : np.int64[:]
An index pointer array for precursor offsets.
quad_indptr : np.int64[:]
The self.quad_indptr array of a TimsTOF object.
tof_indices : np.uint32[:]
The self.tof_indices array of a TimsTOF object.
intensities : np.uint16[:]
The self.intensity_values array of a TimsTOF object.
spectrum_tof_indices : np.uint32[:]
A buffer array to store tof indices of the new spectrum.
spectrum_intensity_values : np.float64[:]
A buffer array to store intensity values of the new spectrum.
spectrum_indptr : np.int64[:]
An index pointer array defining the original spectrum boundaries.
    spectrum_counts : np.int64[:]
        A buffer array defining how many distinct tof indices the new
spectrum has.
"""
offset = spectrum_indptr[precursor_index]
precursor_offset_lower = precursor_offsets[precursor_index]
precursor_offset_upper = precursor_offsets[precursor_index + 1]
selected_offsets = offset_order[
precursor_offset_lower: precursor_offset_upper
]
starts = quad_indptr[selected_offsets]
ends = quad_indptr[selected_offsets + 1]
offset_index = offset
for start, end in zip(starts, ends):
spectrum_tof_indices[
offset_index: offset_index + end - start
] = tof_indices[start: end]
spectrum_intensity_values[
offset_index: offset_index + end - start
] = intensities[start: end]
offset_index += end - start
offset_end = spectrum_indptr[precursor_index + 1]
order = np.argsort(spectrum_tof_indices[offset: offset_end])
current_index = offset - 1
previous_tof_index = -1
for tof_index, intensity in zip(
spectrum_tof_indices[offset: offset_end][order],
spectrum_intensity_values[offset: offset_end][order],
):
if tof_index != previous_tof_index:
current_index += 1
spectrum_tof_indices[current_index] = tof_index
spectrum_intensity_values[current_index] = intensity
previous_tof_index = tof_index
else:
spectrum_intensity_values[current_index] += intensity
spectrum_tof_indices[current_index + 1: offset_end] = 0
spectrum_intensity_values[current_index + 1: offset_end] = 0
spectrum_counts[precursor_index] = current_index + 1 - offset
@alphatims.utils.pjit
def centroid_spectra(
index: int,
spectrum_indptr: np.ndarray,
spectrum_counts: np.ndarray,
spectrum_tof_indices: np.ndarray,
spectrum_intensity_values: np.ndarray,
window_size: int,
):
"""Smoothen and centroid a profile spectrum (inplace operation).
IMPORTANT NOTE: This function will overwrite all input arrays.
IMPORTANT NOTE: This function is decorated with alphatims.utils.pjit.
The first argument is thus expected to be provided as an iterable
containing ints instead of a single int.
Parameters
----------
index : int
The push index whose intensity_values and tof_indices will be
centroided.
spectrum_indptr : np.int64[:]
An index pointer array defining the (untrimmed) spectrum boundaries.
    spectrum_counts : np.int64[:]
The original array defining how many distinct tof indices each
spectrum has.
spectrum_tof_indices : np.uint32[:]
The original array containing tof indices.
spectrum_intensity_values : np.float64[:]
The original array containing intensity values.
window_size : int
The window size to use for smoothing and centroiding peaks.
"""
start = spectrum_indptr[index]
end = start + spectrum_counts[index]
if start == end:
return
mzs = spectrum_tof_indices[start: end]
ints = spectrum_intensity_values[start: end]
smooth_ints = ints.copy()
for i, self_mz in enumerate(mzs[:-1]):
for j in range(i + 1, len(mzs)):
other_mz = mzs[j]
diff = other_mz - self_mz + 1
if diff >= window_size:
break
smooth_ints[i] += ints[j] / diff
smooth_ints[j] += ints[i] / diff
pre_apex = True
maxima = [mzs[0]]
intensities = [ints[0]]
for i, self_mz in enumerate(mzs[1:], 1):
if self_mz > mzs[i - 1] + window_size:
maxima.append(mzs[i])
intensities.append(0)
pre_apex = True
elif pre_apex:
if smooth_ints[i] < smooth_ints[i - 1]:
pre_apex = False
maxima[-1] = mzs[i - 1]
elif smooth_ints[i] > smooth_ints[i - 1]:
maxima.append(mzs[i])
intensities.append(0)
pre_apex = True
intensities[-1] += ints[i]
spectrum_tof_indices[start: start + len(maxima)] = np.array(
maxima,
dtype=spectrum_tof_indices.dtype
)
spectrum_intensity_values[start: start + len(maxima)] = np.array(
intensities,
dtype=spectrum_intensity_values.dtype
)
spectrum_counts[index] = len(maxima)
@alphatims.utils.pjit
def filter_spectra_by_abundant_peaks(
index: int,
spectrum_indptr: np.ndarray,
spectrum_counts: np.ndarray,
spectrum_tof_indices: np.ndarray,
spectrum_intensity_values: np.ndarray,
keep_n_most_abundant_peaks: int,
):
"""Filter a spectrum to retain only the most abundant peaks.
IMPORTANT NOTE: This function will overwrite all input arrays.
IMPORTANT NOTE: This function is decorated with alphatims.utils.pjit.
The first argument is thus expected to be provided as an iterable
containing ints instead of a single int.
Parameters
----------
index : int
The push index whose intensity_values and tof_indices will be
        filtered.
spectrum_indptr : np.int64[:]
An index pointer array defining the (untrimmed) spectrum boundaries.
    spectrum_counts : np.int64[:]
The original array defining how many distinct tof indices each
spectrum has.
spectrum_tof_indices : np.uint32[:]
The original array containing tof indices.
spectrum_intensity_values : np.float64[:]
The original array containing intensity values.
keep_n_most_abundant_peaks : int
Keep only this many abundant peaks.
"""
start = spectrum_indptr[index]
end = start + spectrum_counts[index]
if end - start <= keep_n_most_abundant_peaks:
return
mzs = spectrum_tof_indices[start: end]
ints = spectrum_intensity_values[start: end]
selected_indices = np.sort(
np.argsort(ints)[-keep_n_most_abundant_peaks:]
)
count = len(selected_indices)
spectrum_tof_indices[start: start + count] = mzs[selected_indices]
spectrum_intensity_values[start: start + count] = ints[selected_indices]
spectrum_counts[index] = count
@alphatims.utils.pjit
def trim_spectra(
index: int,
spectrum_tof_indices: np.ndarray,
spectrum_intensity_values: np.ndarray,
spectrum_indptr: np.ndarray,
trimmed_spectrum_tof_indices: np.ndarray,
trimmed_spectrum_intensity_values: np.ndarray,
new_spectrum_indptr: np.ndarray,
) -> None:
"""Trim remaining bytes after merging of multiple pushes.
IMPORTANT NOTE: This function is decorated with alphatims.utils.pjit.
The first argument is thus expected to be provided as an iterable
containing ints instead of a single int.
Parameters
----------
index : int
The push index whose intensity_values and tof_indices will be trimmed.
spectrum_tof_indices : np.uint32[:]
The original array containing tof indices.
spectrum_intensity_values : np.float64[:]
The original array containing intensity values.
spectrum_indptr : np.int64[:]
An index pointer array defining the original spectrum boundaries.
trimmed_spectrum_tof_indices : np.uint32[:]
A buffer array to store new tof indices.
trimmed_spectrum_intensity_values : np.float64[:]
A buffer array to store new intensity values.
new_spectrum_indptr : np.int64[:]
An index pointer array defining the trimmed spectrum boundaries.
"""
start = spectrum_indptr[index]
new_start = new_spectrum_indptr[index]
new_end = new_spectrum_indptr[index + 1]
trimmed_spectrum_tof_indices[new_start: new_end] = spectrum_tof_indices[
start: start + new_end - new_start
]
trimmed_spectrum_intensity_values[
new_start: new_end
] = spectrum_intensity_values[
start: start + new_end - new_start
]
def parse_keys(data: TimsTOF, keys) -> dict:
"""Convert different keys to a key dict with defined types.
    NOTE: Negative slicing is not supported and all individual keys
    are assumed to be sorted, disjunct and strictly increasing.
Parameters
----------
data : alphatims.bruker.TimsTOF
        The TimsTOF object for which to get slices.
keys : tuple
        A tuple of at most 5 elements, containing
slices, ints, floats, Nones, and/or iterables.
See `alphatims.bruker.convert_slice_key_to_int_array` and
`alphatims.bruker.convert_slice_key_to_float_array` for more details.
Returns
-------
: dict
The resulting dict always has the following items:
- "frame_indices": np.int64[:, 3]
- "scan_indices": np.int64[:, 3]
- "tof_indices": np.int64[:, 3]
- "precursor_indices": np.int64[:, 3]
- "quad_values": np.float64[:, 2]
- "intensity_values": np.float64[:, 2]
"""
dimensions = [
"frame_indices",
"scan_indices",
"precursor_indices",
"tof_indices",
]
dimension_slices = {}
if len(keys) > (len(dimensions) + 1):
raise KeyError(
"LC-IMS-MSMS data can be sliced in maximum 5 dimensions. "
"Integers are assumed to be indices, while "
"floats are assumed as values. Intensity is always casted "
"to integer values, regardless of input type."
)
if isinstance(keys[0], dict):
new_keys = []
dimension_translations = {
"frame_indices": "rt_values",
"scan_indices": "mobility_values",
"precursor_indices": "quad_mz_values",
"tof_indices": "mz_values",
}
for indices, values in dimension_translations.items():
if indices in keys[0]:
new_keys.append(keys[0][indices])
elif values in keys[0]:
new_keys.append(keys[0][values])
else:
new_keys.append(slice(None))
if "intensity_values" in keys[0]:
new_keys.append(keys[0]["intensity_values"])
keys = new_keys
for i, dimension in enumerate(dimensions):
try:
dimension_slices[
dimension
] = convert_slice_key_to_int_array(
data,
keys[i] if (i < len(keys)) else slice(None),
dimension
)
except PrecursorFloatError:
dimension_slices[
"precursor_indices"
] = convert_slice_key_to_int_array(
data,
slice(None),
"precursor_indices"
)
dimension_slices[
"quad_values"
] = convert_slice_key_to_float_array(keys[i])
dimension_slices[
"intensity_values"
] = convert_slice_key_to_float_array(
keys[-1] if (len(keys) > len(dimensions)) else slice(None)
)
if "quad_values" not in dimension_slices:
dimension_slices["quad_values"] = np.array(
[[-np.inf, np.inf]],
dtype=np.float64
)
return dimension_slices
def convert_slice_key_to_float_array(key):
"""Convert a key to a slice float array.
NOTE: This function should only be used for QUAD or DETECTOR dimensions.
Parameters
----------
key : slice, int, float, None, iterable
The key that needs to be converted.
Returns
-------
: np.float64[:, 2]
        Each row represents a (start, stop) slice.
Raises
------
ValueError
When the key is an np.ndarray with more than 2 columns.
"""
try:
iter(key)
except TypeError:
if key is None:
key = slice(None)
if isinstance(key, slice):
start = key.start
if start is None:
start = -np.inf
stop = key.stop
if stop is None:
stop = np.inf
else:
start = key
stop = key
return np.array([[start, stop]], dtype=np.float64)
else:
if not isinstance(key, np.ndarray):
key = np.array(key, dtype=np.float64)
key = key.astype(np.float64)
if len(key.shape) == 1:
return np.array([key, key]).T
elif len(key.shape) == 2:
if key.shape[1] != 2:
raise ValueError
return key
else:
raise ValueError
def convert_slice_key_to_int_array(data: TimsTOF, key, dimension: str):
"""Convert a key of a data dimension to a slice integer array.
Parameters
----------
data : alphatims.bruker.TimsTOF
        The TimsTOF object for which to get slices.
key : slice, int, float, None, iterable
The key that needs to be converted.
dimension : str
        The dimension for which the key needs to be retrieved.
Returns
-------
: np.int64[:, 3]
        Each row represents a (start, stop, step) slice.
Raises
------
ValueError
When the key contains elements other than int or float.
PrecursorFloatError
When trying to convert a quad float to precursor index.
"""
result = np.empty((0, 3), dtype=np.int64)
inverse_of_scans = False
try:
iter(key)
except TypeError:
if key is None:
key = slice(None)
if isinstance(key, slice):
if dimension == "scan_indices":
if isinstance(key.start, (np.inexact, float)) or isinstance(
key.stop,
(np.inexact, float)
):
key = slice(key.stop, key.start, key.step)
start = key.start
if not isinstance(start, (np.integer, int)):
if start is None:
if dimension == "scan_indices":
start = np.inf
else:
start = -np.inf
if not isinstance(start, (np.inexact, float)):
raise ValueError
start = data.convert_to_indices(
start,
return_type=dimension
)
stop = key.stop
if not isinstance(stop, (np.integer, int)):
if stop is None:
if dimension == "scan_indices":
stop = -np.inf
else:
stop = np.inf
if not isinstance(stop, (np.inexact, float)):
raise ValueError
stop = data.convert_to_indices(
stop,
return_type=dimension,
)
step = key.step
if not isinstance(step, (np.integer, int)):
if step is not None:
raise ValueError
step = 1
result = np.array([[start, stop, step]])
elif isinstance(key, (np.integer, int)):
result = | np.array([[key, key + 1, 1]]) | numpy.array |
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.autograd import Variable
from torch.distributions.categorical import Categorical
import numpy as np
class Controller(nn.Module):
def __init__(self,
n_subpolicy=2,
n_op=2,
lstm_size=100,
operation_types = 15,
operation_prob = 11,
operation_mag = 10,
lstm_num_layers=1,
baseline=None,
tanh_constant=1.5,
temperature=None,
img_input=True,
n_group=0,
gr_prob_weight=1e-3):
super(Controller, self).__init__()
self.n_subpolicy = n_subpolicy
self.n_op = n_op
self.lstm_size = lstm_size
self.lstm_num_layers = lstm_num_layers
self.baseline = baseline
self.tanh_constant = tanh_constant
self.temperature = temperature
self.n_group = n_group
self.gr_prob_weight = gr_prob_weight
self._operation_types = operation_types
self._operation_prob = operation_prob
self._operation_mag = operation_mag
self._search_space_size = [self._operation_types, self._operation_prob, self._operation_mag]
self.img_input = img_input
self._create_params()
def _create_params(self):
self.lstm = nn.LSTM(input_size=self.lstm_size,
hidden_size=self.lstm_size,
num_layers=self.lstm_num_layers)
if self.img_input:
# input CNN
self.conv_input = nn.Sequential(
# Input size: [batch, 3, 32, 32]
# Output size: [1, batch, lstm_size]
nn.Conv2d(3, 16, 3, stride=2, padding=1), # [batch, 16, 16, 16]
nn.ReLU(),
nn.Conv2d(16, 32, 3, stride=2, padding=1), # [batch, 32, 8, 8]
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=2, padding=1), # [batch, 64, 4, 4]
nn.BatchNorm2d(64),
nn.ReLU(),
nn.AvgPool2d(2, stride=2), # [batch, 64, 2, 2]
nn.Flatten(),
nn.Linear(64*2*2, self.lstm_size)
)
else:
pass
# self.in_emb = nn.Embedding(1, self.lstm_size) # Learn the starting input
if self.n_group > 0:
self.logit2group = nn.Sequential(
nn.Linear(self.lstm_size, self.n_group),
            nn.LogSoftmax(dim=1)
)
self.gr_emb = nn.Embedding(self.n_group, self.lstm_size)
# LSTM output to Categorical logits
self.o_logit = nn.Linear(self.lstm_size, self._operation_types)#, bias=False)
self.p_logit = nn.Linear(self.lstm_size, self._operation_prob )#, bias=False)
self.m_logit = nn.Linear(self.lstm_size, self._operation_mag )#, bias=False)
# Embedded input to LSTM: (class:int)->(lstm input vector)
self.o_emb = nn.Embedding(self._operation_types, self.lstm_size)
self.p_emb = nn.Embedding(self._operation_prob , self.lstm_size)
self.m_emb = nn.Embedding(self._operation_mag , self.lstm_size)
self._reset_params()
def _reset_params(self):
for m in self.modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Embedding):
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.uniform_(self.lstm.weight_hh_l0, -0.1, 0.1)
nn.init.uniform_(self.lstm.weight_ih_l0, -0.1, 0.1)
def softmax_tanh(self, logit):
if self.temperature is not None:
logit /= self.temperature
if self.tanh_constant is not None:
logit = self.tanh_constant * torch.tanh(logit)
return logit
def forward(self, image=None):
"""
return: log_probs, entropys, subpolicies
log_probs: batch of log_prob, (tensor)[batch or 1]
entropys: batch of entropy, (tensor)[batch or 1]
subpolicies: batch of sampled policies, (np.array)[batch, n_subpolicy, n_op, 3]
"""
log_probs = []
entropys = []
subpolicies = []
self.hidden = None # setting state to None will initialize LSTM state with 0s
if self.img_input:
inputs = self.conv_input(image) # [batch, lstm_size]
if self.n_group > 0:
gr_vectors = self.logit2group(inputs)
gr_log_prob, gr_ids = gr_vectors.max(1)
inputs = self.gr_emb(gr_ids)
log_probs.append(self.gr_prob_weight * gr_log_prob)
else:
# inputs = self.in_emb.weight # [1, lstm_size]
if self.n_group > 0:
gr_ids = torch.randint(low=0, high=self.n_group, size=(len(image),)).cuda()
inputs = self.gr_emb(gr_ids)
inputs = inputs.unsqueeze(0) # [1, batch(or 1), lstm_size]
for i_subpol in range(self.n_subpolicy):
subpolicy = []
for i_op in range(self.n_op):
# sample operation type, o
output, self.hidden = self.lstm(inputs, self.hidden) # [1, batch, lstm_size]
output = output.squeeze(0) # [batch, lstm_size]
logit = self.o_logit(output) # [batch, _operation_types]
logit = self.softmax_tanh(logit)
o_id_dist = Categorical(logits=logit)
o_id = o_id_dist.sample() # [batch]
log_prob = o_id_dist.log_prob(o_id) # [batch]
entropy = o_id_dist.entropy() # [batch]
log_probs.append(log_prob)
entropys.append(entropy)
inputs = self.o_emb(o_id) # [batch, lstm_size]
inputs = inputs.unsqueeze(0) # [1, batch, lstm_size]
# sample operation probability, p
output, self.hidden = self.lstm(inputs, self.hidden)
output = output.squeeze(0)
logit = self.p_logit(output)
logit = self.softmax_tanh(logit)
p_id_dist = Categorical(logits=logit)
p_id = p_id_dist.sample()
log_prob = p_id_dist.log_prob(p_id)
entropy = p_id_dist.entropy()
log_probs.append(log_prob)
entropys.append(entropy)
inputs = self.p_emb(p_id)
inputs = inputs.unsqueeze(0)
# sample operation magnitude, m
output, self.hidden = self.lstm(inputs, self.hidden)
output = output.squeeze(0)
logit = self.m_logit(output)
logit = self.softmax_tanh(logit)
m_id_dist = Categorical(logits=logit)
m_id = m_id_dist.sample()
log_prob = m_id_dist.log_prob(m_id)
entropy = m_id_dist.entropy()
log_probs.append(log_prob)
entropys.append(entropy)
inputs = self.m_emb(m_id)
inputs = inputs.unsqueeze(0)
subpolicy.append([o_id.detach().cpu().numpy(), p_id.detach().cpu().numpy(), m_id.detach().cpu().numpy()])
subpolicies.append(subpolicy)
sampled_policies = np.array(subpolicies) # (np.array) [n_subpolicy, n_op, 3, batch]
self.sampled_policies = np.moveaxis(sampled_policies,-1,0) # (np.array) [batch, n_subpolicy, n_op, 3]
self.log_probs = sum(log_probs) # (tensor) [batch]
self.entropys = sum(entropys) # (tensor) [batch]
return self.log_probs, self.entropys, self.sampled_policies
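# Example (illustrative sketch, not part of the original file): reading one
# sampled operation out of the policies returned by forward().  `controller`
# is assumed to be an already constructed instance of the controller class
# above, configured to accept image input; `images` is a matching batch.
def _example_decode_policies(controller, images):
    log_probs, entropys, policies = controller(images)
    # policies has shape [batch, n_subpolicy, n_op, 3]; the last axis holds
    # [operation type id, probability id, magnitude id].
    return policies[0, 0, 0]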
class RandAug(object):
"""
"""
def __init__(self,
n_subpolicy=2,
n_op=2,
operation_types = 15,
operation_prob = 11,
operation_mag = 10):
self.n_subpolicy = n_subpolicy
self.n_op = n_op
self._operation_types = operation_types
self._operation_prob = operation_prob
self._operation_mag = operation_mag
self._search_space_size = [self._operation_types, self._operation_prob, self._operation_mag]
def __call__(self, input):
"""
input: (tensor) [batch, W, H, 3]
return sampled_policies: (np.array) [batch, n_subpolicy, n_op, 3]
"""
# *_id (np.array) [batch]
batch_size = input.size(0)
subpolicies = []
for i_subpol in range(self.n_subpolicy):
subpolicy = []
for i_op in range(self.n_op):
oper = []
for oper_len in self._search_space_size:
ids = np.random.randint(0, oper_len, batch_size)
oper.append(ids)
subpolicy.append(oper)
subpolicies.append(subpolicy)
        sampled_policies = np.array(subpolicies)  # (np.array) [n_subpolicy, n_op, 3, batch]
        return np.moveaxis(sampled_policies, -1, 0)  # (np.array) [batch, n_subpolicy, n_op, 3]
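# Example (illustrative sketch, not part of the original file): drawing random
# sub-policies for a dummy batch.  The batch size and image shape below are
# assumptions chosen only for demonstration.
def _example_randaug():
    import torch
    images = torch.zeros(4, 32, 32, 3)        # dummy [batch, W, H, 3] input
    sampler = RandAug(n_subpolicy=2, n_op=2)
    policies = sampler(images)                # np.array [batch, n_subpolicy, n_op, 3]
    return policies.shape                     # (4, 2, 2, 3)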
# -*- coding: utf-8 -*-
import unittest
import numpy as np
# np.set_printoptions(formatter={'int':hex})
from py3dtiles import TileContentReader, Feature, Pnts
class TestTileContentReader(unittest.TestCase):
def test_read(self):
tile = TileContentReader().read_file('tests/pointCloudRGB.pnts')
self.assertEqual(tile.header.version, 1.0)
self.assertEqual(tile.header.tile_byte_length, 15176)
self.assertEqual(tile.header.ft_json_byte_length, 148)
self.assertEqual(tile.header.ft_bin_byte_length, 15000)
self.assertEqual(tile.header.bt_json_byte_length, 0)
self.assertEqual(tile.header.bt_bin_byte_length, 0)
feature_table = tile.body.feature_table
feature = feature_table.feature(0)
dcol_res = {'Red': 44, 'Blue': 209, 'Green': 243}
self.assertDictEqual(dcol_res, feature.colors)
class TestTileBuilder(unittest.TestCase):
def test_build_without_colors(self):
tread = TileContentReader().read_file('tests/pointCloudRGB.pnts')
f0_ref = tread.body.feature_table.feature(0).positions
# numpy dtype for positions and colors
pdt = np.dtype([('X', '<f4'), ('Y', '<f4'), ('Z', '<f4')])
# create features
features = []
for i in range(0, tread.body.feature_table.header.points_length):
f = tread.body.feature_table.feature(i)
p = f.positions
            pos = np.array([(p['X'], p['Y'], p['Z'])], dtype=pdt)
"""Lekhnitskii solutions to homogeneous anisotropic plates with loaded and unloaded holes
Notes
-----
This module uses the following acronyms
* CLPT: Classical Laminated Plate Theory
References
----------
.. [1] <NAME>. (2007). *Stress distribution and strength prediction of composite
laminates with multiple holes* (PhD thesis). Retrieved from
https://rc.library.uta.edu/uta-ir/bitstream/handle/10106/767/umi-uta-1969.pdf?sequence=1&isAllowed=y
.. [2] <NAME>., <NAME>., & <NAME>. (1987). *Anisotropic plates* (2nd ed.).
New York: Gordon and Breach science.
.. [3] <NAME>. and <NAME>. (1981) *Effect of variances and manufacturing
tolerances on the design strength and life of mechanically fastened
composite joints* (Vol. 1,2,3). AFWAL-TR-81-3041.
.. [4] <NAME>. and <NAME>. (1973) *A synthesis procedure for mechanically
fastened joints in advanced composite materials* (Vol. II). AFML-TR-73-145.
"""
import logging
import abc
import numpy as np
import numpy.testing as npt
import bjsfm.fourier_series as fs
logger = logging.getLogger(__name__)
def rotate_plane_stress(stresses, angle=0.):
r"""Rotates the stress components by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Parameters
----------
stresses : ndarray
array of [:math: `\sigma_x, \sigma_y, \tau_{xy}`] in-plane stresses
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D array of [:math: `\sigma_x', \sigma_y', \tau_{xy}'`] rotated stresses
"""
c = np.cos(angle)
s = np.sin(angle)
rotation_matrix = np.array([
[c**2, s**2, 2*s*c],
[s**2, c**2, -2*s*c],
[-s*c, s*c, c**2-s**2]
])
stresses = rotation_matrix @ stresses.T
return stresses.T
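# Example (illustrative sketch, not part of the original module): rotating a
# purely uniaxial stress state by 90 degrees moves the stress from the x- to
# the y-direction.  The magnitude below is an assumption for demonstration.
def _example_rotate_plane_stress():
    stresses = np.array([[100., 0., 0.]])                 # [sx, sy, sxy]
    return rotate_plane_stress(stresses, angle=np.pi/2)   # ~[[0., 100., 0.]]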
def rotate_material_matrix(a_inv, angle=0.):
r"""Rotates the material compliance matrix by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
This function implements Eq. 9.6 [1]_
Parameters
----------
a_inv : ndarray
2D (3, 3) inverse CLPT A-matrix
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D (3, 3) rotated compliance matrix
"""
c = np.cos(angle)
s = np.sin(angle)
a11 = a_inv[0, 0]
a12 = a_inv[0, 1]
a16 = a_inv[0, 2]
a22 = a_inv[1, 1]
a26 = a_inv[1, 2]
a66 = a_inv[2, 2]
a11p = a11*c**4 + (2*a12 + a66)*s**2*c**2 + a22*s**4 + (a16*c**2 + a26*s**2)*np.sin(2*angle)
a22p = a11*s**4 + (2*a12 + a66)*s**2*c**2 + a22*c**4 - (a16*s**2 + a26*c**2)*np.sin(2*angle)
a12p = a12 + (a11 + a22 - 2*a12 - a66)*s**2*c**2 + 0.5*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a66p = a66 + 4*(a11 + a22 - 2*a12 - a66)*s**2*c**2 + 2*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a16p = ((a22*s**2 - a11*c**2 + 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*c**2*(c**2 - 3*s**2) + a26*s**2*(3*c**2 - s**2))
a26p = ((a22*c**2 - a11*s**2 - 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*s**2*(3*c**2 - s**2) + a26*c**2*(c**2 - 3*s**2))
# test invariants (Eq. 9.7 [2]_)
npt.assert_almost_equal(a11p + a22p + 2*a12p, a11 + a22 + 2*a12, decimal=4)
npt.assert_almost_equal(a66p - 4*a12p, a66 - 4*a12, decimal=4)
return np.array([[a11p, a12p, a16p], [a12p, a22p, a26p], [a16p, a26p, a66p]])
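# Example (illustrative sketch, not part of the original module): the rotated
# compliance of a quasi-isotropic laminate (a66 = 2*(a11 - a12), a16 = a26 = 0)
# is unchanged by the rotation.  The compliance values are assumptions chosen
# only for demonstration.
def _example_rotate_material_matrix():
    a11, a12 = 1.0e-6, -0.3e-6
    a_inv = np.array([[a11, a12, 0.],
                      [a12, a11, 0.],
                      [0., 0., 2.*(a11 - a12)]])
    return rotate_material_matrix(a_inv, angle=np.pi/4)   # equals a_inv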
def rotate_complex_parameters(mu1, mu2, angle=0.):
r"""Rotates the complex parameters by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
Implements Eq. 10.8 [2]_
Parameters
----------
mu1 : complex
first complex parameter
mu2 : complex
second complex parameter
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
mu1p, mu2p : complex
first and second transformed complex parameters
"""
c = np.cos(angle)
s = np.sin(angle)
mu1p = (mu1*c - s)/(c + mu1*s)
mu2p = (mu2*c - s)/(c + mu2*s)
return mu1p, mu2p
class Hole(abc.ABC):
"""Abstract parent class for defining a hole in an anisotropic infinite plate
This class defines shared methods and attributes for anisotropic elasticity solutions of plates with circular
holes.
This is an abstract class, do not instantiate this class.
Notes
-----
The following assumptions apply for plates in a state of generalized plane stress.
#. The plates are homogeneous and a plane of elastic symmetry which is parallel to their middle plane
exists at every point.
#. Applied forces act within planes that are parallel and symmetric to the middle plane of the plates,
and have negligible variation through the thickness.
#. Plate deformations are small.
Parameters
----------
diameter : float
hole diameter
thickness : float
laminate thickness
a_inv : array_like
2D (3, 3) inverse of CLPT A-matrix
Attributes
----------
r : float
the hole radius
a : ndarray
(3, 3) inverse a-matrix of the laminate
h : float
thickness of the laminate
mu1 : float
real part of first root of characteristic equation
mu2 : float
real part of second root of characteristic equation
mu1_bar : float
imaginary part of first root of characteristic equation
mu2_bar : float
imaginary part of second root of characteristic equation
"""
MAPPING_PRECISION = 0.0000001
def __init__(self, diameter, thickness, a_inv):
self.r = diameter/2.
self.a = np.array(a_inv, dtype=float)
self.h = thickness
self.mu1, self.mu2, self.mu1_bar, self.mu2_bar = self.roots()
def roots(self):
r""" Finds the roots to the characteristic equation
Notes
-----
This method implements Eq. A.2 [1]_ or Eq. 7.4 [2]_
        .. math:: a_{11}\mu^4-2a_{16}\mu^3+(2a_{12}+a_{66})\mu^2-2a_{26}\mu+a_{22}=0
Raises
------
ValueError
If roots cannot be found
"""
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a16 = self.a[0, 2]
a22 = self.a[1, 1]
a26 = self.a[1, 2]
a66 = self.a[2, 2]
roots = np.roots([a11, -2 * a16, (2 * a12 + a66), -2 * a26, a22])
if np.imag(roots[0]) >= 0.0:
mu2 = roots[0]
mu2_bar = roots[1]
elif np.imag(roots[1]) >= 0.0:
mu2 = roots[1]
mu2_bar = roots[0]
else:
raise ValueError("mu1 cannot be solved")
if np.imag(roots[2]) >= 0.0:
mu1 = roots[2]
mu1_bar = roots[3]
elif np.imag(roots[3]) >= 0.0:
mu1 = roots[3]
mu1_bar = roots[2]
else:
raise ValueError("mu2 cannot be solved")
return mu1, mu2, mu1_bar, mu2_bar
def xi_1(self, z1s):
r"""Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_1=\frac{z_1\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}}{a-i\mu_1b}
Parameters
----------
z1s : ndarray
1D array of first parameters from the complex plane :math: `z_1=x+\mu_1y`
Returns
-------
xi_1s : ndarray
1D array of the first mapping parameters
sign_1s : ndarray
1D array of signs producing positive mapping parameters
"""
mu1 = self.mu1
a = self.r
b = self.r
xi_1s = np.zeros(len(z1s), dtype=complex)
sign_1s = np.zeros(len(z1s), dtype=int)
xi_1_pos = (z1s + np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
xi_1_neg = (z1s - np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
pos_indices = np.where(np.abs(xi_1_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_1_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_1s[pos_indices] = xi_1_pos[pos_indices]
xi_1s[neg_indices] = xi_1_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_1s.size:
bad_indices = np.where(xi_1s == 0)[0]
logger.warning(f"xi_1 unsolvable\n Failed Indices: {bad_indices}")
sign_1s[pos_indices] = 1
sign_1s[neg_indices] = -1
return xi_1s, sign_1s
def xi_2(self, z2s):
r""" Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_2=\frac{z_2\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}}{a-i\mu_2b}
Parameters
----------
z2s : ndarray
            1D array of second parameters from the complex plane :math: `z_2=x+\mu_2y`
Returns
-------
xi_2s : ndarray
            1D array of the second mapping parameters
sign_2s : ndarray
1D array of signs producing positive mapping parameters
"""
mu2 = self.mu2
a = self.r
b = self.r
xi_2s = np.zeros(len(z2s), dtype=complex)
sign_2s = np.zeros(len(z2s), dtype=int)
xi_2_pos = (z2s + np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
xi_2_neg = (z2s - np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
pos_indices = np.where(np.abs(xi_2_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_2_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_2s[pos_indices] = xi_2_pos[pos_indices]
xi_2s[neg_indices] = xi_2_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_2s.size:
bad_indices = np.where(xi_2s == 0)[0]
logger.warning(f"xi_2 unsolvable\n Failed Indices: {bad_indices}")
sign_2s[pos_indices] = 1
sign_2s[neg_indices] = -1
return xi_2s, sign_2s
@abc.abstractmethod
def phi_1_prime(self, z1):
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_2_prime(self, z2):
raise NotImplementedError("You must implement this function.")
def stress(self, x, y):
r""" Calculates the stress at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.2 [2]_
.. math:: \sigma_x=2Re[\mu_1^2\Phi_1'(z_1)+\mu_2^2\Phi_2'(z_2)]
.. math:: \sigma_y=2Re[\Phi_1'(z_1)+\Phi_2'(z_2)]
        .. math:: \tau_{xy}=-2Re[\mu_1\Phi_1'(z_1)+\mu_2\Phi_2'(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
mu1 = self.mu1
mu2 = self.mu2
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z1 = x + mu1 * y
z2 = x + mu2 * y
phi_1_prime = self.phi_1_prime(z1)
phi_2_prime = self.phi_2_prime(z2)
sx = 2.0 * np.real(mu1 * mu1 * phi_1_prime + mu2 * mu2 * phi_2_prime)
sy = 2.0 * np.real(phi_1_prime + phi_2_prime)
sxy = -2.0 * np.real(mu1 * phi_1_prime + mu2 * phi_2_prime)
return np.array([sx, sy, sxy]).T
class UnloadedHole(Hole):
r"""Class for defining an unloaded hole in an infinite anisotropic homogeneous plate
    This class represents an infinite anisotropic plate with an unfilled circular hole loaded at infinity with
forces in the x, y and xy (shear) directions.
Parameters
----------
loads: array_like
1D array [Nx, Ny, Nxy] force / unit length
diameter: float
hole diameter
thickness: float
laminate thickness
a_inv: array_like
2D array (3, 3) inverse CLPT A-matrix
Attributes
----------
applied_stress : (1, 3) ndarray
[:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`] stresses applied at infinity
"""
def __init__(self, loads, diameter, thickness, a_inv):
super().__init__(diameter, thickness, a_inv)
self.applied_stress = np.array(loads, dtype=float) / self.h
def alpha(self):
r"""Calculates the alpha loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_{x}^*, \sigma_{y}^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \alpha_1=\frac{r}{2}(\tau_{xy}^*i-\sigma_{y}^*)
Returns
-------
complex
first fourier series term for applied stress at infinity
"""
sy = self.applied_stress[1]
sxy = self.applied_stress[2]
r = self.r
return 1j * sxy * r / 2 - sy * r / 2
def beta(self):
r"""Calculates the beta loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \beta_1=\frac{r}{2}(\tau_{xy}^*-\sigma_x^*i)
Returns
-------
complex
first fourier series term for applied stresses at infinity
"""
sx = self.applied_stress[0]
sxy = self.applied_stress[2]
r = self.r
return sxy * r / 2 - 1j * sx * r / 2
def phi_1_prime(self, z1):
r"""Calculates derivative of the first stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_1=\frac{\beta_1-\mu_2\alpha_1}{\mu_1-\mu_2}
        .. math:: \eta_1=\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}
.. math:: \kappa_1=\frac{1}{a-i\mu_1b}
.. math:: \Phi_1'=-\frac{C_1}{\xi_1^2}(1+\frac{z_1}{\eta_1})\kappa_1
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_1, sign_1 = self.xi_1(z1)
C1 = (beta - mu2 * alpha) / (mu1 - mu2)
eta1 = sign_1 * np.sqrt(z1 * z1 - a * a - mu1 * mu1 * b * b)
kappa1 = 1 / (a - 1j * mu1 * b)
return -C1 / (xi_1 ** 2) * (1 + z1 / eta1) * kappa1
def phi_2_prime(self, z2):
r"""Calculates derivative of the second stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_2=-\frac{\beta_1-\mu_1\alpha_1}{\mu_1-\mu_2}
        .. math:: \eta_2=\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}
.. math:: \kappa_2=\frac{1}{a-i\mu_2b}
.. math:: \Phi_2'=-\frac{C_2}{\xi_2^2}(1+\frac{z_2}{\eta_2})\kappa_2
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_2, sign_2 = self.xi_2(z2)
C2 = -(beta - mu1 * alpha) / (mu1 - mu2)
eta2 = sign_2 * np.sqrt(z2 * z2 - a * a - mu2 * mu2 * b * b)
kappa2 = 1 / (a - 1j * mu2 * b)
return -C2 / (xi_2 ** 2) * (1 + z2 / eta2) * kappa2
def stress(self, x, y):
r""" Calculates the stress at (x, y) points in the plate
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
sx, sy, sxy = super().stress(x, y).T
sx_app = self.applied_stress[0]
sy_app = self.applied_stress[1]
sxy_app = self.applied_stress[2]
return np.array([sx + sx_app, sy + sy_app, sxy + sxy_app]).T
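# Example (illustrative sketch, not part of the original module): stresses on a
# circle around an open hole in a mildly orthotropic plate loaded in the
# x-direction.  Every material and geometry value below is an assumption made
# up for demonstration only.
def _example_unloaded_hole():
    a_inv = np.array([[1.0e-6, -0.3e-6, 0.],
                      [-0.3e-6, 2.0e-6, 0.],
                      [0., 0., 3.0e-6]])
    hole = UnloadedHole(loads=[100., 0., 0.], diameter=0.25, thickness=0.1, a_inv=a_inv)
    angles = np.linspace(0., 2.*np.pi, 8, endpoint=False)
    x = 2.*hole.r*np.cos(angles)
    y = 2.*hole.r*np.sin(angles)
    return hole.stress(x, y)                  # (8, 3) array of [sx, sy, sxy]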
class LoadedHole(Hole):
"""Class for defining a loaded hole in an infinite anisotropic homogeneous plate
A cosine bearing load distribution is assumed to apply to the inside of the hole.
Notes
-----
Bearing distribution as shown below Ref. [4]_
.. image:: ../img/cosine_distribution.png
:height: 400px
Parameters
----------
load : float
bearing force
diameter : float
hole diameter
thickness : float
plate thickness
a_inv : array_like
2D array (3, 3) inverse CLPT A-matrix
theta : float, optional
bearing angle counter clock-wise from positive x-axis (radians)
Attributes
----------
p : float
bearing force
theta : float
bearing angle counter clock-wise from positive x-axis (radians)
A : float
real part of equilibrium constant for first stress function
A_bar : float
imaginary part of equilibrium constant for first stress function
B : float
real part of equilibrium constant for second stress function
B_bar : float
imaginary part of equilibrium constant for second stress function
"""
FOURIER_TERMS = 45 # number of fourier series terms [3]_
# X_DIR_COEFFICIENTS = self._x_dir_fourier_coefficients()
X_DIR_COEFFICIENTS = np.array([
2.12206591e-01 - 4.77083644e-17j, 1.25000000e-01 - 5.89573465e-17j,
4.24413182e-02 - 1.91840853e-17j, -8.90314393e-18 - 1.79348322e-19j,
-6.06304545e-03 + 6.55633890e-18j, 5.48463980e-18 + 4.37501201e-18j,
2.02101515e-03 - 3.66997376e-18j, -2.47147905e-18 - 3.77237815e-19j,
-9.18643250e-04 + 6.67550845e-19j, 1.15294597e-18 + 4.32409913e-20j,
4.94654058e-04 - 5.26048781e-18j, -1.92490138e-18 - 3.55274303e-18j,
-2.96792435e-04 + 4.00276461e-18j, 3.49945789e-18 + 2.84432075e-18j,
1.92042164e-04 - 7.15349518e-19j, -2.10847715e-18 + 5.86429928e-19j,
-1.31397270e-04 + 5.42357122e-19j, 5.26279974e-19 + 5.07907945e-19j,
9.38551927e-05 - 1.60287068e-18j, -2.62667554e-19 - 2.81642867e-20j,
-6.93712294e-05 + 4.72318710e-19j, -1.55309233e-19 - 6.73163746e-19j,
5.27221344e-05 + 3.74419334e-19j, 1.10507308e-18 - 3.45051024e-18j,
-4.10061045e-05 + 1.56923065e-19j, 9.40356979e-19 - 2.19017030e-18j,
3.25220829e-05 - 3.91078386e-19j, 1.36872347e-19 - 4.27353360e-19j,
-2.62274862e-05 + 2.86611820e-19j, 9.78311008e-20 - 7.89061684e-20j,
2.14588523e-05 - 8.91027872e-19j, -1.30904740e-19 + 1.91919825e-19j,
-1.77801919e-05 + 1.97944104e-19j, 8.14254172e-19 + 2.81801032e-19j,
1.48969176e-05 - 1.66624951e-19j, -1.34123974e-18 + 1.17525380e-18j,
-1.26050841e-05 + 1.21462369e-18j, 5.21951371e-19 - 1.06955735e-18j,
1.07604376e-05 - 1.17456794e-18j, -8.16624019e-20 + 5.13214752e-20j,
-9.25898123e-06 - 1.65297614e-19j, 3.30062278e-19 - 2.46250926e-20j,
8.02445040e-06 - 2.73275116e-19j, -2.39245061e-19 + 5.01995076e-19j,
-7.00005248e-06 + 1.01720924e-19j
])
# Y_DIR_COEFFICIENTS = self._y_dir_fourier_coefficients()
Y_DIR_COEFFICIENTS = np.array([
-1.94319243e-17 - 1.06103295e-01j, -5.45839291e-17 - 1.25000000e-01j,
-3.62876318e-17 - 6.36619772e-02j, 1.30591839e-18 + 1.52792630e-17j,
1.58336660e-17 + 1.51576136e-02j, 1.61007420e-18 - 1.20107231e-17j,
-9.15844587e-18 - 7.07355303e-03j, -4.65834606e-19 + 4.69348027e-18j,
7.82631893e-18 + 4.13389463e-03j, -2.07168349e-19 - 5.48019331e-18j,
-7.79806861e-18 - 2.72059732e-03j, -8.28820898e-19 + 3.72983658e-18j,
5.67464898e-18 + 1.92915083e-03j, -9.41779078e-19 - 2.96224847e-18j,
-4.81136247e-18 - 1.44031623e-03j, -4.18882423e-20 + 3.92096760e-18j,
3.53379639e-18 + 1.11687679e-03j, 1.18208219e-18 - 3.45316542e-18j,
-3.35800239e-18 - 8.91624331e-04j, -3.88844853e-19 + 2.81568924e-18j,
3.55287198e-18 + 7.28397909e-04j, -7.24302864e-22 - 3.24725934e-18j,
-2.86484044e-18 - 6.06304545e-04j, 1.85812997e-18 + 2.72227446e-18j,
2.71489222e-18 + 5.12576306e-04j, -1.22325211e-18 - 2.62305288e-18j,
-3.25375118e-18 - 4.39048119e-04j, 5.06148684e-20 + 1.30612327e-18j,
2.02547194e-18 + 3.80298550e-04j, -1.10424267e-19 - 1.61508137e-18j,
-2.30407373e-18 - 3.32612211e-04j, -4.65115570e-19 + 1.28879601e-18j,
2.22873521e-18 + 2.93373167e-04j, 8.28830477e-20 - 1.39232809e-18j,
-1.82653809e-18 - 2.60696058e-04j, 3.63246046e-19 + 1.92275788e-18j,
1.97581297e-18 + 2.33194056e-04j, 2.19814138e-20 - 1.77673402e-18j,
-1.35481930e-18 - 2.09828534e-04j, 9.33755027e-20 + 1.34376519e-18j,
1.71339592e-18 + 1.89809115e-04j, 1.30928047e-19 - 1.79294538e-18j,
-1.94173495e-18 - 1.72525684e-04j, -1.07013407e-19 + 9.92738558e-19j,
1.57354012e-18 + 1.57501181e-04j
])
def __init__(self, load, diameter, thickness, a_inv, theta=0.):
a_inv = rotate_material_matrix(a_inv, angle=theta)
super().__init__(diameter, thickness, a_inv)
self.p = load
self.theta = theta
self.A, self.A_bar, self.B, self.B_bar = self.equilibrium_constants()
def _x_dir_fourier_coefficients(self, sample_rate=100000):
"""Calculates Fourier coefficients of x-direction components of bearing load
This function calculates the fourier series coefficients for the x-direction components of a cosine bearing
load distribution centered on the positive x-axis.
Parameters
----------
sample_rate : int, default 100000
used to tune the fast fourier transform (FFT) algorithm for accuracy
Returns
-------
complex ndarray
fourier series coefficients
"""
N = self.FOURIER_TERMS
def brg_load_x_component(thetas):
"""x-direction components of a cosine load distribution centered at positive x-axis
Parameters
----------
thetas : 1D ndarray
angles
Returns
-------
ndarray
array of x-direction force terms for each angle in thetas
"""
new_array = np.zeros(len(thetas))
for i, angle in enumerate(thetas):
if -np.pi / 2 <= angle <= np.pi / 2:
# x-direction component of cosine load distribution
new_array[i] = np.cos(angle) ** 2
return new_array
# return all coefficients except the first one (Ao)
return fs.fourier_series_coefficients(brg_load_x_component, 2 * np.pi, N, sample_rate=sample_rate)[1:]
def _y_dir_fourier_coefficients(self, sample_rate=100000):
"""Calculates Fourier coefficients of y-direction components of bearing load
This function calculates the fourier series coefficients for the y-direction components of a cosine bearing
load distribution centered on the positive x-axis.
Parameters
----------
sample_rate : int, default 100000
used to tune the fast fourier transform (FFT) algorithm for accuracy
Returns
-------
complex ndarray
fourier series coefficients
"""
N = self.FOURIER_TERMS
def brg_load_y_component(thetas):
"""Y-direction components of a cosine load distribution centered at positive x-axis
Parameters
----------
thetas : ndarray
angles (radians)
Returns
-------
ndarray
array of y-direction force terms for each angle in thetas
"""
new_array = np.zeros(len(thetas))
for i, angle in enumerate(thetas):
if -np.pi / 2 <= angle <= np.pi / 2:
# y-direction component of cosine load distribution
new_array[i] = np.cos(angle) * np.sin(angle)
return new_array
# return all coefficients except the first one (Ao)
return fs.fourier_series_coefficients(brg_load_y_component, 2 * np.pi, N, sample_rate=sample_rate)[1:]
def alphas(self):
"""Fourier series coefficients modified for use in stress function equations
Notes
-----
Modifications to the Fourier series coefficients are developed in Eq. 37.2 [2]_
Returns
-------
complex ndarray
"""
h = self.h
p = self.p
# (in Ref. 2 Eq. 37.2, alpha is associated with the y-direction. Can someone explain?)
# return -p / (np.pi * h) * self._x_dir_fourier_coefficients()
# hard coded alpha values used to speed up runtime
return -p / (np.pi * h) * self.X_DIR_COEFFICIENTS
def betas(self):
"""Fourier series coefficients modified for use in stress function equations
Notes
-----
Modifications to the Fourier series coefficients are developed in Eq. 37.2 [2]_
Returns
-------
complex ndarray
"""
h = self.h
p = self.p
N = self.FOURIER_TERMS
m = np.arange(1, N + 1)
# (in Ref. 2 Eq. 37.2, beta is associated with the x-direction. Can someone explain?)
# return 4 * p / (np.pi * m**2 * h) * self._y_dir_fourier_coefficients()
# hard coded beta values used to speed up runtime
return 4 * p / (np.pi * m**2 * h) * self.Y_DIR_COEFFICIENTS
def equilibrium_constants(self):
"""Solve for constants of equilibrium
When the plate has loads applied that are not in equilibrium, the unbalanced loads are reacted at infinity.
This function solves for the constant terms in the stress functions that account for these reactions.
Notes
-----
This method implements Eq. 37.5 [2]_. Complex terms have been expanded and resolved for
A, A_bar, B and B_bar (setting Py equal to zero).
Returns
-------
[A, A_bar, B, B_bar] : tuple
real and imaginary parts of constants A and B
"""
R1, R2 = np.real(self.mu1), np.imag(self.mu1)
R3, R4 = np.real(self.mu2), np.imag(self.mu2)
p = self.p
h = self.h
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a22 = self.a[1, 1]
a16 = self.a[0, 2]
pi = np.pi
mu_mat = np.array([[0., 1, 0., 1.],
[R2, R1, R4, R3],
[2*R1*R2, (R1**2 - R2**2), 2*R3*R4, (R3**2 - R4**2)],
[R2/(R1**2 + R2**2), -R1/(R1**2 + R2**2), R4/(R3**2 + R4**2), -R3/(R3**2 + R4**2)]])
load_vec = p/(4.*pi*h) * np.array([0.,
1.,
a16/a11,
a12/a22])
A1, A2, B1, B2 = np.dot(np.linalg.inv(mu_mat), load_vec)
return A1, A2, B1, B2
def phi_1_prime(self, z1):
r"""Calculates derivative of the first stress function
Notes
-----
This method implements [Eq. 37.6, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_2\alpha_m}{\mu_1-\mu_2}
.. math:: \eta_1=\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}
.. math:: \Phi_1'=-\frac{1}{\eta_1}(A-\sum_{m=1}^{\infty}\frac{C_m}{\xi_1^m})
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
a = self.r
b = self.r
A = self.A + 1j * self.A_bar
N = self.FOURIER_TERMS
xi_1, sign_1 = self.xi_1(z1)
eta_1 = sign_1 * np.sqrt(z1 * z1 - a * a - b * b * mu1 * mu1)
m = np.arange(1, N + 1)
alphas = self.alphas()
betas = self.betas()
return np.array([1 / eta_1[i] * (A - np.sum(m * (betas - mu2 * alphas) / (mu1 - mu2) / xi_1[i] ** m))
for i in range(len(xi_1))])
def phi_2_prime(self, z2):
r"""Calculates derivative of the first stress function
Notes
-----
This method implements [Eq. 37.6, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_1\alpha_m}{\mu_1-\mu_2}
.. math:: \eta_2=\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}
.. math:: \Phi_2'=-\frac{1}{\eta_2}(B+\sum_{m=1}^{\infty}\frac{C_m}{\xi_2^m})
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
a = self.r
b = self.r
B = self.B + 1j * self.B_bar
N = self.FOURIER_TERMS
xi_2, sign_2 = self.xi_2(z2)
eta_2 = sign_2 * np.sqrt(z2 * z2 - a * a - b * b * mu2 * mu2)
m = np.arange(1, N + 1)
alphas = self.alphas()
betas = self.betas()
return np.array([1 / eta_2[i] * (B + np.sum(m * (betas - mu1 * alphas) / (mu1 - mu2) / xi_2[i] ** m))
for i in range(len(xi_2))])
def stress(self, x, y):
r""" Calculates the stress at (x, y) points in the plate
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
# rotation back to original coordinates
rotation = -self.theta
# convert points to polar coordinates
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
r = np.sqrt(x**2 + y**2)
# calculate angles and fix signs
angles = np.arccos(np.array([1, 0]).dot(np.array([x, y])) / r)
where_vals = np.nonzero(y)[0]
angles[where_vals] = angles[where_vals] * np.sign(y[where_vals])
# rotate coordinates by negative theta
angles += rotation
# convert back to cartesian
        x = r * np.cos(angles)
import sys, os
import json
import itertools
import cv2
from concurrent import futures
import functools
import collections
import torch
import torch.nn
import torch.nn.functional as F
import torch.distributions as tdist
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.io
import cv2
from PIL import Image
from IPython.display import HTML as IP_HTML
from IPython.display import Image as IP_Image
from IPython.display import display as IP_display
import io
class AverageMeter(object):
"""Compute and store the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def torch_to_numpy(torch_tensor, is_standardized_image=False):
"""Convert torch tensor (NCHW) to numpy tensor (NHWC) for plotting.
If it's an rgb image, it puts it back in [0,255] range (and undoes ImageNet standardization)
Args:
torch_tensor: a torch Tensor.
Returns:
a np.ndarray.
"""
np_tensor = copy_to_numpy(torch_tensor)
if np_tensor.ndim == 4: # NCHW
np_tensor = np_tensor.transpose(0,2,3,1)
if is_standardized_image:
_mean=[0.485, 0.456, 0.406]; _std=[0.229, 0.224, 0.225]
for i in range(3):
np_tensor[...,i] *= _std[i]
np_tensor[...,i] += _mean[i]
np_tensor *= 255
np_tensor = np_tensor.astype(np.uint8)
return np_tensor
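# Example (illustrative sketch, not part of the original file): converting a
# batch of ImageNet-standardized RGB tensors back to uint8 images for
# plotting.  The tensor shape is an assumption for demonstration.
def _example_torch_to_numpy():
    rgb = torch.rand(2, 3, 64, 64)                             # NCHW input
    images = torch_to_numpy(rgb, is_standardized_image=True)   # NHWC, uint8
    return images.shape                                        # (2, 64, 64, 3)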
def copy_to_numpy(m):
"""Copy tensor (either numpy array or torch tensor) to a numpy array."""
if isinstance(m, np.ndarray):
m = m.copy()
elif torch.is_tensor(m):
m = m.cpu().clone().detach().numpy()
else:
raise NotImplementedError("MUST pass torch tensor or numpy array")
return m
def copy_to_torch(m, cuda=False):
"""Copy tensor (either numpy array or torch tensor) to a numpy array."""
if torch.is_tensor(m):
m = m.clone()
elif isinstance(m, np.ndarray):
m = torch.from_numpy(m)
else:
raise NotImplementedError("MUST pass torch tensor or numpy array")
if cuda:
m = m.cuda()
else:
m = m.cpu()
return m
def normalize(M):
"""Normalize values of M to the range [0,1]."""
M = M.astype(np.float32)
return (M - M.min()) / (M.max() - M.min())
def get_color_mask(object_index, nc=None):
"""Convert segmentation image to color image.
Colors each index differently. Useful for visualizing semantic masks.
Args:
object_index: a [H, W] numpy array of ints from {0, ..., nc-1}
nc: int. total number of colors. If None, this will be inferred by masks
Returns:
a [H, W, 3] np.ndarray of type uint8.
"""
object_index = object_index.astype(int)
if nc is None:
NUM_COLORS = object_index.max() + 1
else:
NUM_COLORS = nc
cm = plt.get_cmap('gist_rainbow')
colors = [cm(1. * i/NUM_COLORS) for i in range(NUM_COLORS)]
color_mask = np.zeros(object_index.shape + (3,)).astype(np.uint8)
for i in np.unique(object_index):
if i == 0 or i == -1:
continue
color_mask[object_index == i, :] = np.array(colors[i][:3]) * 255
return color_mask
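# Example (illustrative sketch, not part of the original file): colouring a toy
# two-object segmentation mask.
def _example_get_color_mask():
    labels = np.zeros((4, 4), dtype=int)
    labels[:2, :2] = 1
    labels[2:, 2:] = 2
    return get_color_mask(labels)       # [4, 4, 3] uint8 colour image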
def build_matrix_of_indices(height, width):
"""Build a [height, width, 2] numpy array containing coordinates.
Args:
height: int.
width: int.
Returns:
np.ndarray B [H, W, 2] s.t. B[..., 0] contains y-coordinates, B[..., 1] contains x-coordinates
"""
return np.indices((height, width), dtype=np.float32).transpose(1,2,0)
def torch_moi(h, w, device='cpu'):
"""Build matrix of indices in pytorch.
Torch function to do the same thing as build_matrix_of_indices, but returns CHW format.
Args:
h: int
w: int
Returns:
torch.FloatTensor B [2, H, W] s.t. B[0, ...] contains y-coordinates, B[1, ...] contains x-coordinates
"""
ys = torch.arange(h, device=device).view(-1,1).expand(h,w)
xs = torch.arange(w, device=device).view(1,-1).expand(h,w)
return torch.stack([ys, xs], dim=0).float()
def consecutive_label_img(labels):
""" Map labels to {0, 1, ..., K-1}.
Args:
labels: a [H, W] np.ndarray with integer values
Returns:
a [H, W] np.ndarray
"""
# Find the unique (nonnegative) labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(labels)
mapped_labels = labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[labels == unique_nonnegative_indices[k]] = k
return mapped_labels
def visualize_segmentation(im, masks, nc=None, save_dir=None):
"""Visualize segmentations nicely.
Based on code from:
https://github.com/roytseng-tw/Detectron.pytorch/blob/master/lib/utils/vis.py
Args:
im: a [H, W, 3] RGB image. numpy array of dtype np.uint8
masks: a [H, W] numpy array of dtype np.uint8 with values in {0, ..., K}
nc: int. total number of colors. If None, this will be inferred by masks
Returns:
A [H, W, 3] RGB image as a numpy array.
OR
PIL Image instance.
"""
from matplotlib.patches import Polygon
masks = masks.astype(int)
masks = consecutive_label_img(masks)
im = im.copy()
# Generate color mask
if nc is None:
NUM_COLORS = masks.max() + 1
else:
NUM_COLORS = nc
cm = plt.get_cmap('gist_rainbow')
colors = [cm(1. * i/NUM_COLORS) for i in range(NUM_COLORS)]
# Mask
imgMask = np.zeros(im.shape)
# Draw color masks
for i in np.unique(masks):
if i == 0: # background
continue
# Get the color mask
color_mask = np.array(colors[i][:3])
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
e = (masks == i)
# Add to the mask
imgMask[e] = color_mask
# Add the mask to the image
imgMask = (imgMask * 255).round().astype(np.uint8)
im = cv2.addWeighted(im, 0.5, imgMask, 0.5, 0.0)
# Draw mask contours
for i in np.unique(masks):
if i == 0: # background
continue
# Get the color mask
color_mask = np.array(colors[i][:3])
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
e = (masks == i)
# Find contours
contour, hier = cv2.findContours(
e.astype(np.uint8).copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# Plot the nice outline
for c in contour:
cv2.drawContours(im, contour, -1, (255,255,255), 2)
if save_dir is not None:
# Save the image
PIL_image = Image.fromarray(im)
PIL_image.save(save_dir)
return PIL_image
else:
return im
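# Example (illustrative sketch, not part of the original file): overlaying a
# toy square mask on a random RGB image.  The image content is random noise
# used only for demonstration.
def _example_visualize_segmentation():
    im = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)
    masks = np.zeros((64, 64), dtype=np.uint8)
    masks[16:48, 16:48] = 1
    return visualize_segmentation(im, masks)     # [64, 64, 3] uint8 image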
def visualize_contour_img(contour_mean, contour_std, rgb_img):
"""Visualize uncertainty estimates from RICE.
Args:
contour_mean: a [H, W] np.ndarray with values in [0,1].
contour_std: a [H, W] np.ndarray with values in [0, inf).
rgb_img: a [H, W, 3] np.ndarray.
Returns:
a [H, W, 3] np.ndarray.
"""
image_H, image_W = rgb_img.shape[:2]
contour_img = np.round(contour_mean * 255).astype(np.uint8)
contour_img = np.stack([np.zeros((image_H, image_W), dtype=np.uint8),
contour_img,
np.zeros((image_H, image_W), dtype=np.uint8)], axis=-1)
contour_std_img = np.round(normalize(contour_std) * 255).astype(np.uint8)
contour_std_img = np.stack([contour_std_img,
np.zeros((image_H, image_W), dtype=np.uint8),
np.zeros((image_H, image_W), dtype=np.uint8)], axis=-1)
contour_img[contour_std_img[...,0] > 0] = 0
contour_img[contour_std_img > 0] = contour_std_img[contour_std_img > 0]
contour_img = cv2.addWeighted(rgb_img, 0.25, contour_img, 0.75, 0.0)
return contour_img
### These two functions were adatped from the DAVIS public dataset ###
def imread_indexed(filename):
"""Load segmentation image (with palette) given filename."""
im = Image.open(filename)
    annotation = np.array(im)
# -*- coding: utf-8 -*-
from scipy.integrate import solve_ivp
import matplotlib
"""in case it's not working uncomment this: matplotlib.use('TkAgg') """
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
from matplotlib import colors as mcolors
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import testround_difftest_set as r #for sparse matrix stuff
#import testround_difftest_backup as r
import stencil_import as tubemodel
import os
import plot_saved_v
d=10.0
dx=20
dt=0.1#10
maxtime = 10000 #TIME IN SECONDS!!!
"""
CHECKLIST
maxtime
dx according to model (10 ori, 20 0.5, 40 0.25 etc)
stencil_import paths
stencils in folder
plotsaved path
Wnt0, Shh0
delta_Wnt, delta_Shh
plotting colourmax here and in plotsaved
how often save?
spheresize according to model
"""
xlen =tubemodel.xmax
ylen =tubemodel.ymax
zlen = tubemodel.zmax
print(xlen,ylen,zlen)
spheresize = r.spheresize
D_Wnt = 150.7
D_Shh = 133.4
delta_Wnt = 0.04
delta_Shh = 0.1
Wnt0 = tubemodel.Wnt0
Shh0 = tubemodel.Shh0
#import the stencils for tubemodel, WNTsecretion and SHHsecretion points
stenc = tubemodel.stenc
WNTstenc= tubemodel.Wstenc
SHHstenc= tubemodel.Sstenc
#plotting colourmax
rosmax = tubemodel.Wnt0#5#0.0
dorsmax = tubemodel.Shh0#5#0.0
unknownbase=5.0
class Grid:
def __init__(self,xdim,ydim,zdim, Name, seeds,Alpha,Baselevel):
self.grid = np.zeros((xdim,ydim,zdim))
self.totalsites = np.sum(stenc.grid)
self.name = Name
self.xlen=xdim
self.ylen=ydim
self.zlen=zdim
self.baselevel=Baselevel
self.plantrandomseed(seeds)
self.alpha=Alpha
if Name =="Wnt":
self.Amatr = A_Wnt
self.b = b_Wnt
self.delta = delta_Wnt
print("deltawnt:",self.delta)
if Name =="Shh":
self.Amatr = A_Shh
self.b = b_Shh
self.delta = delta_Shh
def show(self,ax):
plotgrid(self,ax)
def plantseed(self,coordinates):
for xyz in coordinates:
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[y][x][z] = self.baselevel
def artificialseed(self,coordinates,level):
for i in range(len(coordinates)):
xyz = coordinates[i]
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[x][y][z] = level[i]*self.baselevel
def plantrandomseed(self, seeds):
n = seeds
M = self.totalsites
coords = np.transpose(np.where(stenc.grid))
for c in coords:
randomnr = np.random.uniform()
if randomnr < n/M:
self.grid[c[0]][c[1]][c[2]] = self.baselevel#*np.random.uniform()
n-=1
M-=1
def diffusion(self,n):
for i in range(n):
deltaU,b = laplacian(self,self.Amatr,self.b)
old = self.grid
self.grid =old + dt*self.alpha*(deltaU +b)
def degradation(self,n):
for i in range(n):
old = self.grid
#print("degrmax",np.max(self.delta * self.grid *dt))
self.grid = old - self.delta * old *dt
def rostrocaudal_reaction(rate,FB,MB,HB,Wnt):
for i in range(rate):
fb= (FB.grid).copy()
mb= (MB.grid).copy()
hb= (HB.grid).copy()
gsk3= (GSK3.grid).copy() # Wnt modulates gsk3
wnt= (Wnt.grid).copy()
u = (U.grid).copy()
FB.grid = fb + dt*( ros.c1*(gsk3**ros.n1)/(1+ ros.c1*(gsk3**ros.n1)+ ros.c2*(mb**ros.n2)+ ros.c3*(hb**ros.n3)) -ros.d1*fb )
MB.grid = mb + dt*(ros.c4*(mb**ros.n4)/(1+ ros.c4*(mb**ros.n4)+ ros.c5*(fb**ros.n5)+ ros.c6*(hb**ros.n6)+ ros.c7*(gsk3**ros.n7)) -ros.d2*mb)
HB.grid = hb + dt*( ros.c8*(hb**ros.n8)/(1 + ros.c8*(hb**ros.n8) + ros.c9*(fb**ros.n9) + ros.c10*(mb**ros.n10)+ ros.c11*(gsk3**ros.n11)) -ros.d3*hb )
GSK3.grid = gsk3 + dt*(ros.c12*(gsk3**ros.n12)/(1 + ros.c12*(gsk3**ros.n12)+ ros.c13*(u**ros.n13) ) -ros.d4*gsk3 )
U.grid = u + dt*((ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15))/( 1+ ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15) + ros.c16*(u**ros.n16)) - ros.d5*u)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
FB.grid[c[0]][c[1]][c[2]] = 0
MB.grid[c[0]][c[1]][c[2]] = 0
HB.grid[c[0]][c[1]][c[2]] = 0
GSK3.grid[c[0]][c[1]][c[2]] = 0
def dorsoventral_reaction(rate,P,O,N,G,S,W):
for i in range(rate):
p= (P.grid).copy()
o= (O.grid).copy()
n= (N.grid).copy()
g= (G.grid).copy()
s= (S.grid).copy()
w= (W.grid).copy()
P.grid = p + dt*( dors.alpha / (1.0 + (n/dors.NcritP)**dors.h1 + (o/dors.OcritP)**dors.h2 ) - dors.k1*p )
O.grid = o + dt*(( (dors.beta*g) / (1.0+g) ) * ( 1.0/(1.0+(n/dors.NcritO)**dors.h3) ) - dors.k2*o)
N.grid = n + dt*( (dors.gamma*g/(1.0+g)) * (1.0/(1.0+ (o/dors.OcritN)**dors.h4 + (p/dors.PcritN)**dors.h5 )) - dors.k3*n)
G.grid = g + dt*(((dors.delta*s)/(1.0+s)) * (1.0/(1.0+ (w/dors.WcritG)**dors.h6 )) - dors.k4*g)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
P.grid[c[0]][c[1]][c[2]] = 0
O.grid[c[0]][c[1]][c[2]] = 0
N.grid[c[0]][c[1]][c[2]] = 0
G.grid[c[0]][c[1]][c[2]] = 0
def alldiffuse(rate,Wnt,Shh):
for i in range(rate):
Wnt.diffusion(1)
Shh.diffusion(1)
def alldegrade(rate,Wnt,Shh):
for i in range(rate):
Wnt.degradation(1)
Shh.degradation(1)
def plotgrid(grid,ax,r=0.47,g=0.0,b=1.0):
if np.all(grid.grid ==0):
return
print("minmax",np.min(grid.grid),np.max(grid.grid))
if grid.baselevel!=0:
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/unknownbase], keep_alpha=True) for z in x] for x in y] for y in grid.grid])
else:
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/unknownbase], keep_alpha=True) for z in x] for x in y] for y in grid.grid])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(grid.grid))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=grid.baselevel,depthshade=False,s=spheresize )
def plotarray(array,ax,maximum,r=0.47,g=0.0,b=1.0):
if np.all(array ==0):
return
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/maximum ], keep_alpha=True) for z in x] for x in y] for y in array])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(array))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=maximum,depthshade=False,s=spheresize )
def plotarray_fixed_alpha(array,ax,maximum,alpha=0.3,r=0.47,g=0.0,b=1.0):
if np.all(array ==0):
return
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,alpha ], keep_alpha=True) for z in x] for x in y] for y in array])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(array))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=maximum,depthshade=False,s=spheresize )
def secretion(rate,Wnt,Shh):
for i in range(rate):
Shh.artificialseed(SHHstenc.secretion_coords,SHHstenc.secretion_levels)
Wnt.artificialseed(WNTstenc.secretion_coords,WNTstenc.secretion_levels)
def run(maxt, savedirectory, save=True):
for ax in [axWnt,axShh,axRos,axDors]:
ax.clear()
axRos.set_title("Rostrocaudal network (Max)")
axDors.set_title("Dorsoventral network (Balaskas)")
axWnt.set_title("Wnt")
axShh.set_title("Shh ")
if save == True:
sd=savedirectory
wntdir = sd + '/Wnt'
shhdir = sd + '/Shh'
rostrodir = sd + '/rostro'
dorsodir = sd + '/dorso'
os.mkdir(wntdir)
os.mkdir(shhdir)
os.mkdir(rostrodir)
os.mkdir(dorsodir)
os.mkdir(wntdir + '/pictures')
os.mkdir(shhdir + '/pictures')
os.mkdir(rostrodir + '/pictures')
os.mkdir(dorsodir + '/pictures')
else:
print('NOT SAVING')
steps = int((maxt/dt +dt))
print("steps:",steps)
for step in range(steps):
if save == True:
if step in np.arange(0,3000,200) or step in np.arange(0,120000,20000) or step in np.arange(0,10000,1000): #step %1000 == 0 or step# and time % 100 == 0) or (save == True and time in np.arange(0,16,1)):
time = step*dt
save_networks(savedirectory,time,FB,MB,HB,P,O,N,G,Wnt,Shh)
print("Saved time %f"% time)
print("step",step,"/",steps)
dorsoventral_reaction(1,P,O,N,G,Shh,Wnt)
rostrocaudal_reaction(1,FB,MB,HB,Wnt)
alldiffuse(1,Wnt,Shh)
secretion(1,Wnt,Shh)
alldegrade(1,Wnt,Shh)
def sparsedot(A,v):
"""Dot product for sparse matrices"""
w=np.zeros(len(v))
for ija in A:
i=ija[0]
j=ija[1]
a=ija[2]
w[i] += v[j]*a
return w
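# Example (illustrative sketch, not part of the original script): applying a
# sparse matrix stored as (row, column, value) triplets to a vector.  The
# matrix below, [[2, 0], [1, 3]], is an assumption for demonstration.
def _example_sparsedot():
    A = [(0, 0, 2.0), (1, 0, 1.0), (1, 1, 3.0)]
    v = np.array([1.0, 2.0])
    return sparsedot(A, v)               # array([2., 7.])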
def laplacian(gridname,Amatr,b):
v,c = r.grid_to_vector(stenc)
c1,c2,c3 = np.transpose(c)
u=(gridname.grid)[c1,c2,c3]
if len(Amatr) == len(Amatr[0]):
newu= np.dot(Amatr,u)
else:
newu= sparsedot(Amatr,u)
L = r.vector_to_grid(newu,gridname,c)
L[:,:,:] = L[:,:,:]/dx**2
b = r.vector_to_grid(b,gridname,c)
b = b*gridname.baselevel/dx**2
return L,b
def compare(matrices):
dimy = len(matrices[0])
dimx = len(matrices[0][0])
dimz = len(matrices[0][0][0])
show= np.zeros_like(matrices)
for i in range(dimy):
for j in range(dimx):
for k in range(dimz):
comparevalues =[m[i][j][k] for m in matrices]
gene = np.argmax(comparevalues)
show[gene][i][j][k] = np.max(comparevalues)
return show
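# Example (illustrative sketch, not part of the original script): for every
# voxel, compare() keeps only the gene with the highest expression.  Two toy
# 1x1x2 grids are used for demonstration.
def _example_compare():
    gene_a = np.array([[[1.0, 0.2]]])
    gene_b = np.array([[[0.5, 0.8]]])
    # gene_a wins at the first voxel, gene_b at the second
    return compare([gene_a, gene_b])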
def show_networks(FB,MB,HB,P,O,N,G,Wnt,Shh,axRos,axDors,axWnt,axShh, scale=False):
for ax in [axWnt,axShh,axRos,axDors]:
ax.clear()
if scale == True:
longest = max(xlen,ylen,zlen)
for ax in [axWnt,axShh,axRos,axDors]:
ax.set_xlim([0,longest])
ax.set_ylim([0,longest])
ax.set_zlim([0,longest])
print("ah")
plotgrid(Shh,axShh,r=92/255, g=121/255, b=168/255)
plotgrid(Wnt,axWnt,r=14/255, g=27/255, b=48/255)
print("oh")
rostro = [FB.grid,MB.grid,HB.grid]
ros_show = compare(rostro)
print(np.max(ros_show),"=roshowmax")
for i in range(len(ros_show)):
colours = ros.colours[i]
plotarray(ros_show[i],axRos,rosmax,r=colours[0],g=colours[1],b=colours[2])
dorso = [P.grid,O.grid,N.grid]
dors_show = compare(dorso)
print(np.max(dors_show),"=dorsshowmax")
for i in range(len(dors_show)):
colours = dors.colours[i]
plotarray(dors_show[i],axDors,dorsmax,r=colours[0],g=colours[1],b=colours[2])
"""
#genes rostro
farg = plt.figure()
axtest = farg.add_subplot(2,2,1)
axtest.set_title("genes rostro")
FBax = FB.grid[:,0,-1]
MBax = MB.grid[:,0,-1]
HBax = HB.grid[:,0,-1]
xr=np.arange(xlen)
axtest.plot(xr,FBax,color=ros.colours[0],label='FB')
axtest.plot(xr,MBax,color=ros.colours[1],label='MB')
axtest.plot(xr,HBax,color=ros.colours[2],label='HB')
#genes dorso
axtest2 = farg.add_subplot(2,2,2)
axtest2.set_title("genes dorso")
xd=np.arange(zlen)
Pax = P.grid[0,int(ylen/2),:]
Oax = O.grid[0,int(ylen/2),:]
Nax = N.grid[0,int(ylen/2),:]
axtest2.plot(xd,Pax,color=dors.colours[0],label='P')
axtest2.plot(xd,Oax,color=dors.colours[1],label='O')
axtest2.plot(xd,Nax,color=dors.colours[2],label='N')
#morphogens rostro
axtest3 = farg.add_subplot(2,2,3)
axtest3.set_title("morphogens rostro")
Wntplotr = Wnt.grid[:,0,-1]
Shhplotr = Shh.grid[:,0,-1]
GSKplotr = GSK3.grid[:,0,-1]
axtest3.plot(xr,Wntplotr,color='k',label='Wnt')
axtest3.plot(xr,Shhplotr,color='b',label='Shh')
#axtest3.plot(xr,GSKplotr,color='r',label='GSK')
#morphogens dorso
axtest4 = farg.add_subplot(2,2,4)
axtest4.set_title("morphogens dorso")
Wntplotd = Wnt.grid[0,int(ylen/2),:]
Shhplotd = Shh.grid[0,int(ylen/2),:]
GSKplotd = GSK3.grid[0,int(ylen/2),:]
axtest4.plot(xd,Wntplotd,color='k',label='Wnt')
axtest4.plot(xd,Shhplotd,color='b',label='Shh')
#axtest4.plot(xd,GSKplotd,color='r',label='GSK')
axtest.legend()
axtest2.legend()
axtest3.legend()
axtest4.legend()
"""
#plt.show()
def save_networks(savedir,t, FB,MB,HB,P,O,N,G,Wnt,Shh):
sd = savedir
#if os.path.isdir(savedir):
#print("directory already exists. creating new")
#sd= savedir + '_1'
wntdir = sd + '/Wnt'
shhdir = sd + '/Shh'
rostrodir = sd + '/rostro'
dorsodir = sd + '/dorso'
infopath = sd + '/info.txt'
if os.path.isfile(infopath) == False:
f = open(infopath, 'w')
info = "Model: %s \n Secretion Wnt: %s \n Secretion Shh: %s\n" % (tubemodel.stencpath,tubemodel.wntsecrpath,tubemodel.shhsecrpath)
info += "D_Wnt %f D_Shh %f delta_Wnt %f delta_Shh %f \n rosmax %f dorsmax %f unknownbase %f \n dx %f dt %f \n" % (D_Wnt, D_Shh, delta_Wnt, delta_Shh, rosmax, dorsmax, unknownbase,dx,dt)
info += "Baselevel: \n Wnt0 %f Shh0 %f \n FB %f MB %f HB %f \n P %f O %f N %f " % (Wnt0, Shh0,FB.baselevel,MB.baselevel,HB.baselevel,P.baselevel,O.baselevel,N.baselevel)
np.savetxt(f,np.asarray([info]),fmt='%s') #.astype(int)
f.close()
#with baselevels
#wntpath = wntdir + '/T%d_BL%d_Wnt' % (t,Wnt.baselevel) + '.npy'
#shhpath = shhdir + '/T%d_BL%d_Shh' % (t,Shh.baselevel) + '.npy'
#FBpath = rostrodir + '/T%d_BL%d_FB' % (t,FB.baselevel) + '.npy'
#MBpath = rostrodir + '/T%d_BL%d_MB' % (t,MB.baselevel) + '.npy'
#HBpath = rostrodir + '/T%d_BL%d_HB' % (t,HB.baselevel) + '.npy'
#Ppath = dorsodir + '/T%d_BL%d_P' % (t,P.baselevel) + '.npy'
#Opath = dorsodir + '/T%d_BL%d_O' % (t,O.baselevel) + '.npy'
#Npath = dorsodir + '/T%d_BL%d_N' % (t,N.baselevel) + '.npy'
#without BL
wntpath = wntdir + '/T%1.1f_Wnt' % t + '.npy'
shhpath = shhdir + '/T%1.1f_Shh' % t + '.npy'
FBpath = rostrodir + '/T%1.1f_FB' % t + '.npy'
MBpath = rostrodir + '/T%1.1f_MB' % t + '.npy'
HBpath = rostrodir + '/T%1.1f_HB' % t + '.npy'
Ppath = dorsodir + '/T%1.1f_P' % t + '.npy'
Opath = dorsodir + '/T%1.1f_O' % t + '.npy'
Npath = dorsodir + '/T%1.1f_N' % t + '.npy'
BLpath = sd+ '/BaseLevels.npy'
    np.save(wntpath, Wnt.grid)
from matplotlib.testing import setup
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import matplotlib as mpl
import packaging.version
import pytest
import animatplot as amp
from tests.tools import animation_compare
from animatplot.blocks import Block, Title
setup()
class TestTitleBlock:
def test_list_of_str(self):
labels = ['timestep 0', 'timestep 1']
result = Title(labels)
assert labels == result.titles
assert len(result) == 2
def test_invalid_input(self):
with pytest.raises(TypeError):
Title(0)
with pytest.raises(TypeError):
Title([6, 7])
def test_format_str(self):
actual = Title('timestep {num}', num=[1, 2]).titles
assert actual == ['timestep 1', 'timestep 2']
actual = Title('timestep {num}', num=[1]).titles
assert actual == ['timestep 1']
def test_no_replacements(self):
actual = Title('Name').titles
assert actual == ['Name']
def test_multiple_replacements(self):
actual = Title('timestep {num}, max density {n}',
num=[1, 2], n=[500, 10]).titles
expected = ['timestep {num}, max density {n}'.format(num=1, n=500),
'timestep {num}, max density {n}'.format(num=2, n=10)]
assert actual == expected
def test_string_formatting(self):
actual = Title('timestep {values:.2f}', values=[5e7]).titles
assert actual == ['timestep 50000000.00']
def test_format_str_numpy_arrays(self):
actual = Title('timestep {num}', num=np.array([1, 2])).titles
assert actual == ['timestep 1', 'timestep 2']
# Hypothesis test that the strings are always formatted correctly?
def test_text(self):
# TODO test that the right type of object is produced?
title_block = Title('timestep {num}', num=[1, 2])
ax = plt.gca()
assert ax.get_title() == 'timestep 1'
title_block._update(1)
assert ax.get_title() == 'timestep 2'
plt.close('all')
def test_mpl_kwargs(self):
expected = {'loc': 'left', 'fontstyle': 'italic'}
actual = Title('timestep {num}', num=[1, 2], **expected)
assert actual._mpl_kwargs == expected
def assert_jagged_arrays_equal(x, y):
for x, y in zip(x, y):
npt.assert_equal(x, y)
class TestLineBlock:
def test_2d_inputs(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
assert isinstance(line_block, amp.blocks.Line)
npt.assert_equal(line_block.x, x_grid)
npt.assert_equal(line_block.y, y_data)
assert len(line_block) == len(t)
assert isinstance(line_block.line, mpl.lines.Line2D)
xdata, ydata = line_block.line.get_data()
npt.assert_equal(xdata, x)
npt.assert_equal(ydata, y_data[0, :])
def test_update(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x_grid, y_data)
line_block._update(frame=1)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.line.get_ydata(), y_data[1, :])
def test_constant_x(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(x, y_data)
npt.assert_equal(line_block.line.get_xdata(), x)
npt.assert_equal(line_block.x[-1], x)
def test_no_x_input(self):
x = np.linspace(0, 1, 10)
t = np.linspace(0, 1, 5)
x_grid, t_grid = np.meshgrid(x, t)
y_data = np.sin(2 * np.pi * (x_grid + t_grid))
line_block = amp.blocks.Line(y_data)
expected_x = np.arange(10)
npt.assert_equal(line_block.line.get_xdata(), expected_x)
def test_list_input(self):
x_data = [np.array([1, 2, 3]), np.array([1, 2, 3])]
y_data = [np.array([5, 6, 7]), np.array([4, 2, 9])]
line_block = amp.blocks.Line(x_data, y_data)
npt.assert_equal(line_block.y, np.array([[5, 6, 7], [4, 2, 9]]))
npt.assert_equal(line_block.x, np.array([[1, 2, 3], [1, 2, 3]]))
def test_ragged_list_input(self):
x_data = [np.array([1, 2, 3]), np.array([1, 2, 3, 4])]
y_data = [np.array([5, 6, 7]), np.array([4, 2, 9, 10])]
with pytest.raises(ValueError) as err:
line_block = amp.blocks.Line(y_data)
assert "Must specify x data explicitly" in str(err)
line_block = amp.blocks.Line(x_data, y_data)
assert_jagged_arrays_equal(line_block.x, np.array(x_data))
assert_jagged_arrays_equal(line_block.y, np.array(y_data))
def test_bad_ragged_list_input(self):
x_data = np.array([np.array([1, 2, 3]), np.array([1, 2, 3, 4])])
y_data = np.array([np.array([5, 6, 7]), np.array([4, 2, 9, 10, 11])])
with pytest.raises(ValueError) as err:
line_block = amp.blocks.Line(x_data, y_data)
assert "x & y data must match" in str(err)
def test_bad_input(self):
# incorrect number of args
with pytest.raises(ValueError) as err:
amp.blocks.Line(1, 2, 3)
assert 'Invalid data arguments' in str(err.value)
with pytest.raises(ValueError) as err:
amp.blocks.Line()
assert 'Invalid data arguments' in str(err.value)
# No y data
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), None)
assert 'Must supply y data' in str(err.value)
with pytest.raises(ValueError) as err:
amp.blocks.Line(None)
assert 'Must supply y data' in str(err.value)
# y data not 2d
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), np.random.randn(5, 2, 2))
assert 'y data must be 2-dimensional' in str(err.value)
# 1d x doesn't match y
with pytest.raises(ValueError) as err:
amp.blocks.Line(np.arange(5), np.random.randn(4, 2))
assert 'dimensions of x must be compatible' in str(err.value)
# 2d x doesn't match y
with pytest.raises(ValueError) as err:
x = np.array([np.arange(5), np.arange(5)])
amp.blocks.Line(x, np.random.randn(4, 2), t_axis=1)
assert 'dimensions of x must be compatible' in str(err.value)
def test_kwarg_throughput(self):
x = np.array([np.arange(5), np.arange(5)])
line_block = amp.blocks.Line(x, np.random.randn(2, 5), t_axis=1,
alpha=0.5)
assert line_block.line.get_alpha() == 0.5
class TestComparisons:
@animation_compare(baseline_images='Blocks/Line', nframes=5)
def test_Line(self):
x = np.linspace(0, 2*np.pi, 20)
t = np.linspace(0, 2*np.pi, 5)
X, T = np.meshgrid(x, t)
Y = np.sin(X+T)
block = amp.blocks.Line(X, Y)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Pcolormesh', nframes=3)
def test_Pcolormesh(self):
x = np.linspace(-2*np.pi, 2*np.pi, 100)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
Z = np.sin(X**2+Y**2-T)
block = amp.blocks.Pcolormesh(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)
return amp.Animation([block])
@animation_compare(baseline_images='Blocks/Pcolormesh_corner', nframes=3)
def test_Pcolormesh_corner_positions(self):
# Test with size of Z being (nx-1)*(ny-1) like matplotlib expects for 'flat'
# shading
x = np.linspace(-2*np.pi, 2*np.pi, 10)
t = np.linspace(0, 2*np.pi, 3)
X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)
"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the QFI over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence in their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a stand out quantum operation. It is assumed that the parameter encoding acts only once and is unitary so the user has to provide only its generator h.
Generator h has to be diagonal in computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis, or in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
result, result_m, L, psi0 = fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
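# Illustrative usage sketch (not part of the original package): optimizing the QFI for
# N qubits subject to single-site dephasing applied before a sigma_z/2 encoding. The
# noise strength p, the helper name and the choice of channel are assumptions made for
# this example only; the dephasing superoperator is diagonal, i.e. expressed in the
# eigenbasis of h, as fin() requires.
def _example_fin_dephasing(N=4, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    # rho -> (1-p)*rho + p*sz rho sz written as a (d**2,d**2) superoperator
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    h = sz / 2  # generator of the unitary parameter encoding, diagonal as required
    result, result_m, L, psi0 = fin(N, [dephasing_so], h, [])
    return result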
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence when increasing their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in MPO representation.
There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in MPO representation.
ch2: list of length N of ndarrays of a shape (Dl_ch2,Dr_ch2,d**2,d**2) for OBC (Dl_ch2, Dr_ch2 can vary between sites) or ndarray of a shape (D_ch2,D_ch2,d**2,d**2,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) derivative of the quantum channel as a superoperator in the MPO representation,
2) the quantum channel as a superoperator in the MPO representation for the value of the estimated parameter shifted by epsilon relative to ch.
epsilon: float, optional
If specified, then it is interpreted as the value of the separation between the estimated parameters encoded in ch and ch2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if the Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of the figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if epsilon is None:
result, result_m, L, psi0 = fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
else:
result, result_m, L, psi0 = fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
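# Sketch (illustrative, not part of the original package): fin_gen() consumes a channel
# MPO and its derivative directly; the call below reproduces what fin() builds internally
# for the dephasing example above. All names and values are assumptions for the example.
def _example_fin_gen_from_channel_mpo(N=4, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    h = sz / 2
    ch = fin_create_channel(N, d, 'O', [dephasing_so])
    ch2 = fin_create_channel_derivative(N, d, 'O', [dephasing_so], h, [])
    return fin_gen(N, d, 'O', ch, ch2)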
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the QFI over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is a stand-alone quantum operation. It is assumed that the parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC (Dl_rho0, Dr_rho0 can vary between sites) or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
Density matrix describing initial state of the system in MPO representation.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of shape (Dl_L,Dr_L,d,d) for OBC, (Dl_L, Dr_L can vary between sites) or ndarray of shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in the MPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
rho = channel_acting_on_operator(ch, rho0)
rho2 = channel_acting_on_operator(ch2, rho0)
result, result_v, L = fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
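# Sketch (illustrative, not part of the original package): fin_state() for a fixed product
# input state |+>^N under the same single-site dephasing layer; rho0 is a bond-dimension-1
# MPO with one (1,1,d,d) tensor per site. All names and values are example assumptions.
def _example_fin_state_plus(N=4, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    h = sz / 2
    plus_proj = np.full((d, d), 0.5, dtype=complex)  # |+><+|
    rho0 = [plus_proj[np.newaxis, np.newaxis, :, :]] * N
    result, result_v, L = fin_state(N, [dephasing_so], h, [], rho0)
    return result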
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and a fixed state of the system.
User has to provide information about the dynamics by specifying the output density matrix rho and its derivative rho2 (or two density matrices separated by a small parameter epsilon) in the MPO representation.
There are no constraints on the structure of these states but the complexity of calculations highly depends on their bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC (Dl_rho, Dr_rho can vary between sites) or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
Density matrix at the output of the quantum channel in the MPO representation.
rho2: list of length N of ndarrays of a shape (Dl_rho2,Dr_rho2,d,d) for OBC (Dl_rho2, Dr_rho2 can vary between sites) or ndarray of a shape (D_rho2,D_rho2,d,d,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) derivative of the density matrix at the output of the quantum channel in the MPO representation,
2) the density matrix at the output of the quantum channel in the MPO representation for the value of the estimated parameter shifted by epsilon relative to rho.
epsilon: float, optional
If specified, then it is interpreted as the value of the separation between the estimated parameters encoded in rho and rho2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
"""
if epsilon is None:
result, result_v, L = fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
else:
result, result_v, L = fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
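# Sketch (illustrative, not part of the original package): fin_state_gen() consumes the
# output state rho and its derivative rho2 directly; the call below mirrors what
# fin_state() does internally for the fixed |+>^N input used in the previous sketch.
def _example_fin_state_gen_from_rho(N=4, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    h = sz / 2
    plus_proj = np.full((d, d), 0.5, dtype=complex)
    rho0 = [plus_proj[np.newaxis, np.newaxis, :, :]] * N
    ch = fin_create_channel(N, d, 'O', [dephasing_so])
    ch2 = fin_create_channel_derivative(N, d, 'O', [dephasing_so], h, [])
    rho = channel_acting_on_operator(ch, rho0)
    rho2 = channel_acting_on_operator(ch2, rho0)
    return fin_state_gen(N, d, 'O', rho, rho2)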
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
"""
Creates the MPO for a superoperator describing a translationally invariant quantum channel from a list of local superoperators. Function for finite size systems.
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
List of local superoperators in order of their action on the system.
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
tol: float, optional
Factor which after multiplication by the highest singular value gives a cutoff on singular values that are treated as nonzero.
Returns:
ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in the MPO representation.
"""
if so_list == []:
if BC == 'O':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:]
ch = [ch]*N
elif BC == 'P':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
if BC == 'O':
ch = [0]*N
kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
if N < 2*kmax-1:
warnings.warn('For OBC the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).')
for x in range(N):
if x >= kmax and N-x >= kmax:
ch[x] = ch[x-1]
continue
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchil = 1
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
if x == 0:
bdchil = 1
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 0 and x < N-1:
bdchil = bdchi
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx],us[nx,:]]
legs = [[-1],[-2]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 1 and x < N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi2
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 2:
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1,-3],[-2,-4],[-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 2 and x < N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
legs = [[-1],[-2,-4],[-3,-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi3*bdchi2
bdchir = bdchi3
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi3
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchil = 1
bdchir = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
if x == 0:
tensors = [us]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi
elif x > 0 and x < N-1:
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = bdchi
elif x == N-1:
tensors = [sv]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-5,1,-2],[1,-6,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x > 1 and x < N-2:
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
elif x == N-2:
tensors = [sv2,us2]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi2
elif x == N-1:
tensors = [sv2]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi2
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
if x == 0:
tensors = [us1]
legs = [[-2,-3,-1]]
chi = ncon(tensors,legs)
bdchil = 1
bdchir = bdchi1
elif x == 1:
tensors = [us2,us1]
legs = [[-1,-4,1,-2],[1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi1
bdchir = bdchi2*bdchi1
elif x == 2:
tensors = [us3,us2,us1]
legs = [[-1,-6,1,-3],[-2,1,2,-4],[2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x > 2 and x < N-3:
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
elif x == N-3:
tensors = [sv3,us3,us2]
legs = [[-1,-6,1],[-2,1,2,-4],[-3,2,-7,-5]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
elif x == N-2:
tensors = [sv3,us3]
legs = [[-1,-4,1],[-2,1,-5,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3*bdchi2
bdchir = bdchi3
elif x == N-1:
tensors = [sv3]
legs = [[-1,-2,-3]]
chi = ncon(tensors,legs)
bdchil = bdchi3
bdchir = 1
chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdchl = bdchil
bdchr = bdchir
ch[x] = chi
else:
bdchl = bdchil*bdchl
bdchr = bdchir*bdchr
tensors = [chi,ch[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch[x] = ncon(tensors,legs)
ch[x] = np.reshape(ch[x],(bdchl,bdchr,d**2,d**2),order='F')
elif BC == 'P':
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchi = 1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = np.outer(sv[:,nx],us[nx,:])
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
bdchi = bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
bdchi = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
else:
if k == 1:
bdchi = 1
chi = so[np.newaxis,np.newaxis,:,:]
elif k == 2:
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
us = np.reshape(us,(d**2,d**2,bdchi),order='F')
sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
tensors = [sv,us]
legs = [[-1,-3,1],[1,-4,-2]]
chi = ncon(tensors,legs)
elif k == 3:
so = np.reshape(so,(d**4,d**8),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
tensors = [sv2,us2,us1]
legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
chi = ncon(tensors,legs)
bdchi = bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
elif k == 4:
so = np.reshape(so,(d**4,d**12),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
tensors = [sv3,us3,us2,us1]
legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
chi = ncon(tensors,legs)
bdchi = bdchi3*bdchi2*bdchi1
chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
else:
warnings.warn('Local superoperators acting on more than 4 neighbouring sites are not currently supported.')
if i == 0:
bdch = bdchi
ch = chi
else:
bdch = bdchi*bdch
tensors = [chi,ch]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
ch = ncon(tensors,legs)
ch = np.reshape(ch,(bdch,bdch,d**2,d**2),order='F')
ch = ch[:,:,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
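# Sketch (illustrative, not part of the original package): fin_create_channel() applied to
# a single one-site dephasing superoperator. Because the layer is strictly local, the
# resulting OBC channel MPO has bond dimension 1 on every site. Values are example
# assumptions only.
def _example_create_channel_shapes(N=3, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    ch = fin_create_channel(N, d, 'O', [dephasing_so])
    return [np.shape(ch[x]) for x in range(N)]  # expected: [(1, 1, 4, 4)] * N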
def fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list):
"""
Creates an MPO for the derivative (with respect to the estimated parameter) of the superoperator describing the quantum channel. Function for finite size systems.
Function for translationally invariant channels with unitary parameter encoding generated by h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
Returns:
chd: list of length N of ndarrays of a shape (Dl_chd,Dr_chd,d**2,d**2) for OBC (Dl_chd, Dr_chd can vary between sites) or ndarray of a shape (D_chd,D_chd,d**2,d**2,N) for PBC
Derivative of superoperator describing quantum channel in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
if len(so_before_list) == 0:
if BC == 'O':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:]
ch1 = [ch1]*N
elif BC == 'P':
ch1 = np.eye(d**2,dtype=complex)
ch1 = ch1[np.newaxis,np.newaxis,:,:,np.newaxis]
ch1 = np.tile(ch1,(1,1,1,1,N))
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
elif len(so_after_list) == 0:
ch1 = fin_create_channel(N,d,BC,so_before_list)
chd = fin_commutator(N,d,BC,ch1,h,1j)
else:
ch1 = fin_create_channel(N,d,BC,so_before_list)
ch1d = fin_commutator(N,d,BC,ch1,h,1j)
ch2 = fin_create_channel(N,d,BC,so_after_list)
if BC == 'O':
chd = [0]*N
for x in range(N):
bdch1dl = np.shape(ch1d[x])[0]
bdch1dr = np.shape(ch1d[x])[1]
bdch2l = np.shape(ch2[x])[0]
bdch2r = np.shape(ch2[x])[1]
tensors = [ch2[x],ch1d[x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
elif BC == 'P':
bdch1d = np.shape(ch1d)[0]
bdch2 = np.shape(ch2)[0]
chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
for x in range(N):
tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
return chd
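# Sketch (illustrative, not part of the original package): with an empty so_after_list the
# channel derivative reduces to the commutator of the channel MPO with the encoding
# generator, so the internal bonds of the result have dimension 2, inherited from the
# two-term structure of the commutator MPO built by fin_commutator().
def _example_create_channel_derivative_shapes(N=3, p=0.1):
    d = 2
    sz = np.diag([1., -1.])
    dephasing_so = (1 - p) * np.kron(np.eye(d), np.eye(d)) + p * np.kron(sz, sz)
    h = sz / 2
    chd = fin_create_channel_derivative(N, d, 'O', [dephasing_so], h, [])
    return [np.shape(chd[x]) for x in range(N)]  # expected: [(1, 2, 4, 4), (2, 2, 4, 4), (2, 1, 4, 4)]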
def fin_commutator(N, d, BC, a, h, c):
"""
Calculates the MPO for the commutator b = [a, c*sum{h}] of MPO a with the sum of local generators h, multiplied by an arbitrary scalar factor c.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
a: list of length N of ndarrays of a shape (Dl_a,Dr_a,d,d) for OBC (Dl_a, Dr_a can vary between sites) or ndarray of a shape (D_a,D_a,d,d,N) for PBC
MPO.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.
c: complex
Scalar factor which multiplies sum of local generators.
Returns:
b: list of length N of ndarrays of a shape (Dl_b,Dr_b,d,d) for OBC (Dl_b, Dr_b can vary between sites) or ndarray of a shape (D_b,D_b,d,d,N) for PBC
Commutator [a, c*sum{h}] in MPO representation.
"""
if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.')
if BC == 'O':
bh = [0]*N
b = [0]*N
for x in range(N):
da = np.shape(a[x])[2]
bda1 = np.shape(a[x])[0]
bda2 = np.shape(a[x])[1]
if x == 0:
bdbh1 = 1
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x > 0 and x < N-1:
bdbh1 = 2
bdbh2 = 2
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
elif x == N-1:
bdbh1 = 2
bdbh2 = 1
bh[x] = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
bh[x][:,:,nx,nxp] = np.array([[1],[c*(h[nxp,nxp]-h[nx,nx])]])
if da == d:
# a is operator
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx,nxp],a[x][:,:,nx,nxp])
elif da == d**2:
# a is superoperator (vectorized channel)
bh[x] = np.reshape(bh[x],(bdbh1,bdbh2,d**2),order='F')
b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d**2,d**2),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
b[x][:,:,nx,nxp] = np.kron(bh[x][:,:,nx],a[x][:,:,nx,nxp])
elif BC == 'P':
da = np.shape(a)[2]
bda = np.shape(a)[0]
if N == 1:
bdbh = 1
else:
bdbh = 2
bh = np.zeros((bdbh,bdbh,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
if N == 1:
bh[:,:,nx,nxp,0] = c*(h[nxp,nxp]-h[nx,nx])
else:
bh[:,:,nx,nxp,0] = np.array([[c*(h[nxp,nxp]-h[nx,nx]),1],[0,0]])
for x in range(1,N-1):
bh[:,:,nx,nxp,x] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),1]])
bh[:,:,nx,nxp,N-1] = np.array([[1,0],[c*(h[nxp,nxp]-h[nx,nx]),0]])
if da == d:
# a is operator
b = np.zeros((bdbh*bda,bdbh*bda,d,d,N),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,nxp,x],a[:,:,nx,nxp,x])
elif da == d**2:
# a is superoperator (vectorized channel)
bh = np.reshape(bh,(bdbh,bdbh,d**2,N),order='F')
b = np.zeros((bdbh*bda,bdbh*bda,d**2,d**2,N),dtype=complex)
for nx in range(d**2):
for nxp in range(d**2):
for x in range(N):
b[:,:,nx,nxp,x] = np.kron(bh[:,:,nx,x],a[:,:,nx,nxp,x])
return b
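# Sketch (illustrative, not part of the original package): fin_commutator() applied to a
# bond-dimension-1 operator MPO built from sigma_x on every site, with h = sigma_z/2 and
# c = 1j; the internal bonds of the returned MPO have dimension 2.
def _example_commutator_sigmax(N=3):
    d = 2
    sx = np.array([[0., 1.], [1., 0.]], dtype=complex)
    h = np.diag([0.5, -0.5])
    a = [sx[np.newaxis, np.newaxis, :, :]] * N  # operator MPO with bond dimension 1
    b = fin_commutator(N, d, 'O', a, h, 1j)
    return [np.shape(b[x]) for x in range(N)]  # expected: [(1, 2, 2, 2), (2, 2, 2, 2), (2, 1, 2, 2)]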
def fin_enlarge_bdl(cold,factor):
"""
Enlarge bond dimension of SLD MPO. Function for finite size systems.
Parameters:
cold: SLD MPO, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
factor: factor which determines, on average, the relation between the old and the newly added values of the SLD MPO
Returns:
c: SLD MPO with bd += 1
"""
rng = np.random.default_rng()
if type(cold) is list:
n = len(cold)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
c = [0]*n
x = 0
d = np.shape(cold[x])[2]
bdl1 = 1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl2-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl2-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1,0:bdl2-1,:,:] = cold[x]
for x in range(1,n-1):
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = np.shape(cold[x])[1]+1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
x = n-1
d = np.shape(cold[x])[2]
bdl1 = np.shape(cold[x])[0]+1
bdl2 = 1
c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl1-1)
meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl1-1)
c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[x][0:bdl1-1,0:bdl2,:,:] = cold[x]
elif type(cold) is np.ndarray:
n = np.shape(cold)[4]
d = np.shape(cold)[2]
bdl = np.shape(cold)[0]+1
c = np.zeros((bdl,bdl,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for x in range(n):
meanrecold = np.sum(np.abs(np.real(cold[:,:,nx,nxp,x])))/(bdl-1)**2
meanimcold = np.sum(np.abs(np.imag(cold[:,:,nx,nxp,x])))/(bdl-1)**2
c[:,:,nx,nxp,x] = (meanrecold*rng.random((bdl,bdl))+1j*meanimcold*rng.random((bdl,bdl)))*factor
c = (c + np.conj(np.moveaxis(c,2,3)))/2
c[0:bdl-1,0:bdl-1,:,:,:] = cold
return c
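# Sketch (illustrative, not part of the original package): fin_enlarge_bdl() grows every
# internal bond of an SLD MPO by one, filling the new entries with random values whose
# scale follows the old tensor and the factor argument (0.5 below is an arbitrary choice).
def _example_enlarge_bdl(N=3):
    d = 2
    c = [np.eye(d, dtype=complex)[np.newaxis, np.newaxis, :, :]] * N  # bond dimension 1
    c_big = fin_enlarge_bdl(c, 0.5)
    return [np.shape(t) for t in c_big]  # expected: [(1, 2, 2, 2), (2, 2, 2, 2), (2, 1, 2, 2)]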
def fin_enlarge_bdpsi(a0old,factor):
"""
Enlarge bond dimension of wave function MPS. Function for finite size systems.
Parameters:
a0old: wave function MPS, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
factor: factor which determines, on average, the relation between the last and the next-to-last values of the diagonals of the wave function MPS
Returns:
a0: wave function MPS with bd += 1
"""
rng = np.random.default_rng()
if type(a0old) is list:
n = len(a0old)
if n == 1:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
a0 = [0]*n
x = 0
d = np.shape(a0old[x])[2]
bdpsi1 = 1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi2-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi2-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1,0:bdpsi2-1,:] = a0old[x]
for x in range(1,n-1):
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = np.shape(a0old[x])[1]+1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
x = n-1
d = np.shape(a0old[x])[2]
bdpsi1 = np.shape(a0old[x])[0]+1
bdpsi2 = 1
a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
for nx in range(d):
meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi1-1)
meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi1-1)
a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
a0[x][0:bdpsi1-1,0:bdpsi2,:] = a0old[x]
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[n-1] = a0[n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[n-1]),a0[n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[x] = a0[x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[x]),a0[x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[0]),a0[0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[0] = a0[0]/np.sqrt(np.abs(r1))
elif type(a0old) is np.ndarray:
n = np.shape(a0old)[3]
d = np.shape(a0old)[2]
bdpsi = np.shape(a0old)[0]+1
a0 = np.zeros((bdpsi,bdpsi,d,n),dtype=complex)
for nx in range(d):
for x in range(n):
meanrea0old = np.sum(np.abs(np.real(a0old[:,:,nx,x])))/(bdpsi-1)**2
meanima0old = np.sum(np.abs(np.imag(a0old[:,:,nx,x])))/(bdpsi-1)**2
a0[:,:,nx,x] = (meanrea0old*rng.random((bdpsi,bdpsi))+1j*meanima0old*rng.random((bdpsi,bdpsi)))*factor
a0[0:bdpsi-1,0:bdpsi-1,:,:] = a0old
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[2,2,1],[3,3,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
else:
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r1 = ncon(tensors,legs)
a0[:,:,:,n-1] = a0[:,:,:,n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
r2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r1 = ncon(tensors,legs)
a0[:,:,:,x] = a0[:,:,:,x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
r2 = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0],r2]
legs = [[4,2,1],[5,3,1],[2,3,4,5]]
r1 = ncon(tensors,legs)
a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
return a0
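# Sketch (illustrative, not part of the original package): fin_enlarge_bdpsi() grows every
# internal bond of a wave function MPS by one and renormalizes the enlarged state; the
# factor only sets the scale of the newly added random entries.
def _example_enlarge_bdpsi(N=3):
    d = 2
    plus = np.full(d, 1 / np.sqrt(d), dtype=complex)
    a0 = [plus[np.newaxis, np.newaxis, :]] * N  # product state |+>^N with bond dimension 1
    a0_big = fin_enlarge_bdpsi(a0, 0.5)
    return [np.shape(t) for t in a0_big]  # expected: [(1, 2, 2), (2, 2, 2), (2, 1, 2)]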
#########################################
# 1.2.1 Problems with exact derivative. #
#########################################
def fin_FoM_FoMD_optbd(n,d,bc,ch,chp,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also check of convergence in bond dimensions. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of, respectively, the SLD MPO [rows] and the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
def fin_FoM_optbd(n,d,bc,a,b,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over SLD MPO and also check of convergence in bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for the SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin_FoMD_optbd(n,d,bc,c2d,cpd,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS and also check of convergence in bond dimension. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for the initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
def fin_FoM_OBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
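# Illustrative sketch (not part of the original module): single-qubit use of
# fin_FoM_OBC_optm. For rho = diag(p, 1-p) and derivative drho = diag(1, -1) the optimal
# SLD is diag(1/p, -1/(1-p)), so the optimized FoM should approach the quantum Fisher
# information 1/p + 1/(1-p). Wrapping the matrices into length-1 OBC lists with bond
# dimension 1 follows the shape conventions stated in the docstring; the zero initial
# SLD guess is an arbitrary illustrative choice.
def _example_fin_FoM_OBC_optm_single_qubit(p=0.3):
    rho = np.diag([p, 1-p]).astype(complex)
    drho = np.diag([1.0, -1.0]).astype(complex)
    a = [rho[np.newaxis, np.newaxis, :, :]]
    b = [drho[np.newaxis, np.newaxis, :, :]]
    c = [np.zeros((1, 1, 2, 2), dtype=complex)]  # initial SLD guess
    fomval, c = fin_FoM_OBC_optm(a, b, c)
    return fomval  # expected to be close to 1/p + 1/(1-p)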
def fin_FoM_PBC_optm(a,b,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC.
Parameters:
a: MPO for density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*l1
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
def fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
lpdc = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[n-1])
tensors = [l2dc,c2d[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[n-1]]
legs = [[-1,1,-4],[1,-7,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
for x in range(n-1,0,-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
a0[x] = np.moveaxis(a0[x],2,1)
a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
a0[x] = np.moveaxis(a0[x],1,2)
tensors = [a0[x-1],u @ np.diag(s)]
legs = [[-1,1,-3],[1,-2]]
a0[x-1] = ncon(tensors,legs)
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
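# Illustrative sketch (not part of the original module): mechanical single-site check of
# fin_FoMD_OBC_optm. The operators c2d = identity and cpd = sigma_x are purely
# illustrative placeholders (not outputs of a dual channel); the n = 1 branch then
# maximizes the largest eigenvalue of 2*cpd - c2d = 2*sigma_x - I, so the optimal FoMD
# is 1 and the optimal state is |+>.
def _example_fin_FoMD_OBC_optm_single_site():
    c2d = [np.eye(2, dtype=complex)[np.newaxis, np.newaxis, :, :]]
    cpd = [np.array([[0, 1], [1, 0]], dtype=complex)[np.newaxis, np.newaxis, :, :]]
    a0 = [np.ones((1, 1, 2), dtype=complex)/np.sqrt(2)]
    fomdval, a0 = fin_FoMD_OBC_optm(c2d, cpd, a0)
    return fomdval  # expected: 1.0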
def fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for initial wave function
"""
n = np.shape(c2d)[4]
d = np.shape(c2d)[2]
bdl2d = np.shape(c2d)[0]
bdlpd = np.shape(cpd)[0]
bdpsi = np.shape(a0)[0]
tol_fomd = 0.1*imprecision/n**2
if n == 1:
tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [np.eye(bdpsi),np.eye(bdpsi)]
legs = [[-2,-1],[-4,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomdval = np.real(fomdval[position])
else:
relunc_fomd = 0.1*imprecision
l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormf[:,:,:,:,0]]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
legs = [[-1,-3,1],[-2,-4,1]]
psinormc = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc,psinormf[:,:,:,:,x]]
legs = [[1,2,-1,-3],[-2,-4,1,2]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
l2dc = ncon(tensors,legs)
tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
lpdc = ncon(tensors,legs)
tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
psinormc = ncon(tensors,legs)
tensors = [l2dc,c2d[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [lpdc,cpd[:,:,:,:,n-1]]
legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
tensors = [psinormc]
legs = [[-2,-4,-1,-3]]
psinorm = ncon(tensors,legs)
psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
psinorm = (psinorm+np.conj(psinorm).T)/2
psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
psinormpinv = np.kron(np.eye(d),psinormpinv)
eiginput = 2*lpd-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
eiginput = psinormpinv @ eiginput
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
iter_fomd += 1
if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
break
fomdval = fomd[-1]
return fomdval,a0
def fin_FoM_OBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with OBC.
Parameters:
a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
Returns:
fomval: value of FoM
"""
n = len(c)
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
legs = [[1,2],[2,1]]
l1 = ncon(tensors,legs)
tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
legs = [[1,2],[2,3],[3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
l1 = l1[:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
l2 = l2[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
tensors = [c[0],b[0],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1 = ncon(tensors,legs)
l1 = float(l1)
tensors = [c[0],a[0],c[0],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2 = ncon(tensors,legs)
l2 = float(l2)
fomval = 2*l1-l2
return fomval
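# Illustrative sketch (not part of the original module): evaluating the figure of merit
# F(L) = 2*Tr(drho*L) - Tr(L*rho*L) with fin_FoM_OBC_val for a single qubit. For
# rho = diag(p, 1-p), drho = diag(1, -1) and the optimal SLD L = diag(1/p, -1/(1-p)),
# the returned value equals the quantum Fisher information 1/p + 1/(1-p).
def _example_fin_FoM_OBC_val_single_qubit(p=0.5):
    rho = np.diag([p, 1-p]).astype(complex)
    drho = np.diag([1.0, -1.0]).astype(complex)
    sld = np.diag([1/p, -1/(1-p)]).astype(complex)
    a = [rho[np.newaxis, np.newaxis, :, :]]
    b = [drho[np.newaxis, np.newaxis, :, :]]
    c = [sld[np.newaxis, np.newaxis, :, :]]
    # expected: 1/p + 1/(1-p), i.e. 4 for p = 0.5 (possibly complex with zero imaginary part)
    return fin_FoM_OBC_val(a, b, c)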
def fin_FoM_PBC_val(a,b,c):
"""
Calculate the value of FoM. Function for finite size systems with PBC.
Parameters:
a: MPO for a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for generalized derivative of a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
Returns:
fomval: value of FoM
"""
n = np.shape(a)[4]
if n == 1:
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[3,3,1,2],[4,4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[4,4,1,2],[5,5,2,3],[6,6,3,1]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
else:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2 = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],b[:,:,:,:,0],l1]
legs = [[5,3,1,2],[6,4,2,1],[3,4,5,6]]
l1 = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],l2]
legs = [[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]]
l2 = ncon(tensors,legs)
fomval = 2*l1-l2
return fomval
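# Illustrative sketch (not part of the original module): the same single-qubit FoM
# evaluation as above, but in the PBC layout, where the MPO tensors of all sites are
# stacked into one ndarray of shape (bd,bd,d,d,n) instead of a list of per-site tensors.
def _example_fin_FoM_PBC_val_single_qubit(p=0.5):
    rho = np.diag([p, 1-p]).astype(complex)
    drho = np.diag([1.0, -1.0]).astype(complex)
    sld = np.diag([1/p, -1/(1-p)]).astype(complex)
    a = rho[np.newaxis, np.newaxis, :, :, np.newaxis]
    b = drho[np.newaxis, np.newaxis, :, :, np.newaxis]
    c = sld[np.newaxis, np.newaxis, :, :, np.newaxis]
    return fin_FoM_PBC_val(a, b, c)  # expected: 1/p + 1/(1-p)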
def fin_FoMD_OBC_val(c2d,cpd,a0):
"""
Calculate value of FoMD. Function for finite size systems with OBC.
Parameters:
c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
Returns:
fomdval: value of FoMD
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
tensors = [np.conj(a0[0][0,0,:]),c2d[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[0][0,0,:]),cpd[0][0,0,:,:],a0[0][0,0,:]]
legs = [[1],[1,2],[2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
l2d = l2d[:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
lpd = lpd[:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2d = ncon(tensors,legs)
l2d = float(l2d)
tensors = [np.conj(a0[0]),cpd[0],a0[0],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpd = ncon(tensors,legs)
lpd = float(lpd)
fomdval = 2*lpd-l2d
return fomdval
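# Illustrative sketch (not part of the original module): mechanical single-site evaluation
# of FoMD(psi) = 2*<psi|cpd|psi> - <psi|c2d|psi> with fin_FoMD_OBC_val. The operators
# c2d = identity and cpd = sigma_x are illustrative placeholders, not outputs of a dual
# channel; for |psi> = |+> the value is 2*1 - 1 = 1.
def _example_fin_FoMD_OBC_val_single_site():
    c2d = [np.eye(2, dtype=complex)[np.newaxis, np.newaxis, :, :]]
    cpd = [np.array([[0, 1], [1, 0]], dtype=complex)[np.newaxis, np.newaxis, :, :]]
    a0 = [np.ones((1, 1, 2), dtype=complex)/np.sqrt(2)]
    return fin_FoMD_OBC_val(c2d, cpd, a0)  # expected: 1.0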
def fin_FoMD_PBC_val(c2d,cpd,a0):
"""
Calculate the value of FoMD. Function for finite size systems with PBC.
Parameters:
c2d: MPO for square of dual of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
cpd: MPO for dual of generalized derivative of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
a0: MPS for the initial wave function, expected ndarray of a shape (bd,bd,d,n)
Returns:
fomdval: value of FoMD
"""
n = np.shape(c2d)[4]
if n == 1:
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
legs = [[3,3,1],[4,4,1,2],[5,5,2]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
else:
tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpd = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2d]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpd]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
lpd = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],l2d]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
l2d = ncon(tensors,legs)
tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lpd]
legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
lpd = ncon(tensors,legs)
fomdval = 2*lpd-l2d
return fomdval
#################################################################
# 1.2.2 Problems with discrete approximation of the derivative. #
#################################################################
def fin2_FoM_FoMD_optbd(n,d,bc,ch,chp,epsilon,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also a check of convergence with increasing bond dimensions. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of the local Hilbert space (dimension of the physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
ch: MPO for a quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for a quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
cini: initial MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0ini: initial MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
bdpsimax: maximal value of bd for the initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoM/FoMD
resultm: matrix describing FoM/FoMD as a function of bd of respectively SLD MPO [rows] and the initial wave function MPS [columns]
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
resultm[bdl-1,bdpsi-1],c,a0 = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if bc == 'O' and n == 1:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
return result,resultm,c,a0
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
while True:
if bdpsi == bdpsimax:
break
else:
a0old = a0
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
bdpsi += -1
a0 = a0old
a0copy = a0new
ccopy = cnew
break
else:
a0 = a0new
c = cnew
if problem:
break
if bdl == bdlmax:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
a0 = a0new
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
if bdpsi == bdpsimax:
resultm = resultm[0:bdl,0:bdpsi]
result = resultm[bdl-1,bdpsi-1]
else:
if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
a0 = a0copy
c = ccopy
resultm = resultm[0:bdl,0:bdpsi+1]
bdl += -1
bdpsi += 1
result = resultm[bdl-1,bdpsi-1]
else:
resultm = resultm[0:bdl,0:bdpsi+1]
result = resultm[bdl-1,bdpsi-1]
break
if not(problem):
break
return result,resultm,c,a0
def fin2_FoM_optbd(n,d,bc,a,b,epsilon,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
"""
Optimization of FoM over SLD MPO and also check of convergence in bond dimension. Function for finite size systems. Version with two states separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in a and b, float
cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdlmax: maximal value of bd for SLD MPO, default value is 100
alwaysbdlmax: boolean value, True if maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
result: optimal value of FoM
resultv: vector describing FoM as a function of bd of the SLD MPO
c: optimal MPO for SLD
"""
while True:
if cini is None:
bdl = 1
rng = np.random.default_rng()
if bc == 'O':
c = [0]*n
c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
for x in range(1,n-1):
c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
elif bc == 'P':
c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
c = (c + np.conj(np.moveaxis(c,2,3)))/2
else:
c = cini
if bc == 'O':
bdl = max([np.shape(c[i])[0] for i in range(n)])
c = [c[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdl = np.shape(c)[0]
c = c.astype(complex)
resultv = np.zeros(bdlmax,dtype=float)
if bc == 'O':
resultv[bdl-1],c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
if n == 1:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
return result,resultv,c
elif bc == 'P':
resultv[bdl-1],c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdl == bdlmax:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
else:
bdl += 1
i = 0
while True:
c = fin_enlarge_bdl(c,factorv[i])
if bc == 'O':
resultv[bdl-1],cnew = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
resultv[bdl-1],cnew = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
if resultv[bdl-1] >= resultv[bdl-2]:
c = cnew
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
resultv = resultv[0:bdl]
result = resultv[bdl-1]
break
if not(problem):
break
return result,resultv,c
def fin2_FoMD_optbd(n,d,bc,c2d,cd,cpd,epsilon,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS and also check of convergence when increasing the bond dimension. Function for finite size systems. Version with two dual SLDs separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD as a function of bd of the initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = np.zeros(d,dtype=complex)
for i in range(d):
a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
# a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
if bc == 'O':
a0 = a0[np.newaxis,np.newaxis,:]
a0 = [a0]*n
elif bc == 'P':
a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
a0 = np.tile(a0,(1,1,1,n))
else:
a0 = a0ini
if bc == 'O':
bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
a0 = [a0[i].astype(complex) for i in range(n)]
elif bc == 'P':
bdpsi = np.shape(a0)[0]
a0 = a0.astype(complex)
resultv = np.zeros(bdpsimax,dtype=float)
if bc == 'O':
resultv[bdpsi-1],a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if n == 1:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
return result,resultv,a0
elif bc == 'P':
resultv[bdpsi-1],a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
factorv = np.array([0.5,0.25,0.1,1,0.01])
problem = False
while True:
if bdpsi == bdpsimax:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
else:
bdpsi += 1
i = 0
while True:
a0 = fin_enlarge_bdpsi(a0,factorv[i])
if bc == 'O':
resultv[bdpsi-1],a0new = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
resultv[bdpsi-1],a0new = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
if resultv[bdpsi-1] >= resultv[bdpsi-2]:
a0 = a0new
break
i += 1
if i == np.size(factorv):
problem = True
break
if problem:
break
if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
resultv = resultv[0:bdpsi]
result = resultv[bdpsi-1]
break
if not(problem):
break
return result,resultv,a0
def fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision=10**-2,lherm=True):
"""
Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems. Version with two channels separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
ch: MPO for quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
chp: MPO for quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
epsilon: value of a separation between estimated parameters encoded in ch and chp, float
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fval: optimal value of FoM/FoMD
c: optimal MPO for SLD
a0: optimal MPS for initial wave function
"""
relunc_f = 0.1*imprecision
if bc == 'O':
chd = [0]*n
chpd = [0]*n
for x in range(n):
chd[x] = np.conj(np.moveaxis(ch[x],2,3))
chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
elif bc == 'P':
chd = np.conj(np.moveaxis(ch,2,3))
chpd = np.conj(np.moveaxis(chp,2,3))
f = np.array([])
iter_f = 0
while True:
a0_dm = wave_function_to_density_matrix(a0)
a = channel_acting_on_operator(ch,a0_dm)
b = channel_acting_on_operator(chp,a0_dm)
if bc == 'O':
fom,c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
elif bc == 'P':
fom,c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
f = np.append(f,fom)
if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
break
if bc == 'O':
c2 = [0]*n
for x in range(n):
bdl1 = np.shape(c[x])[0]
bdl2 = np.shape(c[x])[1]
c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
elif bc == 'P':
bdl = np.shape(c)[0]
c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
for nx in range(d):
for nxp in range(d):
for nxpp in range(d):
for x in range(n):
c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
c2d = channel_acting_on_operator(chd,c2)
cd = channel_acting_on_operator(chd,c)
cpd = channel_acting_on_operator(chpd,c)
if bc == 'O':
fomd,a0 = fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
elif bc == 'P':
fomd,a0 = fin2_FoMD_PBC_optm(c2d,cd,cpd,epsilon,a0,imprecision)
f = np.append(f,fomd)
iter_f += 1
fval = f[-1]
return fval,c,a0
def fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with OBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = len(c)
tol_fom = 0.1*imprecision/n**2
if n == 1:
if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
d = np.shape(c[0])[2]
tensors = [b[0][0,0,:,:]]
legs = [[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0][0,0,:,:]]
legs = [[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0][0,0,:,:],np.eye(d)]
legs = [[-2,-3],[-4,-1]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(d*d,d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fom = 0.1*imprecision
l1f = [0]*n
l1_0f = [0]*n
l2f = [0]*n
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[n-1],b[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[n-2] = ncon(tensors,legs)
l1f[n-2] = l1f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[n-2] = ncon(tensors,legs)
l1_0f[n-2] = l1_0f[n-2][:,:,0,0]
tensors = [c[n-1],a[n-1],c[n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[n-2] = ncon(tensors,legs)
l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [c[x],b[x],l1f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],l1_0f[x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
l1_0f[x-1] = ncon(tensors,legs)
tensors = [c[x],a[x],c[x],l2f[x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
l2f[x-1] = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[0])
tensors = [b[0],l1f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[0],l1_0f[0]]
legs = [[-5,1,-4,-3],[-2,1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[0],np.eye(d),l2f[0]]
legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
cv = np.reshape(c[0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[0],b[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1c = ncon(tensors,legs)
l1c = l1c[:,:,0,0]
tensors = [c[0],a[0]]
legs = [[-3,-1,1,2],[-4,-2,2,1]]
l1_0c = ncon(tensors,legs)
l1_0c = l1_0c[:,:,0,0]
tensors = [c[0],a[0],c[0]]
legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
l2c = ncon(tensors,legs)
l2c = l2c[:,:,:,0,0,0]
for x in range(1,n-1):
bdl1,bdl2,d,d = np.shape(c[x])
tensors = [l1c,b[x],l1f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[x],l1_0f[x]]
legs = [[-1,1],[1,2,-4,-3],[-2,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[x],np.eye(d),l2f[x]]
legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
cv = np.reshape(c[x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[x],b[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[x],a[x]]
legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[x],a[x],c[x]]
legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
l2c = ncon(tensors,legs)
bdl1,bdl2,d,d = np.shape(c[n-1])
tensors = [l1c,b[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[n-1]]
legs = [[-1,1],[1,-5,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[n-1],np.eye(d)]
legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
if lherm:
c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
cv = np.reshape(c[n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
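# Illustrative sketch (not part of the original module): finite-difference single-qubit
# use of fin2_FoM_OBC_optm. Here a and b are the density matrix at phi_0 and at
# phi_0 + epsilon, so the SLD is optimized against the discrete derivative
# (b - a)/epsilon. For rho(phi) = diag(0.5 + s*phi, 0.5 - s*phi) around phi_0 = 0 the
# optimized FoM should approach the classical Fisher information 4*s**2. The zero
# initial SLD guess is an arbitrary illustrative choice.
def _example_fin2_FoM_OBC_optm_single_qubit(s=0.3, epsilon=1e-4):
    rho0 = np.diag([0.5, 0.5]).astype(complex)
    rho_eps = np.diag([0.5 + s*epsilon, 0.5 - s*epsilon]).astype(complex)
    a = [rho0[np.newaxis, np.newaxis, :, :]]
    b = [rho_eps[np.newaxis, np.newaxis, :, :]]
    c = [np.zeros((1, 1, 2, 2), dtype=complex)]  # initial SLD guess
    fomval, c = fin2_FoM_OBC_optm(a, b, epsilon, c)
    return fomval  # expected to be close to 4*s**2 (0.36 for s = 0.3)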
def fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision=10**-2,lherm=True):
"""
Optimization of FoM over MPO for SLD. Function for finite size systems with PBC. Version with two states separated by epsilon.
Parameters:
a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected ndarray of a shape (bd,bd,d,d,n)
b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected ndarray of a shape (bd,bd,d,d,n)
epsilon: value of a separation between estimated parameters encoded in a and b, float
c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
imprecision: expected imprecision of the end results, default value is 10**-2
lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
Returns:
fomval: optimal value of FoM
c: optimal MPO for SLD
"""
n = np.shape(a)[4]
d = np.shape(a)[2]
bdr = np.shape(a)[0]
bdrp = np.shape(b)[0]
bdl = np.shape(c)[0]
tol_fom = 0.1*imprecision/n**2
if n == 1:
tensors = [b[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(bdl)]
legs = [[1,1,-4,-3],[-2,-1]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fomval = np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv)
else:
relunc_fom = 0.1*imprecision
l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l1_0f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
fom = np.array([])
iter_fom = 0
while True:
tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0f[:,:,:,:,n-2] = ncon(tensors,legs)
tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
for x in range(n-2,0,-1):
tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
l1_0f[:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [a[:,:,:,:,0],l1_0f[:,:,:,:,0]]
legs = [[2,1,-4,-3],[-2,1,-1,2]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
cv = np.reshape(c[:,:,:,:,0],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0]]
legs = [[-1,-3,1,2],[-2,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
l2c = ncon(tensors,legs)
for x in range(1,n-1):
tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,x],l1_0f[:,:,:,:,x]]
legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
cv = np.reshape(c[:,:,:,:,x],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1c = ncon(tensors,legs)
tensors = [l1_0c,c[:,:,:,:,x],a[:,:,:,:,x]]
legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
l1_0c = ncon(tensors,legs)
tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
l2c = ncon(tensors,legs)
tensors = [l1c,b[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1 = ncon(tensors,legs)
l1 = np.reshape(l1,-1,order='F')
tensors = [l1_0c,a[:,:,:,:,n-1]]
legs = [[-2,2,-1,1],[1,2,-4,-3]]
l1_0 = ncon(tensors,legs)
l1_0 = np.reshape(l1_0,-1,order='F')
tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
l2 = ncon(tensors,legs)
l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
dl2 = l2+l2.T
dl1 = 2*(l1-l1_0)/epsilon
dl2pinv = np.linalg.pinv(dl2,tol_fom)
dl2pinv = (dl2pinv+dl2pinv.T)/2
cv = dl2pinv @ dl1
c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
if lherm:
c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
fom = np.append(fom,np.real(2*cv @ (l1-l1_0)/epsilon - cv @ l2 @ cv))
iter_fom += 1
if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
break
fomval = fom[-1]
return fomval,c
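# --- Usage sketch (added illustration, not part of the original source). It shows the
# expected call signature of fin2_FoM_PBC_optm on the smallest case: a single site
# (n=1), bond-dimension-1 density-matrix "MPOs" a (at phi_0) and b (at phi_0+epsilon),
# and a random initial SLD MPO c. The concrete state used here (a qubit phase rotation
# acting on |+>) is an illustrative assumption; real calls should pass proper MPO
# representations of the states of interest.
def _example_fin2_FoM_PBC_optm(epsilon=1e-4, bdl=2):
    d, n = 2, 1
    plus = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)
    def rho(phi):
        u = np.diag(np.exp(-1j * phi * np.array([0.0, 1.0])))
        psi = u @ plus
        return np.outer(psi, np.conj(psi))
    a = np.zeros((1, 1, d, d, n), dtype=complex)
    a[0, 0, :, :, 0] = rho(0.0)
    b = np.zeros((1, 1, d, d, n), dtype=complex)
    b[0, 0, :, :, 0] = rho(epsilon)
    c = np.zeros((bdl, bdl, d, d, n), dtype=complex)
    c[:, :, :, :, 0] = np.random.rand(bdl, bdl, d, d)
    fomval, c_opt = fin2_FoM_PBC_optm(a, b, epsilon, c)
    return fomval, c_opt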
def fin2_FoMD_OBC_optm(c2d,cd,cpd,epsilon,a0,imprecision=10**-2):
"""
Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC. Version with two dual SLDs separated by epsilon.
Parameters:
c2d: MPO for the square of the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cd: MPO for the dual of the SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
cpd: MPO for the dual of the SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
    a0: MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
imprecision: expected imprecision of the end results, default value is 10**-2
Returns:
fomdval: optimal value of FoMD
a0: optimal MPS for the initial wave function
"""
n = len(a0)
if n == 1:
if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
d = np.shape(a0[0])[2]
tensors = [c2d[0][0,0,:,:]]
legs = [[-1,-2]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(d,d),order='F')
tensors = [cpd[0][0,0,:,:]]
legs = [[-1,-2]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(d,d),order='F')
tensors = [cd[0][0,0,:,:]]
legs = [[-1,-2]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(d,d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
fomdval = np.real(fomdval[position])
else:
warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
else:
relunc_fomd = 0.1*imprecision
l2df = [0]*n
lpdf = [0]*n
ldf = [0]*n
fomd = np.array([])
iter_fomd = 0
while True:
tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
l2df[n-2] = ncon(tensors,legs)
l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
lpdf[n-2] = ncon(tensors,legs)
lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
tensors = [np.conj(a0[n-1]),cd[n-1],a0[n-1]]
legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
ldf[n-2] = ncon(tensors,legs)
ldf[n-2] = ldf[n-2][:,:,:,0,0,0]
for x in range(n-2,0,-1):
tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
l2df[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
lpdf[x-1] = ncon(tensors,legs)
tensors = [np.conj(a0[x]),cd[x],a0[x],ldf[x]]
legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
ldf[x-1] = ncon(tensors,legs)
bdpsi1,bdpsi2,d = np.shape(a0[0])
tensors = [c2d[0],l2df[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cpd[0],lpdf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [cd[0],ldf[0]]
legs = [[-7,1,-3,-6],[-2,1,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[0] = np.moveaxis(a0[0],2,0)
a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[0] = np.moveaxis(a0[0],0,2)
tensors = [np.diag(s) @ vh,a0[1]]
legs = [[-1,1],[1,-2,-3]]
a0[1] = ncon(tensors,legs)
tensors = [np.conj(a0[0]),c2d[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
l2dc = ncon(tensors,legs)
l2dc = l2dc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cpd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
lpdc = ncon(tensors,legs)
lpdc = lpdc[:,:,:,0,0,0]
tensors = [np.conj(a0[0]),cd[0],a0[0]]
legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
ldc = ncon(tensors,legs)
ldc = ldc[:,:,:,0,0,0]
for x in range(1,n-1):
bdpsi1,bdpsi2,d = np.shape(a0[x])
tensors = [l2dc,c2d[x],l2df[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
l2d = ncon(tensors,legs)
l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [lpdc,cpd[x],lpdf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
lpd = ncon(tensors,legs)
lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
tensors = [ldc,cd[x],ldf[x]]
legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
ld = ncon(tensors,legs)
ld = np.reshape(ld,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
eiginput = 2*(lpd-ld)/epsilon-l2d
eiginput = (eiginput+np.conj(eiginput).T)/2
fomdval,a0v = np.linalg.eig(eiginput)
position = np.argmax(np.real(fomdval))
a0v = np.reshape(a0v[:,position],-1,order='F')
a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
fomd = np.append(fomd,np.real(fomdval[position]))
a0[x] = np.moveaxis(a0[x],2,0)
a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
a0[x] = np.moveaxis(a0[x],0,2)
tensors = [np.diag(s) @ vh,a0[x+1]]
legs = [[-1,1],[1,-2,-3]]
a0[x+1] = ncon(tensors,legs)
tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
l2dc = ncon(tensors,legs)
                tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cm as cm
import netCDF4
import scipy.interpolate as intrp
import datetime
import gsw
import seawater as sw
import os
import sys
from mpl_toolkits.basemap import Basemap
import cmocean
import pygamma
import copy
import glob
import xarray as xr
from holteandtalley import HolteAndTalley
import time
class grids_one_buoy():
def __init__(self,filename,**kargs):
if "den_ml_crit" in kargs:
den_ml_crit = kargs["den_ml_crit"]
else:
den_ml_crit = 0.03
if "DO_ml_crit" in kargs:
DO_ml_crit = kargs["DO_ml_crit"]
else:
#DO_ml_crit = 1. #Kortzinger 2008 proportional to 0.03 kg/m3 if 0.125 kg/m-3 in kortzinger
#DO_ml_crit = 5. #Kortzinger 2008
DO_ml_crit = 2.5
if "dz" in kargs:
dz = kargs["dz"]
else:
dz = 5.
if "dzLT" in kargs:
dzLT = kargs["dzLT"]
else:
dzLT = 20.
if "gridding" in kargs:
gridding = kargs["gridding"]
else:
gridding = False
if "display_info" in kargs:
display_info = kargs["display_info"]
else:
display_info = False
if "verbose" in kargs:
verbose = kargs["verbose"]
else:
verbose = False
if "clear_short" in kargs:
            #drops profiles that were cut short (shallower than 950 m)
clear_short = kargs["clear_short"]
else:
clear_short = False
nfc = netCDF4.Dataset(filename)
metadata = nfc.__dict__["Comments"]
if display_info:
display(nfc)
variables = list(nfc.variables.keys())
#print(nfc)
self.raw = dict()
self.raw["depth"] = nfc["Depth"][:]
self.raw["Lat"] = nfc["Lat"][:]
self.raw["Lon"] = nfc["Lon"][:]
self.raw["Lon"][self.raw["Lon"]>180] = self.raw["Lon"][self.raw["Lon"]>180] - 360.
#UOW CODE
i0 = filename.rfind("/")+1
i1 = filename.rfind("_")
self.raw["code"]= filename[i0:i1]
#WMO code
WMO_str = "WMO ID:"
i0 = metadata.find(WMO_str) + len(WMO_str) + 1
i1 = metadata[i0:].find("\n") + i0
self.raw["WMO_code"] = metadata[i0:i1]
ref_date_str = nfc["REFERENCE_DATE_TIME"][:].tostring().decode("ascii")
ref_date = datetime.datetime.strptime(ref_date_str,"%Y%m%d%H%M%S")
self.raw["date"] = nfc["JULD"][:] + ref_date.toordinal()
self.raw["date_dt"] = convert_time_to_date(self.raw["date"])
#reads the variables
self.raw["depth"] = nfc["Depth"][:].T
if np.ma.isMaskedArray(self.raw["depth"]):
self.raw["depth"].mask = (self.raw["depth"].mask) | (nfc["Depth_QFA"][:].T == 8) | (self.raw["depth"]<0)
else:
self.raw["depth"] = np.ma.array(self.raw["depth"])
self.raw["depth"].mask = (nfc["Depth_QFA"][:].T == 8)
self.raw["Pressure"] = nfc["Pressure"][:].T
if np.ma.isMaskedArray(self.raw["Pressure"]):
self.raw["Pressure"].mask = (self.raw["Pressure"].mask) | (nfc["Pressure_QFA"][:].T == 8)
else:
self.raw["Pressure"] = np.ma.array(self.raw["Pressure"])
self.raw["Pressure"].mask = (nfc["Pressure_QFA"][:].T == 8)
self.raw["Temperature"] = nfc["Temperature"][:].T
if np.ma.isMaskedArray(self.raw["Temperature"]):
self.raw["Temperature"].mask = (self.raw["Temperature"].mask) | (nfc["Temperature_QFA"][:].T == 8)
else:
self.raw["Temperature"] = np.ma.array(self.raw["Temperature"])
self.raw["Temperature"].mask = (nfc["Temperature_QFA"][:].T == 8)
self.raw["Salinity"] = nfc["Salinity"][:].T
if np.ma.isMaskedArray(self.raw["Salinity"]):
self.raw["Salinity"].mask = (self.raw["Salinity"].mask) | (nfc["Salinity_QFA"][:].T == 8)
else:
self.raw["Salinity"] = np.ma.array(self.raw["Salinity"])
self.raw["Salinity"].mask = (nfc["Salinity_QFA"][:].T == 8)
#derived values
self.raw["SA"] = gsw.SA_from_SP( self.raw["Salinity"], self.raw["Pressure"], self.raw["Lon"], self.raw["Lat"] ) #-10.1325
self.raw["CT"] = gsw.CT_from_t(self.raw["SA"],self.raw["Temperature"],self.raw["Pressure"]) #-10.1325
self.raw["Sigma_theta"] = gsw.sigma0(self.raw["SA"],self.raw["CT"])
self.raw["gamma_n"] = np.transpose(pygamma.gamma_n( self.raw["Salinity"].T, self.raw["Temperature"].T, self.raw["Pressure"].T, self.raw["Lon"], self.raw["Lat"] )[0])
if not np.ma.isMaskedArray(self.raw["gamma_n"]):
self.raw["gamma_n"] = np.ma.array( self.raw["gamma_n"] )
self.raw["gamma_n"].mask = np.copy( self.raw["Sigma_theta"].mask )
#biogeochemical
bg_vars = ["Oxygen","OxygenSat","Nitrate","DIC_LIAR","TALK_LIAR","pCO2_LIAR","Chla_corr","POC"]
self.raw_bg = dict()
if "Oxygen" in variables:
self.raw_bg["Oxygen"] = nfc["Oxygen"][:].T
if np.ma.isMaskedArray(self.raw_bg["Oxygen"]):
self.raw_bg["Oxygen"].mask = (self.raw_bg["Oxygen"].mask) | (nfc["Oxygen_QFA"][:].T == 8)
else:
self.raw_bg["Oxygen"] = np.ma.array(self.raw_bg["Oxygen"])
self.raw_bg["Oxygen"].mask = (nfc["Oxygen_QFA"][:].T == 8)
if "OxygenSat" in variables:
self.raw_bg["OxygenSat"] = nfc["OxygenSat"][:].T
if np.ma.isMaskedArray(self.raw_bg["OxygenSat"]):
self.raw_bg["OxygenSat"].mask = (self.raw_bg["OxygenSat"].mask) | (nfc["OxygenSat_QFA"][:].T == 8)
else:
self.raw_bg["OxygenSat"] = np.ma.array(self.raw_bg["OxygenSat"])
self.raw_bg["OxygenSat"].mask = (nfc["OxygenSat_QFA"][:].T == 8)
if "Nitrate" in variables:
self.raw_bg["Nitrate"] = nfc["Nitrate"][:].T
if np.ma.isMaskedArray(self.raw_bg["Nitrate"]):
self.raw_bg["Nitrate"].mask = (self.raw_bg["Nitrate"].mask) | (nfc["Nitrate_QFA"][:].T == 8)
else:
self.raw_bg["Nitrate"] = np.ma.array(self.raw_bg["Nitrate"])
self.raw_bg["Nitrate"].mask = (nfc["Nitrate_QFA"][:].T == 8)
if "DIC_LIAR" in variables:
self.raw_bg["DIC_LIAR"] = nfc["DIC_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["DIC_LIAR"]):
self.raw_bg["DIC_LIAR"].mask = (self.raw_bg["DIC_LIAR"].mask) | (nfc["DIC_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["DIC_LIAR"] = np.ma.array(self.raw_bg["DIC_LIAR"])
self.raw_bg["DIC_LIAR"].mask = (nfc["DIC_LIAR_QFA"][:].T == 8)
if "TALK_LIAR" in variables:
self.raw_bg["TALK_LIAR"] = nfc["TALK_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["TALK_LIAR"]):
self.raw_bg["TALK_LIAR"].mask = (self.raw_bg["TALK_LIAR"].mask) | (nfc["TALK_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["TALK_LIAR"] = np.ma.array(self.raw_bg["TALK_LIAR"])
self.raw_bg["TALK_LIAR"].mask = (nfc["TALK_LIAR_QFA"][:].T == 8)
if "pCO2_LIAR" in variables:
self.raw_bg["pCO2_LIAR"] = nfc["pCO2_LIAR"][:].T
if np.ma.isMaskedArray(self.raw_bg["pCO2_LIAR"]):
self.raw_bg["pCO2_LIAR"].mask = (self.raw_bg["pCO2_LIAR"].mask) | (nfc["pCO2_LIAR_QFA"][:].T == 8)
else:
self.raw_bg["pCO2_LIAR"] = np.ma.array(self.raw_bg["pCO2_LIAR"])
self.raw_bg["pCO2_LIAR"].mask = (nfc["pCO2_LIAR_QFA"][:].T == 8)
if "Chl_a_corr" in variables:
self.raw_bg["Chl_a"] = nfc["Chl_a_corr"][:].T
if np.ma.isMaskedArray(self.raw_bg["Chl_a"]):
self.raw_bg["Chl_a"].mask = (self.raw_bg["Chl_a"].mask) | (nfc["Chl_a_corr_QFA"][:].T == 8)
else:
self.raw_bg["Chl_a"] = np.ma.array(self.raw_bg["Chl_a"])
self.raw_bg["Chl_a"].mask = (nfc["Chl_a_corr_QFA"][:].T == 8)
if "POC" in variables:
self.raw_bg["POC"] = nfc["POC"][:].T
if np.ma.isMaskedArray(self.raw_bg["POC"]):
self.raw_bg["POC"].mask = (self.raw_bg["POC"].mask) | (nfc["POC_QFA"][:].T == 8)
else:
self.raw_bg["POC"] = np.ma.array(self.raw_bg["POC"])
self.raw_bg["POC"].mask = (nfc["POC_QFA"][:].T == 8)
nt = self.raw["Temperature"].shape[1]
#LT
self.raw["LT_ov"] = np.full( self.raw["Temperature"].shape, np.nan )
self.raw["size_ov"] = np.full( self.raw["Temperature"].shape, np.nan )
#grids
self.gr = dict()
self.gr["depth"] = np.arange(0,2000+dz,dz)
nz = self.gr["depth"].size
self.gr["date"] = np.copy(self.raw["date"])
#self.gr["date_dt"] = convert_time_to_date(self.gr["date"])
self.gr["Lon"] = np.copy(self.raw["Lon"])
self.gr["Lat"] = np.copy(self.raw["Lat"])
self.gr["code"] = copy.copy(self.raw["code"])
self.gr["WMO_code"] = copy.copy(self.raw["WMO_code"])
#gridded variables
self.gr["Pressure"] = np.full((nz, nt), np.nan)
self.gr["Temperature"] = np.full((nz, nt), np.nan)
self.gr["Salinity"] = np.full((nz, nt), np.nan)
self.gr["SA"] = np.full((nz, nt), np.nan)
self.gr["CT"] = np.full((nz, nt), np.nan)
self.gr["Sigma_theta"] = np.full((nz, nt), np.nan)
self.gr["gamma_n"] = np.full((nz, nt), np.nan)
self.gr["N2"] = np.full((nz, nt), np.nan)
self.gr["PV"] = np.full((nz, nt), np.nan)
#biogeochemical variables
for var in bg_vars:
self.gr[var] = np.full((nz, nt), np.nan)
#mixing parameters
self.gr["LT"] = np.full((nz, nt), np.nan)
self.gr["mld"] = np.full(nt, np.nan)
self.gr["mld_HT"] = np.full(nt, np.nan)
#self.gr["gpa0"] = np.full(nt, np.nan)
self.gr["mld_DO"] = np.full(nt, np.nan)
self.gr["LT_ml"] = np.full(nt, 0.)
self.gr["LT_ov"] = np.full((nz,nt), 0.)
self.gr["LT_largest_ov"] = np.full(nt, 0.)
self.gr["size_largest_ov"] = np.full(nt, 0.)
self.gr["h_largest_ov"] = np.full(nt, 0.)
self.gr["h_no_ov"] = np.full(nt, 0.)
for i in range(nt):
if verbose:
print("Float %s, profile: %d"%(self.raw["code"],i+1))
#Interpolates temperature
ii = np.argsort(self.raw["depth"][:,i])
z0 = self.raw["depth"][ii,i]
#deletes profiles shorter than 950 m
if clear_short and max(z0)<950:
continue
p0 = self.raw["Pressure"][ii,i]
T0 = self.raw["Temperature"][ii,i]
msk = ~((T0.mask) | (z0.mask))
self.gr["Temperature"][:,i] = grids_interpolates(z0[msk], T0[msk], self.gr["depth"], dz, grid = gridding)
#Pressure
msk = ~((p0.mask) | (z0.mask))
self.gr["Pressure"][:,i] = grids_interpolates(z0[msk], p0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates potential temperature
CT0 = self.raw["CT"][ii,i]
msk = ~((CT0.mask) | (z0.mask))
self.gr["CT"][:,i] = grids_interpolates(z0[msk], CT0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates salinity
S0 = self.raw["Salinity"][ii,i]
msk = ~((S0.mask) | (z0.mask))
self.gr["Salinity"][:,i] = grids_interpolates(z0[msk], S0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates SA
SA0 = self.raw["SA"][ii,i]
msk = ~((SA0.mask) | (z0.mask))
self.gr["SA"][:,i] = grids_interpolates(z0[msk], SA0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates density
Sigma_theta0 = self.raw["Sigma_theta"][ii,i]
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["Sigma_theta"][:,i] = grids_interpolates(z0[msk], Sigma_theta0[msk], self.gr["depth"], dz, grid = gridding)
#Interpolates gamma_n
gamma_n0 = self.raw["gamma_n"][ii,i]
msk = ~((gamma_n0.mask) | (z0.mask))
self.gr["gamma_n"][:,i] = grids_interpolates(z0[msk].T, gamma_n0[msk].T, self.gr["depth"], dz, grid = gridding)
##
#interpolates the biogeochemical variables
##
for var in bg_vars:
if var in self.raw_bg.keys():
XX = self.raw_bg[var][ii,i]
msk = ~((XX.mask) | (z0.mask))
if np.nansum(msk)>10:
self.gr[var][:,i] = grids_interpolates(z0[msk], XX[msk],self.gr["depth"], dz, grid = gridding)
#mixed layer depth from density
msk = ~((Sigma_theta0.mask) | (z0.mask))
self.gr["mld"][i] = mixed_layer_depth(z0[msk],np.sort(np.array([Sigma_theta0[msk]]).T), Dd = den_ml_crit)[0]
#Mixed layer Holte and Talley
Pgr = self.gr["Pressure"][:,i]
CTgr = self.gr["CT"][:,i]
SAgr = self.gr["SA"][:,i]
STgr = self.gr["Sigma_theta"][:,i]
msk = ~( np.isnan(Pgr+CTgr+SAgr+STgr))
if np.sum(msk)>10:
html = HolteAndTalley( Pgr[msk], CTgr[msk], SAgr[msk], STgr[msk] )
self.gr["mld_HT"][i] = html.densityMLD
#stratification
#N2,pmid = gsw.Nsquared( self.gr["SA"][:,i], self.gr["CT"][:,i], self.gr["Pressure"][:,i]-10.1325 )
ddendz = first_centered_differences( -self.gr["depth"], self.gr["Sigma_theta"][:,i] )
self.gr["N2"][:,i] = -(1000+self.gr["Sigma_theta"][:,i])**-1*gsw.grav( self.gr["Pressure"][:,i], self.gr["Lat"][i] )*ddendz #-10.1325
self.gr["PV"][:,i] = (1000+self.gr["Sigma_theta"][:,i])**-1*gsw.f( self.gr["Lat"][i] )*ddendz
#self.gr["PV"][:,i] = sw.f( self.gr["Lat"][i] )*self.gr["N2"][:,i]
"""
#geopotential anomaly
msk = ~( (S0.mask) | (T0.mask) | (p0.mask) )
if np.sum(msk)>10:
self.gr["gpa0"][i] = geopotential_anomaly(CT0[msk],SA0[msk], p0[msk])
"""
#calculates thorpe displacements and mean LT
igood = np.where( ~((Sigma_theta0.mask) | (z0.mask) ))[0]
if igood.size<10:
continue
Sigma_theta00 = Sigma_theta0[igood].data
z00 = z0[igood].data
isort = np.argsort( Sigma_theta00)
disp = z00 - z00[isort]
nz1000 = np.where( self.gr["depth"]<=1000 )[0][-1]
for j in range(nz1000):
if self.gr["depth"][j]>1000:
break
jj = (z00>= self.gr["depth"][j]-dzLT) & (z00<= self.gr["depth"][j]+dzLT)
self.gr["LT"][j,i] = np.nanmean(disp[jj]**2)**0.5
#detection of Thorpe overturns
ii1000 = (z00<=1000) & (np.isfinite(Sigma_theta00))
zth,LT, ovsize, ovnum = calculates_thorpe_scale(z00[ii1000], Sigma_theta00[ii1000])
self.raw["LT_ov"][:,i] = grids_interpolates(zth,LT,self.raw["depth"][:,i].data, dz, grid = gridding)
self.raw["size_ov"][:,i] = grids_interpolates(zth,ovsize,self.raw["depth"][:,i].data,dz)
self.gr["LT_ov"][:,i] = grids_interpolates(zth,LT,self.gr["depth"], dz, grid = gridding)
#mean thorpe displacement in the mixed layer
jjmld = np.where(z00<=self.gr["mld"][i])[0]
if jjmld.size>0:
self.gr["LT_ml"][i] = np.nanmean( (disp[jjmld]-np.mean(disp[jjmld]))**2)**0.5
else:
self.gr["LT_ml"][i] = 0.
#stores the size and LT of biggest overturn within the mixed layer
jjml = np.where(zth<=self.gr["mld"][i])[0]
if jjml.size:
j_largest = jjml[ np.argmax(ovsize[jjml]) ]
n_largest_ov = ovnum[ j_largest ]
j_bot_largest = np.where(ovnum == n_largest_ov)[0][-1]
if n_largest_ov>0:
self.gr["size_largest_ov"][i] = ovsize[0]
self.gr["LT_largest_ov"][i] = LT[0]
self.gr["h_largest_ov"][i] = zth[ j_bot_largest]
#first depth with no overturn
i_nov = np.where(ovsize==0.)[0]
if i_nov.size>0:
self.gr["h_no_ov"][i] = zth[ i_nov[0] ]
else:
self.gr["h_no_ov"][i] = zth[ -1 ]
#mixed layer from oxygen
if "Oxygen" in self.raw_bg.keys():
XX = self.raw_bg["Oxygen"][ii,i]
msk = ~XX.mask
if np.nansum(msk)>5:
mld_DO_0 = mixed_layer_depth(z0[msk], -np.array([XX[msk]]).T, Dd = DO_ml_crit)[0]
mld_DO_1 = mixed_layer_depth(z0[msk], np.array([XX[msk]]).T, Dd = DO_ml_crit)[0]
self.gr["mld_DO"][i] = np.nanmin(np.array([mld_DO_0,mld_DO_1]))
#self.gr["mld_DO"][i] = mixed_layer_depth(z0[msk], -np.array([XX[msk]]).T, Dd = DO_ml_crit, crit = "DO")[0]
self.gr["gpa"] = gsw.geo_strf_dyn_height(self.gr["SA"], self.gr["CT"], self.gr["Pressure"], interp_method = "linear", p_ref = 500.)
self.gr["gpa_500_1500"] = np.full(nt, np.nan)
for i in range(nt):
try:
j = np.nanargmin(np.abs(self.gr["Pressure"][:,i]-1500. ))
except:
j = np.nan
if np.isnan(j) or np.abs(self.gr["Pressure"][j,i]-1500)>100:
continue
self.gr["gpa_500_1500"][i] = -self.gr["gpa"][j,i]
#other derived variables
self.gr["AOU"] = 100*self.gr["Oxygen"]/self.gr["OxygenSat"]-self.gr["Oxygen"]
##calculates PT and SP
#self.gr["SP"] = gsw.SP_from_SA( self.gr["SA"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"] )
#self.gr["PT"] = gsw.pt_from_CT( self.gr["SA"], self.gr["CT"] )
def calculates_carbon_framework(self,**kargs):
#kargs: CO2file (file for xCO2 data), sp (surface pressure in Pa), timemet (meteo time for surface pressure)
print("Carbon framework")
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
if "ML_zero" in kargs:
ML_zero = kargs["ML_zero"]
else:
ML_zero = True
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
xCO2 = intCO2(self.gr["date"])
if "sp" in kargs:
if type(kargs["timemet"])==np.datetime64:
kargs["timemet"] = convert_datetime64_to_time(kargs["timemet"])
sp = np.full( self.gr["date"].size, np.nan )
for i in range(self.gr["date"].size):
if i == 0:
time0 = self.gr["date"][0]-5.
if self.gr["date"].size>1:
time1 = 0.5*(self.gr["date"][0]+self.gr["date"][1])
else:
time1 = self.gr["date"][0]+5.
if i==self.gr["date"].size-1:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = self.gr["date"][i]+5.
else:
time0 = 0.5*(self.gr["date"][i-1]+self.gr["date"][i])
time1 = 0.5*(self.gr["date"][i]+self.gr["date"][i+1])
ij = np.where( (kargs["timemet"]>=time0) & (kargs["timemet"]<=time1) )[0]
if ij.size == 0:
continue
sp[i] = np.nanmean(kargs["sp"]/101325.)
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = np.tile(self.gr["depth"],(nt,1)).T
mldM = np.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = np.copy(self.gr["CT"])
Tml[~ismld] = np.nan
Tml = np.nanmean(Tml, axis = 0)
Sml = np.copy(self.gr["SA"])
Sml[~ismld] = np.nan
Sml = np.nanmean(Sml, axis = 0)
pH2O = partial_pressure_water_vapour( Sml, Tml )
pCO2atm = xCO2*(sp - pH2O)
else:
pCO2atm = np.copy(xCO2)
self.gr["CF"] = carbon_framework(self.gr["DIC_LIAR"], self.gr["TALK_LIAR"], self.gr["SA"],\
self.gr["CT"], self.gr["Pressure"], self.gr["Lon"], self.gr["Lat"], \
self.gr["AOU"], pCO2atm,self.gr["depth"], mld = self.gr["mld"], ML_zero = ML_zero)
self.gr["CF"]["pCO2atm"] = np.copy(pCO2atm)
def calculates_CO2_O2_flux(self, met,**kargs):
if type(met["time"][0]) == np.datetime64:
met["time"] = convert_datetime64_to_time(met["time"])
met["Wsp"],met["wind_dir"] = uv_to_wdir( met["u10"], met["v10"] )
nt = self.gr["date"].size
nz = self.gr["depth"].size
zM = np.tile(self.gr["depth"],(nt,1)).T
mldM = np.tile(self.gr["mld"],(nz,1))
ismld = zM<mldM
Tml = np.copy(self.gr["CT"])
Tml[~ismld] = np.nan
Tml = np.nanmean(Tml, axis = 0)
iif = np.isfinite(Tml)
if np.sum(iif)>2:
intTml = intrp.interp1d( self.gr["date"][iif], Tml[iif], bounds_error = False )
Tml_met = intTml( met["time"])
iif = np.where(np.isfinite(Tml_met))[0]
Tml_met[0:iif[0]] = Tml_met[iif[0]]
Tml_met[iif[-1]+1:] = Tml_met[iif[-1]]
else:
Tml_met = np.nanmean(Tml[iif])*np.ones(met["time"].size)
Sml = np.copy(self.gr["SA"])
Sml[~ismld] = np.nan
Sml = np.nanmean(Sml, axis = 0)
iif = np.isfinite(Sml)
if np.sum(iif)>2:
intSml = intrp.interp1d( self.gr["date"][iif], Sml[iif], bounds_error = False )
Sml_met = intSml( met["time"])
iif = np.where(np.isfinite(Sml_met))[0]
Sml_met[0:iif[0]] = Sml_met[iif[0]]
Sml_met[iif[-1]+1:] = Sml_met[iif[-1]]
else:
Sml_met = np.nanmean(Sml[iif])*np.ones(met["time"].size)
denml = np.copy(self.gr["Sigma_theta"])
denml[~ismld] = np.nan
denml = np.nanmean(denml, axis = 0)
iif = np.isfinite(denml)
if np.sum(iif)>2:
intdenml = intrp.interp1d( self.gr["date"][iif], denml[iif], bounds_error = False )
denml_met = intdenml( met["time"])
iif = np.where(np.isfinite(denml_met))[0]
denml_met[0:iif[0]] = denml_met[iif[0]]
denml_met[iif[-1]+1:] = denml_met[iif[-1]]
else:
denml_met = np.nanmean(denml[iif])*np.ones(met["time"].size)
AOUml = np.copy(self.gr["AOU"])
AOUml[~ismld] = np.nan
AOUml = np.nanmean(AOUml, axis = 0)
iif = np.isfinite(AOUml)
if np.sum(iif)>10:
intAOUml = intrp.interp1d( self.gr["date"][iif], AOUml[iif], bounds_error = False )
AOUml_met = intAOUml( met["time"])
iif = np.where(np.isfinite(AOUml_met))[0]
AOUml_met[0:iif[0]] = AOUml_met[iif[0]]
if iif[-1]>= AOUml_met.size*3./4.:
AOUml_met[iif[-1]+1:] = AOUml_met[iif[-1]]
else:
AOUml_met = np.full(met["time"].size, np.nan)
pCO2ml = np.copy(self.gr["pCO2_LIAR"])
pCO2ml[~ismld] = np.nan
pCO2ml = np.nanmean(pCO2ml, axis = 0)
iif = np.isfinite(pCO2ml)
if np.sum(iif) > 10:
intpCO2ml = intrp.interp1d( self.gr["date"][iif], pCO2ml[iif], bounds_error = False )
pCO2ml_met = intpCO2ml( met["time"])
iif = np.where(np.isfinite(pCO2ml_met))[0]
pCO2ml_met[0:iif[0]] = pCO2ml_met[iif[0]]
if iif[-1]>= pCO2ml_met.size*3./4.:
pCO2ml_met[iif[-1]+1:] = pCO2ml_met[iif[-1]]
else:
pCO2ml_met = np.full(met["time"].size, np.nan)
if "CO2file" in kargs:
CO2args = {"textfile": kargs["CO2file"]}
else:
CO2args = {}
intCO2 = reads_CO2_file_cape_grim(interpolation = "linear",plots = False, **CO2args)
#interpolates CO2
xCO2met = intCO2(met["time"])
pH2Oatm = partial_pressure_water_vapour( Sml_met, Tml_met )
pCO2atm = xCO2met*(met["sp"]/101325. - pH2Oatm)
K0 = CO2_solubility(Sml_met, Tml_met)
#gets the CO2 flux
kwCO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "CO2")/100*24. #m/d
FCO2 = kwCO2*K0*(pCO2ml_met - pCO2atm )*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000 ~ mol m-2 y-1
#gets the oxygen flux
kwO2 = kw_wanninkhof(met["Wsp"],Tml_met, gas = "O2")/100*24. #m/d
FO2 = -kwO2*(AOUml_met)*365/1000.*(1000+denml_met)/1000 #umol/kg *m/d *365/1000~ mmol m-2 d-1 ~ mol m-2 y-1
self.gr["FCO2"] = np.full(nt, np.nan)
self.gr["FO2"] = np.full(nt, np.nan)
for i in range(nt):
ij = np.where( (np.abs( self.gr["date"][i] - met["time"] )<5.) )[0]
if ij.size == 0:
continue
if np.isnan(pCO2ml[i]) or np.isnan(Tml[i]):
continue
#removes data with ice
if Tml[i]<-1:
if np.sum( np.isfinite(self.gr["CT"][0:2,i]) ) == 0:
continue
self.gr["FCO2"][i] = np.nanmean(FCO2[ij])
self.gr["FO2"][i] = np.nanmean(FO2[ij])
def plots_all_mixing_profiles(self, save = True, show = False):
nprf = self.raw["date"].size
for i in range(nprf):
print("Plot profile %d of %d"%(i+1, nprf))
self.plots_mixing_layer_profile(i, save = save, show = show)
def plots_mixing_layer_profile(self,pn, save = True, show = False):
if save:
if not os.path.exists('prof_ml'):
os.makedirs('prof_ml')
date0 = datetime.datetime.fromordinal(int(self.raw["date"][pn]))
date_str = date0.strftime("%Y %b %d")
if "Oxygen" in self.raw_bg.keys():
nsbp = 4
else:
nsbp = 3
xsize = int(np.round(nsbp*2.5))
fig, ax = plt.subplots(1,nsbp, sharey = True, figsize = (xsize,4))
ax[0].plot(self.gr["CT"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[0].plot(self.raw["CT"][:,pn],self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\Theta$ [$^{\\mathrm{o}}$C]")
ax[0].set_ylabel("Depth [m]")
ax0 = ax[0].twiny()
ax0.plot(self.gr["SA"][:,pn],self.gr["depth"],"-", color = "gray")
ax0.plot(self.raw["SA"][:,pn],self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax0.set_xlabel("$S_A$", color = "gray")
ax[1].plot(self.gr["Sigma_theta"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[1].plot( self.raw["Sigma_theta"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[1].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[2].plot(self.raw["size_ov"][:,pn], self.raw["depth"][:,pn], color = "gray", lw = 1)
ax[2].plot(self.raw["LT_ov"][:,pn], self.raw["depth"][:,pn], color = "k")
ax[2].set_xlabel("$L_T$ (black), $l_{ov}$ (gray)")
if "Oxygen" in self.raw_bg:
ax[3].plot(self.gr["Oxygen"][:,pn],self.gr["depth"],"k-", ms = 2)
ax[3].plot( self.raw_bg["Oxygen"][:,pn], self.raw["depth"][:,pn],"ko", ms = 2, mfc = "w")
ax[3].set_xlabel("DO [$\\mu$mol kg$^{-1}$]")
ax3 = ax[3].twiny()
ax3.plot(self.gr["OxygenSat"][:,pn],self.gr["depth"],"-", ms = 2, color = "gray")
ax3.plot( self.raw_bg["OxygenSat"][:,pn], self.raw["depth"][:,pn],"o", ms = 2, mfc = "w", mec = "gray")
ax3.set_xlabel("% DO$_{sat}$", color = "gray")
for ax0 in ax:
l0 = ax0.axhline(self.gr["mld"][pn], color = cm.tab10(0))
l1 = ax0.axhline(self.gr["mld_HT"][pn], color = cm.tab10(2))
l2 = ax0.axhline(self.gr["mld_DO"][pn], color = cm.tab10(3))
l3 = ax0.axhline(self.gr["h_no_ov"][pn], color = cm.tab10(4))
l4 = ax0.axhline(self.gr["h_largest_ov"][pn], color = cm.tab10(5))
l = (l0,l1,l2, l3,l4)
ax[1].legend(l, ["mld$_{\\sigma_{\\theta}}$","mld$_{\\mathrm{HT}}$","mld$_{\\mathrm{DO}}$","$l_{ov}=0$ m","larg$^{\\mathrm{st}}$. eddy"] )
fig.suptitle("Float %s, date %s\nLon: %1.2f Lat: %1.2f"%(self.raw["code"], date_str, self.raw["Lon"][pn], self.raw["Lat"][pn]))
if save:
date_str0 = date0.strftime("%Y%m%d")
figname = "prof_ml/%s_%s.png"%(self.raw["code"],date_str0)
fig.savefig(figname, dpi = 300, bbox_inches = "tight")
if show:
plt.show()
else:
plt.close(fig)
def plots_map_main_variables(self, saves = True, shows = False,**kargs):
if not os.path.exists('float_maps'):
os.makedirs('float_maps')
if self.raw["Temperature"].shape[1] == 1:
print("Only one profile")
return
fig = plt.figure(figsize = (14,8))
ax0 = fig.add_axes([0.10,0.67,0.3,0.3])
width = 15e6; lon_0 = 0; lat_0 = -90
m1 = Basemap(width=width,height=width,projection='aeqd',
lat_0=lat_0,lon_0=lon_0)
m1.drawcoastlines()
m1.fillcontinents()
m1.drawmapboundary(fill_color='skyblue')
m1.fillcontinents(color='#cc9966',lake_color='#99ffff')
m1.drawparallels(np.arange(-80,-20,10),labels=[1,0,0,0])
m1.drawmeridians(np.arange(-180,180,30),labels=[0,0,0,1])
x,y = m1( self.raw["Lon"], self.raw["Lat"])
#plt.scatter(x,y,10,T_gr[5,:])
#plt.plot(x,y,color = "crimson")
cc = plt.scatter(x,y,20, c = self.raw["date"])#-self.raw["date"][0])
loc = mdates.AutoDateLocator()
fig.colorbar(cc, ticks=loc,
format=mdates.AutoDateFormatter(loc))
#cb = fig.colorbar(cc)
#cb.set_label("Survey day")
ax1 = fig.add_axes([0.07,0.35,0.47,0.27])
cfT=ax1.contourf(self.gr["date"], self.gr["depth"], self.gr["CT"],20, cmap = cmocean.cm.thermal)
#ccT = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["Temperature"],20, colors = "w", linewidths = 1)
ax1.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax1.plot(self.gr["date"], self.gr["mld_HT"], color = "w", lw = 1, ls = "dotted")
ax1.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "w", lw = 1)
ax1.plot(self.gr["date"],1990*np.ones(self.gr["date"].size),marker = "|", color = "k")
cD = ax1.contour(self.gr["date"], self.gr["depth"], self.gr["gamma_n"],[26.80,27.23,27.50], colors = "skyblue", linewidths = 1)
plt.clabel(cD, fmt = "%1.2f", fontsize = 6)
cb = fig.colorbar(cfT)
ax1.annotate("$\Theta$ [$^{\\mathrm{o}}$C]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
if "ylim" in kargs:
yl = kargs["ylim"]
else:
yl = ax1.get_ylim()[::-1]
ax1.set_ylim(yl)
ax1.set_ylabel("Depth [m]")
ax1.set_xticklabels([])
#ax1.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
ax2 = fig.add_axes([0.07,0.05,0.47,0.27])
cfT=ax2.contourf(self.gr["date"], self.gr["depth"], self.gr["SA"],20, cmap = cmocean.cm.haline)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ax2.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax2.plot(self.gr["date"], self.gr["mld_DO"], ls = "--",color = "w", lw = 1)
cb = fig.colorbar(cfT)
ax2.annotate("$S_A$", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8) )
ax2.set_ylim(yl)
ax2.set_ylabel("Depth [m]")
ax2.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
"""
ax3 = fig.add_axes([0.54,0.65,0.47,0.27])
ccT = ax3.pcolor(self.gr["date"], self.gr["depth"], self.gr["LT"], cmap = cm.inferno)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("$L_T$ [m]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
"""
if "Nitrate" in self.gr.keys():
ax3 = fig.add_axes([0.54,0.65,0.47,0.27])
ccT = ax3.contourf(self.gr["date"], self.gr["depth"], self.gr["Nitrate"], 20, cmap = cmocean.cm.matter)
ax3.plot(self.gr["date"], self.gr["mld"], color = "w", lw = 1)
ax3.plot(self.gr["date"], self.gr["mld_DO"], ls ="--",color = "w", lw = 1)
plt.colorbar(ccT, ax = ax3)
ax3.set_ylim(yl)
ax3.set_ylabel("Depth [m]")
ax3.set_xticklabels([])
ax3.annotate("Nitrate [$\\mu$mol kg$^{-1}$]" , xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax3.set_title("Float: %s"%(self.raw["code"]))
if "Oxygen" in self.gr.keys():
ax4 = fig.add_axes([0.54,0.35,0.47,0.27])
cfT=ax4.contourf(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],20, cmap = cmocean.cm.oxy)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
ccT = ax4.contour(self.gr["date"], self.gr["depth"], self.gr["Oxygen"]-100*self.gr["Oxygen"]/self.gr["OxygenSat"],[0], colors = "blue", linewidths = 1)
ax4.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax4.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax4.annotate("DO-DO$_{\\mathrm{sat}}$ [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax4.set_ylim(yl)
ax4.set_yticklabels([])
ax4.set_xticklabels([])
if "DIC_LIAR" in self.gr.keys():
ax5 = fig.add_axes([0.54,0.05,0.47,0.27])
cfT=ax5.contourf(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],20, cmap = cmocean.cm.ice_r)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["Salinity"],20, colors = "gray", linewidths = 1)
#ccT = ax2.contour(self.gr["date"], self.gr["depth"], self.gr["DIC_LIAR"],[0], colors = "gray", linewidths = 1)
ax5.plot(self.gr["date"], self.gr["mld"], color = "k", lw = 1)
ax5.plot(self.gr["date"], self.gr["mld_DO"], ls = "--", color = "k", lw = 1)
cb = fig.colorbar(cfT)
ax5.annotate("DIC [$\\mu$ mol kg$^{-1}$]", xy = (0.02,0.05), xycoords = "axes fraction", fontweight = "bold", color = "k", bbox = dict(facecolor = "w", alpha =0.8))
ax5.set_ylim(yl)
ax5.set_yticklabels([])
ax5.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
filename = "float_maps/%s_map.png"%(self.raw["code"])
if saves:
fig.savefig(filename)
plt.close(fig)
if shows:
plt.show()
def grids_interpolates(x0,y0,x,dx, grid = False):
    # Maps y0(x0) onto the regular grid x. With grid=True, values are bin-averaged within
    # +-dx/2 of each grid point and remaining gaps are filled by linear interpolation
    # (when more than 5 binned points exist); otherwise a plain linear interpolation of
    # the finite input points is used.
    y = np.full(x.size,np.nan)
if grid:
for i in range(x.size):
jj = (x0>=x[i]-dx/2.) & (x0<=x[i]+dx/2.)
if np.nansum(jj)>0:
y[i] = np.mean(y0[jj])
igood = np.isfinite(y)
if np.sum(igood)>5:
intt = intrp.interp1d( x[igood], y[igood], bounds_error = False)
y[~igood] = intt(x[~igood])
elif np.sum(np.isfinite(y0))>5:
intt = intrp.interp1d( x0, y0, bounds_error = False)
y = intt(x)
return y
##############################
######### OTHER FUNCTIONS ####
##############################
def mixed_layer_depth(z0, den0, Dd = 0.03, crit = "diff", z_min = 30., intrp = True):
#Mixed layer calculation
if crit != "diff" and crit != "grad" and crit != "DO":
crit = "diff"
print("Incorrect criterion, set to diff")
c,f = den0.shape
MLD = np.full(f, np.nan)
for i in range(f):
if z0.ndim ==1:
z = np.copy(z0)
else:
z = z0[:,i]
#den = np.sort(den0[:,i])
den = den0[:,i]
iif = np.isfinite(den+z)
if np.sum(iif)<=1:
continue
den = den[iif]
z = z[iif]
if np.min(z0)>z_min:
continue
if crit == "diff":
sden = den[0]
denp = den-sden
imld = np.where( denp>=Dd )[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
denp2 = denp[imld]
denp1 = denp[imld-1]
if intrp:
MLD[i] = (z2-z1)/(denp2-denp1)*(Dd - denp1) + z1
else:
MLD[i] = (z1+z2)*0.5
else:
MLD[i] = np.max(z)
#MLD[i] = z0[0,i]
elif crit == "grad":
grden = np.abs(first_centered_differences(z, den))
imld = np.where(grden>=Dd)[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
grd2 = grden[imld]
grd1 = grden[imld-1]
if intrp:
MLD[i] = (z2-z1)/(grd2-grd1)*(Dd - grd1) + z1
else:
MLD[i] = 0.5*(z1+z2)
else:
MLD[i] = z[0]
if crit == "DO":
sden = den[0]
denp = den-sden
imld = np.where( np.abs(denp)>=Dd )[0]
if imld.size == 0:
MLD[i] = np.max(z)
elif imld[0]>0:
imld = imld[0]
z2 = z[imld]
z1 = z[imld-1]
MLD[i] = z1
else:
MLD[i] = np.max(z)
#MLD[i] = z0[0,i]
return MLD
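# --- Illustrative check (added, not part of the original code). The default "diff"
# criterion returns the depth at which potential density first exceeds the surface value
# by Dd = 0.03 kg m^-3 (linearly interpolated). The synthetic two-layer profile below is
# an assumption used only to demonstrate the expected input shapes and output.
def _example_mixed_layer_depth():
    z = np.arange(0., 200., 5.)                        # depth grid [m]
    den = np.where(z < 50., 27.00, 27.20)              # 0.2 kg m^-3 density jump at 50 m
    mld = mixed_layer_depth(z, den[:, None], Dd=0.03)  # den0 must be (depth, n_profiles)
    return mld[0]                                      # between 45 and 50 m for this profile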
def calculates_thorpe_scale(z,dens,PLOT = False):
#sorts for ascending depth
ii = np.argsort(z)
z = z[ii]
dens = dens[ii]
#sorts for ascending density
jj = np.argsort(dens)
disp = z - z[jj]
nn = disp.size
#Looks for individual overturns
LT = np.zeros(nn)
ov_size = np.zeros(nn)
ov_num = np.zeros(nn)
ovN0 = 1
i = 0
while True:
#plt.plot(dens[i:]-dens[i])
ii_lighter0 = np.where( (dens[i:]-dens[i])<=0 )[0]
if ii_lighter0.size>1:
ii_lighter = np.arange(i,i+ii_lighter0[-1]+1)
#print(ii_lighter0)
dens_ov = dens[ii_lighter]
z_ov = z[ii_lighter]
jj = np.argsort(dens_ov)
disp_ov = z_ov - z_ov[jj]
#print(disp_ov)
LT[ii_lighter] = np.nanmean(disp_ov**2)**0.5
if LT[ii_lighter][0]>0:
ov_size[ii_lighter] = np.max(z_ov)-np.min(z_ov)
ov_num[ii_lighter] = ovN0
ovN0+=1
i = ii_lighter[-1]+1
else:
i+=1
if i>=nn:
break
if PLOT == True:
fig, ax = plt.subplots(1,2, sharey = True)
ax[0].plot(dens, z)
ax[0].set_ylim(ax[0].get_ylim()[::-1])
ax[0].set_xlabel("$\\sigma_{\\theta}$ [kg m$^{-3}$]")
ax[0].set_ylabel("Depth [m]")
ax[1].plot(np.abs(disp),z, lw = 1, color = "gray")
ax[1].plot(LT,z, color = "k")
#ax[1].plot(ov_size,z)
ax[1].set_xlabel("$L_T$ [m]")
plt.show()
return z, LT, ov_size, ov_num
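# --- Illustrative check (added, not part of the original code). A synthetic profile with
# one density inversion around 40-55 m should yield a nonzero Thorpe scale LT and overturn
# size inside the overturn and zeros elsewhere; the numbers are placeholders.
def _example_thorpe_scale():
    z = np.arange(0., 100., 5.)
    dens = 27.0 + 0.001 * z                  # statically stable background
    dens[8:12] = dens[8:12][::-1]            # flip a segment to create an overturn
    zs, LT, ov_size, ov_num = calculates_thorpe_scale(z, dens, PLOT=False)
    return LT.max(), ov_size.max()           # ~11 m rms displacement, 15 m overturn size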
def geopotential_anomaly(CT,SA,p, pref = np.array([500.,1500.])):
rho = gsw.rho(SA,CT,p)
rho0 = gsw.rho(35.,0.,p)
delta = rho**-1 - rho0**-1
#delta = gsw.specvol_anom_standard(SA,CT,p+10)
if np.max(p)<np.max(pref):
return np.nan
p_i = np.arange(pref[0], pref[1]+1.,1.)
dp = 1.*1e4 #Pa
intd = intrp.interp1d( p, delta, bounds_error = False )
delta_i = intd( p_i )
gpa = np.sum(dp*delta_i)
return gpa
def FCD_2d(x, y, axis = 0):
if x.ndim != 2 or y.ndim !=2:
sys.exit("Invalid dimensions")
if axis != 0 and axis != 1:
sys.exit("Invalid axis")
if axis == 1:
x = x.T
y = y.T
dy = np.full(y.shape,np.nan)
for i in range(x.shape[1]):
dy[:,i] = first_centered_differences(x[:,i], y[:,i])
if axis == 1:
dy = dy.T
return dy
def first_centered_differences(x, y, fill = False):
if x.size != y.size:
print("first-centered differences: vectors do not have the same size")
dy = np.full( x.size, np.nan )
iif = np.where( (np.isfinite(x)) & (np.isfinite(y))) [0]
if iif.size < 2:
return dy
x0 = x[iif]
y0 = y[iif]
dy0 = np.full( x0.size, np.nan )
#calculates differences
dy0[0] = (y0[1] - y0[0])/(x0[1]-x0[0])
dy0[-1] = (y0[-1] - y0[-2])/(x0[-1]-x0[-2])
dy0[1:-1] = (y0[2:] - y0[0:-2])/(x0[2:]- x0[0:-2])
dy[iif] = dy0
if fill:
dy[0:iif[0]] = dy[iif[0]]
dy[iif[-1]+1:] = dy[iif[-1]]
return dy
def moving_average(x,n, window = "flat"):
if n%2 == 0:
n+=1
N = x.size
cx = np.full(x.size, np.nan)
for i in range(N):
ii = np.arange(i-n//2, i+n//2+1,1)
if window == "flat":
ww = np.ones(ii.size)
elif window == "gauss":
xx = ii - i
ww = np.exp(- xx**2/(float(n)/4)**2 )
elif window == "hanning":
ww = np.hanning(ii.size)
ww = ww[ (ii>=0) & (ii<N)]
ii = ii[ (ii>=0) & (ii<N)]
kk = np.isfinite(x[ii])
if np.sum(kk)<0.25*ii.size:
continue
cx[i] = np.sum(x[ii[kk]]*ww[kk])/np.sum(ww[kk])
return cx
#time conversion
def convert_time_to_date(time):
date = [datetime.datetime.fromordinal(int(time0)) + datetime.timedelta(time0%1) for time0 in time]
return date
def convert_date_to_time(date):
N = len(date)
time = np.full(N, np.nan)
for i in range(N):
time[i]=date[i].toordinal() + date[i].hour/24. + date[i].minute/24./60. + date[i].second/24./60./60. + date[i].microsecond/24./60./60./1e6
return time
def convert_datetime64_to_date(date64):
ts = (date64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
date = [datetime.datetime.utcfromtimestamp(ts0) for ts0 in ts]
return date
def convert_datetime64_to_time(date64):
    ts = (date64 - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 18:17:32 2019
@author: <NAME>
"""
import numpy as np
import csv
from matplotlib import pyplot as plt
# Processing the data
a = open("data.csv")
b = [row for row in csv.reader(a)]
c = [[b[i][-1]] for i in range(len(b))]
for i in range(len(b)):
del b[i][-1]
X = np.array(b, dtype=np.float32)
X_max = np.max(X)
X /= X_max
y = np.array(c, dtype=np.float32)
y_max = np.max(y)
y /= y_max
# Hypothesis
theta = np.random.standard_normal((1, 2))
def hypothesis(data_in):
    # Scale with the original maxima: X and y were normalized in place above, so
    # np.max(X) and np.max(y) are both 1 at this point and would leave the result unscaled.
    data_in = data_in / X_max
    pred = theta[0][0] + (theta[0][1] * data_in)
    return pred * y_max
# Graph function
def line(Xs, ys):
    x_plot = np.linspace(np.min(Xs), np.max(Xs))
# Author: <NAME>
# Roll No.: 2016217
import random
import itertools
import operator
import numpy as np
import scipy as sp
import scipy.linalg as spla
np.set_printoptions(precision=4, linewidth=np.nan)
def nf2DualLP(filename):
"""
Convert network flow to Dual form LP
"""
# assumes that first row is source and last is sink like in the question
# edges will be numbered as if they are being read row-by-row left to right
# vertices will be numbered by row
    nf = np.loadtxt(filename)
# %% [markdown]
# # Overview
# The notebook shows how to extract the segmentation map for the ships, augment the images and train a simple DNN model to detect them. A few additional tweaks like balancing the ship-count out a little better have been done.
# %% [markdown]
# ## Model Parameters
# We might want to adjust these later (or do some hyperparameter optimizations)
# %%
BATCH_SIZE = 4
EDGE_CROP = 16
NB_EPOCHS = 5
GAUSSIAN_NOISE = 0.1
UPSAMPLE_MODE = 'SIMPLE'
# downsampling inside the network
NET_SCALING = None
# downsampling in preprocessing
IMG_SCALING = (1, 1)
# number of validation images to use
VALID_IMG_COUNT = 400
# maximum number of steps_per_epoch in training
MAX_TRAIN_STEPS = 200
AUGMENT_BRIGHTNESS = False
#%%
import logging
import sys
logger = logging.getLogger()
logger.handlers = []
# Set level
logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
# Create formatter
FORMAT = "%(levelno)-2s %(asctime)s : %(message)s"
DATE_FMT = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(FORMAT, DATE_FMT)
# Create handler and assign
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.handlers = [handler]
logging.info("Logging started")
# %%
import os
import logging
from pathlib import Path
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# SK
from skimage.io import imread
from skimage.segmentation import mark_boundaries
from skimage.util import montage
from skimage.morphology import label
from sklearn.model_selection import train_test_split
montage_rgb = lambda x: np.stack([montage(x[:, :, :, i]) for i in range(x.shape[3])], -1)
# TensorFlow
import tensorflow as tf
from tensorflow.keras import models, layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import binary_crossentropy
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
# ship_dir = '../input'
# train_image_dir = os.path.join(ship_dir, 'train_v2')
# test_image_dir = os.path.join(ship_dir, 'test_v2')
import gc; gc.enable() # memory is tight
def multi_rle_encode(img):
labels = label(img[:, :, 0])
return [rle_encode(labels==k) for k in np.unique(labels[labels>0])]
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_encode(img):
'''
img: numpy array, 1 - mask, 0 - background
Returns run length as string formated
'''
pixels = img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
def rle_decode(mask_rle, shape=(768, 768)):
'''
mask_rle: run-length as string formated (start length)
shape: (height,width) of array to return
Returns numpy array, 1 - mask, 0 - background
'''
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape).T # Needed to align to RLE direction
def masks_as_image(in_mask_list):
# Take the individual ship masks and create a single mask array for all ships
all_masks = np.zeros((768, 768), dtype = np.int16)
#if isinstance(in_mask_list, list):
for mask in in_mask_list:
if isinstance(mask, str):
all_masks += rle_decode(mask)
return np.expand_dims(all_masks, -1)
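# %%
# Minimal synthetic round-trip check (added illustration, not from the original notebook):
# rle_encode/rle_decode use column-major ordering via the transpose, so a tiny handmade
# mask should survive encode -> decode unchanged.
_toy = np.zeros((4, 4), dtype=np.uint8)
_toy[1:3, 2] = 1                                  # a 2-pixel "ship"
assert np.array_equal(rle_decode(rle_encode(_toy), shape=_toy.shape), _toy)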
# %%
DIR_INPUT = Path("~/DATA/airbus-ship-detection-sample").expanduser()
assert DIR_INPUT.exists()
PATH_CSV = DIR_INPUT / 'train_ship_segmentations_v2.csv'
assert PATH_CSV.exists()
DIR_IMAGES = DIR_INPUT / 'images'
assert DIR_IMAGES.exists()
masks = pd.read_csv(PATH_CSV)
print(masks.shape[0], 'masks found')
print(masks['ImageId'].value_counts().shape[0])
DIR_WEIGHTS = DIR_INPUT / 'weights'
#%% Align the df with the actual sampled data
masks
DIR_IMAGES.joinpath('teas').exists()
masks['exists'] = masks['ImageId'].apply(lambda image_id: DIR_IMAGES.joinpath(image_id).exists())
# r = masks.head(10)
masks = masks[masks['exists']]
logging.info("Resampled df to match existing images, {} records".format(len(masks)))
# %% [markdown]
# # Make sure encode/decode works
# Given the process
# $$ RLE_0 \stackrel{Decode}{\longrightarrow} \textrm{Image}_0 \stackrel{Encode}{\longrightarrow} RLE_1 \stackrel{Decode}{\longrightarrow} \textrm{Image}_1 $$
# We want to check if/that
# $ \textrm{Image}_0 \stackrel{?}{=} \textrm{Image}_1 $
# We could check the RLEs as well but that is more tedious. Also depending on how the objects have been labeled we might have different counts.
#
#
# %%
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5))
rle_0 = masks.query('ImageId=="00021ddc3.jpg"')['EncodedPixels']
img_0 = masks_as_image(rle_0)
ax1.imshow(img_0[:, :, 0])
ax1.set_title('Image$_0$')
rle_1 = multi_rle_encode(img_0)
img_1 = masks_as_image(rle_1)
ax2.imshow(img_1[:, :, 0])
ax2.set_title('Image$_1$')
print('Check Decoding->Encoding',
'RLE_0:', len(rle_0), '->',
'RLE_1:', len(rle_1))
# plt.show()
# %% [markdown]
# # Split into training and validation groups
# We stratify by the number of boats appearing so we have nice balances in each set
# %%
masks['ships'] = masks['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0)
unique_img_ids = masks.groupby('ImageId').agg({'ships': 'sum'}).reset_index()
unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0)
unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x])
# some files are too small/corrupt
masks['ImageId'].apply(lambda image_id: DIR_IMAGES.joinpath(image_id).exists())
unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda image_id: os.stat(DIR_IMAGES.joinpath(image_id)).st_size/1024)
unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb']>50] # keep only 50kb files
unique_img_ids['file_size_kb'].hist()
# plt.show()
masks.drop(['ships'], axis=1, inplace=True)
unique_img_ids.sample(5)
logging.info("Unique records: {}".format(len(unique_img_ids)))
logging.info("Total records: {}".format(len(masks)))
# %%
train_ids, valid_ids = train_test_split(unique_img_ids,
test_size = 0.3,
stratify = unique_img_ids['ships'])
train_df = pd.merge(masks, train_ids)
valid_df = pd.merge(masks, valid_ids)
print(train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
# %% [markdown]
# ### Examine Number of Ship Images
# Here we examine how often ships appear and replace the ones without any ships with 0
# %%
train_df['ships'].hist()
# plt.show()
# %% [markdown] {"_uuid": "ef8115a80749ac47f295e9a70217a5553970c2b3"}
# # Undersample Empty Images
# Here we undersample the empty images to get a better balanced group with more ships to try and segment
# %%
train_df['grouped_ship_count'] = train_df['ships'].map(lambda x: (x+1)//2).clip(0, 7)
def sample_ships(in_df, base_rep_val=1500):
if in_df['ships'].values[0]==0:
return in_df.sample(base_rep_val//3) # even more strongly undersample no ships
else:
return in_df.sample(base_rep_val, replace=(in_df.shape[0]<base_rep_val))
balanced_train_df = train_df.groupby('grouped_ship_count').apply(sample_ships)
balanced_train_df['ships'].hist(bins=np.arange(10))
import numpy as np
from dynamol import data
import scipy.constants as sc
from functools import reduce
class Particle:
    def __init__(self, r=[], v=[], a=[], m=None, dim=3):
        """Base class for a particle
        Args:
            r (3D/2D array): position
            v (3D/2D array): velocity
            a (3D/2D array): acceleration
            m (int, optional): mass. Default: 1.
"""
if len(r) == 0:
if dim == 2:
r = data.zero2D
else:
r = data.zero3D
if len(v) == 0:
if dim == 2:
v = data.zero2D
else:
v = data.zero3D
if len(a) == 0:
if dim == 2:
a = data.zero2D
else:
a = data.zero3D
if dim not in (2, 3):
clsname = type(self).__name__
            print(f"{clsname}: Invalid dimension {dim}. Using 3.")
dim = 3
self.dim = dim
self.r = np.array(r)
self.v = np.array(v)
self.a = np.array(a)
if m is None:
m = 1
self.m = m
def __repr__(self, ):
        text = "Particle: \n"
        text += f"\tPosition: {self.r}\n"
        text += f"\tVelocity: {self.v}\n"
return text
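# --- Illustrative usage (added, not part of the original source): a 2D particle with
# arbitrary position and velocity; mass defaults to 1 and the acceleration falls back to
# dynamol's data.zero2D, as in the constructor above.
def _example_particle():
    p = Particle(r=[0.0, 0.0], v=[1.0, -0.5], dim=2)
    return p.r + 0.1 * p.v        # position after one explicit-Euler step with dt = 0.1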
class SetOfParticles:
    """Base class for a set of particles
"""
def __init__(self, N=27, dim=3):
self.N = N
self.dim = dim
self.particles = [Particle(dim=dim) for n in range(N)]
self.particles = np.array(self.particles, dtype=object)
def __getitem__(self, n):
return self.particles[n]
def __setitem__(self, n, item: Particle):
self.particles[n] = item
@property
def positions(self, ):
for p in self.particles:
yield p.r
@property
def velocities(self, ):
for p in self.particles:
yield p.v
@property
def accels(self, ):
for p in self.particles:
yield p.a
@property
def masses(self, ):
for p in self.particles:
yield p.m
@positions.setter
def positions(self, R):
for p, r in zip(self.particles, R):
p.r = np.array(r)
@velocities.setter
def velocities(self, V):
for p, v in zip(self.particles, V):
            p.v = np.array(v)