"""Main entry points for scripts."""
from __future__ import print_function, division
from argparse import ArgumentParser
from collections import OrderedDict
from copy import copy
from datetime import datetime
import glob
import json
import logging
import math
import os
import scipy.stats
import numpy as np
from .version import __version__
from .psffuncs import gaussian_moffat_psf
from .psf import TabularPSF, GaussianMoffatPSF
from .io import read_datacube, write_results, read_results
from .fitting import (guess_sky, fit_galaxy_single, fit_galaxy_sky_multi,
fit_position_sky, fit_position_sky_sn_multi,
RegularizationPenalty)
from .utils import yxbounds
from .extern import ADR, Hyper_PSF3D_PL
__all__ = ["cubefit", "cubefit_subtract", "cubefit_plot"]
MODEL_SHAPE = (32, 32)
SPAXEL_SIZE = 0.43
MIN_NMAD = 2.5 # Minimum Number of Median Absolute Deviations above
# the minimum spaxel value in fit_position
LBFGSB_FACTOR = 1e10
REFWAVE = 5000. # reference wavelength in Angstroms for PSF params and ADR
POSITION_BOUND = 3. # Bound on fitted positions relative to initial positions
def snfpsf(wave, psfparams, header, psftype):
"""Create a 3-d PSF based on SNFactory-specific parameterization of
Gaussian + Moffat PSF parameters and ADR."""
# Get Gaussian+Moffat parameters at each wavelength.
relwave = wave / REFWAVE - 1.0
ellipticity = abs(psfparams[0]) * np.ones_like(wave)
alpha = np.abs(psfparams[1] +
psfparams[2] * relwave +
psfparams[3] * relwave**2)
# correlated parameters (coefficients determined externally)
sigma = 0.545 + 0.215 * alpha # Gaussian parameter
beta = 1.685 + 0.345 * alpha # Moffat parameter
eta = 1.040 + 0.0 * alpha # gaussian ampl. / moffat ampl.
# Atmospheric differential refraction (ADR): Because of ADR,
# the center of the PSF will be different at each wavelength,
# by an amount that we can determine (pretty well) from the
# atmospheric conditions and the pointing and angle of the
# instrument. We calculate the offsets here as a function of
# observation and wavelength and input these to the model.
# Correction to parallactic angle and airmass for 2nd-order effects
# such as MLA rotation, mechanical flexures or finite-exposure
# corrections. These values have been trained on faint-std star
# exposures.
#
# `predict_adr_params` uses 'AIRMASS', 'PARANG' and 'CHANNEL' keys
# in input dictionary.
delta, theta = Hyper_PSF3D_PL.predict_adr_params(header)
# check for crazy values of pressure and temperature, and assign default
# values.
pressure = header.get('PRESSURE', 617.)
if not 550. < pressure < 650.:
pressure = 617.
temp = header.get('TEMP', 2.)
if not -20. < temp < 20.:
temp = 2.
adr = ADR(pressure, temp, lref=REFWAVE, delta=delta, theta=theta)
adr_refract = adr.refract(0, 0, wave, unit=SPAXEL_SIZE)
# adr_refract[0, :] corresponds to x, adr_refract[1, :] => y
xctr, yctr = adr_refract
if psftype == 'gaussian-moffat':
return GaussianMoffatPSF(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
elif psftype == 'tabular':
A = gaussian_moffat_psf(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
return TabularPSF(A)
else:
raise ValueError("unknown psf type: " + repr(psftype))
def setup_logging(loglevel, logfname=None):
# if loglevel isn't an integer, parse it as "debug", "info", etc:
if not isinstance(loglevel, int):
loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel, int):
print('Invalid log level: %s' % loglevel)
exit(1)
# remove logfile if it already exists
if logfname is not None and os.path.exists(logfname):
os.remove(logfname)
logging.basicConfig(filename=logfname, format="%(levelname)s %(message)s",
level=loglevel)
def cubefit(argv=None):
DESCRIPTION = "Fit SN + galaxy model to SNFactory data cubes."
parser = ArgumentParser(prog="cubefit", description=DESCRIPTION)
parser.add_argument("configfile",
help="configuration file name (JSON format)")
parser.add_argument("outfile", help="Output file name (FITS format)")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument("--logfile", help="Write log to this file "
"(default: print to stdout)", default=None)
parser.add_argument("--loglevel", default="info",
help="one of: debug, info, warning (default is info)")
parser.add_argument("--diagdir", default=None,
help="If given, write intermediate diagnostic results "
"to this directory")
parser.add_argument("--refitgal", default=False, action="store_true",
help="Add an iteration where galaxy model is fit "
"using all epochs and then data/SN positions are "
"refit")
parser.add_argument("--mu_wave", default=0.07, type=float,
help="Wavelength regularization parameter. "
"Default is 0.07.")
parser.add_argument("--mu_xy", default=0.001, type=float,
help="Spatial regularization parameter. "
"Default is 0.001.")
parser.add_argument("--psftype", default="gaussian-moffat",
help="Type of PSF: 'gaussian-moffat' or 'tabular'. "
"Currently, tabular means generate a tabular PSF from "
"gaussian-moffat parameters.")
args = parser.parse_args(argv)
setup_logging(args.loglevel, logfname=args.logfile)
# record start time
tstart = datetime.now()
logging.info("cubefit v%s started at %s", __version__,
tstart.strftime("%Y-%m-%d %H:%M:%S"))
tsteps = OrderedDict() # finish time of each step.
logging.info("parameters: mu_wave={:.3g} mu_xy={:.3g} refitgal={}"
.format(args.mu_wave, args.mu_xy, args.refitgal))
logging.info(" psftype={}".format(args.psftype))
logging.info("reading config file")
with open(args.configfile) as f:
cfg = json.load(f)
# basic checks on config contents.
assert (len(cfg["filenames"]) == len(cfg["xcenters"]) ==
len(cfg["ycenters"]) == len(cfg["psf_params"]))
# -------------------------------------------------------------------------
# Load data cubes from the list of FITS files.
nt = len(cfg["filenames"])
logging.info("reading %d data cubes", nt)
cubes = []
for fname in cfg["filenames"]:
logging.debug(" reading %s", fname)
cubes.append(read_datacube(os.path.join(args.dataprefix, fname)))
wave = cubes[0].wave
nw = len(wave)
# assign some local variables for convenience
refs = cfg["refs"]
master_ref = cfg["master_ref"]
if master_ref not in refs:
raise ValueError("master ref choice must be one of the final refs (" +
" ".join(refs.astype(str)) + ")")
nonmaster_refs = [i for i in refs if i != master_ref]
nonrefs = [i for i in range(nt) if i not in refs]
# Ensure that all cubes have the same wavelengths.
if not all(np.all(cubes[i].wave == wave) for i in range(1, nt)):
raise ValueError("all data must have same wavelengths")
# -------------------------------------------------------------------------
# PSF for each observation
logging.info("setting up PSF for all %d epochs", nt)
psfs = [snfpsf(wave, cfg["psf_params"][i], cubes[i].header, args.psftype)
for i in range(nt)]
# -------------------------------------------------------------------------
# Initialize all model parameters to be fit
yctr0 = np.array(cfg["ycenters"])
xctr0 = np.array(cfg["xcenters"])
galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
sn = np.zeros((nt, nw), dtype=np.float64) # SN spectrum at each epoch
skys = np.zeros((nt, nw), dtype=np.float64) # Sky spectrum at each epoch
yctr = yctr0.copy()
xctr = xctr0.copy()
snctr = (0., 0.)
# For writing out to FITS
modelwcs = {"CRVAL1": -SPAXEL_SIZE * (MODEL_SHAPE[0] - 1) / 2.,
"CRPIX1": 1,
"CDELT1": SPAXEL_SIZE,
"CRVAL2": -SPAXEL_SIZE * (MODEL_SHAPE[1] - 1) / 2.,
"CRPIX2": 1,
"CDELT2": SPAXEL_SIZE,
"CRVAL3": cubes[0].header["CRVAL3"],
"CRPIX3": cubes[0].header["CRPIX3"],
"CDELT3": cubes[0].header["CDELT3"]}
# -------------------------------------------------------------------------
# Position bounds
# Bounds on data position: shape=(nt, 2)
xctrbounds = np.vstack((xctr - POSITION_BOUND, xctr + POSITION_BOUND)).T
yctrbounds = np.vstack((yctr - POSITION_BOUND, yctr + POSITION_BOUND)).T
snctrbounds = (-POSITION_BOUND, POSITION_BOUND)
# For data positions, check that bounds do not extend
# past the edge of the model and adjust the minbound and maxbound.
# This doesn't apply to SN position.
gshape = galaxy.shape[1:3] # model shape
for i in range(nt):
dshape = cubes[i].data.shape[1:3]
(yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)
yctrbounds[i, 0] = max(yctrbounds[i, 0], yminabs)
yctrbounds[i, 1] = min(yctrbounds[i, 1], ymaxabs)
xctrbounds[i, 0] = max(xctrbounds[i, 0], xminabs)
xctrbounds[i, 1] = min(xctrbounds[i, 1], xmaxabs)
# -------------------------------------------------------------------------
# Guess sky
logging.info("guessing sky for all %d epochs", nt)
for i, cube in enumerate(cubes):
skys[i, :] = guess_sky(cube, npix=30)
# -------------------------------------------------------------------------
# Regularization penalty parameters
# Calculate rough average galaxy spectrum from all final refs.
spectra = np.zeros((len(refs), len(wave)), dtype=np.float64)
for j, i in enumerate(refs):
avg_spec = np.average(cubes[i].data, axis=(1, 2)) - skys[i]
mean_spec, bins, bn = scipy.stats.binned_statistic(wave, avg_spec,
bins=len(wave) // 10)
spectra[j] = np.interp(wave, bins[:-1] + np.diff(bins) / 2.0, mean_spec)
import numpy as np
from rlpyt.agents.base import AgentInputs
from rlpyt.samplers.parallel.gpu.action_server import (
ActionServer,
AlternatingActionServer,
NoOverlapAlternatingActionServer,
)
class AsyncActionServer(ActionServer):
def serve_actions_evaluation(self, itr):
"""Similar to normal action-server, but with different signaling logic
for ending evaluation early; receive signal from main sampler process
and pass it along to my workers.
"""
obs_ready, act_ready = self.sync.obs_ready, self.sync.act_ready
step_np, step_pyt = self.eval_step_buffer_np, self.eval_step_buffer_pyt
self.agent.reset()
agent_inputs = AgentInputs(
step_pyt.observation, step_pyt.action, step_pyt.reward
) # Fixed buffer objects.
for t in range(self.eval_max_T):
for b in obs_ready:
b.acquire()
for b_reset in np.where(step_np.done)[0]:
step_np.action[b_reset] = 0 # Null prev_action.
step_np.reward[b_reset] = 0 # Null prev_reward.
self.agent.reset_one(idx=b_reset)
action, agent_info = self.agent.step(*agent_inputs)
step_np.action[:] = action
step_np.agent_info[:] = agent_info
if self.ctrl.stop_eval.value: # From overall master process.
self.sync.stop_eval.value = True # Give to my workers.
for w in act_ready:
# assert not w.acquire(block=False) # Debug check.
w.release()
if self.sync.stop_eval.value: # Signal from sampler runner.
break
for b in obs_ready:
b.acquire() # Workers always do extra release; drain it.
assert not b.acquire(block=False) # Debug check.
for w in act_ready:
assert not w.acquire(block=False) # Debug check.
class AsyncAlternatingActionServer(AlternatingActionServer):
def serve_actions_evaluation(self, itr):
"""Similar to normal action-server, but with different signaling logic
for ending evaluation early; receive signal from main sampler process
and pass it along to my workers.
"""
obs_ready, act_ready = self.sync.obs_ready, self.sync.act_ready
obs_ready_pair = self.obs_ready_pair
act_ready_pair = self.act_ready_pair
step_np, step_np_pair = self.eval_step_buffer_np, self.eval_step_buffer_np_pair
agent_inputs_pair = self.eval_agent_inputs_pair
self.agent.reset()
step_np.action[:] = 0 # Null prev_action.
step_np.reward[:] = 0 # Null prev_reward.
stop = False
for t in range(self.eval_max_T):
for alt in range(2):
step_h = step_np_pair[alt]
for b in obs_ready_pair[alt]:
b.acquire()
# assert not b.acquire(block=False) # Debug check.
for b_reset in np.where(step_h.done)[0]:
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
``Histogram`` plots a histogram for a region drawn in the image, or for the
entire image.
**Plugin Type: Local**
``Histogram`` is a local plugin, which means it is associated with a channel.
An instance can be opened for each channel.
**Usage**
Click and drag to define a region within the image that will be used to
calculate the histogram. To take the histogram of the full image, click
the button in the UI labeled "Full Image".
.. note:: Depending on the size of the image, calculating the
full histogram may take time.
If a new image is selected for the channel, the histogram plot will be
recalculated based on the current parameters with the new data.
Unless disabled in the settings file for the histogram plugin, a line of
simple statistics for the box is calculated and shown in a line below the
plot.
**UI Controls**
Three radio buttons at the bottom of the UI are used to control the
effects of the click/drag action:
* select "Move" to drag the region to a different location
* select "Draw" to draw a new region
* select "Edit" to edit the region
To make a log plot of the histogram, check the "Log Histogram" checkbox.
To plot by the full range of values in the image instead of by the range
within the cut values, uncheck the "Plot By Cuts" checkbox.
The "NumBins" parameter determines how many bins are used in calculating
the histogram. Type a number in the box and press "Enter" to change the
default value.
**Cut Levels Convenience Controls**
Because a histogram is useful feedback for setting the cut levels,
controls are provided in the UI for setting the low and high cut levels
in the image, as well as for performing an auto cut levels, according to
the auto cut levels settings in the channel preferences.
**User Configuration**
"""
import numpy as np
from ginga.gw import Widgets
from ginga import GingaPlugin
from ginga import AutoCuts
try:
from ginga.gw import Plot
from ginga.util import plots
have_mpl = True
except ImportError:
have_mpl = False
__all__ = ['Histogram']
class Histogram(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Histogram, self).__init__(fv, fitsimage)
self.layertag = 'histogram-canvas'
self.histtag = None
# If True, limits X axis to lo/hi cut levels
self.xlimbycuts = True
self._split_sizes = [400, 500]
# get Histogram preferences
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_Histogram')
self.settings.add_defaults(draw_then_move=True, num_bins=2048,
hist_color='aquamarine', show_stats=True,
maxdigits=7)
self.settings.load(onError='silent')
# Set up histogram control parameters
self.histcolor = self.settings.get('hist_color', 'aquamarine')
self.numbins = self.settings.get('num_bins', 2048)
self.autocuts = AutoCuts.Histogram(self.logger)
# for formatting statistics line
self.show_stats = self.settings.get('show_stats', True)
maxdigits = self.settings.get('maxdigits', 7)
self.fmt_cell = '{:< %d.%dg}' % (maxdigits - 1, maxdigits // 2)
self.dc = self.fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
drawdims=True)
canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('edit-event', self.edit_cb)
canvas.add_draw_mode('move', down=self.drag,
move=self.drag, up=self.update)
canvas.register_for_cursor_drawing(self.fitsimage)
canvas.set_surface(self.fitsimage)
canvas.set_draw_mode('draw')
self.canvas = canvas
fitssettings = fitsimage.get_settings()
for name in ['cuts']:
fitssettings.get_setting(name).add_callback(
'set', self.cutset_ext_cb, fitsimage)
self.gui_up = False
def build_gui(self, container):
if not have_mpl:
raise ImportError('Install matplotlib to use this plugin')
top = Widgets.VBox()
top.set_border_width(4)
# Make the cuts plot
box, sw, orientation = Widgets.get_oriented_box(container)
box.set_border_width(4)
box.set_spacing(2)
paned = Widgets.Splitter(orientation=orientation)
self.w.splitter = paned
self.plot = plots.Plot(logger=self.logger,
width=400, height=400)
ax = self.plot.add_axis()
ax.grid(True)
w = Plot.PlotWidget(self.plot)
w.resize(400, 400)
paned.add_widget(Widgets.hadjust(w, orientation))
vbox = Widgets.VBox()
vbox.set_border_width(2)
# for statistics line
self.w.stats1 = Widgets.Label('')
vbox.add_widget(self.w.stats1)
captions = (('Cut Low:', 'label', 'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High', 'entry',
'Cut Levels', 'button'),
('Auto Levels', 'button'),
('Log Histogram', 'checkbutton',
'Plot By Cuts', 'checkbutton'),
('NumBins:', 'label', 'NumBins', 'entry'),
('Full Image', 'button'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.log_histogram.set_tooltip("Use the log of the pixel values for the "
"histogram (empty bins map to 10^-1)")
b.plot_by_cuts.set_tooltip("Only show the part of the histogram "
"between the cuts")
b.numbins.set_tooltip("Number of bins for the histogram")
b.full_image.set_tooltip("Use the full image for calculating the "
"histogram")
b.numbins.set_text(str(self.numbins))
b.cut_low.add_callback('activated', lambda w: self.cut_levels())
b.cut_high.add_callback('activated', lambda w: self.cut_levels())
b.cut_levels.add_callback('activated', lambda w: self.cut_levels())
b.auto_levels.add_callback('activated', lambda w: self.auto_levels())
b.log_histogram.set_state(self.plot.logy)
b.log_histogram.add_callback('activated', self.log_histogram_cb)
b.plot_by_cuts.set_state(self.xlimbycuts)
b.plot_by_cuts.add_callback('activated', self.plot_by_cuts_cb)
b.numbins.add_callback('activated', lambda w: self.set_numbins_cb())
b.full_image.add_callback('activated', lambda w: self.full_image_cb())
fr = Widgets.Frame("Histogram")
vbox.add_widget(w)
fr.set_widget(vbox)
box.add_widget(fr, stretch=0)
paned.add_widget(sw)
paned.set_sizes(self._split_sizes)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Move")
btn1.set_state(mode == 'move')
btn1.add_callback('activated',
lambda w, val: self.set_mode_cb('move', val))
btn1.set_tooltip("Choose this to position box")
self.w.btn_move = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Draw", group=btn1)
btn2.set_state(mode == 'draw')
btn2.add_callback('activated',
lambda w, val: self.set_mode_cb('draw', val))
btn2.set_tooltip("Choose this to draw a replacement box")
self.w.btn_draw = btn2
hbox.add_widget(btn2)
btn3 = Widgets.RadioButton("Edit", group=btn1)
btn3.set_state(mode == 'edit')
btn3.add_callback('activated',
lambda w, val: self.set_mode_cb('edit', val))
btn3.set_tooltip("Choose this to edit a box")
self.w.btn_edit = btn3
hbox.add_widget(btn3)
if self.histtag is None:
self.w.btn_move.set_enabled(False)
self.w.btn_edit.set_enabled(False)
hbox.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(paned, stretch=5)
top.add_widget(hbox, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.plot.set_titles(rtitle="Histogram")
# insert canvas, if not already
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.get_object_by_tag(self.layertag)
except KeyError:
# Add histogram canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
#self.canvas.delete_all_objects()
self.resume()
def pause(self):
self.canvas.ui_set_active(False)
def resume(self):
# turn off any mode user may be in
self.modes_off()
self.canvas.ui_set_active(True)
self.fv.show_status("Draw a rectangle with the right mouse button")
def stop(self):
self.gui_up = False
self._split_sizes = self.w.splitter.get_sizes()
# remove the rect from the canvas
## try:
## self.canvas.delete_object_by_tag(self.histtag)
## except Exception:
## pass
##self.histtag = None
# remove the canvas from the image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except Exception:
pass
self.fv.show_status("")
def full_image_cb(self):
canvas = self.canvas
try:
canvas.delete_object_by_tag(self.histtag)
except Exception:
pass
image = self.fitsimage.get_image()
width, height = image.get_size()
x1, y1, x2, y2 = 0, 0, width - 1, height - 1
tag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle='dash'))
self.draw_cb(canvas, tag)
def get_data(self, image, x1, y1, x2, y2, z=None):
if z is not None:
data = image.get_data()
data = data[y1:y2, x1:x2, z]
else:
tup = image.cutout_adjust(x1, y1, x2, y2)
data = tup[0]
return data
def histogram(self, image, x1, y1, x2, y2, z=None, pct=1.0, numbins=2048):
self.logger.warning("This call will be deprecated soon. "
"Use get_data() and histogram_data().")
data_np = self.get_data(image, x1, y1, x2, y2, z=z)
return self.histogram_data(data_np, pct=pct, numbins=numbins)
def histogram_data(self, data, pct=1.0, numbins=2048):
return self.autocuts.calc_histogram(data, pct=pct, numbins=numbins)
def redo(self):
if self.histtag is None:
return
obj = self.canvas.get_object_by_tag(self.histtag)
if obj.kind != 'compound':
return True
bbox = obj.objects[0]
# Do histogram on the points within the rect
image = self.fitsimage.get_image()
self.plot.clear()
numbins = self.numbins
depth = image.get_depth()
if depth != 3:
data_np = self.get_data(image, int(bbox.x1), int(bbox.y1),
int(bbox.x2), int(bbox.y2))
res = self.histogram_data(data_np, pct=1.0, numbins=numbins)
# used with 'steps-post' drawstyle, this x and y assignment
# gives correct histogram-steps
x = res.bins
y = np.append(res.dist, res.dist[-1])
ymax = y.max()
if self.plot.logy:
y = np.choose(y > 0, (.1, y))
self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
title="Pixel Value Distribution",
color='blue', alpha=1.0, drawstyle='steps-post')
else:
colors = ('red', 'green', 'blue')
ymax = 0
for z in range(depth):
data_np = self.get_data(image, int(bbox.x1), int(bbox.y1),
int(bbox.x2), int(bbox.y2), z=z)
res = self.histogram_data(data_np, pct=1.0, numbins=numbins)
# used with 'steps-post' drawstyle, this x and y assignment
# gives correct histogram-steps
x = res.bins
y = np.append(res.dist, res.dist[-1])
ymax = max(ymax, y.max())
if self.plot.logy:
y = np.choose(y > 0, (.1, y))
self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
title="Pixel Value Distribution",
color=colors[z], alpha=0.33,
drawstyle='steps-post')
# show cut levels
loval, hival = self.fitsimage.get_cut_levels()
self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
linestyle='-', color='red')
self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
linestyle='-', color='green')
if self.xlimbycuts:
self.plot.ax.set_xlim(loval, hival)
# Make x axis labels a little more readable
## lbls = self.plot.ax.xaxis.get_ticklabels()
## for lbl in lbls:
## lbl.set(rotation=45, horizontalalignment='right')
self.w.cut_low.set_text(str(loval))
self.w.cut_high.set_text(str(hival))
self.plot.fig.canvas.draw()
if self.show_stats:
# calculate statistics on finite elements in box
i = np.isfinite(data_np)
if np.any(i):
maxval = np.max(data_np[i])
import torch as th
import os
from torch import nn
from torch.optim import Adam, RMSprop
import numpy as np
from pytorch_DRL.common.Agent import Agent
from pytorch_DRL.common.Model import ActorNetwork, CriticNetwork
from pytorch_DRL.common.utils import entropy, index_to_one_hot, to_tensor_var, dict_to_arr
class MAA2C(Agent):
"""
A multi-agent algorithm trained with Advantage Actor-Critic
- each actor takes its local observations as input
- agents interact with the environment to collect experience
- agents train on the collected experience to update their policies
Parameters
- training_strategy:
- cocurrent
- each agent learns its own individual policy which is independent
- multiple policies are optimized simultaneously
- centralized (see MADDPG in [1] for details)
- centralized training and decentralized execution
- each decentralized actor maps its local observations to an action using its individual policy
- the centralized critic takes the states and actions of all agents as input; each actor
has its own critic for estimating the value function, which allows each actor to have a
different reward structure, e.g., cooperative, competitive, or mixed tasks
- actor_parameter_sharing:
- True: all actors share a single policy, which enables parameter and experience sharing;
this is mostly useful when the agents are homogeneous. Please see Sec. 4.3 in [2] and
Sec. 4.1 & 4.2 in [3] for details.
- False: each actor uses an independent policy
- critic_parameter_sharing:
- True: all actors share a single critic, which enables parameter and experience sharing;
this is mostly useful when the agents are homogeneous and reward sharing holds. Please
see Sec. 4.1 in [3] for details.
- False: each actor uses an independent critic (though each critic can take other agents'
actions as input, see MADDPG in [1] for details)
Reference:
[1] Multi-Agent Actor-Critic for Mixed Cooperative-Competitive Environments
[2] Cooperative Multi-Agent Control Using Deep Reinforcement Learning
[3] Parameter Sharing Deep Deterministic Policy Gradient for Cooperative Multi-agent Reinforcement Learning
"""
def __init__(self, env, n_agents, state_dim, action_dim,
memory_capacity=10000, max_steps=None,
roll_out_n_steps=10,
reward_gamma=0.99, reward_scale=1., done_penalty=None,
actor_hidden_size=32, critic_hidden_size=32,
actor_output_act=nn.functional.log_softmax, critic_loss="mse",
actor_lr=0.001, critic_lr=0.001,
optimizer_type="rmsprop", entropy_reg=0.01,
max_grad_norm=0.5, batch_size=100, episodes_before_train=100,
epsilon_start=0.9, epsilon_end=0.01, epsilon_decay=200,
use_cuda=True, training_strategy="cocurrent",
actor_parameter_sharing=False, critic_parameter_sharing=False):
super(MAA2C, self).__init__(env, state_dim, action_dim,
memory_capacity, max_steps,
reward_gamma, reward_scale, done_penalty,
actor_hidden_size, critic_hidden_size,
actor_output_act, critic_loss,
actor_lr, critic_lr,
optimizer_type, entropy_reg,
max_grad_norm, batch_size, episodes_before_train,
epsilon_start, epsilon_end, epsilon_decay,
use_cuda)
assert training_strategy in ["cocurrent", "centralized"]
self.n_agents = n_agents
self.roll_out_n_steps = roll_out_n_steps
self.training_strategy = training_strategy
self.actor_parameter_sharing = actor_parameter_sharing
self.critic_parameter_sharing = critic_parameter_sharing
self.env_state = dict_to_arr(self.env.reset(), self.n_agents)
# one independent network per agent (explicit parameter sharing, if requested, is wired up below)
self.actors = [ActorNetwork(self.state_dim, self.actor_hidden_size, self.action_dim, self.actor_output_act) for _ in range(self.n_agents)]
if self.training_strategy == "cocurrent":
self.critics = [CriticNetwork(self.state_dim, self.action_dim, self.critic_hidden_size, 1) for _ in range(self.n_agents)]
elif self.training_strategy == "centralized":
critic_state_dim = self.n_agents * self.state_dim
critic_action_dim = self.n_agents * self.action_dim
self.critics = [CriticNetwork(critic_state_dim, critic_action_dim, self.critic_hidden_size, 1) for _ in range(self.n_agents)]
if optimizer_type == "adam":
self.actor_optimizers = [Adam(a.parameters(), lr=self.actor_lr) for a in self.actors]
self.critic_optimizers = [Adam(c.parameters(), lr=self.critic_lr) for c in self.critics]
elif optimizer_type == "rmsprop":
self.actor_optimizers = [RMSprop(a.parameters(), lr=self.actor_lr) for a in self.actors]
self.critic_optimizers = [RMSprop(c.parameters(), lr=self.critic_lr) for c in self.critics]
# tricky and memory consumed implementation of parameter sharing
if self.actor_parameter_sharing:
for agent_id in range(1, self.n_agents):
self.actors[agent_id] = self.actors[0]
self.actor_optimizers[agent_id] = self.actor_optimizers[0]
if self.critic_parameter_sharing:
for agent_id in range(1, self.n_agents):
self.critics[agent_id] = self.critics[0]
self.critic_optimizers[agent_id] = self.critic_optimizers[0]
if self.use_cuda:
for a in self.actors:
a.cuda()
for c in self.critics:
c.cuda()
# agent interact with the environment to collect experience
def interact(self):
if (self.max_steps is not None) and (self.n_steps >= self.max_steps):
# env.reset() returns a dictionary; transform it to an array
self.env_state = dict_to_arr(self.env.reset(), self.n_agents)
states = []
actions = []
rewards = []
# take n steps
for i in range(self.roll_out_n_steps):
states.append(self.env_state)
action = self.exploration_action(self.env_state)
# print(action[0].shape)
# action from arr to dictionary
action_dict = {agent_id: action[agent_id] for agent_id in range(self.n_agents)}
next_state, reward, done, _ = self.env.step(action_dict)
# next_state, reward, done are returned as dictionaries; convert them to arrays
next_state = dict_to_arr(next_state, self.n_agents)
reward = dict_to_arr(reward, self.n_agents)
done = dict_to_arr(done, self.n_agents)
actions.append([index_to_one_hot(a, self.action_dim) for a in action])
rewards.append(reward)
done = done[0]
final_state = next_state
self.env_state = next_state
if done:
self.env_state = dict_to_arr(self.env.reset(), self.n_agents)
break
# discount reward
if done:
final_r = [0.0] * self.n_agents
self.n_episodes += 1
self.episode_done = True
else:
self.episode_done = False
final_action = self.action(final_state)
one_hot_action = [index_to_one_hot(a, self.action_dim) for a in final_action]
final_r = self.value(final_state, one_hot_action)
rewards = np.array(rewards)
for agent_id in range(self.n_agents):
rewards[:,agent_id] = self._discount_reward(rewards[:,agent_id], final_r[agent_id])
rewards = rewards.tolist()
self.n_steps += 1
self.memory.push(states, actions, rewards)
# train on a roll out batch
def train(self):
if self.n_episodes <= self.episodes_before_train:
return
batch = self.memory.sample(self.batch_size)
states_var = to_tensor_var(batch.states, self.use_cuda).view(-1, self.n_agents, self.state_dim)
actions_var = to_tensor_var(batch.actions, self.use_cuda).view(-1, self.n_agents, self.action_dim)
rewards_var = to_tensor_var(batch.rewards, self.use_cuda).view(-1, self.n_agents, 1)
whole_states_var = states_var.view(-1, self.n_agents*self.state_dim)
whole_actions_var = actions_var.view(-1, self.n_agents*self.action_dim)
for agent_id in range(self.n_agents):
# update actor network
self.actor_optimizers[agent_id].zero_grad()
action_log_probs = self.actors[agent_id](states_var[:,agent_id,:])
entropy_loss = th.mean(entropy(th.exp(action_log_probs)))
action_log_probs = th.sum(action_log_probs * actions_var[:,agent_id,:], 1)
if self.training_strategy == "cocurrent":
values = self.critics[agent_id](states_var[:,agent_id,:], actions_var[:,agent_id,:])
elif self.training_strategy == "centralized":
values = self.critics[agent_id](whole_states_var, whole_actions_var)
advantages = rewards_var[:,agent_id,:] - values.detach()
pg_loss = -th.mean(action_log_probs * advantages)
actor_loss = pg_loss - entropy_loss * self.entropy_reg
actor_loss.backward()
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm(self.actors[agent_id].parameters(), self.max_grad_norm)
self.actor_optimizers[agent_id].step()
# update critic network
self.critic_optimizers[agent_id].zero_grad()
target_values = rewards_var[:,agent_id,:]
if self.critic_loss == "huber":
critic_loss = nn.functional.smooth_l1_loss(values, target_values)
else:
critic_loss = nn.MSELoss()(values, target_values)
critic_loss.backward()
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm(self.critics[agent_id].parameters(), self.max_grad_norm)
self.critic_optimizers[agent_id].step()
# predict softmax action based on state
def _softmax_action(self, state):
state_var = to_tensor_var([state], self.use_cuda)
softmax_action = np.zeros((self.n_agents, self.action_dim), dtype=np.float64)
for agent_id in range(self.n_agents):
softmax_action_var = th.exp(self.actors[agent_id](state_var[:,agent_id,:]))
if self.use_cuda:
softmax_action[agent_id] = softmax_action_var.data.cpu().numpy()[0]
else:
softmax_action[agent_id] = softmax_action_var.data.numpy()[0]
return softmax_action
# predict action based on state, added random noise for exploration in training
def exploration_action(self, state):
softmax_action = self._softmax_action(state)
actions = [0]*self.n_agents
epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
np.exp(-1. * self.n_steps / self.epsilon_decay)
for agent_id in range(self.n_agents):
if np.random.rand() < epsilon:
actions[agent_id] = np.random.choice(self.action_dim)
else:
actions[agent_id] = np.argmax(softmax_action[agent_id])
return actions
# predict action based on state for execution
def action(self, state):
softmax_actions = self._softmax_action(state)
actions = np.argmax(softmax_actions, axis=1)
import abc
import logging
import numpy as np
from numpy.linalg import norm
from gail.crowd_sim.envs.policy.policy_factory import policy_factory
from gail.crowd_sim.envs.utils.action import ActionXY, ActionRot
from gail.crowd_sim.envs.utils.state import JointState, ObservableState, FullState
class Agent(object):
def __init__(self, config, section, max_rot=1, kinematics='holonomic'):
"""
Base class for robot and human. Has the physical attributes of an agent.
"""
self.visible = getattr(config, section).visible
self.v_pref = getattr(config, section).v_pref
self.radius = getattr(config, section).radius
self.policy = policy_factory[getattr(config, section).policy]()
self.sensor = getattr(config, section).sensor
# self.kinematics = self.policy.kinematics if self.policy is not None else None
self.kinematics = kinematics if self.policy is not None else None
self.px = None
self.py = None
self.gx = None
self.gy = None
self.vx = None
self.vy = None
self.theta = None
self.time_step = None
self.max_rot = max_rot
def print_info(self):
logging.info('Agent is {} and has {} kinematic constraint'.format(
'visible' if self.visible else 'invisible', self.kinematics))
def set_policy(self, policy):
if self.time_step is None:
raise ValueError('Time step is None')
policy.set_time_step(self.time_step)
self.policy = policy
self.kinematics = self.kinematics #policy.kinematics
def sample_random_attributes(self):
"""
Sample agent radius and v_pref attributes from a uniform distribution
:return:
"""
self.v_pref = np.random.uniform(0.5, 1.5)
self.radius = np.random.uniform(0.3, 0.5)
def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
self.px = px
self.py = py
self.sx = px
self.sy = py
self.gx = gx
self.gy = gy
self.vx = vx
self.vy = vy
self.theta = theta
if radius is not None:
self.radius = radius
if v_pref is not None:
self.v_pref = v_pref
def get_observable_state(self):
return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)
def get_next_observable_state(self, action):
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
next_px, next_py = pos
if self.kinematics == 'holonomic':
next_vx = action.vx
next_vy = action.vy
else:
next_vx = action.v * np.cos(self.theta)
next_vy = action.v * np.sin(self.theta)
return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)
def get_next_full_state(self, action):
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
next_px, next_py = pos
if self.kinematics == 'holonomic':
next_theta = self.theta
next_vx = action.vx
next_vy = action.vy
else:
next_theta = (self.theta + action[1]*self.max_rot) % (2 * np.pi)
next_vx = action.v * np.cos(self.theta)
next_vy = action.v * np.sin(self.theta)
return FullState(next_px, next_py, next_vx, next_vy, self.radius, self.gx, self.gy, self.v_pref, next_theta)
def get_full_state(self):
return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)
def get_position(self):
return self.px, self.py
def set_position(self, position):
self.px = position[0]
self.py = position[1]
def get_goal_position(self):
return self.gx, self.gy
def get_start_position(self):
return self.sx, self.sy
def get_velocity(self):
return self.vx, self.vy
def set_velocity(self, velocity):
self.vx = velocity[0]
self.vy = velocity[1]
@abc.abstractmethod
def act(self, ob):
"""
Compute state using received observation and pass it to policy
"""
return
def check_validity(self, action):
if type(action) is np.ndarray:
assert action.shape[0] == 2
else:
if self.kinematics == 'holonomic':
assert isinstance(action, ActionXY)
else:
assert isinstance(action, ActionRot)
def compute_position(self, action, delta_t):
self.check_validity(action)
if self.kinematics == 'holonomic':
px = self.px + action[0] * delta_t
py = self.py + action[1] * delta_t
else:
if self.reverse:
action_v = action[0]
else:
action_v = (action[0]+1)/2
theta = self.theta + action[1]*self.max_rot
px = self.px + np.cos(theta) * action_v * delta_t
py = self.py + np.sin(theta) * action_v * delta_t
return px, py
def step(self, action):
"""
Perform an action and update the state
"""
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
self.px, self.py = pos
if type(action) is np.ndarray:
if self.kinematics == 'holonomic':
self.vx = action[0]
self.vy = action[1]
else:
self.theta = (self.theta + action[1]*self.max_rot) % (2 * np.pi)
self.vx = action[0] * np.cos(self.theta)
self.vy = action[0] * np.sin(self.theta)
else:
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
self.theta = (self.theta + action.r*self.max_rot) % (2 * np.pi)
self.vx = action.v * np.cos(self.theta)
self.vy = action.v * np.sin(self.theta)
def reached_destination(self):
return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius
class BasicPolicy():
def __init__(self):
self.multiagent_training = False
class BasicRobot(Agent):
def __init__(self, relative=False, xy_relative=True, max_rot=np.pi/10, kinematics='holonomic', reverse=True, value=None):
self.visible = True
self.v_pref = 1
self.radius = 0.3
self.sensor = "coordinates"
self.px = None
self.py = None
self.gx = None
self.gy = None
self.vx = None
self.vy = None
self.theta = None
self.time_step = None
self.sensor_radius = 15
#Policy is required to pass asserts later, it isn't used at all due to the custom act
self.policy = BasicPolicy()
self.kinematics = kinematics
self.relative = relative
self.xy_relative = xy_relative
self.max_rot = max_rot
self.reverse=reverse
# Required to visualise
self.value = value
def engagement2(self, target):
r = self.get_full_state()
pos = np.array([r.px, r.py])
target = np.array([target[0], target[1]])
dist = np.linalg.norm(target - pos)
yaw = r.theta % (2 * np.pi)
R = np.array([[np.cos(yaw), np.sin(yaw)],
[-np.sin(yaw), np.cos(yaw)]])
T_p = target - pos  # target position relative to the robot, before rotating into its frame
T_p = R.dot(T_p)
alpha = np.arctan2(T_p[1], T_p[0])
if self.xy_relative:
return dist*np.cos(alpha), dist*np.sin(alpha)
else:
return alpha, dist
def set_act(self, action_function):
self.action_function = action_function
def act(self, ob):
r = self.get_full_state()
obs = []
if self.relative:
#In relative mode the robot does not have access to its current global position.
r.px = 0
r.py = 0
#Engagement 2 translates the position of a target to the robots coordinate frame in this case its the goal.
goal_rel = self.engagement2([r.gx,r.gy])
r.gx = goal_rel[0]
r.gy = goal_rel[1]
#Translate each human to the robots coordinate frame.
for o in ob:
rel = self.engagement2(o)
obs.append(ObservableState(rel[0], rel[1], o.vx, o.vy, o.radius))
#Combine the robot and human observations
state = JointState(r, ob)
return self.action_function(state)
def step(self, action):
"""
Perform an action and update the state
"""
self.check_validity(action)
pos = self.compute_position(action, self.time_step)
self.px, self.py = pos
if type(action) is np.ndarray:
if self.kinematics == 'holonomic':
self.vx = action[0]
self.vy = action[1]
else:
#The rotation of the robot is reduced by self.max_rot by default np.pi/10
self.theta = (self.theta + action[1]*self.max_rot) % (2 * np.pi)
if self.reverse:
action_v = action[0]
else:
action_v = (action[0]+1)/2
self.vx = action_v * np.cos(self.theta)
self.vy = action_v * np.sin(self.theta)
else:
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
#The rotation of the robot is reduced by self.max_rot by default np.pi/10
self.theta = (self.theta + action.r*self.max_rot) % (2 * np.pi)
if self.reverse:
action_v = action.v
else:
action_v = (action.v+1)/2
self.vx = action_v * np.cos(self.theta)
#-*- coding:Utf-8 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.signature
.. autosummary::
:members:
"""
import os
import glob
import doctest
import numpy as np
#import scipy as sp
import scipy.linalg as la
import pdb
import h5py
import copy
import time
import pickle
import logging
import networkx as nx
import shapely.geometry as shg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pylayers.gis.layout as layout
import pylayers.util.geomutil as geu
import pylayers.util.cone as cone
#import pylayers.util.graphutil as gph
import pylayers.util.pyutil as pyu
import pylayers.util.plotutil as plu
from pylayers.antprop.rays import Rays
from pylayers.util.project import *
import heapq
import shapely.geometry as sh
import shapely.ops as sho
from tqdm import tqdm
#from numba import autojit
logger = logging.getLogger(__name__)
def plot_lines(ax, ob, color = []):
""" plot lines with colors
Parameters
----------
ax : matplotlib axis
ob : list of lines
color : list (optional)
"""
from descartes.patch import PolygonPatch
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
""" plot polygon
Parameters
----------
ax :
ob :
"""
from descartes.patch import PolygonPatch
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
def showsig(L,s,tx=[],rx=[]):
""" show signature
Parameters
----------
L : Layout
s :
tx :
rx :
"""
L.display['thin']=True
fig,ax = L.showGs()
L.display['thin']=False
L.display['edlabel']=True
L.showGs(fig=fig,ax=ax,edlist=s,width=4)
if tx !=[]:
plt.plot(tx[0],tx[1],'x')
if rx !=[]:
plt.plot(rx[0],rx[1],'+')
plt.title(str(s))
plt.show()
L.display['edlabel']=False
def gidl(g):
""" gi without diffraction
Returns
-------
gr : A graph
"""
edlist=[]
pos={}
for n in g.nodes():
if len(n)>1:
edlist.append(n)
gr = g.subgraph(edlist)
for k in gr.edges():
#print(k)
di = gr[k[0]][k[1]]
ke = di['output'].keys()
va = di['output'].values()
keva = zip(ke,va)
keva_valid = [ x for x in keva if len(x[0])>1]
gr[k[0]][k[1]]['output'] = dict(keva_valid)
dpos = {k:g.pos[k] for k in edlist}
gr.pos=dpos
return(gr)
def shLtmp(L):
seg_connect = {x:L.Gs.edge[x].keys() for x in L.Gs.nodes() if x >0}
dpts = {x[0]:(L.Gs.pos[x[1][0]],L.Gs.pos[x[1][1]]) for x in seg_connect.items() }
L._shseg = {p[0]:sh.LineString(p[1]) for p in dpts.items()}
def showsig2(lsig,L,tahe):
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
for k in lsig:
k0 = k[0]
k1 = k[1]
if k0>0:
npt = L.Gs[k0].keys()
pta = np.array(L.Gs.pos[npt[0]])
phe = np.array(L.Gs.pos[npt[1]])
if k1==2:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='r',linewidth=2)
if k1 ==3:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='g',linewidth=2)
for th in tahe:
ta = th[0]
he = th[1]
plu.displot(ta.reshape(2,1),he.reshape(2,1),color='k',linewidth=1)
tahe = np.array(tahe) # Nseg x tahe x xy
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
seq = lsig[:,0]
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# twisted = True
lef = sh.LineString((pta[:,0],phe[:,-1]))
rig = sh.LineString((phe[:,0],pta[:,-1]))
else:
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
lef = sh.LineString((pta[:,0],pta[:,-1]))
rig = sh.LineString((phe[:,0],phe[:,-1]))
plt.ion()
plt.gcf()
#L.showG('s',labels=True)
lines = [L._shseg[seq[0]]]
plt.title(str(lsig))
plot_lines(ax=plt.gca(),ob=lines)
plot_lines(ax=plt.gca(),ob=[lef],color='g')
plot_lines(ax=plt.gca(),ob=[rig],color='r')
plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
#plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
#plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
plt.axis('auto')
plt.legend()
#@profile
def valid(lsig,L,tahe=[]):
"""
Check if a signature is valid.
If a segment of a given signature neither lies inside nor touches the polygon
described by the 1st and last segments, the signature is not valid.
Parameters
----------
lsig : list of tuple from run |signatures
L : layout
tahe :
lensig , ta|he , x,y
Returns
-------
inside : boolean
is the signature valid ?
"""
lensi = len(lsig)
if lensi<=3:
return True
# DEBUG
# if lensi == 4:
# if np.all(lsig == np.array([[ 5, 2, 67, 58],[ 2, 2, 3, 2]]).T):
# import ipdb
# ipdb.set_trace()
# ensure compatibility with Signature.run where
# lsig is a list of tuple
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
pta = np.empty((2,lensi))
phe = np.empty((2,lensi))
seq = lsig[:,0]
# upos = np.where(seq>0)[0]
# uneg = np.where(seq<0)[0]
# tahep = L.seg2pts(seq[upos])
# tahen = np.array([L.Gs.pos[i] for i in seq[uneg]]).T
# tahen = np.vstack((tahen,tahen))
# tahe = np.empty((4,lensi))
# tahe[:,upos]=tahep
# try:
# tahe[:,uneg]=tahen
# except:
# pass
# pts = [k for i in seq for k in [L.Gs[i].keys()[0],L.Gs[i].keys()[1]]]
# if tahe ==[]:
# print 'run tahe\n',np.array(tahe)
# if tahe == []:
# pts = [L.Gs[i].keys() for i in seq]
# tahe = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# pta[:,0] = tahe[0,0,:]
# phe[:,0] = tahe[0,1,:]
# typ = lsig[:,1]
# mirror=[]
# # lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahe[i,0,:].reshape(2,1)
# pbm = tahe[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# else:
tahe = np.array(tahe) # Nseg x tahe x xy
# pdb.set_trace()
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
# ### ONLY FOR TEST TO BE DELETED
# pts = [L.Gs[i].keys() for i in seq]
# tahetest = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# ptat = np.empty((2,lensi))
# phet = np.empty((2,lensi))
# ptat[:,0] = tahetest[0,0,:]
# phet[:,0] = tahetest[0,1,:]
# typ = lsig[:,1]
# mirror=[]
#lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahetest[i,0,:].reshape(2,1)
# pbm = tahetest[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# tahetest = np.dstack((ptat.T,phet.T)).swapaxes(1,2)
# if np.sum(tahe-tahetest) != 0:
# import ipdb
# ipdb.set_trace()
# determine the 2 side of the polygon ( top/bottom = tahe[0]/tahe[-1])
#vl and vr are 2 director vector lying on the polygon side.
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
# vr = ( pta[:,0],phe[:,-1])
# vl = ( phe[:,0],pta[:,-1])
# twisted = True
#lef = sh.LineString((pta[:,0],pta[:,-1]))
#rig = sh.LineString((phe[:,0],phe[:,-1]))
else:
vr = ( pta[:,0], phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# vr = ( pta[:,0],pta[:,-1])
# vl = ( phe[:,0],phe[:,-1])
# twisted = False
#lef = sh.LineString((pta[:,0],phe[:,-1]))
#rig = sh.LineString((pta[:,-1],phe[:,0]))
# looking situation where Tail and head are not inside the polygon
# => both tahe are left of vr and vl
#=> both tahe are right of vr and vl
lta = geu.isleft(pta[:,1:-1],vl[0][:,None],vl[1][:,None])
rta = geu.isleft(pta[:,1:-1],vr[0][:,None],vr[1][:,None])
lhe = geu.isleft(phe[:,1:-1],vl[0][:,None],vl[1][:,None])
rhe = geu.isleft(phe[:,1:-1],vr[0][:,None],vr[1][:,None])
out = (lta & lhe ) | (~rta & ~rhe)
inside = ~out
# #debug
# plt.ion()
# plt.gcf()
# #plt.title(str(cond))
# #Ok plot_lines(ax=plt.gca(),ob=lines)
# plot_lines(ax=plt.gca(),ob=[lef],color='g')
# plot_lines(ax=plt.gca(),ob=[rig],color='r')
# plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
# plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
# plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
# plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
# plt.legend()
return np.all(inside)
class Signatures(PyLayers,dict):
""" set of Signature given 2 Gt cycle (convex) indices
Attributes
----------
L : gis.Layout
source : int
source convex cycle
target : int
target convex cycle
"""
def __init__(self,L,source,target,cutoff=3,threshold = 0.6):
""" object constructor
Parameters
----------
L : Layout
dump : int
source : int
cycle number
target : int
cycle index
cutoff : int
limiting depth level in graph exploration (default 3)
A signature is a dict of arrays
The array is an interleaving between nstr and type of interaction
typeInt = 1,2,3 (extremity,diffraction,reflexion,transmission)
Si[1]
np.array([5,2,19,2,26,2,72,2])
"""
self.L = L
self.dump = -1
self.source = source
self.target = target
self.cutoff = cutoff
self.threshold = threshold
self.ratio = {}
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
def __repr__(self):
def fun1(x):
if x==1:
return('R')
if x==2:
return('T')
if x==3:
return('D')
size = {}
s = self.__class__.__name__ + '\n' + '----------'+'\n'
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = int(len(self[k])/2)
s = s + 'from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n'
if self.dump==-1:
ldump = self.keys()
else:
ldump = self.dump
for k in ldump:
s = s + str(k) + ' : ' + str(size[k]) + '\n'
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for l in np.arange(a.shape[2]):
for i in range(k):
if i==k-1:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+')'
else:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+'),'
s = s+'\n'
return(s)
def __len__(self):
nsig = 0
for k in self:
size = int(len(self[k])/2)
nsig += size
return(nsig)
def compl(self,lint,L):
""" completion from lint
Parameters
----------
lint : list
list of interactions
Examples
--------
>>> Si.compl([(6220,3),(6262,3),(6241,3)],DL.L)
"""
# all group of interactions
for k in self:
if k > len(lint):
Si = self[k]
Ns,Nb = Si.shape
# all signatures form a group of interactions
for l in range(int(Ns/2)):
# all interactions
b1 = True
for i1,it in enumerate(lint):
if ((Si[2*l,i1] == it[0]) and
(Si[2*l+1,i1] == it[1])):
pass
else:
b1 = False
if b1:
sig = Si[2*l:2*l+2,:]
sigi = self.sig2inter(L,sig)
#print(k,l,' :',sigi)
# all
def sig2inter(self,L,lsi=[]):
''' convert signature to corresponding list of interactions in Gi
Parameters
----------
L : Layout
lsi : nd.array
signature (2xnb_sig,sig_length)
Examples
--------
>>> lsi = DL.Si[3]
>>> DL.Si.sig2inter(DL.L,lsi)
'''
assert L.isbuilt, AttributeError('Layout is not built')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
tlinter = []
for uu in range(0,len(lsi),2):
si = lsi[uu:uu+2,:]
lsig = si.shape[1]
linter = []
for k in range(lsig):
# nstr : seg or points
nstr = si[0,k]
typ = si[1,k]
# cycles connected to seg or point
seg_cy = copy.deepcopy(L.Gs.node[nstr]['ncycles'])
if k == 0:
cy0 = self.source
lcy0 =[cy0]
if (typ==3) or (typ==2):
cy0 = list(set(seg_cy).intersection(set(lcy0)))[0]
cy1 = [x for x in seg_cy if x!= cy0 ][0]
if k == (lsig -1):
cy1 = self.target
if typ == 1:
inter = (nstr,)
lcy0 = L.Gs.node[nstr]['ncycles']
elif typ == 2:
inter = (nstr,cy0)
elif typ == 3:
inter = (nstr,cy0,cy1)
# changing cycle
lcy0 = [cy1]
linter.append(inter)
tlinter.append(linter)
if len(lsi) == 2:
tlinter=tlinter[0]
return tlinter
def sig2prob(self,L,lsi):
""" get signatures probability
Parameters
---------
L : Layout
lsi : nd.array
signature (2xnb_sig,sig_length)
Returns
-------
tlproba : list (nb_sig,sig_length-2)
output proba of each triplet of interaction
"""
slsi = lsi.shape[1]
assert L.isbuilt, AttributeError('Layout is not built')
assert hasattr(L,'Gi'), AttributeError('Layout has not Gi Graph')
assert L.Gi.size != 0, AttributeError('Gi Graph is empty')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
assert slsi>=3, AttributeError('Proba available for signature with at least 3 interacitons')
linter = self.sig2inter(L,lsi)
if len(lsi) == 2:
linter=[linter]
tlproba = []
for inter in linter:
lproba = []
for k in range(slsi-2):
proba = L.Gi[inter[k]][inter[k+1]]['output'][inter[k+2]]
lproba.append(proba)
tlproba.append(lproba)
return tlproba
def num(self):
""" determine the number of signatures
"""
self.nsig = 0
self.nint = 0
for k in self:
size = int(len(self[k])/2)
self.nsig += size
self.nint += size*k
def info(self):
# print "Signatures for scenario defined by :"
# print "Layout"
# print "======"
# L = self.L.info()
# print "================================"
# print "source : ", self.source
# print "target : ", self.target
size = {}
print(self.__class__.__name__ + '\n' + '----------'+'\n')
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = int(len(self[k])/2)
print('from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n')
pyu.printout('Reflection',pyu.BLUE)
print(' ')
pyu.printout('Transmission',pyu.GREEN)
print(' ')
pyu.printout('Diffraction',pyu.RED)
print(' \n')
for k in self:
print(str(k) + ' : ' + str(size[k]))
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for i in range(k):
nstr=a[i,0,:]
typ=a[i,1,:]
print('[',)
for n,t in zip(nstr,typ):
if t==1:
pyu.printout(str(n),pyu.BLUE)
if t==2:
pyu.printout(str(n),pyu.GREEN)
if t==3:
pyu.printout(str(n),pyu.RED)
print(']')
print('\n')
# s = s + ' '+ str(a[i,0,:]) + '\n'
# s = s + ' '+ str(a[i,1,:]) + '\n'
def check(self):
""" check signature
Returns
-------
OK : np.array
KO : np.array
"""
OK = Signatures(self.L,self.target,self.source)
KO = Signatures(self.L,self.target,self.source)
for i in self:
sigs = self[i]
for s in range(int(len(sigs)/2)):
sig = sigs[2*s:2*s+2,:]
ok = valid(sig.T,self.L)
if ok :
try :
OK[i]=np.vstack((OK[i],sig))
except:
OK[i]=[]
OK[i]=sig
pass
else :
try :
KO[i]=np.vstack((KO[i],sig))
except:
KO[i]=[]
KO[i]=sig
pass
return OK,KO
def saveh5(self):
""" save signatures in hdf5 format
"""
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
f=h5py.File(filename,'w')
# try/except to avoid loosing the h5 file if
# read/write error
try:
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
for k in self.keys():
f.create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f.close()
except:
f.close()
raise NameError('Signature: issue when writing h5py file')
def loadh5(self,filename=[]):
""" load signatures hdf5 format
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename+'.h5',pstruc['DIRSIG'])
# try/except to avoid loosing the h5 file if
# read/write error
try:
f=h5py.File(filename,'r')
for k in f.keys():
self.update({eval(k):f[k][:]})
f.close()
except:
f.close()
raise NameError('Signature: issue when reading h5py file')
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def _saveh5(self,filenameh5,grpname):
""" Save in hdf5 compliant with Links
Parameters
----------
filenameh5
hrpname
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname == '':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
try:
# file management
fh5=h5py.File(filename,'a')
if not grpname in fh5['sig'].keys():
fh5['sig'].create_group(grpname)
else :
raise NameError('sig/'+grpname +'already exists in '+filenameh5)
f=fh5['sig/'+grpname]
# write data
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
f.attrs['threshold']=self.threshold
f.create_group('ratio')
f.create_group('sig')
for k in self.keys():
f['sig'].create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f['ratio'].create_dataset(str(k),shape=np.shape(self.ratio[k]),data=self.ratio[k])
fh5.close()
except:
fh5.close()
            raise NameError('Signature: issue when writing h5py file')
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load signatures in hdf5 format compliant with class Links
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
kwargs
may contain a L: layout object
if L = [] the layout is loaded from the layout name stored
into the h5 file
if L = Layout the layout passed in arg is used
See Also
--------
pylayers.simul.links
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname =='':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
        # try/except to avoid losing the h5 file if
        # read/write error
try:
fh5=h5py.File(filename,'r')
f=fh5['sig/'+grpname]
# compliant with new h5 format:
if 'sig' in f.keys():
for k in f['sig'].keys():
self.update({eval(k):f['sig'][k][:]})
self.ratio.update({eval(k):f['ratio'][k][:]})
# old h5 format
else:
for k in f.keys():
self.update({eval(k):f[k][:]})
Lname=f.attrs['L']
self.cutoff = f.attrs['cutoff']
if 'threshold' in f.attrs.keys():
self.threshold = f.attrs['threshold']
# ensure backward compatibility
else:
# find threshold
th = np.min([np.min(self.ratio[x])
for x in self.ratio])
self.threshold = th.round(decimals=2)
fh5.close()
except:
fh5.close()
raise NameError('Signature: issue when reading h5py file')
if 'L' in kwargs:
self.L = kwargs['L']
else:
self.L = layout.Layout(Lname)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def save(self):
""" save signatures
"""
L=copy.deepcopy(self.L)
del(self.L)
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
with open(filename, 'wb') as handle:
pickle.dump(self, handle)
self.L=L
def load(self,filename=[]):
""" load signatures
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename,pstruc['DIRSIG'])
try:
handle=open(filename, 'rb')
sitmp = pickle.load(handle)
except:
raise NameError(filename +' does not exist')
# to load a dictionary, use update
self.update(sitmp)
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def sp(self,G, source, target, cutoff=None):
""" algorithm for signature determination
Parameters
----------
G : Graph
source : tuple or int
target : tuple or int
cutoff : int
See Also
--------
pylayers.antprop.signature.run3
"""
if cutoff < 1:
return
visited = [source]
stack = [iter(G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
# yield visited +[target]
elif child not in visited:
visited.append(child)
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
stack.pop()
visited.pop()
def calsig(self,G,dia={},cutoff=None):
""" calculates signature
Parameters
----------
G : graph
        dia : dictionary of interactions
cutoff : integer
"""
if cutoff < 1:
return
di=copy.deepcopy(dia)
source = 'Tx'
target = 'Rx'
d={}
visited = [source]
stack = [iter(G[source])]
out=[]
while stack:
# pdb.set_trace()
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
elif len(visited) < cutoff:
if child == target:
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
# yield visited + [target]
elif child not in visited:
visited.append(child)
out.extend(di[child])
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
# yield visited + [target]
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
return d
def exist(self,seq):
""" verifies if seq exists in signatures
Parameters
----------
seq : list of tuple
[(2,2),(5,3),(7,2)]
1 : Diffraction
            2 : Reflection
            3 : Transmission
        Returns
        -------
        bool
            True if seq is found among the stored signatures
Examples
--------
>>> DL=DLink()
>>> DL.eval()
>>> seq = [(2,3)] # transmission through segment 2
>>> DL.Si.exist(seq)
"""
# Number of interactions
N = len(seq)
# signatures with N interaction
sig = self[N]
# Number signature with N interaction
Nsig = int(sig.shape[0]/2)
nstr = sig[::2,:]
typ = sig[1::2,:]
# List of signat
lsig = []
for k in range(Nsig):
lint = []
for l in range(N):
lint.append((nstr[k,l],typ[k,l]))
lsig.append(lint)
if seq in lsig:
return True
else:
return False
def run(self,**kwargs):
""" evaluate signatures between cycle of tx and cycle of rx
Parameters
----------
cutoff : int
limit the exploration of all_simple_path
        bt : boolean
            backtrack (allow already visited nodes to be revisited by the simple path algorithm)
progress : boolean
display the time passed in the loop
diffraction : boolean
activate diffraction
threshold : float
for reducing calculation time
        animation : boolean
nD : int
maximum number of diffraction
nR : int
maximum number of reflection
nT : int
maximum number of transmission
See Also
--------
pylayers.simul.link.Dlink.eval
"""
defaults = {'cutoff' : 2,
'threshold': 0.1,
'delay_excess_max_ns': 400,
'nD': 1,
'nR': 10,
'nT': 10,
'bt' : True,
'progress': True,
'diffraction' : True,
'animation' : False
}
self.cpt = 0
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
self.cutoff = kwargs['cutoff']
if 'threshold' not in kwargs:
kwargs['threshold'] = self.threshold
else:
self.threshold=kwargs['threshold']
nD = kwargs['nD']
nT = kwargs['nT']
nR = kwargs['nR']
bt = kwargs['bt']
progress = kwargs['progress']
diffraction = kwargs['diffraction']
animation = kwargs['animation']
delay_excess_max_ns = kwargs['delay_excess_max_ns']
dist_excess_max = delay_excess_max_ns*0.3
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
#
# AIR : editable AIR separation
# _AIR : constructed AIR separation
#
lair = self.L.name['AIR'] + self.L.name['_AIR']
# list of interactions visible from source
lisR, lisT, lisD = self.L.intercy(self.source,typ='source')
if diffraction:
lis = lisT + lisR + lisD
else:
lis = lisT + lisR
# list of interactions visible from target
litR, litT, litD = self.L.intercy(self.target,typ='target')
if diffraction:
lit = litT + litR + litD
else:
lit = litT + litR
pt_source = np.array(self.L.Gt.node[self.source]['polyg'].centroid.coords.xy)
pt_target = np.array(self.L.Gt.node[self.target]['polyg'].centroid.coords.xy)
d_source_target = np.linalg.norm(pt_source - pt_target)
#print("source,lis :",self.source,lis)
#print("target,lit :",self.target,lit)
# for u in lit:
# print u
# print "-------------"
Gi = self.L.Gi
Gi.pos = self.L.Gi.pos
#
# remove diffractions from Gi
#
if not diffraction:
Gi = gidl(Gi)
        # initialize dout dictionary
        dout = {}
        # progress stuff...
lmax = len(lis)*len(lit)
pe = 0
tic = time.time()
tic0 = tic
#for interaction source in list of source interactions
bvisu = False
# signature counter
cptsig = 0
if animation:
fig,ax = self.L.showG('s',aw=1)
ax.plot(self.L.Gt.pos[self.source][0],self.L.Gt.pos[self.source][1],'ob')
ax.plot(self.L.Gt.pos[self.target][0],self.L.Gt.pos[self.target][1],'or')
#
# Loop over all interactions seen from the source
#
# us : loop counter
# s : interaction tuple
        # s[0] : point (<0) or segment (>0)
# pts : list of neighbour nodes from s[0]
# tahe : segment extremities or point coordinates (repeated twice)
lhash = []
if progress :
pbar = tqdm(total=100, desc='Signatures')
for us,s in enumerate(lis):
if progress:
pbar.update(100./(1.*len(lis)))
# start from a segment
if s[0] > 0:
pts = list(dict(self.L.Gs[s[0]]).keys())
tahe = [ np.array([ self.L.Gs.pos[pts[0]], self.L.Gs.pos[pts[1]]]) ]
# start from a point
else:
tahe = [np.array([self.L.Gs.pos[s[0]], self.L.Gs.pos[s[0]]])]
            # R is a list which contains reflection matrices (Sn) and translation matrices (vn)
            # for interaction mirroring
# R=[[S0,v0],[S1,v1],...]
R = [(np.eye(2),np.array([0,0]))]
            # initialize visited list sequence with the first interaction s
visited = [s]
# if
# s is in target interaction list
# or
# arrival cycle is equal to target cycle
# then stack a new signature in self[len(typ)]
#
# TODO : It concerns self[1] : only one interaction (i.e several single reflection or diffraction)
#
if (s in lit) or (s[-1]==self.target):
#anstr = np.array(map(lambda x: x[0],visited))
anstr = np.array([ x[0] for x in visited ])
#typ = np.array(map(lambda x: len(x),visited))
typ =np.array([len(x) for x in visited ])
assert(len(typ)==1)
try:
self[len(typ)] = np.vstack((self[len(typ)],anstr,typ))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],1.)
except:
self[len(typ)] = np.vstack((anstr,typ))
self.ratio[len(typ)] = np.array([1.])
# update signature counter
cptsig +=1
# stack is a list of iterators
#
#
stack = [iter(Gi[s])]
            # air walls do not count in the number of transmissions (cutoff criterion)
            # lawp is the list of airwall positions in the visited sequence
# handle the case of the first segment which can be an airwall
#
if len(s)==3:
nseg = s[0]
if ((self.L.Gs.node[nseg]['name']=='_AIR') or
(self.L.Gs.node[nseg]['name']=='AIR')):
lawp = [1]
else:
lawp = [0]
else:
lawp = [0]
# while the stack of iterators is not void
cpt = 0
while stack: #
# iter_on_interactions is the last iterator in the stack
iter_on_interactions = stack[-1]
# next interaction child
interaction = next(iter_on_interactions, None)
#print visited
#if ((visited ==[(6236,74,91),(-213,)]) and (interaction==(-1002,))):
# print(interaction)
# pdb.set_trace()
#if (visited ==[(6236,74,91),(-213,),(6248,99,111)]):
#if (visited ==[(6236,74,91),(-213,),(6248,99,111),(6287,111,118)]):
#pdb.set_trace()
# import ipdb
# cond1 : there is no more interactions
# continue if True
cond1 = not(interaction is None)
# cond2 : enable reverberation
# interaction has not been visited yet
# or
                # bt : True (allow reentrance) (unconditionally)
# continue if True
#cond2 = (interaction in visited) and bt (old)
cond2 = not (interaction in visited) or bt
                # cond3 : cutoff condition, the path length has not yet reached the limit
# continue if True
cond3 = not(len(visited) > (self.cutoff + sum(lawp)))
uD = [ k for k in range(len(visited)) if len(visited[k])==1 ]
uR = [ k for k in range(len(visited)) if len(visited[k])==2 ]
uT = [ k for k in range(len(visited)) if len(visited[k])==3 ]
if cond1:
condD = True
condR = True
condT = True
if ((len(interaction)==1) and (len(uD)==nD)):
condD = False
if ((len(interaction)==2) and (len(uR)==nR)):
condR = False
if ((len(interaction)==3) and (len(uT)==nT)):
condT = False
#
# animation
#
if animation :
cpt = cpt+1
edge = zip(visited[:-1],visited[1:])
N = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_size=15,ax=ax,fig=fig)
E = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},width=0.1,
arrows=False,ax=ax,fig=fig)
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(N)
except:
pass
try:
ax.collections.remove(E)
except:
pass
if (cond1 and cond2 and cond3):
if (condD and condR and condT):
visited.append(interaction)
self.cpt+=1
#print(visited)
# [(44,2,7),(62,7,15),(21,15),(62,15,7),(44,7,2),(16,2)]
# if visited ==[(6236,74,91),(141,91)]:
# import ipdb
# ipdb.set_trace()
# update list of airwalls
if interaction[0] in lair:
lawp.append(1)
else:
lawp.append(0)
# update number of useful segments
# if there is airwall in visited
nstr = interaction[0]
#
#
#
# Testing the type of interaction at rank -2
# R is a list which contains a rotation matrix
# and a translation vector for doing the mirroring
# operation
# diffraction (retrieve a point)
if len(visited[-2]) == 1:
#th = self.L.Gs.pos[nstr]
R.append((np.eye(2),np.array([0,0])))
elif len(visited[-2])==2:
#
                            # the penultimate interaction is a reflection
#
nseg_points = list(dict(self.L.Gs[visited[-2][0]]).keys())
ta_seg = np.array(self.L.Gs.pos[nseg_points[0]])
he_seg = np.array(self.L.Gs.pos[nseg_points[1]])
#
# get reflection matrix from segment visited[-2]
#
R.append(geu.axmat(ta_seg,he_seg))
# direct order
#R.append(geu.axmat(tahe[-1][0],tahe[-1][1]))
# transmission do nothing
else :
pass
# current interaction is of segment type
if (nstr>0):
nseg_points = list(dict(self.L.Gs[nstr]).keys())
th = np.array([self.L.Gs.pos[nseg_points[0]],
self.L.Gs.pos[nseg_points[1]]])
else:
th = self.L.Gs.pos[nstr]
th = np.array([th,th])
# current interaction is of point type (diffraction)
# apply current chain of symmetries
#
# th is the current segment tail-head coordinates
# tahe is a list of well mirrored tail-head coordinates
#tahe.append(a)
#if ((visited[0]==(104,23,17)) and (visited[1]==(1,17))):
# print("th (avant mirror)",th)
ik = 1
r = R[-ik]
#
# dtarget : distance between th and target
#
pt_th = np.sum(th,axis=0)/2.
d_target = np.linalg.norm(pt_target-pt_th)
#
# mirroring th until the previous point
#
th_mirror = copy.copy(th)
while np.any(r[0] != np.eye(2)):
th_mirror = np.einsum('ki,ij->kj',th_mirror,r[0])+r[1]
ik = ik + 1
r = R[-ik]
pt_mirror = np.sum(th_mirror,axis=0)/2.
d_source = np.linalg.norm(pt_source-pt_mirror)
d_excess = d_source + d_target - d_source_target
# if at least 2 interactions
# or previous point is a diffraction
if (len(tahe)<2) or (len(visited[-2])==1) or (len(visited[-1])==1):
ratio = 1.0
ratio2 = 1.0
else:
# Determine the origin of the cone
# either the transmitter (ilast =0)
# or the last diffraction point (ilast=udiff[-1] )
udiff = [ k for k in range(len(visited)) if len(visited[k])==1 ]
if udiff==[]:
ilast = 0
else:
ilast=udiff[-1]
#print(tahe)
pta0 = tahe[ilast][0] # tail first segment (last difraction)
phe0 = tahe[ilast][1] # head first segment
#
# TODO : it would be better to replace pta_ and phe_ with the intersection
# of the previous cone with tahe[-1]
#
pta_ = tahe[-1][0] # tail last segment
phe_ = tahe[-1][1] # head last segment
#
# Calculates the left and right vector of the cone
#
# vl left vector
# vr right vector
#
#
# Detect situations of connected segments
#
# [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]
# if visited == [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]:
# print '\n',visited
# import ipdb
# ipdb.set_trace()
connected = False
if (pta0==pta_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = phe_ - apex
elif (pta0==phe_).all():
apex = pta0
connected = True
v0 = phe0 - apex
v_ = pta_ - apex
elif (phe0==pta_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = phe_ - apex
elif (phe0==phe_).all():
apex = phe0
connected = True
v0 = pta0 - apex
v_ = pta_ - apex
if connected:
if ((np.linalg.norm(v0)==0) or (np.linalg.norm(v_)==0)):
logger.debug("pta0 : %g,%g", pta0[0], pta0[1])
logger.debug("pta_ : %g,%g", pta_[0], pta_[1])
logger.debug("phe0 : %g,%g", phe0[0], phe0[1])
logger.debug("phe_ : %g,%g", phe_[0], phe_[1])
logger.debug("v0 : %g,%g", v0[0], v0[1])
logger.debug("v_ : %g,%g", v_[0], v_[1])
#
                            # Is the cone built from 2 connected segments or
# 2 unconnected segments
#
if not connected:
if not (geu.ccw(pta0,phe0,phe_) ^
geu.ccw(phe0,phe_,pta_) ):
vr = (pta0,phe_)
vl = (phe0,pta_)
else: # twisted case
vr = (pta0,pta_)
vl = (phe0,phe_)
# cone dot product
# print vr
# print vl
vr_n = (vr[1]-vr[0])/np.linalg.norm(vr[1]-vr[0])
vl_n = (vl[1]-vl[0])/np.linalg.norm(vl[1]-vl[0])
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.0))
#angle_cone = np.arccos(vrdotvl)
# prepare lines and seg argument for intersection checking
if angle_cone!=0:
linel = (vl[0],vl[1]-vl[0])
liner = (vr[0],vr[1]-vr[0])
# from origin mirrored segment to be tested
seg = (th_mirror[0],th_mirror[1])
# apex calculation
a0u = np.dot(pta0,vr_n)
a0v = np.dot(pta0,vl_n)
b0u = np.dot(phe0,vr_n)
b0v = np.dot(phe0,vl_n)
#import warnings
#warnings.filterwarnings("error")
try:
kb = ((b0v-a0v)-vrdotvl*(b0u-a0u))/(vrdotvl*vrdotvl-1)
except:
pdb.set_trace()
apex = phe0 + kb*vl_n
else: # cone from connected segments
v0n = v0/np.linalg.norm(v0)
try:
v_n = v_/np.linalg.norm(v_)
except:
pdb.set_trace()
# import ipdb
# ipdb.set_trace()
sign = np.sign(np.cross(v_n,v0n))
if sign>0:
vr_n = -v0n
vl_n = v_n
else:
vr_n = v_n
vl_n = -v0n
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.))
#
# the illuminating cone is defined
# the th_mirror to be tested with this cone are known
#
if ( (not np.isclose(angle_cone,0,atol=1e-6) )
and ( not np.isclose(angle_cone,np.pi)) ) :
#if self.cpt==16176:
# pdb.set_trace()
seg,ratio2 = geu.intersect_cone_seg((apex,vl_n),(apex,vr_n),(th_mirror[0],th_mirror[1]),bvis=False)
elif ( not np.isclose(angle_cone,0) ):
ratio2 = 1
else:
ratio2 = 0
#print ratio
if len(seg)==2:
th_mirror = np.vstack((seg[0],seg[1]))
else:
pass
al = np.arctan2(vl_n[1],vl_n[0])
ar = np.arctan2(vr_n[1],vr_n[0])
if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
ratio2 = 1.
                            # The apex of the current cone is connected to the extremities of the current mirrored segment.
                            # In some situations, e.g. a cone emanating from a point collinear
                            # with the arrival segment (-4) (6,4): point -4 is aligned with segment 6,
                            # so the cone aperture is zero => stop. This could be handled in Gi by
                            # forbidding the visibility (-4) (6,4)
# if angle_cone ==0:
# ratio = 0
# else:
# if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
# ratio = 1.
# else:
# wseg0 = th_mirror[0] - apex
# wseg1 = th_mirror[1] - apex
# mod_wseg0 = np.sqrt(np.sum(wseg0*wseg0,axis=0))
# mod_wseg1 = np.sqrt(np.sum(wseg1*wseg1,axis=0))
#
# if np.isclose(mod_wseg0,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# if np.isclose(mod_wseg1,0):
# #bvisu = True
# #pdb.set_trace()#
# pass
# #wseg0_n = wseg0/mod_wseg0
# #wseg1_n = wseg1/mod_wseg1
# wseg0_n = wseg0/np.linalg.norm(wseg0)
# wseg1_n = wseg1/np.linalg.norm(wseg1)
# aseg0 = np.arctan2(wseg0_n[1],wseg0_n[0])
# aseg1 = np.arctan2(wseg1_n[1],wseg1_n[0])
#
# # if al==aseg0 or al==aseg1 or ar==aseg0 or ar==aseg1:
# # ratio = 1
# #print "toto"
# # else:
# I = geu.angle_intersection2(al,ar,aseg0,aseg1)
# ratio = I/angle_cone
# #if ratio>=1:
# # pdb.set_trace()
#
# # if connected:
# # print "ratio :",ratio
#
#
# #if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (bvisu):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# #
# # magenta : start of the cone
# # cyan :
# # yellow : last interaction
# #
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
# ax = geu.linet(ax,np.array(self.L.Gs.pos[nseg_points[0]]),np.array(self.L.Gs.pos[nseg_points[1]]),al=1,color='yellow',linewidth=4)
# # ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# # ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th_mirror[0,:],th_mirror[1,:],al=1,color='green',linewidth=3)
# nx.draw_networkx_labels(self.L.Gi,
# self.L.Gi.pos,labels={x:str(x) for x in visited},
# ax=ax,fontsize=18)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# pdb.set_trace()
# #if visited == [(104, 23, 17), (1, 17), (53, 17), (108, 17, 18)]:
# # if visited == [(104, 23, 17), (1, 17), (53, 17)]:
# if (1==0):
# fig ,ax = self.L.showG('s',aw=1,labels=0)
# ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
# ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
#
# ax = geu.linet(ax,np.array(self.L.Gs.pos[pts[0]]),np.array(self.L.Gs.pos[pts[1]]),al=1,color='yellow',linewidth=4)
# ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
# #ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
# ax = geu.linet(ax,th[0,:],th[1,:],al=1,color='green',linewidth=3)
# plt.title(str(visited)+' '+str(ratio))
# ax.plot(apex[0],apex[1],'or')
# plt.axis('auto')
# plt.show()
#else:
# th = self.L.Gs.pos[nstr]
# th = np.array([th,th])
# ratio = 1
#print self.cpt,ratio,ratio2
#if (ratio>0.1) and (ratio2==0):
# pdb.set_trace()
#print d_excess,dist_excess_max
#if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
if (ratio2 > self.threshold) and (d_excess<dist_excess_max):
#if (ratio > self.threshold):
#
# Update sequence of mirrored points
#
if nstr<0:
tahe.append(th)
else:
tahe.append(th_mirror)
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
#
# Check if the target has been reached
# sequence is valid and last interaction is in the list of targets
#if (interaction in lit) or (interaction[-1]==self.target):
if (interaction in lit):
# idea here is to produce signature without any airwalls
# lawp_tmp is a mask where 0 mean no air wall and 1 = airwall
# anstr does not contains airwalls
# lawp_tmp = [0]+lawp
# lll = [x[0] for ix,x in enumerate(visited) if lawp_tmp[ix]==1]
# print([self.L.Gs.node[x]['name'] for x in lll])
#anstr = np.array([x[0] for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#typ = np.array([len(x) for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#sig = np.array([anstr,typ])
#sighash = hash(str(sig))
# if len(anstr) == 2:
# if (anstr == np.array([323,351])).all():
# import ipdb
# ipdb.set_trace()
anstr = np.array([x[0] for x in visited ])
typ = np.array([len(x) for x in visited])
sig = np.array([anstr,typ])
sighash = hash(str(sig))
if sighash not in lhash:
lhash.append(sighash)
try:
self[len(typ)] = np.vstack((self[len(typ)],sig))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],ratio)
except:
self[len(typ)] = np.vstack((sig))
self.ratio[len(typ)] = np.array([ratio])
# print ('added',visited)
cptsig +=1
if animation:
Nf = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_color='b',
node_size=40,
ax=ax,fig=fig)
Ef = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},
width=0.1,arrows=False,
ax=ax,fig=fig)
cpt=cpt+1
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(Nf)
except:
pass
try:
ax.collections.remove(Ef)
except:
pass
outint = Gi[visited[-2]][interaction]['output'].keys()
#
# proint not used
#
proint = Gi[visited[-2]][interaction]['output'].values()
nexti = [it for it in outint ]
stack.append(iter(nexti))
# 1590 ratio <= threshold
else:
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
lawp.pop()
# 1389 condR and condT and condD
else:
pass
# 1388 cond1 and cond2 and cond3
else:
# if at least 2 interactions
                    # and the penultimate interaction is a reflection or a diffraction
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
#
# Poping
# tahe
# lawp
# stack
#if (tahe[-1][0]==tahe[-1][1]).all():
# pdb.set_trace()
tahe.pop()
try:
lawp.pop()
except:
pass
stack.pop()
#stack.pop()
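    # ------------------------------------------------------------------
    # Hedged usage sketch (added; not part of the original docstrings).
    # A typical call sequence is assumed to look like:
    #
    #   Si = Signatures(L, source_cycle, target_cycle, cutoff=3)
    #   Si.run(cutoff=3, threshold=0.1, diffraction=True, nR=10, nT=10, nD=1)
    #   r2d = Si.raysv(ptx, prx)      # convert the signatures to 2D rays
    #
    # where L is a Layout whose graphs have already been built (L.build()).
    # The keyword names follow the defaults dictionary above; the exact
    # constructor signature is an assumption inferred from how self.L,
    # self.source and self.target are used in this class.
    # ------------------------------------------------------------------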
def plot_cones(self,L,i=0,s=0,fig=[],ax=[],figsize=(10,10)):
""" display cones of an unfolded signature
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
fig :
ax :
figsize :
"""
if fig == []:
fig= plt.figure()
ax = fig.add_subplot(111)
elif ax ==[]:
ax = fig.add_subplot(111)
pta,phe = self.unfold(L,i=i,s=s)
# create a global array or tahe segments
seg = np.vstack((pta,phe))
lensi = np.shape(seg)[1]
for s in range(1,lensi):
pseg0 = seg[:,s-1].reshape(2,2).T
pseg1 = seg[:,s].reshape(2,2).T
#
# create the cone seg0 seg1
#
cn = cone.Cone()
cn.from2segs(pseg0,pseg1)
fig,ax = cn.show(fig = fig,ax = ax,figsize = figsize)
return (fig,ax)
def unfold(self,L,i=0,s=0):
""" unfold a given signature
return 2 np.ndarray of pta and phe "aligned"
        (reflection interactions are mirrored)
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
Returns
-------
pta,phe
See Also
--------
Signature.unfold
"""
si = Signature(self[i][(2*s):(2*s)+2])
si.ev(L)
pta,phe = si.unfold()
return pta,phe
def pltunfold(self,L,i=0,s=0):
import shapely.ops as sho
from descartes.patch import PolygonPatch
plt.ion()
plt.gcf()
plt.clf()
def plot_lines(ax, ob, color = []):
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
pta,phe=self.unfold(L=L,i=i,s=s)
ML =sh.MultiLineString([((pta[0][i],pta[1][i]),(phe[0][i],phe[1][i])) for i in range(pta.shape[1])])
fig=plt.gcf()
ax=plt.gca()
ax = plot_lines(ax,ML)
s0=sh.LineString([(pta[0,0],pta[1,0]),(phe[0,-1],phe[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(pta[0,-1],pta[1,-1])])
if s0.crosses(s1):
s0=sh.LineString([(pta[0,0],pta[1,0]),(pta[0,-1],pta[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(phe[0,-1],phe[1,-1])])
cross = sh.MultiLineString([s0,s1,ML[0],ML[-1]])
poly=sho.polygonize(cross)
# ax = plot_lines(ax,cross,color='b')
ax = plot_poly(ax,poly)
def show(self,L,**kwargs):
""" plot signatures in the simulated environment
Parameters
----------
L : Layout
i : list or -1 (default = all groups)
list of interaction group numbers
s : list or -1 (default = all sig)
list of indices of signature in interaction group
ctx : cycle of tx (optional)
crx : cycle of rx (optional)
graph : type of graph to be displayed
color : string
alphasig : float
widthsig : float
colsig : string
ms : int
ctx : int
crx :int
"""
defaults = {'i':-1,
's':-1,
'fig':[],
'ax':[],
'graph':'s',
'color':'black',
'alphasig':1,
'widthsig':0.1,
'colsig':'black',
'ms':5,
'ctx':-1,
'crx':-1,
'aw':True
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# display layout
fig,ax = L.showG(**kwargs)
if kwargs['ctx']!=-1:
Tpoly = self.L.Gt.node[kwargs['ctx']]['polyg']
Tpoly.coul='r'
Tpoly.plot(fig=fig,ax=ax,color='r')
if kwargs['crx']!=-1:
Rpoly = self.L.Gt.node[kwargs['crx']]['polyg']
Rpoly.plot(fig=fig,ax=ax,color='g')
# i=-1 all rays
# else block of interactions i
if kwargs['i']==-1:
lgrint = self.keys()
else:
lgrint = [kwargs['i']]
if kwargs['s'] == -1:
for i in lgrint:
lsig = range(int(len(self[i])/2))
for j in lsig:
sig = [ self.L.Gs.pos[x] for x in self[i][2*j] ]
siga = np.array(sig)
ax.plot(siga[:,0], siga[:,1],
alpha = kwargs['alphasig'],
color = kwargs['colsig'],
linewidth = kwargs['widthsig'])
ax.axis('off')
else:
lsig = [kwargs['s']]
for s1 in lsig:
sig = [ self.L.Gs.pos[x[0]] for x in s1]
siga = np.array(sig)
ax.plot(siga[:,0], siga[:,1],
alpha = kwargs['alphasig'],
color = kwargs['colsig'],
linewidth = kwargs['widthsig'])
ax.axis('off')
return(fig,ax)
def showi(self,uni=0,us=0):
""" interactive show
press n to visit signatures sequentially
Parameters
----------
        uni : index of interaction dictionary keys
us : signature index
"""
plt.ion()
fig = plt.figure()
nit = self.keys()
ni = nit[uni]
ust = len(self[ni])/2
polyS = self.L.Gt.node[self.source]['polyg']
cp1 = polyS.centroid.xy
polyT = self.L.Gt.node[self.target]['polyg']
cp2 = polyT.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0]])
prx = np.array([cp2[0][0],cp2[1][0]])
st='a'
while st != 'q':
inter=[]
ax = fig.add_subplot(111)
fig,ax=self.L.showG(fig=fig,ax=ax,graph='s')
title = '# interaction :', ni, 'signature #',us,'/',ust
ax.set_title(title)
line = ptx
# draw terminal points (centroid of source and target cycle)
            ax.plot(ptx[0],ptx[1],'xr')
ax.plot(prx[0],prx[1],'xb')
if ni not in self.keys():
print("incorrect number of interactions")
pos={}
try:
for u in self[ni][us*2]:
pos.update({u:self.L.Gs.pos[u]})
line = np.vstack((line,np.array((self.L.Gs.pos[u]))))
nx.draw_networkx_nodes(self.L.Gs,pos=pos,nodelist=pos.keys(),node_color='r',ax=ax)
for ii in self[ni][(us*2)+1]:
if ii == 1:
inter.append('R')
if ii == 2:
inter.append('T')
if ii == 3:
inter.append('D')
except:
print("signature index out of bounds of signature")
line = np.vstack((line,prx))
ax.plot(line[:,0],line[:,1])
plt.draw()
print(inter)
            st = input()
ax.cla()
if st == 'n':
if us+2 <= ust:
us=us+2
else:
uni = uni+1
try:
ni = nit[uni]
ust = len(self[ni])/2
us=0
except:
uni=0
ni=nit[uni]
us = 0
else:
print('press n for next signature')
def rays(self,ptx=0,prx=1):
""" from signatures dict to 2D rays
Parameters
----------
        ptx : numpy.array or int
            Tx coordinates, or the number of the cycle whose center of gravity
            is used if type(ptx)=int
        prx : numpy.array or int
            Rx coordinates, or the number of the cycle whose center of gravity
            is used if type(prx)=int
Returns
-------
rays : Rays
Notes
-----
        At the same time the signature of the ray is stored in the Rays object
Todo : Find the best memory implementation
See Also
--------
Signature.sig2ray
Signature.raysv
"""
if type(ptx) == int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx) == int:
prx = np.array(self.L.Gt.pos[prx])
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
# merged cycle of each point
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
#
# Handling LOS ray
#
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if cyptx==cyprx:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
# k : Loop on interaction group
# l : loop on signature
# --->
# this part should be a generator
#
for k in self:
# print 'block#',k
# if k ==3:
# import ipdb
# ipdb.set_trace()
# get signature block with k interactions
tsig = self[k]
shsig = np.shape(tsig)
            for l in range(int(shsig[0]/2)):
sig = tsig[2*l:2*l+2,:]
ns0 = sig[0,0]
nse = sig[0,-1]
validtx = True
validrx = True
if (ns0<0):
pD = self.L.Gs.pos[ns0]
TxD = shg.LineString(((ptx[0], ptx[1]), (pD[0], pD[1])))
seg = polyctx.intersection(TxD)
validtx = seg.almost_equals(TxD,decimal=4)
if not validtx:
pass
#print "Signature.rays": ns0
if (nse<0):
pD = self.L.Gs.pos[nse]
DRx = shg.LineString(((pD[0], pD[1]), (prx[0], prx[1])))
validrx = polyctx.contains(DRx)
if not validrx:
pass
#print nse
if validtx & validrx:
# print sig
# print pD
s = Signature(sig)
#
# Transform signature into a ray
# --> sig2ray
isray,Yi = s.sig2ray(self.L, ptx[:2], prx[:2])
if isray:
Yi = np.fliplr(Yi)
if k in rays.keys():
Yi3d = np.vstack((Yi[:, 1:-1], np.zeros((1, k))))
Yi3d = Yi3d.reshape(3, k, 1)
rays[k]['pt'] = np.dstack(( rays[k]['pt'], Yi3d))
rays[k]['sig'] = np.dstack(( rays[k]['sig'],
sig.reshape(2, k, 1)))
else:
rays[k] = {'pt': np.zeros((3, k, 1)),
'sig': np.zeros((2, k, 1),dtype=int)}
rays[k]['pt'][0:2, :, 0] = Yi[:, 1:-1]
rays[k]['sig'][:, :, 0] = sig
rays.nb_origin_sig = len(self)
rays.origin_sig_name = self.filename
return rays
def raysv(self, ptx=0, prx=1):
""" transform dict of signatures into 2D rays - default vectorized version
Parameters
----------
ptx : numpy.array or int
Tx coordinates is the center of gravity of the cycle ptx if
type(ptx)=int
prx : numpy.array or int
Rx coordinates is the center of gravity of the cycle prx if
type(prx)=int
Returns
-------
rays : Rays
Notes
-----
This is a vectorized version of Signatures.rays.
This implementation takes advantage of the np.ndarray
and calculates images and backtrace for block of signatures.
A block of signatures gathers all signatures with the same number of interactions.
For mathematical details see :
@phdthesis{amiot:tel-00971809,
TITLE = {{Design of simulation platform joigning site specific radio propagation and human mobility for localization applications}},
AUTHOR = {<NAME>},
URL = {https://tel.archives-ouvertes.fr/tel-00971809},
NUMBER = {2013REN1S125},
SCHOOL = {{Universit{\'e} Rennes 1}},
YEAR = {2013},
MONTH = Dec,
TYPE = {Theses},
HAL_ID = {tel-00971809},
HAL_VERSION = {v1},
}
See Also
--------
Signatures.image
Signatures.backtrace
"""
if type(ptx)==int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx)==int:
prx = np.array(self.L.Gt.pos[prx])
        if len(ptx) == 2:
            ptx = np.r_[ptx, 0.5]
        if len(prx) == 2:
            prx = np.r_[prx, 0.5]
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target are in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
# The Line of sight situation is detected here
        # dtxrx : square distance between Tx and Rx
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
M = self.image2(ptx)
R = self.backtrace(ptx,prx,M)
#
# Add LOS ray in ray 2D
#
if rays.los:
R[0]= {'sig':np.zeros(shape=(0,0,1)),'pt': np.zeros(shape=(2,1,0))}
rays.update(R)
rays.nb_origin_sig = len(self.keys())
rays.origin_sig_name = self.filename
return rays
def backtrace(self, tx, rx, M):
        ''' backtracing between tx and rx
Parameters
----------
tx : ndarray
position of tx (2,)
        rx : ndarray
            position of rx (2,)
M : dict
position of intermediate points obtained from self.image()
Returns
-------
rayp : dict
key = number_of_interactions
value =ndarray positions of interactions for creating rays
Notes
-----
        dictionary of intermediate coordinates :
key = number_of_interactions
value = nd array M with shape : (2,nb_signatures,nb_interactions)
and 2 represent x and y coordinates
See Also
--------
pylayers.antprop.signature.image
'''
if len(tx) > 2:
tx = tx[:2]
if len(rx) > 2:
rx = rx[:2]
rayp={}
# loop on number of interactions
for ninter in self.keys():
signatures = copy.deepcopy(self[ninter])
#get segment ids of signature with ninter interactions
# seg = self[ninter][::2]
# unegseg=np.where(seg<0)
# uninegseg,idx = np.unique(seg[unegseg],return_inverse=True)
# pneg = np.array([self.L.Gs.pos[x] for x in uninegseg])
# nsig = len(seg)
# # determine positions of points limiting the semgments
# #1 get index in L.tahe
# # 2 get associated position in L.pt
# utahe = self.L.tahe[:,self.L.tgs[seg]]
# # pt : (xycoord (2),pt indexes (2),nb_signatures,nb_interactions)
# pt = self.L.pt[:,utahe]
# ####WARNING BIG TRICK HERE :
# #### pa and pb are not set as the same value
# #### to avoid a singular matrixnext.
# #### set pa =-pb has no incidence but avoid complex and vain code
# #### modification for handling diffractions
# try:
# pt[:,0,unegseg[0],unegseg[1]]=pneg[idx].T
# pt[:,1,unegseg[0],unegseg[1]]=-pneg[idx].T
# except:
# pass
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the semgnet) a=0,b=1
#2 : nb of found signatures/segments
# 3 : nb interaction
################################
###############################
####### This part between hash has been copy/paste from self.image2
###### should be considered to become a function
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
            # 1 : 2 points (linking the segment) a=0,b=1
            # 2 : nb of found signatures/segments
# 3 : nb interactions
pt = np.empty((2,2,nsig,ninter))
# 1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idx = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# TO BE FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# #### WARNING BIG TRICK HERE :
# #### pa and pb are not set as the same value
# #### to avoid a singular matrixnext.
# #### set pa =-pb has no incidence but avoid complex and vain code
# #### modification for handling diffractions
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idx]
pt[:,1,upoint[0],upoint[1]] = -pointcoord[:,idx]
except:
pass
# 2 positive points
# seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxp = np.unique(nid[useg],return_inverse=True)
            # determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxp]
###################################
########################################
# how to do this into a while loop ?
p=rx
# creating W matrix required in eq (2.70) thesis Nicolas AMIOT
# Warning W is rolled after and becomes (nsig,4,4)
W = np.zeros((4,4,nsig))
I = np.eye(2)[:,:,np.newaxis]*np.ones((nsig))
W[:2,:2,...] = I
W[2:4,:2,...] = I
# once rolled :
# W (nsig,4,4)
W = np.rollaxis(W,-1)
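            # Note (added comment): after the roll W has shape (nsig, 4, 4); each
            # 4x4 block is the system matrix of eqs. (2.70)-(2.72) whose unknowns
            # are the interaction point coordinates (columns 0-1 of the solution)
            # and the two line parameters alpha, beta (columns 2-3) used below
            # for the ray validity test.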
kinter=ninter-1
ptr = pt
Mr = copy.deepcopy(M)
epsilon = 1e-12
rayp_i = np.zeros((3,nsig,ninter))
# rayp_i[:2,:,-1]=rx[:,None]
#backtrace process
# if ninter == 6:
# print np.where(((signatures[:,0]==42) &(signatures[:,1]==-277) & (signatures[:,2]==135) & (signatures[:,3]==21) & (signatures[:,4]==46) & (signatures[:,5]==319)))
# import ipdb
# ipdb.set_trace()
while kinter > -1:
                # Initialization, using the Rx position (backtracing starts from rx)
if kinter == ninter-1:
p_min_m = p[:,np.newaxis]-Mr[ninter][:,:,kinter]
else :
p_min_m = pvalid[:].T-Mr[ninter][:,:,kinter]
a_min_b = ptr[:,0,:,kinter]-ptr[:,1,:,kinter]
# Creating W from eq (2.71)
# a_min_b <=> a_{Lh-l}-b_{Lh-l}
# p_min_m <=> \tilde{p}_{Lh}-\tilde{b}_{Lh-l}
# W (nsig,4,4)
# p_min_m (2,nsig)
# a_min_b (2,nsig)
W[...,:2,2] = p_min_m.T
W[...,2:,3] = a_min_b.T
# create 2nd member from eq (2.72)
if kinter == ninter-1:
y= np.concatenate((p[:,np.newaxis]*np.ones((nsig)),ptr[:,0,:,kinter]))
else:
y= np.concatenate((pvalid.T,ptr[:,0,:,kinter]))
# y once transposed :
# y (nsig,4)
y=y.T
# search and remove point with singular matrix
invalid_sig=np.where(abs(np.linalg.det(W))<1e-15)
W = np.delete(W,invalid_sig,axis=0)
y = np.delete(y,invalid_sig,axis=0)
ptr = np.delete(ptr,invalid_sig,axis=2)
Mr[ninter] = np.delete(Mr[ninter],invalid_sig,axis=1)
rayp_i = np.delete(rayp_i,invalid_sig,axis=1)
#remove signatures
usig = np.repeat(invalid_sig[0],2)
usig[::2]=usig[::2]*2
usig[1::2]=usig[1::2]*2+1
signatures = np.delete(signatures,usig,axis=0)
# detect diffrac
uD = signatures[1::2,kinter]==1
uuD = np.where(signatures[1::2,kinter]==1)[0]
psolved = np.linalg.solve(W,y)
#valid ray is : 0 < \alpha < 1 and 0< \beta < 1
# alpha
uvalidA = psolved[:,2]>0.
uvalidB = psolved[:,2]<1.
#beta
uvalidC = psolved[:,3] >= epsilon
uvalidD = psolved[:,3] <=1.-epsilon
valid = uvalidA & uvalidB & uvalidC & uvalidD
# consider valid diffraction interactions
valid = valid | uD
uvalid = np.where(valid)[0]
                # re-add the correct position of diffraction interactions:
                # diffraction points should not be solved with linalg,
                # but setting pa = -pb avoids a singular matrix
                # so that diffraction points can be re-added afterwards.
psolved[uuD,:2] = ptr[:,0,uuD,kinter].T
pvalid = psolved[uvalid,:2]
# keep only valid rays for ptr and Mr
Mr[ninter]=Mr[ninter][:,uvalid,:]
ptr=ptr[:,:,uvalid,:]
W = W[uvalid,:,:]
# remove signatures
usigv = np.repeat(uvalid,2)
usigv[::2]=usigv[::2]*2
usigv[1::2]=usigv[1::2]*2+1
signatures = signatures[usigv,:]
rayp_i[:2,uvalid,kinter] = pvalid.T
rayp_i = rayp_i[:,uvalid,:]
#if no more rays are valid , then quit block
# (kinter <0 is the exit while condition)
if len(uvalid) > 0 :
kinter=kinter-1
else :
kinter = -2
# rayp_i[:2,:,0]=tx[:,None]
if len(uvalid) !=0:
N = int(len(usigv)/2)
sir1=signatures[::2].T.reshape(ninter,N)
sir2=signatures[1::2].T.reshape(ninter,N)
sig = np.empty((2,ninter,N))
sig[0,:,:]=sir1
sig[1,:,:]=sir2
rayp_i=np.swapaxes(rayp_i,1,2)
rayp.update({ninter:{'pt':rayp_i,'sig':sig.astype('int')}})
return rayp
def image2(self,tx):
""" determine rays from images (second implementation)
Parameters
----------
tx : point
"""
if len(tx) > 2:
tx = tx[:2]
dM={}
# loop on number of interactions
for ninter in self.keys():
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
M = np.empty((2,nsig,ninter))
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
            # 1 : 2 points (linking the segment) a=0,b=1
            # 2 : nb of found signatures/segments
# 3 : nb interactions
try:
pt = np.nan*np.zeros((2,2,nsig,ninter))
except:
pdb.set_trace()
#1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idxpt = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# To be FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# try except to handle the case where there is no diffraction point
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idxpt]
pt[:,1,upoint[0],upoint[1]] = pointcoord[:,idxpt]
except:
pass
#2 positive points
#seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxseg = np.unique(nid[useg],return_inverse=True)
            # determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxseg]
# check every element of pt is filled
assert not np.isnan(pt).any()
#
# TODO Upgrading layout for handling slab offsets
#
# uncomment those two lines when the numpy array L.norm and
# L.offset exist
#norm = self.L.normal[:,utahe]
#offset = self.L.offset[:,utahe]
# pt = pt + offset*norm
############
#formula 2.61 -> 2.64 N.AMIOT PH.D thesis
############
sx = pt[0,1,:,:]-pt[0,0,:,:]
sy = pt[1,1,:,:]-pt[1,0,:,:]
den = sx**2+sy**2
# den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
# avoiding singularity (should not be possible)
uz = np.where(den==0)
den[uz] = 1.
a = 1 - (2. / den) * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2
b= (2. / den) * (pt[0,1,:, :] - pt[0,0,:, :]) * (pt[1,0,:, :] - pt[1,1,:, :])
c = (2. / den) * (pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2 +
pt[1,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
d = (2. / den) * (pt[1,0,:, :] * (pt[0,1,:, :] - pt[0,0,:, :]) ** 2 +
pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
# a = ((pt[0,0,:,:]-pt[0,1,:,:])**2-(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a=a/(1.*den)
# b = 2*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,1,:,:]-pt[1,0,:,:])
# b=b/(1.*den)
# c= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])**2+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,0,:,:]-pt[1,1,:,:]))
# c = c/(1.*den)
# d= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])*(pt[0,1,:,:]-pt[0,0,:,:])+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])**2)
# d= d/(1.*den)
# K=np.array([[a,-b],[-b,-a]])
K = np.array([[a,-b],[-b,-a]])
# translation vector v (2.60)
v =np.array(([c,d]))
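            # Note (added comment): per eqs. 2.61-2.64 of the cited thesis, K acts
            # as the 2x2 mirror matrix about the segment support line and v as the
            # associated translation, so an image point is obtained as K @ p + v.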
ityp = self[ninter][1::2]
for n in np.arange(ninter):
#get segment ids of signature with ninter interactions
uT = np.where(ityp[:,n]==3)[0]
                uR = np.where(ityp[:,n]==2)[0]
import numpy as np
import pandas as pd
import scipy.stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import matplotlib.colors as colors
from matplotlib.colors import hsv_to_rgb
import seaborn as sns
import scipy.cluster.hierarchy as hierarchy
from cycler import cycler
import copy
from . import stats
from . import map as qtl_map
def setup_figure(aw=4.5, ah=3, xspace=[0.75,0.25], yspace=[0.75,0.25],
colorbar=False, ds=0.15, cw=0.15, ct=0, ch=None):
"""
"""
dl, dr = xspace
db, dt = yspace
fw = dl + aw + dr
fh = db + ah + dt
fig = plt.figure(facecolor=(1,1,1), figsize=(fw,fh))
ax = fig.add_axes([dl/fw, db/fh, aw/fw, ah/fh])
if not colorbar:
return ax
else:
if ch is None:
ch = ah/2
cax = fig.add_axes([(dl+aw+ds)/fw, (db+ah-ch-ct)/fh, cw/fw, ch/fh])
return ax, cax
# if not box:
# ax.spines['left'].set_position(('outward', 6))
# ax.spines['bottom'].set_position(('outward', 6))
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
# ax.tick_params(axis='both', which='both', direction='out', labelsize=fontsize)
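# Added illustration (hedged): a minimal sketch of how setup_figure is meant to
# be used, based only on its signature above. The helper below is illustrative,
# not part of the original module.
def _demo_setup_figure():
    """Illustrative only: one axes plus a half-height colorbar axes, with a line plotted."""
    ax, cax = setup_figure(aw=4, ah=3, colorbar=True)
    ax.plot([0, 1], [0, 1])
    return ax, cax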
def get_axgrid(nr, nc, ntot=None, sharex=False, sharey=False,
x_offset=6, y_offset=6,
dl=0.5, aw=2, dx=0.75, dr=0.25,
db=0.5, ah=2, dy=0.75, dt=0.25,
colorbar=False, ds=0.15, cw=0.15, ct=0, ch=None,
tri=None, fontsize=10, hide=['top', 'right']):
"""
"""
if ntot is None:
ntot = nr * nc
fw = dl + nc*aw + (nc-1)*dx + dr
fh = db + nr*ah + (nr-1)*dy + dt
fig = plt.figure(figsize=(fw,fh))
axes = []
n = 0
if tri is None:
si = lambda x: 0
elif tri == 'upper':
si = lambda x: x
for j in range(nr):
for i in range(si(j), nc):
if n<ntot:
ax = fig.add_axes([(dl+i*(aw+dx))/fw, (db+(nr-j-1)*(ah+dy))/fh, aw/fw, ah/fh], facecolor='none',
sharex=axes[0] if sharex and n>0 else None,
sharey=axes[0] if sharey and n>0 else None)
format_plot(ax, fontsize=fontsize, hide=hide, x_offset=x_offset, y_offset=y_offset)
axes.append(ax)
n += 1
if not colorbar:
return axes
else:
if ch is None:
ch = ah/2
cax = fig.add_axes([(dl+nc*aw+(nc-1)*dx+ds)/fw, (db+nr*ah+(nr-1)*dy-ch-ct)/fh, cw/fw, ch/fh])
# cax = fig.add_axes([(dl+aw+ds)/fw, (db+ah-ch-ct)/fh, cw/fw, ch/fh])
return axes, cax
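# Added illustration (hedged): a minimal sketch of get_axgrid usage under the
# same assumptions; illustrative only.
def _demo_get_axgrid():
    """Illustrative only: a 2x3 grid of shared-axis panels built with the helper above."""
    axes = get_axgrid(2, 3, sharex=True, sharey=True)
    for k, ax in enumerate(axes):
        ax.set_title('panel {}'.format(k))
    return axes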
def format_plot(ax, tick_direction='out', tick_length=4, hide=['top', 'right'],
hide_spines=True, lw=1, fontsize=10,
equal_limits=False, x_offset=0, y_offset=0, vmin=None):
# ax.autoscale(False)
for i in ['left', 'bottom', 'right', 'top']:
ax.spines[i].set_linewidth(lw)
ax.tick_params(axis='both', which='both', direction=tick_direction, labelsize=fontsize)
# set tick positions
if 'top' in hide and 'bottom' in hide:
ax.get_xaxis().set_ticks_position('none')
elif 'top' in hide:
ax.get_xaxis().set_ticks_position('bottom')
elif 'bottom' in hide:
ax.get_xaxis().set_ticks_position('top')
else:
ax.get_xaxis().set_ticks_position('both')
if 'left' in hide and 'right' in hide:
ax.get_yaxis().set_ticks_position('none')
elif 'left' in hide:
ax.get_yaxis().set_ticks_position('right')
elif 'right' in hide:
ax.get_yaxis().set_ticks_position('left')
elif len(hide)==0:
ax.get_xaxis().set_ticks_position('bottom')
ax.get_yaxis().set_ticks_position('left')
else:
ax.get_yaxis().set_ticks_position('both')
if hide_spines:
for i in hide:
ax.spines[i].set_visible(False)
# adjust tick size
for line in ax.xaxis.get_ticklines() + ax.yaxis.get_ticklines():
line.set_markersize(tick_length)
line.set_markeredgewidth(lw)
for line in (ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True)):
line.set_markersize(tick_length/2)
line.set_markeredgewidth(lw/2)
ax.spines['left'].set_position(('outward', y_offset))
ax.spines['bottom'].set_position(('outward', x_offset))
if equal_limits:
xlim = ax.get_xlim()
ylim = ax.get_ylim()
        lims = [np.minimum(xlim[0], ylim[0]), np.maximum(xlim[1], ylim[1])]
import ctypes
import numpy
import os
import time
from scipy.linalg.blas import daxpy, saxpy
import functools
from numba import jit, prange, njit
import sys
n_axpby = numpy.frompyfunc(lambda x,y,a,b: a*x + b*y, 4,1)
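# Note (added comment): n_axpby is a Python-level ufunc computing a*x + b*y
# elementwise (object-dtype output). For example, with x = y = numpy.ones(3),
# n_axpby(x, y, 2.0, 1.0) is expected to give an array of 3.0 values.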
print ("current dir ", os.path.abspath(__file__))
N = 1048
if len(sys.argv) > 1:
N = int(sys.argv[1])
shape = (N, N)
A = numpy.float32(2.)
B = numpy.float32(1.)
numpy.random.seed(1)
a = numpy.random.random(shape)
b = numpy.random.random(shape)
"""
This class uses matrix least squares analysis for circular weighing measurement sequences
For more information, see 'A General Approach to Comparisons in the Presence of Drift'
Some outputs available within this class:
- The design matrices, expected values, and variance-covariance matrix
- Estimates of item differences and their standard deviations
- Drift parameters and their standard deviations
"""
import numpy as np
from ..log import log
class CircWeigh(object):
_sequences = {2: 5, 3: 4, 4: 3, 5: 3, 6: 3, 7: 3} # key: number of weight groups in weighing, value: number of cycles
_driftorder = {'no drift': 0, 'linear drift': 1, 'quadratic drift': 2, 'cubic drift': 3}
_orderdrift = {0: 'no drift', 1 : 'linear drift', 2 : 'quadratic drift', 3 : 'cubic drift'}
def __init__(self, scheme_entry):
"""Initialises a circular weighing for a single weighing in the scheme
Parameters
----------
scheme_entry : str
the groups of weights to be weighed in order of weighing.
groups of weights should be separated by a space; weights within a group should be separated by a + sign
Examples
----------
e.g. scheme_entry = "1a 1b 1c 1d" for four individual weights
For a 5, 3, 2, 1 sequence, the four scheme entries are:
scheme_entry1 = "5 5s 3+2"
scheme_entry2 = "3 2s+1s 2+1"
scheme_entry3 = "2 2s 1+1s"
scheme_entry4 = "1 1s 0.5+0.5s"
"""
self.wtgrps = scheme_entry.split()
self.num_wtgrps = len(self.wtgrps) # q in paper
self.num_cycles = (self._sequences[self.num_wtgrps])
self.num_readings = self.num_cycles*self.num_wtgrps # p in paper
self.matrices = {}
self.t_matrices = {}
self.b = {}
self.residuals = {}
self.stdev = {}
self.varcovar = {}
self.driftcoeffs = {}
self.grpdiffs = {}
def generate_design_matrices(self, times):
"""Sets up design matrices for linear, quadratic and cubic drift
Parameters
----------
times : list
list of times for each measurement. Ideally these will be equally spaced in time.
Could be [0, 1, 2, 3, ...] or could be from 'Timestamps' attr of dataset
Returns
-------
M1 : numpy array
design matrix for linear drift
M2 : numpy array
design matrix for quadratic drift
M3 : numpy array
design matrix for cubic drift
"""
if len(times) < self.num_readings: # Fill time as simple ascending array, [0,1,2,3...]
times = np.arange(self.num_readings)
self.trend = 'reading'
else: # Ensure that time is a numpy array object.
times = np.array(times)
self.trend = 'minute'
# Prepare matrices for each order of drift correction
        id = np.identity(self.num_wtgrps)
import functools
import io
import random
import unittest
import attrdict
import numpy as np
from sqsgenerator.compat import have_mpi_support
from sqsgenerator.core import default_shell_distances
from sqsgenerator.adapters import to_ase_atoms, to_pymatgen_structure
from sqsgenerator.io import read_settings_file
from sqsgenerator.settings.defaults import ATOL, RTOL
from sqsgenerator.settings.readers import read_atol, \
read_rtol, \
read_mode, \
read_which, \
read_structure, \
read_iterations, \
read_composition, \
read_pair_weights, \
read_shell_weights, \
read_shell_distances, \
read_target_objective, \
read_threads_per_rank, \
read_max_output_configurations, \
BadSettings, IterationMode, Structure, process_settings, build_structure
def settings(recursive=True, **kwargs):
return attrdict.AttrDict({**kwargs}, recursive=recursive)
def test_function(test_f):
def test_f_wrapper(**kwargs):
return test_f(settings(**kwargs))
def _decorator(f):
def _inner(self):
return f(self, test_f_wrapper)
return _inner
return _decorator
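# Note (added): test_function wires a settings reader into the test methods below.
# A minimal sketch of the equivalence it provides (assuming read_atol behaves as
# exercised in TestSettingReaders.test_read_atol):
def _reader_call_sketch():
    """Illustrative only: f(atol=1.5) inside a decorated test is equivalent to this call."""
    return read_atol(settings(atol=1.5))  # expected to return 1.5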
class TestSettingReaders(unittest.TestCase):
def setUp(self) -> None:
self.raw_dict = read_settings_file('resources/cs-cl.sqs.yaml')
self.raw_dict_from_file = read_settings_file('resources/cs-cl.poscar.sqs.yaml')
self.file_name = self.raw_dict_from_file.structure.file
self.structure = read_structure(self.raw_dict)
self.distances = default_shell_distances(self.structure, ATOL, RTOL)
self.processed = process_settings(self.raw_dict)
def assertStructureEquals(self, s1: Structure, s2: Structure, prec=3):
self.assertEqual(s1.num_unique_species, s2.num_unique_species)
self.assertTrue(np.allclose(s1.numbers, s2.numbers))
coords_close = np.allclose(np.round(s1.frac_coords, prec), np.round(s2.frac_coords, prec))
self.assertTrue(coords_close)
def override_default(self, **kwargs):
cp = attrdict.AttrDict(self.processed.copy())
cp.update(**kwargs)
return cp
@test_function(read_atol)
def test_read_atol(self, f):
self.assertAlmostEqual(f(), ATOL)
self.assertAlmostEqual(f(atol=1.5), 1.5)
with self.assertRaises(BadSettings):
f(atol=-1)
with self.assertRaises(BadSettings):
f(atol="adsfasdf")
@test_function(read_rtol)
def test_read_rtol(self, f):
self.assertAlmostEqual(f(), RTOL)
self.assertAlmostEqual(f(rtol=1.5), 1.5)
with self.assertRaises(BadSettings):
f(rtol=-1)
@test_function(read_mode)
def test_read_mode(self, f):
for mode, obj in IterationMode.names.items():
self.assertEqual(f(mode=mode), obj)
self.assertEqual(f(mode=obj), obj)
with self.assertRaises(BadSettings):
f(mode='atol')
self.assertEqual(f(), IterationMode.random)
@test_function(read_iterations)
def test_read_iterations(self, f):
self.assertEqual(f(mode=IterationMode.random), 1e5)
self.assertEqual(f(mode=IterationMode.systematic), -1)
num_iterations = random.randint(1000, 10000)
self.assertEqual(f(mode=IterationMode.systematic, iterations=num_iterations), num_iterations)
self.assertEqual(f(mode=IterationMode.random, iterations=num_iterations), num_iterations)
with self.assertRaises(BadSettings):
# raise a TypeError in convert
f(iterations=())
with self.assertRaises(BadSettings):
# raise a ValueError in convert
f(iterations="adsfasdf")
with self.assertRaises(BadSettings):
# raise a TypeError in convert
f(iterations=-23)
@test_function(read_max_output_configurations)
def test_read_max_output_configurations(self, f):
self.assertEqual(f(), 10)
self.assertEqual(f(max_output_configurations=1000), 1000)
self.assertEqual(f(max_output_configurations=1e3), 1000)
with self.assertRaises(BadSettings):
# raise a TypeError in convert
f(max_output_configurations=())
with self.assertRaises(BadSettings):
# raise a ValueError in convert
f(max_output_configurations="adsfasdf")
with self.assertRaises(BadSettings):
# raise a TypeError in convert
f(max_output_configurations=-23)
@test_function(read_structure)
def test_read_structure(self, f):
self.assertStructureEquals(f(structure=self.structure), self.structure)
self.assertStructureEquals(f(structure=self.structure), self.structure)
self.assertStructureEquals(f(**self.raw_dict_from_file), self.structure)
self.assertStructureEquals(f(structure=to_ase_atoms(self.structure)), self.structure)
self.assertStructureEquals(f(structure=to_pymatgen_structure(self.structure)), self.structure)
with self.assertRaises(BadSettings):
f()
with self.assertRaises(BadSettings):
f(structure={'A': 1})
with self.assertRaises(BadSettings):
# wrong supercell arguments
shape = (2, 3)
repetitions = np.prod(shape)
structure_dictionary = self.structure.to_dict()
structure_dictionary.update(supercell=(2, 3))
f(structure=structure_dictionary)
with self.assertRaises(BadSettings):
# completely wrong unknown data type
f(structure=io.StringIO())
@test_function(read_composition)
def test_read_composition(self, f):
which = tuple(range(len(self.structure)))
ff = functools.partial(f, structure=self.structure, which=which)
with self.assertRaises(BadSettings):
# raise a TypeError in convert
ff(composition={})
with self.assertRaises(BadSettings):
# raise a wrong number of total atoms
ff(composition=dict(Fr=18, Lu=18))
with self.assertRaises(BadSettings):
# correct number but less than one
ff(composition=dict(Fr=54, Lu=0))
with self.assertRaises(BadSettings):
# correct number but negative number
ff(composition=dict(Fr=55, Lu=-1))
with self.assertRaises(BadSettings):
# wrong species
ff(composition=dict(Fr=27, Kf=27))
with self.assertRaises(BadSettings):
# type error in atom number
ff(composition=dict(Fr=27, Na='asdf'))
with self.assertRaises(BadSettings):
# too few atoms on sublattice
f(structure=self.structure, composition=dict(Fr=0, Na=1), which=(0,))
with self.assertRaises(BadSettings):
# wrong number of atoms on sublattice
f(structure=self.structure, composition=dict(Fr=27, Kf=27), which=which)
s = settings(structure=self.structure, composition=dict(Cs=27, Cl=27), which=which)
read_composition(s)
sublattice = [0,2,4,5,6,7,8,9]
s = settings(structure=self.structure, composition=dict(H=4, He=4), which=sublattice)
read_composition(s)
@test_function(read_shell_distances)
def test_read_shell_distances(self, f):
atol = read_atol(settings())
rtol = read_rtol(settings())
default_which = read_which(settings(structure=self.structure))
default_composition = read_composition(settings(structure=self.structure, which=default_which))
distances = f(
atol=atol,
rtol=rtol,
which=default_which,
structure=self.structure,
composition=default_composition
)
np.testing.assert_array_almost_equal(distances, default_shell_distances(self.structure, atol, rtol))
with self.assertRaises(BadSettings):
f(shell_distances=[0, -1, 2, 3, 4])
with self.assertRaises(BadSettings):
f(shell_distances="Wrong type")
with self.assertRaises(BadSettings):
f(shell_distances=[0, 1, 2, 3, complex(1, 2)])
with self.assertRaises(BadSettings):
f(shell_distances=[])
with self.assertRaises(BadSettings):
f(shell_distances=[0.0]*2)
custom_distances = [0, 1, 2, 4, 5]
np.testing.assert_array_almost_equal(custom_distances, f(shell_distances=custom_distances))
custom_distances = [1, 2, 4, 5]
np.testing.assert_array_almost_equal([0.0]+custom_distances, f(shell_distances=custom_distances))
@test_function(read_shell_weights)
def test_read_shell_weights(self, f):
atol = read_atol(settings())
rtol = read_rtol(settings())
default_which = read_which(settings(structure=self.structure))
default_composition = read_composition(settings(structure=self.structure, which=default_which))
distances = read_shell_distances(
settings(
atol=atol,
rtol=rtol,
which=default_which,
structure=self.structure,
composition=default_composition
)
)
ff = functools.partial(f, shell_distances=distances)
weights = f(shell_distances=distances)
for i, w in weights.items():
self.assertAlmostEqual(w, 1.0/i)
self.assertEqual(len(weights)+1, len(distances))
with self.assertRaises(BadSettings):
ff(shell_weights={len(distances)+1: 1.0})
with self.assertRaises(BadSettings):
ff(shell_weights={-1: 1.0})
with self.assertRaises(BadSettings):
ff(shell_weights={})
with self.assertRaises(BadSettings):
ff(shell_weights=[1.0])
shells = {1: 1.0}
validated = ff(shell_weights=shells)
self.assertDictEqual(shells, validated)
@test_function(read_pair_weights)
def test_read_pair_weights(self, f):
ff = functools.partial(f, structure=self.structure)
ns = self.structure.num_unique_species
def make_actual_structure(settings):
return build_structure(settings.composition, settings.structure[settings.which])
ns = make_actual_structure(self.processed).num_unique_species
num_shells = len(self.processed.shell_weights)
shape = (num_shells, ns, ns)
with self.assertRaises(BadSettings):
            # right shape but non-symmetric
fake = np.arange(np.prod(shape)).reshape(shape)
f(**self.override_default(pair_weights=fake))
with self.assertRaises(BadSettings):
# wrong shape
f(**self.override_default(pair_weights=np.zeros((num_shells+1, ns, ns))))
with self.assertRaises(BadSettings):
# wrong number of dimensions
f(**self.override_default(pair_weights=np.zeros((num_shells+1, ns, ns, ns))))
proper_value = np.ones(shape)
np.testing.assert_array_almost_equal(f(**self.override_default(pair_weights=proper_value)), proper_value)
@test_function(read_target_objective)
    def test_read_target_objective(self, f):
default_which = read_which(settings(structure=self.structure))
default_composition = read_composition(settings(structure=self.structure, which=default_which))
ff = functools.partial(
f,
structure=self.structure,
shell_distances=self.distances,
which=default_which,
composition=default_composition
)
default_sw = read_shell_weights(settings(structure=self.structure, atol=ATOL, rtol=RTOL, shell_distances=self.distances))
ns = self.structure.num_unique_species
def shape(sw : dict = default_sw): return (len(sw), ns, ns)
max_num_shells = len(default_sw)
for nshells in range(1, max_num_shells+1):
sw = {i: 1.0/i for i in range(1, nshells+1)}
self.assertEqual(ff(shell_weights=sw).shape, shape(sw))
targets = ff(shell_weights=sw, target_objective=1)
self.assertEqual(targets.shape, shape(sw))
for shell, w in sw.items():
actual = targets[shell-1, :, :]
should_be = np.ones_like(actual)
np.testing.assert_array_almost_equal(actual, should_be)
fff = functools.partial(ff, shell_weights=default_sw)
with self.assertRaises(BadSettings):
fff(target_objective="sadf")
with self.assertRaises(BadSettings):
fff(target_objective=np.arange(10))
with self.assertRaises(BadSettings):
fff(target_objective=np.ones((2,2,2,2)))
# test 3D array
all_ones = np.ones(shape())
np.testing.assert_array_almost_equal(all_ones, fff(target_objective=all_ones))
with self.assertRaises(BadSettings):
            # test non-symmetric sub-arrays
fff(target_objective=np.arange(np.prod(shape())).reshape(shape()))
with self.assertRaises(BadSettings):
fff(target_objective=np.zeros((2, ns+1, ns)))
# test 2D arrays
all_ones = np.ones((ns, ns))
target_objective = fff(target_objective=all_ones)
for shell in sorted(default_sw):
            np.testing.assert_array_almost_equal(target_objective[shell-1], np.ones_like(all_ones))
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60  # computed here so it is defined even if the first poll succeeds
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
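# A minimal usage sketch for M2M_Call, assuming the AUTH credentials above are valid.
# The reference designator and date range are illustrative placeholders, not a tested request.
def example_m2m_call():
    """Request one month of a telemetered METBK stream and return the M2M response."""
    data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
                    '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
    if data is None:
        raise RuntimeError('M2M request rejected; check AUTH credentials and dataset name')
    return data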
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching NetCDF file names are returned as a list for later download; no data is loaded here.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
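# Hedged example of narrowing the catalog listing with a regex tag; the pattern shown is
# illustrative and assumes the deployment's files end in '.nc'.
def example_file_listing(data):
    """Return only the NetCDF files from the THREDDS catalog of a completed request."""
    return M2M_Files(data, tag=r'.*METBK.*\.nc$')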
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # the time record is seconds since 1900-01-01; convert to days, then to datetimes
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
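# End-to-end sketch tying the helpers together. `var_list` is assumed to be a structtype already
# populated with variable names and units (e.g. by M2M_URLs further below); the tag is illustrative.
def example_download(uframe_dataset_name, start_date, end_date, var_list):
    """Request a stream, list its NetCDF files and load them into the supplied var_list."""
    data = M2M_Call(uframe_dataset_name, start_date, end_date)
    files = M2M_Files(data, tag=r'.*\.nc$')
    return M2M_Data(files, var_list)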
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
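# structtype appends a fresh var() whenever the next sequential index is requested, which is
# how M2M_URLs below builds var_list[0], var_list[1], ... without pre-allocating. A small sketch:
def example_structtype():
    var_list = structtype()
    var_list[0].name = 'time'                       # index 0 does not exist yet, so a var() is appended
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].name = 'sea_surface_temperature'    # next sequential index appends another var()
    var_list[1].units = 'degC'
    return var_list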
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
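# Each branch below maps a (platform_name, node, instrument_class, method)
# combination to its uFrame dataset path of the form
# 'SITE/NODE/SENSOR/method/stream' and pre-declares the variable names, empty
# NumPy arrays, and unit labels that the download step fills in later.
# Illustrative only (the variable names here are assumptions, not part of this
# script): the path decomposes into the pieces of an OOI M2M sensor request,
#     site, node_id, sensor, delivery, stream = uframe_dataset_name.split('/')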
#PCO2W -- seawater partial pressure of CO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN -- seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR -- downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF -- seafloor pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
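# The PRESF branches above differ only in the sensor designator
# (02-PRESFA000/B000/C000 by site); the tide-measurement stream and the
# pressure/temperature variables are identical across the four MFN nodes.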
#CTDBP -- pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D -- 3-D single-point turbulent velocity (seafloor MFN nodes)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK -- 3-D point velocity on the CE09OSPM wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A -- air-sea pCO2 (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
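# The PCO2A branches above declare the seawater-side and atmosphere-side
# partial pressures plus the derived air-sea CO2 flux, so three data variables
# accompany the time base for each surface mooring.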
#PARAD -- photosynthetically active radiation (profiler)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA -- optical absorption and attenuation
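# Note: the OPTAA branches below request only the time base; the
# multi-wavelength absorption/attenuation spectra are not pre-declared in this
# table (presumably handled elsewhere, since they do not fit the 1-D array
# layout used here).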
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR -- nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
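# The NUTNR branches above carry both the raw nitrate_concentration and the
# salinity_corrected_nitrate product, both reported in umol/L.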
## Recovered Host data streams
#MOPAK -- 3-axis motion package (buoy)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK -- bulk meteorology (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
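# The METBK bundle above carries the full bulk-meteorology set (SST and
# conductivity/salinity, corrected wind components, barometric pressure, air
# temperature, relative humidity, long/shortwave irradiance, precipitation,
# minute-averaged heat-flux estimates, near-surface currents, and specific
# humidity); the same 19-variable layout repeats for each surface mooring
# below.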
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT -- three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
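# FDCHP recovered-host stream: direct-covariance flux package; only the time
# coordinate is populated for this data set.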
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
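# DOSTA recovered-host streams: dissolved-oxygen optode data; NSIF nodes include
# optode temperature and TC oxygen, while MFN nodes carry a reduced variable set.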
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
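# ADCP recovered-host streams: earth-referenced velocity profiles (eastward,
# northward, upward) plus bin depths and instrument attitude (heading/pitch/roll).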
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
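# WAVSS recovered-host streams: bulk surface-wave statistics (significant wave
# height and period, mean/peak periods, mean direction and spread) from the buoys.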
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
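# VELPT recovered-host streams: single-point velocity meter data (east/north/up
# velocities, attitude, temperature, and pressure) on the buoy and NSIF nodes.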
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
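# PCO2W recovered-host streams: seawater pCO2 and thermistor temperature.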
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
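# PHSEN recovered-host streams: seawater pH and thermistor temperature.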
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
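# SPKIR recovered-host streams: downwelling spectral irradiance vector.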
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
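# PRESF recovered-host streams: seafloor pressure and seawater temperature from
# the MFN nodes.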
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
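# CTDBP recovered-host streams: temperature, practical salinity, density,
# pressure, and conductivity for each platform/node combination below.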
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
        var_list[8].data = np.array([])
        var_list[0].units = 'seconds since 1900-01-01'
        var_list[1].units = 'm/s'
        var_list[2].units = 'm/s'
        var_list[3].units = 'm/s'
        var_list[4].units = 'deci-degrees'
        var_list[5].units = 'deci-degrees'
        var_list[6].units = 'deci-degrees'
        var_list[7].units = '0.01degC'
        var_list[8].units = '0.001dbar'
"""
auralib module containing plotting functions and related utilities.
Author: <NAME>
Created: 20-Jan-2017
Last Mod: 20-Aug-2016
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.widgets import MultiCursor
import numpy as np
def t2xy(d1, d2, d3, norm=True):
c1 = np.array([0, 0])
c2 = np.array([1, 0])
c3 = np.array([0.5, 0.866])
d1 = np.array(d1)
d2 = np.array(d2)
d3 = np.array(d3)
# apply normalization
if norm:
datasum = np.sum(np.vstack([d1, d2, d3]), axis=0)
d1 = d1/datasum
d2 = d2/datasum
d3 = d3/datasum
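    # the x-y position is the barycentric combination of the three corner points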
px = c1[0]*d1 + c2[0]*d2 + c3[0]*d3
py = c1[1]*d1 + c2[1]*d2 + c3[1]*d3
return px, py
def tern(ax, lbls=['C1', 'C2', 'C3']):
# Corner points of triangular axes
c1 = np.array([0, 0])
c2 = np.array([1, 0])
c3 = np.array([0.5, 0.866])
# Draw axes and add labels
axbg_patch = mpatches.Polygon(np.vstack([c1, c2, c3]), closed=True,
fc='white', ec=None, zorder=1)
ax.add_patch(axbg_patch)
ax.plot([0, 1, 0.5, 0], [0, 0, 0.866, 0], 'k', lw=1.5, zorder=5)
ax.text(c1[0], c1[1], lbls[0], ha='right', va='top', fontsize=14)
ax.text(c2[0], c2[1], lbls[1], ha='left', va='top', fontsize=14)
ax.text(c3[0], c3[1], lbls[2], ha='center', va='bottom', fontsize=14)
# Draw gridlines
for i in np.arange(0.1, 1, 0.1):
l1 = [i, i]
l2 = 1.0 - i
lx, ly = t2xy(l1, [0, l2], [l2, 0])
ax.plot(lx, ly, ':', lw=1.0, color=u'0.4', zorder=2)
ax.text(lx[-1]+0.01, ly[-1]-0.03, '%.2f' % i, ha='center', va='center', rotation=-60.0)
l1 = [i, i]
l2 = 1.0 - i
lx, ly = t2xy([0, l2], l1, [l2, 0])
ax.plot(lx, ly, ':', lw=1.0, color=u'0.4', zorder=2)
ax.text(lx[0]+0.005, ly[0]+0.03, '%.2f' % i, ha='left', va='center', rotation=60.0)
l1 = [i, i]
l2 = 1.0 - i
lx, ly = t2xy([0, l2], [l2, 0], l1)
ax.plot(lx, ly, ':', lw=1.0, color=u'0.4', zorder=2)
ax.text(lx[-1]-0.01, ly[0], '%.2f' % i, ha='right', va='center')
ax.set_xlim([-0.1, 1.1])
ax.set_ylim([-0.1, 0.966])
ax.set_aspect('equal')
ax.set_axis_off()
#ax.xaxis.set_visible(False)
#ax.yaxis.set_visible(False)
#ax.spines['top'].set_visible(False)
#ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
#ax.spines['left'].set_visible(False)
def tern_scatter(ax, d1, d2, d3, s=25, color=None, marker=None, cmap=None,
lw=None, ec=None, alpha=1.0, label=None):
    # Transform points from (C1, C2, C3) ternary coordinates to x-y plot coordinates
    px, py = t2xy(d1, d2, d3)
    # Plot the points on the ternary axes
pts = ax.scatter(px, py, s=s, c=color, marker=marker, cmap=cmap, lw=lw,
edgecolor=ec, alpha=alpha, label=label,
zorder=10)
return pts
def tern_line(ax, d1, d2, d3, c=None, lw=None, ls=None, label=None):
    # Transform points from (C1, C2, C3) ternary coordinates to x-y plot coordinates
    px, py = t2xy(d1, d2, d3)
    # Plot the line on the ternary axes
hdl = ax.plot(px, py, c=c, lw=lw, ls=ls, label=label, zorder=10)
return hdl
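# Hedged usage sketch for the ternary helpers above (the fraction arrays, e.g.
# f_sand/f_silt/f_clay, are assumed inputs and not part of this module):
#   fig, ax = plt.subplots()
#   tern(ax, lbls=['Sand', 'Silt', 'Clay'])
#   tern_scatter(ax, f_sand, f_silt, f_clay, color='b', marker='o')
#   tern_line(ax, [1, 0], [0, 1], [0, 0], c='r', ls='--')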
def plot_blocky(ax, data, zdata, linespec='b-', lw=1):
"""
Convenience function for plotting a blocky log.
Ensure that the zdata log has 1 more sample than the data log.
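    Example (illustrative values): three blocks require four interface depths.
        fig, ax = plt.subplots()
        plot_blocky(ax, data=[2500.0, 2700.0, 2600.0],
                    zdata=[1000.0, 1050.0, 1120.0, 1200.0], linespec='r-')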
"""
for i in range(0, len(data)):
ax.plot([data[i], data[i]], [zdata[i], zdata[i+1]], linespec, lw=lw)
for i in range(1, len(data)):
ax.plot([data[i-1], data[i]], [zdata[i], zdata[i]], linespec, lw=lw)
def radar(ax, data, data_names, lw=1.0, color='k', ls='-', marker=None, label=None):
"""
function to produce a radar plot
ax = axis handle of a polar axis to do plotting in
data = 1D array or list of data values
data_names = 1D list of data names
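    Example (illustrative values; a polar axes is required):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='polar')
        radar(ax, [0.2, 0.5, 0.9, 0.4], ['Vp', 'Vs', 'Rho', 'GR'], color='b', label='Well A')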
"""
# get number of values in data vector
N = len(data)
# append the first data value to the end of the list so we can make closed
# polygonal regions for plotting and filling
data = np.array(data)
data = data.tolist()
data.append(data[0])
    # Compute the angle of each axis in the plot
    # (divide the full circle by the number of variables)
angles = [n / float(N) * 2 * np.pi for n in range(N)]
angles += angles[:1]
    # Draw one axis per variable and add the category labels
plt.sca(ax)
plt.xticks(angles[:-1], data_names)
# Draw ylabels
ax.set_rlabel_position(0)
plt.yticks(color="grey", size=8)
# Plot data
ax.plot(angles, data, lw=lw, color=color, ls=ls, marker=marker, label=label)
# Fill area
ax.fill(angles, data, color=color, alpha=0.1)
ax.grid(True, ls=':')
def plot_filled_logs(ax, logs, z, fill_colors, labels, lw=1.0, alpha=0.3):
"""
Plot a series of logs using fill colors between each log. Designed to show
    a series of mineral fraction logs or fluid saturation logs.
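    Example (hedged sketch; `vsh`, `vqtz`, `vcalc`, and `z_md` are assumed arrays,
    with the three fractions summing to 1 at each depth):
        plot_filled_logs(ax, [vsh, vqtz, vcalc], z_md,
                         ['green', 'gold', 'skyblue'],
                         ['Shale', 'Quartz', 'Calcite'])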
"""
import matplotlib as mpl
nlogs = len(logs)
# take the log fill colors, and make them darker to be used for the log
# line colors. This will make them more prominent in the display
log_colors = []
for color in fill_colors:
rgb = mpl.colors.to_rgb(color)
rgb = np.array(rgb)
rgb = rgb*0.3
log_colors.append(rgb)
# first plot the log fills
logsum = 0 # cumulative log value
for i in range(nlogs):
curlog = logs[i] + logsum
ax.fill_betweenx(z, curlog, logsum, where=curlog>=logsum,
facecolor=fill_colors[i], alpha=alpha, label=labels[i])
logsum = logsum + logs[i]
# next plot the log curves to visually separate the filled areas
logsum = 0 # cumulative log value
for i in range(nlogs):
curlog = logs[i] + logsum
ax.plot(curlog, z, c=log_colors[i], lw=lw, alpha=alpha)
logsum = logsum + logs[i]
def format_log_axes(axes, ylabel):
"""
Function to format a series of axes displaying simple log curves.
"""
for ax in axes:
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
ax.grid(True, ls=':')
for ax in axes[1:-1]:
plt.setp(ax.get_yticklabels(), visible=False)
axes[0].set_ylabel(ylabel)
axes[-1].set_ylabel(ylabel)
axes[-1].yaxis.set_ticks_position('right')
    axes[-1].yaxis.set_label_position('right')
def make_wellview(ntrack=5, figsize=(5, 5)):
"""
Function for creating a blank, multi-track well viewer with a well header
area, a well log header area for each track, and a well log data area.
"""
fig = plt.figure(num=1, figsize=figsize)
fig.clf()
nrow = 30
ncol = ntrack
ttl = 0
hdr = 1
dat = 4
axd = [plt.subplot2grid((nrow, ncol), (dat, 0), rowspan=nrow-3)]
axh = [plt.subplot2grid((nrow, ncol), (hdr, 0), rowspan=3, sharex=axd[0])]
for i in range(1, ncol):
axd.append(plt.subplot2grid((nrow, ncol), (dat, i), rowspan=nrow-2, sharey=axd[0]))
axh.append(plt.subplot2grid((nrow, ncol), (hdr, i), rowspan=3, sharex=axd[i], sharey=axh[0]))
axttl = plt.subplot2grid((nrow, ncol), (ttl, 0), colspan=ncol)
for ax in fig.get_axes():
ax.tick_params(which='both', direction='in')
curs = MultiCursor(fig.canvas, axd, vertOn=False, horizOn=True,
lw=1, c=u'0.3')
wview = {'fig': fig, 'axd': axd, 'axh': axh, 'axttl': axttl, 'curs': curs}
return wview
def plot_log(wview, tracknum, aura_log, fmtstr='%.1f', numlogs=1, logpos=1, xscale='normal'):
"""
Function to plot logs in a Well Viewer created using the
aura.plot.make_wellvew() function.
Input logs are required to be aura.well.AuraLog() objects.
"""
axd = wview['axd']
axh = wview['axh']
if xscale=='log':
axd[tracknum].set_xscale('log')
axh[tracknum].set_xscale('log')
axd[tracknum].plot(aura_log.data, aura_log.zref,
color=aura_log.c, lw=aura_log.lw)
axd[tracknum].set_xlim(aura_log.plt_range)
xlim = axd[tracknum].get_xlim()
data_range = np.abs(xlim[1] - xlim[0])
if xscale == 'log':
data_lim_offset0 = xlim[0]
data_lim_offset1 = xlim[1]
else:
data_lim_offset0 = data_range*0.02 + xlim[0]
data_lim_offset1 = -data_range*0.02 + xlim[1]
axh[tracknum].plot([data_lim_offset0, data_lim_offset1], (logpos, logpos),
c=aura_log.c, lw=aura_log.lw)
bbox = dict(fc='white', ec='white', alpha=1.0, pad=0.5)
axh[tracknum].text(data_lim_offset0, logpos, fmtstr % xlim[0],
va='top', ha='left', color=aura_log.c,
bbox=bbox, fontsize=aura_log.fs)
axh[tracknum].text(data_lim_offset1, logpos, fmtstr % xlim[1],
va='top', ha='right', color=aura_log.c,
bbox=bbox, fontsize=aura_log.fs)
if xscale=='log':
        # geometric mean gives the visual centre of a log-scaled axis
        xpos_logname = np.sqrt(xlim[0] * xlim[1])
else:
xpos_logname = np.mean(xlim)
if len(aura_log.units)>0:
axh[tracknum].text(xpos_logname, logpos, aura_log.name+' ('+aura_log.units+')',
va='bottom', ha='center', color=aura_log.c,
fontsize=aura_log.fs)
else:
axh[tracknum].text(xpos_logname, logpos, aura_log.name,
va='bottom', ha='center', color=aura_log.c,
fontsize=aura_log.fs)
axh[tracknum].set_ylim([0, numlogs+1])
axh[tracknum].set_xlim(xlim)
def format_wellview(wview, ylabel='Depth', title_text='Title', ylim='none'):
"""
Once all well data are plotted in an aura wellviewer figure, call this
function to make the well viewer figure look nice.
"""
fig = wview['fig']
axd = wview['axd']
axh = wview['axh']
axttl = wview['axttl']
axd[0].invert_yaxis()
if ylim != 'none':
axd[0].set_ylim(ylim)
ntrack = len(axd)
count = 1
for (axdi, axhi) in zip(axd, axh):
axdi.grid(True, which='major', ls=':', lw=0.5)
axdi.grid(True, which='minor', ls=':', lw=0.5)
axdi.minorticks_on()
axdi.xaxis.set_ticks_position('top')
axdi.xaxis.set_label_position('top')
[label.set_visible(False) for label in axdi.get_xticklabels()]
axhi.set_facecolor('white')
axhi.set_frame_on(True)
axhi.grid(False)
axhi.xaxis.set_visible(False)
axhi.yaxis.set_visible(False)
axdi.yaxis.set_ticks_position('both')
if (count==1):
axdi.set_ylabel(ylabel)
elif count==ntrack:
axdi.set_ylabel(ylabel)
axdi.yaxis.set_ticks_position('right')
axdi.yaxis.set_label_position('right')
else:
axdi.tick_params(labelright=True)
[label.set_visible(False) for label in axdi.get_yticklabels()]
count += 1
axttl.set_facecolor('#fcfcc4')
axttl.set_frame_on(True)
axttl.grid(False)
axttl.xaxis.set_visible(False)
axttl.yaxis.set_visible(False)
axttl.text(0.5, 0.5, title_text, ha='center', va='center', weight='normal')
axttl.set_xlim([0, 1])
axttl.set_ylim([0, 1])
fig.tight_layout(w_pad=0.00, h_pad=0.00)
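# Hedged end-to-end sketch of the well-viewer workflow (the AuraLog objects
# `gr_log` and `rhob_log` are assumed to come from aura.well and are not defined here):
#   wview = make_wellview(ntrack=2, figsize=(8, 10))
#   plot_log(wview, 0, gr_log, numlogs=1, logpos=1)
#   plot_log(wview, 1, rhob_log, numlogs=1, logpos=1)
#   format_wellview(wview, ylabel='MD (m)', title_text='Well A-1')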
def add_van_krevelan_template(ax, lw=2, fs=14, c='k'):
T1 = np.array([[ 3.89993785, 97.72755495],
[ 4.27284027, 140.94126413],
[ 3.89993785, 182.6648454 ],
[ 3.89993785, 227.36868248],
[ 3.89993785, 260.15149634],
[ 3.89993785, 291.44418229],
[ 4.27284027, 324.22699615],
[ 4.27284027, 376.38147274],
[ 4.27284027, 419.59518192],
[ 4.27284027, 456.84837949],
[ 5.39154755, 489.63119334],
[ 5.01864512, 514.96336769],
[ 6.13735239, 559.66720477],
[ 7.25605966, 601.39078604],
[ 7.62896209, 629.70321619],
[ 8.00186451, 659.50577425],
[ 10.61218148, 701.22935552],
[ 12.84959602, 735.50229728],
[ 15.08701057, 769.77523904],
[ 16.95152268, 793.61728548],
[ 20.68054692, 824.90997144],
[ 23.29086389, 856.20265739],
[ 25.90118086, 877.06444803],
[ 28.1385954 , 897.92623867],
[ 32.61342449, 921.76828511],
[ 37.08825357, 941.13994785],
[ 40.07147296, 962.00173848],
[ 43.8004972 , 976.90301751]])
T2 = np.array([[ 8.37476694, 99.21768285],
[ 8.37476694, 123.05972929],
[ 8.00186451, 160.31292686],
[ 8.00186451, 202.03650813],
[ 8.74766936, 242.26996151],
[ 8.37476694, 286.97379858],
[ 8.37476694, 321.24674035],
[ 8.37476694, 355.51968211],
[ 9.12057178, 382.34198435],
[ 10.61218148, 415.12479821],
[ 13.22249845, 450.88786788],
[ 16.95152268, 479.20029803],
[ 20.68054692, 498.57196076],
[ 24.78247359, 522.4140072 ],
[ 28.1385954 , 543.27579784],
[ 32.61342449, 567.11784428],
[ 36.34244873, 583.50925121],
[ 40.81727781, 601.39078604],
[ 49.02113114, 620.76244878],
[ 54.24176507, 635.6637278 ],
[ 60.58110628, 652.05513473],
[ 64.31013052, 660.99590215],
[ 69.53076445, 666.95641376],
[ 75.49720323, 675.89718117],
[ 80.71783717, 680.36756488]])
T3 = np.array([[ 14.34120572, 39.61256675],
[ 18.07022996, 56.00397367],
[ 23.66376631, 67.9249969 ],
[ 34.47793661, 78.35589221],
[ 43.8004972 , 85.80653173],
[ 51.25854568, 97.72755495],
[ 59.83530143, 108.15845027],
[ 67.29334991, 117.09921768],
[ 74.37849596, 124.5498572 ],
[ 83.32815413, 127.530113 ],
[ 96.00683654, 137.96100832],
[103.46488502, 142.43139203],
[115.39776259, 145.41164783],
[128.82224984, 152.86228735],
[138.89061529, 158.82279896],
[147.46737104, 163.29318266],
[159.77315103, 167.76356637],
[169.84151647, 172.23395008],
[179.90988191, 173.72407798],
[187.74083282, 178.19446169],
[193.33436917, 181.1747175 ]])
L1 = np.array([[1.70913611e-01, 2.35936918e+00],
[1.99673710e+02, 9.99254936e+02]])
L2 = np.array([[5.43816035e-01, 2.35936918e+00],
[1.99673710e+02, 5.98410530e+02]])
ax.plot(T1[:, 0], T1[:, 1], color=c, lw=lw)
ax.plot(T2[:, 0], T2[:, 1], color=c, lw=lw)
ax.plot(T3[:, 0], T3[:, 1], color=c, lw=lw)
ax.plot(L1[:, 0], L1[:, 1], color=c, ls='--', lw=lw)
ax.plot(L2[:, 0], L2[:, 1], color=c, ls='--', lw=lw)
ax.text(T1[-1, 0], T1[-1, 1], 'Type I', ha='left', va='center', fontsize=fs, color=c)
ax.text(T2[-1, 0], T2[-1, 1], 'Type II', ha='left', va='center', fontsize=fs, color=c)
ax.text(T3[-1, 0], T3[-1, 1], 'Type III', ha='right', va='bottom', fontsize=fs, color=c)
ax.text(80, 35, 'Type IV', fontsize=fs, color=c)
ax.text(110, 875, 'Oil', fontsize=fs, color=c)
ax.text(157, 626, 'Mixed', fontsize=fs, color=c)
ax.text(166, 382, 'Gas', fontsize=fs, color=c)
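# Hedged usage sketch: overlay Rock-Eval points (assumed arrays `oi` and `hi`,
# the oxygen and hydrogen indices) on the kerogen-type template drawn above.
#   fig, ax = plt.subplots()
#   add_van_krevelan_template(ax, lw=2, fs=12)
#   ax.scatter(oi, hi, c='r', zorder=10)
#   ax.set_xlabel('Oxygen Index')
#   ax.set_ylabel('Hydrogen Index')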
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None,
cmap=plt.cm.Blues, fig=None, flip_text_color=False):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
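    Example (hedged; `y_test` and `y_pred` are assumed integer-coded label arrays):
        plot_confusion_matrix(y_test, y_pred,
                              classes=np.array(['brine', 'oil', 'gas']),
                              normalize=True)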
"""
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
if not title:
if normalize:
title = 'Normalized Confusion Matrix'
else:
title = 'Confusion Matrix, Without Normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized Confusion Matrix')
else:
print('Confusion Matrix, Without Normalization')
print(cm)
if not fig:
fig = plt.figure(num=1)
fig.clf()
ax = fig.add_subplot(111)
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # label the axes with the class names, following the usual scikit-learn recipe
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
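# Illustrative usage sketch (not part of the original script): the labels and
# predictions below are invented purely to show how plot_confusion_matrix is called.
#
#     y_true = np.array([0, 0, 1, 1, 2, 2, 2])
#     y_pred = np.array([0, 1, 1, 1, 2, 0, 2])
#     class_names = np.array(['class_a', 'class_b', 'class_c'])
#     plot_confusion_matrix(y_true, y_pred, classes=class_names, normalize=True)
#     plt.show()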
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used for validate random distributions layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
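            # k-th central moment of the sample: mean((data - mean(data))**k),
            # the same quantity computed by scipy.stats.moment(data, moment=k).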
return np.mean(np.power(data - np.mean(data), k))
        if not isinstance(model, coremltools.models.MLModel):
            # `model` may be a file path or a spec; wrap it exactly once.
            model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
            model = coremltools.models.MLModel(model_path, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
                [[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
                 [1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
        self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (1, 10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration(self, rank=4):
default_shape = tuple(np.random.randint(1, 15, size=rank))
input_features = [('data', datatypes.Array(*default_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features=input_features,
output_features=[('output', None)],
disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [tuple(np.random.randint(1, 15, size=rank)),
tuple(np.random.randint(1, 15, size=rank))]
flexible_shape_utils.add_multiarray_ndshape_enumeration(
spec, feature_name='data', enumerated_shapes=shapes)
shapes.append(default_shape)
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration_rank3(self):
self.test_shape_flexibility_enumeration(rank=3)
def test_shape_flexibility_enumeration_rank2(self):
self.test_shape_flexibility_enumeration(rank=2)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_dynamic_weight_conv(self):
input_dim = (1, 3, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (4, 3, 3, 3)
output_dim = (1, 4, 14, 14)
        output_channels, kernel_channels, height, width = weight_dim
input_features = [
('input', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='two_input_conv_layer',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
input_name=['input', 'weight'],
output_name='output')
        # Setting every value to one exercises the execution path and surfaces
        # engine failures, but it is not a complete check on the numerics.
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
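        # Each valid output element sums a 3x3 window over 3 input channels of ones,
        # i.e. 3 * 3 * 3 = 27.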
expected = np.ones(output_dim) * 27
feed_dict = {'input': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)
@pytest.mark.xfail
def test_dynamic_weight_deconv(self):
# Expect to fail in Core ML 3
input_dim = (1, 1, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (1, 1, 3, 3)
output_dim = (1, 1, 18, 18)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('data', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='deconv',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
is_deconv=True,
input_name=['data', 'weight'],
output_name='output')
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'data': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected)
def test_batched_mat_mul_cpu(self, cpu_only=True):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input_ = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input_, expected, useCPUOnly=cpu_only,
output_name_shape_dict=shape_dict)
self.assertEqual(len(outShape), builder._get_rank('output'))
def test_batched_mat_mul_gpu(self):
self.test_batched_mat_mul_cpu(cpu_only=False)
def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_batched_mat_mul_with_transposes_gpu(self):
self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False)
def test_batched_mat_mul_single_input_cpu(self,
model_precision=_MLMODEL_FULL_PRECISION,
cpu_only=True):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
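            # Reference result: W multiplies the last axis (X1 -> X2) and the bias is
            # broadcast over all leading dimensions.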
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=cpu_only)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION,
cpu_only=True)
def test_batched_mat_mul_single_input_gpu(self):
self.test_batched_mat_mul_single_input_cpu(model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
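            # Reference embedding lookup: each integer id picks the matching column of W
            # (a row of W.T), so the output shape is input_shape[:-1] + (embedding_size,).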
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nan_bug_cpu(self):
input_shape = [2,2]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
for axis in [0,1]:
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.array([[0.5, 0.5],[1e8, 1e8]])
input = {'data': x}
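            # Reference softmax uses the max-subtraction trick: with entries as large as
            # 1e8 a naive np.exp(x) overflows to inf (and inf/inf gives NaN), which is
            # presumably the bug the test name refers to.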
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_softmax_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_softmax_nd_gpu(self):
self.test_softmax_nd_cpu(cpu_only=False)
def test_concat_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_concat_nd_gpu(self):
self.test_concat_nd_cpu(cpu_only=False)
def test_fill_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_fill_like_gpu(self):
self.test_fill_like_cpu(cpu_only=False)
def test_fill_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(shape), builder._get_rank('output'))
def test_fill_static_gpu(self):
self.test_fill_static_cpu(cpu_only=False)
def test_fill_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_fill_dynamic_gpu(self):
self.test_fill_dynamic_cpu(cpu_only=False)
def test_broadcast_to_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
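            # Build a broadcast-compatible target shape: trailing dims copy the input dim,
            # except where the input dim is 1 (or absent), in which case they are chosen freely.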
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_broadcast_to_like_gpu(self):
self.test_broadcast_to_like_cpu(cpu_only=False)
def test_broadcast_to_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_broadcast_to_static_gpu(self):
self.test_broadcast_to_static_cpu(cpu_only=False)
def test_broadcast_to_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_broadcast_to_dynamic_gpu(self):
self.test_broadcast_to_dynamic_cpu(cpu_only=False)
    # Test that the output rank is reported as unknown (-1) when one of the
    # input ranks is unknown (max-rank broadcasting case).
def test_unknown_rank(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('x', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['x', 'shape'],
output_name='y')
condition = np.random.randint(0, 2, input_shape).astype(np.float32)
builder.add_load_constant_nd(name='load_constant_condition',
output_name='condition',
constant_value=condition,
shape=input_shape)
builder.add_where_broadcastable(name='where',
input_names=['condition', 'x', 'y'],
output_name='output')
self.assertEqual(builder._get_rank('output'), -1)
def test_trigonometry_cpu(self, cpu_only=True):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=cpu_only)
def test_trigonometry_gpu(self):
self.test_trigonometry_cpu(cpu_only=False)
def test_exp2_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_exp2_gpu(self):
self.test_exp2_cpu(cpu_only=False)
def test_elementwise_binary_cpu(self, cpu_only=True):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_binary_gpu(self):
self.test_elementwise_binary_cpu(cpu_only=False)
def test_elementwise_boolean_unary_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_boolean_unary_gpu(self):
self.test_elementwise_boolean_unary_cpu(cpu_only=False)
def test_logical_not_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_logical_not_gpu(self):
self.test_logical_not_cpu(cpu_only=False)
def test_stack_cpu(self, cpu_only=True):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(input_rank + 1, builder._get_rank('output'))
def test_stack_gpu(self):
self.test_stack_cpu(cpu_only=False)
def test_ceil_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_ceil_gpu(self):
self.test_ceil_cpu(cpu_only=False)
def test_floor_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_floor_gpu(self):
self.test_floor_cpu(cpu_only=False)
def test_round_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_round_gpu(self):
self.test_round_cpu(cpu_only=False)
def test_sign_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
x = np.random.choice([-np.random.rand(1), 0.0, np.random.rand(1)],
tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_sign_gpu(self):
self.test_sign_cpu(cpu_only=False)
def test_clip_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_clip_gpu(self):
self.test_clip_cpu(cpu_only=False)
def test_split_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
value = np.random.choice(range(2, 5))
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][
axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
) # Explicitly trying to compare against both versions of numpy split
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_gpu(self):
self.test_split_nd_cpu(cpu_only=False)
def test_split_nd_with_split_sizes_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_with_split_sizes_gpu(self):
self.test_split_nd_with_split_sizes_cpu(cpu_only=False)
def test_slice_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
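                    # Re-sample begin/end until the slice keeps at least one element
                    # along this dimension.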
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                    objs.append(obj)
                    strides.append(stride)
                    begin_masks.append(begin_mask)
                    end_masks.append(end_mask)
                    begin_ids.append(begin_id)
                    end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_static_gpu(self):
self.test_slice_static_cpu(cpu_only=False)
def test_slice_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
                objs.append(obj)
                strides.append(stride)
                begin_masks.append(begin_mask)
                end_masks.append(end_mask)
                begin_ids.append(begin_id)
                end_ids.append(end_id)
# test different numbers of inputs, from 2 up to 6
# when num_inputs == 2, only begin_ids is an input; the rest are read from parameters
# when num_inputs == 6, everything is read from inputs and no parameters are used
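# e.g. num_inputs == 4 feeds 'data', 'begin_ids', 'end_ids' and 'strides' as inputs
# and leaves begin_masks / end_masks as layer parameters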
for num_inputs in [2, 3, 4, 5, 6]:
x = np.random.rand(*input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
input_names = ['data']
inputs = dict()
inputs['data'] = x
if num_inputs == 2:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids)))]
input_names = ['data', 'begin_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
elif num_inputs == 3:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids)))]
input_names = ['data', 'begin_ids', 'end_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
elif num_inputs == 4:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
elif num_inputs == 5:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides', 'begin_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
elif num_inputs == 6:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks))),
('end_masks', datatypes.Array(len(end_masks)))]
input_names = ['data', 'begin_ids', 'end_ids',
'strides', 'begin_masks', 'end_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
inputs['end_masks'] = np.array(end_masks, dtype=np.int32)
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 2:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 3:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
strides=strides, begin_masks=begin_masks,
end_masks=end_masks)
elif num_inputs == 4:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 5:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_masks=end_masks)
elif num_inputs == 6:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output')
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_dynamic_gpu(self):
self.test_slice_dynamic_cpu(cpu_only=False)
def test_tile_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=5, size=rank)
for rep_rank in range(1,rank+1):
reps = list(np.random.randint(low=1, high=9, size=rep_rank))
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_tile('Tile', 'data', 'output', reps)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.tile(x, reps)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_tile_gpu(self):
self.test_tile_cpu(cpu_only=False)
def test_sliding_windows_cpu(self, cpu_only=True):
def numpy_sliding_windows(a, np_axis, np_size, np_step):
n = (a.shape[np_axis] - np_size) // np_step + 1
shape = list(a.shape)
shape[np_axis] = n
if np_axis < 0:
np_axis += len(shape)
shape.insert(np_axis + 1, np_size)
strides = list(a.strides)
effstride = strides[np_axis] * np_step
strides.insert(np_axis, effstride)
return np.lib.stride_tricks.as_strided(a, shape, strides)
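# worked example: a 1-D array of length 7 with window_size=3 and step=2 yields
# windows starting at offsets 0, 2, 4, i.e. an output of shape (3, 3)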
for rank in range(1, 5):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
output_shape = list(input_shape)
window_size = np.random.randint(low=1, high=input_shape[axis])
length = 0
while length <= 0:
step = np.random.randint(low=1, high=input_shape[axis])
length = (input_shape[axis] - window_size) // step + 1
output_shape[axis] = length
pos_axis = axis if axis >= 0 else axis + rank
output_shape.insert(pos_axis + 1, window_size)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sliding_windows('sliding_windows',
input_name='data',
output_name='output',
axis=axis,
window_size=window_size,
step=step)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': numpy_sliding_windows(x, axis, window_size, step)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(rank+1, builder._get_rank('output'))
def test_sliding_windows_gpu(self):
self.test_sliding_windows_cpu(cpu_only=False)
def test_range_static_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
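# (start, end, step) triples: fractional steps, a step larger than the span
# (single-element range), and negative steps for descending ranges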
for param in params:
start, end, step = param
input_features = [('multiplicative_input', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_range_static('range_static', 'output_range',
end=end, start=start, step=step)
builder.add_multiply_broadcastable(
name='multiply_broadcastable',
input_names=['multiplicative_input', 'output_range'],
output_name='output')
# save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
inputs = dict()
inputs['multiplicative_input'] = np.ones((1,), dtype=np.float64)
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
def test_range_static_gpu(self):
self.test_range_static_cpu(cpu_only=False)
def test_range_dynamic_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
# num_inputs == 1: 'end' is an input; start and step are read from parameters
# num_inputs == 2: 'end' and 'start' are inputs; step is read from parameters
# num_inputs == 3: 'start', 'end' and 'step' are all inputs; no parameters are used
for num_inputs in [1, 2, 3]:
for param in params:
inputs = dict()
start, end, step = param
if num_inputs == 1:
input_features = [('end', datatypes.Array(1))]
elif num_inputs == 2:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1))]
elif num_inputs == 3:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1)),
('step', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 1:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end'],
start=start, step=step)
elif num_inputs == 2:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start'],
step=step)
elif num_inputs == 3:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
inputs['step'] = step * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start', 'step'])
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
def test_range_dynamic_gpu(self):
self.test_range_dynamic_cpu(cpu_only=False)
def test_linear_activation_different_ranks_cpu(self, cpu_only=True):
for input_dim in [(10, 15), (10, 15, 2, 3),
(10, 2, 4, 15, 1, 4), (6,)]:
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_linear_activation_different_ranks_gpu(self):
self.test_linear_activation_different_ranks_cpu(cpu_only=False)
def test_topk_cpu(self, cpu_only=True):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
K = [3, 5]
axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]]
for ii, input_shape in enumerate(test_input_shapes):
for k in K:
for n_inputs in [1, 2]:
for bottom_k_flag in [False, True]:
for axis in axes[ii]:
for negative_axis in [False, True]:
if negative_axis:
axis = axis - len(input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('values', None), ('indices', None)]
input_names = ['data']
output_names = ['values', 'indices']
if n_inputs == 2:
input_names.append('k_in')
input_features.append(('k_in', datatypes.Array(1)))
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_topk('topk', input_names, output_names,
axis=axis, use_bottom_k=bottom_k_flag)
else:
builder.add_topk('topk', input_names, output_names,
k=k, axis=axis, use_bottom_k=bottom_k_flag)
data = np.random.randint(low=0, high=int(np.prod(input_shape)), size=input_shape)
data = data.astype(np.float32)
input = {'data': data}
if n_inputs == 2:
input['k_in'] = k * np.ones([1], dtype=np.float32)
# numpy reference values
if bottom_k_flag:
ref_indices = np.argsort(data, axis=axis)
else:
ref_indices = np.argsort(-data, axis=axis)
slc = [slice(None)] * len(input_shape)
slc[axis] = slice(0, k)
ref_indices = ref_indices[tuple(slc)]
ref_values = np.take_along_axis(data, ref_indices, axis=axis)
expected = {'values': ref_values, 'indices': ref_indices}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_topk_gpu(self):
self.test_topk_cpu(cpu_only=False)
def test_const_pad_cpu(self, cpu_only=True):
def get_reference(data, pads, value):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=pads.shape)
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
return sess.run(y, feed_dict={x: data, p: pads})
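# reference output computed with tf.pad in CONSTANT mode; `pads` is a (rank, 2)
# array of [before, after] padding amounts per axis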
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
ctr = 0
for shape in shapes:
rank = len(shape)
for force_zeros_in_end in [0, 2, 6]:
for max_pad_value in range(1, 6):
for n_inputs in [1, 2]:
pads = np.random.randint(low=0, high=max_pad_value, size=(rank, 2))
if force_zeros_in_end > 2 * rank:
continue
if force_zeros_in_end != 0:
pads[-force_zeros_in_end:] = 0
data = np.random.rand(*shape)
reference = get_reference(data, pads, value)
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten())
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_gpu(self):
self.test_const_pad_cpu(cpu_only=False)
def test_const_pad_mode2_cpu(self, cpu_only=True):
def get_reference(data, output_shape, value, left_pad=False):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=(len(output_shape), 2))
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
pads = np.zeros((len(output_shape), 2))
if left_pad:
pads[:, 0] = np.array(output_shape) - np.array(data.shape)
else:
pads[:, 1] = np.array(output_shape) - np.array(data.shape)
return sess.run(y, feed_dict={x: data, p: pads})
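# pad-to-output-shape mode: the reference pads by (output_shape - data.shape)
# entirely before each axis when left_pad is True, otherwise entirely after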
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)]
ctr = 0
for ii, shape in enumerate(shapes):
rank = len(shape)
for left_pad in [True, False]:
for n_inputs in [1, 2]:
data = np.random.rand(*shape)
reference = get_reference(data, out_shapes[ii], value, left_pad)
pads = np.zeros((rank, 2))
tmp = np.zeros((rank))
for i in range(rank):
if out_shapes[ii][i] == shape[i]:
tmp[i] = 0
else:
tmp[i] = out_shapes[ii][i]
if left_pad:
pads[:, 0] = tmp
else:
pads[:, 1] = tmp
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_to_given_output_size_mode=True)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten(), pad_to_given_output_size_mode=True)
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_mode2_gpu(self):
self.test_const_pad_mode2_cpu(cpu_only=False)
def test_nms_cpu(self, cpu_only=True):
def _compute_iou_matrix(boxes):
# input is (N,4), in order [center_w, center_h, width, height]
assert len(boxes.shape) == 2
assert boxes.shape[1] == 4
boxes = boxes.astype(np.float)
center_w, center_h, width, height = np.split(boxes, 4, axis=1) # outs are all (N,1)
top = center_h + 0.5 * height
bottom = center_h - 0.5 * height
left = center_w - 0.5 * width
right = center_w + 0.5 * width
area = width * height
hB = np.minimum(top, np.transpose(top))
wB = np.minimum(right, np.transpose(right))
hA = np.maximum(bottom, np.transpose(bottom))
wA = np.maximum(left, np.transpose(left))
intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA)
union_area = area + np.transpose(area) - intersection_area
iou = intersection_area / union_area
return iou
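# IoU[i, j] = intersection(box_i, box_j) / union(box_i, box_j); all N x N pairs
# are computed at once by broadcasting each quantity against its transpose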
def _nms_TF(boxes, scores, iou_threshold, score_threshold, per_class_suppression, M):
# boxes is (B,N,4), in order [center_w, center_h, width, height]
# scores is (B,N,C)
# output shapes: (B,M,4), (B,M,C), (B,M), (B,)
'''
Reference implementation of CoreML's NMS layer, built on
tf.image.non_max_suppression applied per batch element.
'''
B, N, C = scores.shape
iou_threshold = iou_threshold.astype(np.float32)
score_threshold = score_threshold.astype(np.float32)
# convert box coordinates to TF-style corners [y1, x1, y2, x2]
center_w, center_h, width, height = np.split(boxes, 4, axis=-1) # outs are all (B,N,1)
y1 = center_h - 0.5 * height
y2 = center_h + 0.5 * height
x1 = center_w - 0.5 * width
x2 = center_w + 0.5 * width
boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4)
out1 = np.zeros((B, M, 4))
out2 = np.zeros((B, M, C))
out3 = -1 * np.ones((B, M))
out4 = np.zeros((B,))
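# out1: selected boxes (B,M,4), out2: their scores (B,M,C),
# out3: original box indices padded with -1 (B,M), out4: per-batch survivor count (B,)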
for b in range(B):
box_coord_matrix = boxes_tf[b, :, :] # (N,4)
score_vector = np.max(scores[b, :, :], axis=-1) # (N,)
if not per_class_suppression:
# this is the simple case as TF directly supports it
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=box_coord_matrix.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=score_vector.shape)
ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
ids = sess.run(ids_g, feed_dict={box_coord_matrix_pl: box_coord_matrix, score_vector_pl: score_vector})
else:
# this is slightly complicated as TF does not directly support it
class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,)
sorted_score_ids = np.argsort(-score_vector)
box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0)
score_vector2 = np.take(score_vector, sorted_score_ids)
class_ids = np.take(class_ids, sorted_score_ids)
classes_seen = dict()
ids_intermediate = np.array([], dtype=np.int)
for n in range(N):
if class_ids[n] in classes_seen:
continue
c = class_ids[n]
classes_seen[c] = True
current_class_ids = np.where(class_ids == c)[0]
if len(current_class_ids) > 0:
feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0)
feed_in2 = np.take(score_vector2, current_class_ids)
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=feed_in1.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=feed_in2.shape)
cur_ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
cur_ids = sess.run(cur_ids_g, feed_dict={box_coord_matrix_pl: feed_in1,
score_vector_pl: feed_in2})
from_sort_ids = np.take(current_class_ids, cur_ids)
ids_intermediate = np.append(ids_intermediate, from_sort_ids)
ids_intermediate.sort()
ids = np.take(sorted_score_ids, ids_intermediate)
xx = len(ids)
if xx == 0:
ids = np.array([np.argmax(score_vector)])
xx = 1
if xx > M:
ids = ids[:M]
xx = len(ids)
out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0)
out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0)
out3[b, :xx] = ids
out4[b] = xx
return out1, out2, out3, out4
iou_threshold_percentile = [0, 30, 80, 100]
score_threshold_percentile_arr = [0, 40, 100]
N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out
number_of_test = 0
for N_M in N_M_pairs_to_test:
for B in [1, 5]:
for C in [1, 7]:
N, M = N_M
boxes = np.random.rand(B, N, 4)
scores = np.random.rand(B, N, C)
iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N)
iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape(iou_matrix.shape[0], -1)
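# drop the diagonal (self-IoU is always 1) so the percentile-based thresholds
# below reflect only overlaps between distinct boxes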
for per_class_suppression in [False, True]:
for iou_thresh in iou_threshold_percentile:
for score_thresh in score_threshold_percentile_arr:
for is_dynamic in [False, True]:
if score_thresh == 0:
score_threshold = np.min(scores) - 1
elif score_thresh == 100:
score_threshold = np.max(scores) + 1
else:
score_threshold = np.percentile(scores, score_thresh) + .01
if iou_thresh == 0:
iou_threshold = np.maximum(np.min(iou_matrix) - .01, 0.0)
else:
iou_threshold = np.percentile(iou_matrix, iou_thresh) + .01
number_of_test += 1
tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF(boxes, scores, iou_threshold,
score_threshold,
per_class_suppression,
M)
expected = dict()
expected['selected_boxes'] = tf_boxes
expected['selected_scores'] = tf_scores
expected['selected_box_ids'] = tf_ids
expected['number_of_boxes'] = tf_num_boxes
# define CoreML model
input_features = [('boxes', datatypes.Array(B,N,4)), ('scores', datatypes.Array(B,N,C))]
output_features = [('selected_boxes', None), ('selected_scores', None),
('selected_box_ids', None), ('number_of_boxes', None)]
input_names = ['boxes', 'scores']
if is_dynamic:
input_names.extend(['iou_threshold', 'score_threshold', 'max_boxes'])
input_features.append(('iou_threshold', datatypes.Array(1, )))
input_features.append(('score_threshold', datatypes.Array(1, )))
input_features.append(('max_boxes', datatypes.Array(1, )))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
input_dict = dict()
input_dict['boxes'] = boxes
input_dict['scores'] = scores
if is_dynamic:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
per_class_suppression=per_class_suppression)
input_dict['iou_threshold'] = iou_threshold * np.ones([1], dtype=np.float)
input_dict['score_threshold'] = score_threshold * np.ones([1], dtype=np.float)
input_dict['max_boxes'] = M * np.ones([1], dtype=np.float)
else:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
iou_threshold=iou_threshold, score_threshold=score_threshold,
max_boxes=M, per_class_suppression=per_class_suppression)
self._test_model(builder.spec, input_dict, expected, useCPUOnly=cpu_only)
def test_nms_gpu(self):
self.test_nms_cpu(cpu_only=False)
def test_rank_preserving_reshape(self):
input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)]
target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)]
output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_rank_preserving_reshape(
name='rank_preserving_reshape', input_name='data',
output_name='output', output_shape=target_shapes[i])
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_expand_dims(self):
input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)]
axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)]
output_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (1, 10, 1, 5), (10, 5, 1, 1), (1, 1, 1, 10)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_expand_dims(
name='expand_dims', input_name='data', output_name='output',
axes=axes[i]
)
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze(self):
input_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1),
(10, 5, 1, 1), (1,), (10, 5, 1, 1), (3, 1, 7)]
axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)]
output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 5), (3, 7)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', axes=list(axes[i]))
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze_all(self):
input_shapes = [
(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1), (10, 5, 1, 1), (1,),
(10, 5, 1, 1), (3, 1, 7), (3,), (5, 6)
]
for input_shape in input_shapes:
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', squeeze_all=True)
x = np.random.rand(*input_shape)
input = {'data': x}
reference = np.squeeze(x)
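# an all-singleton input squeezes to a 0-d array; the reference is promoted to
# shape (1,) below so the expected output keeps rank 1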
if not reference.shape:
reference = np.reshape(reference, (1,))
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(-1, builder._get_rank('output'))
def test_argmax_argmin(self):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
# (1+2+3+4+5) * 2^3 = 120 test cases
for input_shape in test_input_shapes:
for negative_axis in [False, True]:
for mode in ['argmax', 'argmin']:
for keep_dims in [True, False]:
for axis in np.arange(len(input_shape)):
if negative_axis:
axis_val = axis - len(input_shape)
else:
axis_val = axis
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
x = np.random.rand(*input_shape)
if mode == 'argmax':
builder.add_argmax('argmax', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmax(x, axis=axis_val)
else:
builder.add_argmin('argmin', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmin(x, axis=axis_val)
if keep_dims:
np_out = np.expand_dims(np_out, axis=axis_val)
elif len(input_shape) == 1:
np_out = np.expand_dims(np_out, axis=axis_val)
input = {'data': x}
expected = {'output': np_out}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(np_out.shape), builder._get_rank('output'))
def test_get_shape(self):
dims = [1, 2, 3, 4, 5]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_get_shape(name='get_shape_layer', input_name='data',
output_name='output')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': np.array(input_shape)}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(1, builder._get_rank('output'))
def test_load_constant_nd(self):
dims = [2, 3, 4, 5, 6]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_load_constant_nd('load_const_nd_layer', 'tmp',
constant_value=np.ones(input_shape),
shape=input_shape)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output',
mode='ADD')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': feed['data'] + 1}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(rank, builder._get_rank('output'))
@unittest.skip('fix')
def test_simple_array_alloc_scatter(self):
alloc_shape = [2, 3, 4]
value_shape = [1, 3, 4]
input_features = [('alloc_shape', datatypes.Array(len(alloc_shape))),
('value', datatypes.Array(*value_shape)),
('index', datatypes.Array(1))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic_layer', input_name='alloc_shape',
output_name='array', value=np.float(0.0))
# CoreML input order: container (array), indices, slices (value)
builder.add_scatter(name='scatter_layer',
input_names=['array', 'index', 'value'],
output_name='output')
value = np.random.rand(*value_shape).astype('float')
feed = {'alloc_shape': np.array(alloc_shape, dtype='float'),
'value': value,
'index': np.array([1], dtype='float')}
ref = np.zeros(alloc_shape)
ref[1, :, :] = value
expected = {'output': ref}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
def test_erf_activation_cpu(self, cpu_only=True):
input_features = [('data', datatypes.Array(10, 45))]
output_features = [('output', datatypes.Array(10, 45))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_erf(name='erf', input_name='data',
output_name='output')
x = np.random.rand(10, 45)
input = {'data': x}
expected = {
'output': np.asarray([math.erf(i) for i in
x.flatten().tolist()]).reshape(10, 45)
}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_erf_activation_gpu(self):
self.test_erf_activation_cpu(cpu_only=False)
def test_gelu_activation(self):
for mode in ['EXACT', 'TANH_APPROXIMATION', 'SIGMOID_APPROXIMATION']:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_gelu(name='gelu', input_name='data',
output_name='output', mode=mode)
x = np.random.rand(*shape)
input = {'data': x}
exact = np.asarray([0.5 * i * (1.0 + math.erf(i / math.sqrt(2)))
for i in x.flatten().tolist()]).reshape(*shape)
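# the exact GELU, 0.5 * x * (1 + erf(x / sqrt(2))), is used as the reference for
# all three modes; the approximation modes are expected to agree within the
# comparison tolerance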
expected = {'output': exact}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_lower_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_lower_triangular('tril', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.tril(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_lower_triangular_gpu(self):
self.test_lower_triangular_cpu(cpu_only=False)
def test_upper_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_upper_triangular('triu', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.triu(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_upper_triangular_gpu(self):
self.test_upper_triangular_cpu(cpu_only=False)
def test_where_broadcastable_cpu(self, cpu_only=True):
for _ in range(150):
rank_cond = np.random.randint(low=1, high=6)
rank_true = np.random.randint(low=1, high=6)
rank_false = np.random.randint(low=1, high=6)
rank_out = max(rank_cond, rank_true, rank_false)
shape_cond = np.random.randint(low=2, high=8, size=rank_cond)
shape_true = np.random.randint(low=2, high=8, size=rank_true)
shape_false = np.random.randint(low=2, high=8, size=rank_false)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_cond: dims.append(shape_cond[i])
if -i <= rank_true: dims.append(shape_true[i])
if -i <= rank_false: dims.append(shape_false[i])
dim = np.random.choice(dims)
if -i <= rank_cond: shape_cond[i] = np.random.choice([1, dim])
if -i <= rank_true: shape_true[i] = np.random.choice([1, dim])
if -i <= rank_false: shape_false[i] = np.random.choice([1, dim])
input_features = [
('cond', datatypes.Array(*shape_cond)),
('true', datatypes.Array(*shape_true)),
('false', datatypes.Array(*shape_false))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_where_broadcastable('if_broadcastable', input_names=['cond', 'true', 'false'],
output_name='output')
cond = np.random.choice([1.0, 0.0], size=shape_cond)
true = np.random.rand(*shape_true)
false = np.random.rand(*shape_false)
input = {'cond': cond, 'true': true, 'false': false}
expected = {'output': np.where(cond, true, false)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))
def test_where_broadcastable_gpu(self):
self.test_where_broadcastable_cpu(cpu_only=False)
def test_random_normal_like_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
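# for ranks 1-5, per-dimension sizes are chosen so the tensor holds on the order
# of a thousand or more elements -- enough samples for a stable comparison of
# the first two moments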
for rank in range(5, -1, -1):
if rank > 0:
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
else:  # rank == 0 iteration repurposed as an extra test: a large fixed tensor so more moments can be compared
shape = np.array([10, 10, 10, 10, 10000])
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_like(name='random_normal_like',
input_name='tensor',
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'tensor': np.random.rand(*shape)}
expected = {'output': np.random.normal(mean, stddev, shape)}
if rank > 0:
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
else:  # extra case: compare more moments on the large fixed tensor
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=6)
def test_random_normal_like_gpu(self):
self.test_random_normal_like_cpu(cpu_only=False)
def test_random_normal_static_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_static(name='random_normal_static',
output_name='tmp',
output_shape=list(shape),
mean=mean, stddev=stddev, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_normal_static_gpu(self):
self.test_random_normal_static_cpu(cpu_only=False)
def test_random_normal_dynamic_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_dynamic(name='random_normal_dynamic',
input_names=['shape'],
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'shape': np.array(shape, np.float)}
expected = {'output': np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_normal_dynamic_gpu(self):
self.test_random_normal_dynamic_cpu(cpu_only=False)
def test_random_uniform_like_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_like(name='random_uniform_like',
input_name='tensor',
output_name='output',
minval=minval, maxval=maxval, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_like_gpu(self):
self.test_random_uniform_like_cpu(cpu_only=False)
def test_random_uniform_static_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_static(name='random_uniform_static',
output_name='tmp',
output_shape=list(shape),
minval=minval, maxval=maxval, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_static_gpu(self):
self.test_random_uniform_static_cpu(cpu_only=False)
def test_random_uniform_dynamic_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_dynamic(name='random_uniform_dynamic',
input_names=['shape'],
output_name='output',
minval=minval, maxval=maxval, seed=seed)
inputs = {'shape': np.array(shape, np.float)}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_uniform_dynamic_gpu(self):
self.test_random_uniform_dynamic_cpu(cpu_only=False)
def test_random_bernoulli_like_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_like(name='random_bernoulli_like',
input_name='tensor',
output_name='output',
prob=prob, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_like_gpu(self):
self.test_random_bernoulli_like_cpu(cpu_only=False)
def test_random_bernoulli_static_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_static(name='random_bernoulli_static', output_name='tmp',
output_shape=list(shape), prob=prob, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_static_gpu(self):
self.test_random_bernoulli_static_cpu(cpu_only=False)
def test_random_bernoulli_dynamic_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_dynamic(name='random_bernoulli_dynamic',
input_names=['shape'],
output_name='output',
prob=prob, seed=seed)
inputs = {'shape': np.array(shape, np.float)}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_dynamic_gpu(self):
self.test_random_bernoulli_dynamic_cpu(cpu_only=False)
def test_categorical_distribution_cpu_shapes(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
num_samples = np.random.randint(low=10, high=1000)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name='data',
output_name='output',
num_samples=num_samples)
x = np.random.randint(low=0, high=20, size=shape).astype(np.float32)
inputs = {'data': x}
shape[-1] = num_samples
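# the sampled values themselves are irrelevant here: validate_shapes_only=True
# checks only that the last axis of the output becomes num_samples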
expected = {'output': np.random.rand(*shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True, validate_shapes_only=True)
def test_categorical_distribution_cpu_logits(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
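# strategy: draw num_samples (50,000) indices per distribution and check that the
# empirical class frequencies match the softmax of the logits to within ~1e-2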
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=True,
seed=42)
x = np.random.rand(*shape)
inputs = {input_name: x}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
logits = x.reshape(2, num_class)
probs = [softmax(logits[0]), softmax(logits[1])]
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
pre0 = np.bincount(np.array(pre0).astype(np.int), minlength=num_class)
pre1 = np.bincount(np.array(pre1).astype(np.int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_categorical_distribution_cpu_probs(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=False,
seed=42)
x = np.random.rand(*shape)
probs = x.reshape(2, num_class)
probs[0], probs[1] = softmax(probs[0]), softmax(probs[1])
inputs = {input_name: np.reshape(probs, shape)}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
probs = probs.reshape(2, num_class)
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
pre0 = np.bincount(np.array(pre0).astype(np.int), minlength=num_class)
pre1 = np.bincount(np.array(pre1).astype(np.int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_reverse_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
reverse_dim = [np.random.choice([True, False]) for _ in range(rank)]
axes = [i for i in range(rank) if reverse_dim[i]]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_reverse('reverse', 'data', 'output', reverse_dim)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.flip(x, axis=axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reverse_gpu(self):
self.test_reverse_cpu(cpu_only=False)
def test_matrix_band_part_cpu(self, cpu_only=True):
for rank in range(2, 6):
for _ in range(20):
num_lower = np.random.randint(low=-7, high=8)
num_upper = np.random.randint(low=-7, high=8)
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_matrix_band_part('matrix_band_part', 'data', 'output',
num_lower=num_lower, num_upper=num_upper)
x = np.random.rand(*shape)
input = {'data': x}
rows, cols = shape[-2:]
band = np.ones((rows, cols))
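# band[m, n] keeps the element iff it lies within num_lower sub-diagonals below
# and num_upper super-diagonals above the main diagonal; a negative bound keeps
# that entire triangle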
for m in range(rows):
for n in range(cols):
band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and (num_upper < 0 or (n - m) <= num_upper)
expected = {'output': np.multiply(band, x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_matrix_band_part_gpu(self):
self.test_matrix_band_part_cpu(cpu_only=False)
def test_flatten_to_2d_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank + 1):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_flatten_to_2d('flatten_to_2d', 'data', 'output', axis=axis)
x = np.random.rand(*shape)
np_axis = axis + rank if axis < 0 else axis
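# reference: collapse dims [0, axis) into rows and dims [axis, rank) into columns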
pl, pr = 1, 1
for i in range(0, np_axis):
pl *= shape[i]
for i in range(np_axis, len(shape)):
pr *= shape[i]
new_shape = [pl, pr]
ref = x.reshape(new_shape)
input = {'data': x}
expected = {'output': ref}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(2, builder._get_rank('output'))
def test_flatten_to_2d_gpu(self):
self.test_flatten_to_2d_cpu(cpu_only=False)
def test_reshape_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
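# build a random target shape with the same total element count: append divisors
# that keep the running product dividing n, then set dim 0 to the remaining factor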
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = n // np.prod(target_shape)
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_like(name='reshape_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_reshape_like_gpu(self):
self.test_reshape_like_cpu(cpu_only=False)
def test_reshape_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_static(name='reshape_static',
input_name='data',
output_name='output',
output_shape=target_shape)
data = np.random.rand(*input_shape)
inputs = {'data': data}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(len(target_shape), builder._get_rank('output'))
def test_reshape_static_gpu(self):
self.test_reshape_static_cpu(cpu_only=False)
def test_reshape_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_dynamic(name='reshape_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_reshape_dynamic_gpu(self):
self.test_reshape_dynamic_cpu(cpu_only=False)
def test_reduce_sum_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
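# express a random subset of axes as negative indices to exercise
# negative-axis handling in the reduce layer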
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sum('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.add.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
def test_reduce_sum_gpu(self):
self.test_reduce_sum_cpu(cpu_only=False)
def test_reduce_prod_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_prod('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.multiply.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
def test_reduce_prod_gpu(self):
self.test_reduce_prod_cpu(cpu_only=False)
def test_reduce_mean_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_mean('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.mean(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_mean_gpu(self):
self.test_reduce_mean_cpu(cpu_only=False)
def test_reduce_max_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_max('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.maximum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_max_gpu(self):
self.test_reduce_max_cpu(cpu_only=False)
def test_reduce_min_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_min('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.minimum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_min_gpu(self):
self.test_reduce_min_cpu(cpu_only=False)
def test_reduce_l2_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l2('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sqrt(np.sum(np.square(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_l2_gpu(self):
self.test_reduce_l2_cpu(cpu_only=False)
def test_reduce_l1_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l1('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.abs(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_l1_gpu(self):
self.test_reduce_l1_cpu(cpu_only=False)
def test_reduce_sumsquare_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sumsquare('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.square(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_sumsquare_gpu(self):
self.test_reduce_sumsquare_cpu(cpu_only=False)
def test_reduce_logsum_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsum('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(x, axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_logsum_gpu(self):
self.test_reduce_logsum_cpu(cpu_only=False)
def test_reduce_logsumexp_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsumexp('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(np.exp(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_logsumexp_gpu(self):
self.test_reduce_logsumexp_cpu(cpu_only=False)
def test_reverse_sequence_cpu(self, cpu_only=True):
for rank in range(2, 6):
for i in range(20):
input_shape = np.random.randint(low=2, high=6, size=rank)
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
while pos_batch_axis >= pos_seq_axis:
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
input_features = [('data', datatypes.Array(*input_shape)),
('lengths', datatypes.Array(input_shape[batch_axis]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reverse_sequence('reverse_sequence', ['data', 'lengths'],
'output', batch_axis=batch_axis,
seq_axis=seq_axis)
data = | np.random.rand(*input_shape) | numpy.random.rand |
import numpy
import SimpleITK as sitk
from radiomics_ria import base, cShape, deprecated
class RadiomicsShape(base.RadiomicsFeaturesBase):
def __init__(self, inputImage, inputMask, **kwargs):
assert inputMask.GetDimension() == 3, 'Shape features are only available in 3D. If 2D, use shape2D instead'
super(RadiomicsShape, self).__init__(inputImage, inputMask, **kwargs)
def _initSegmentBasedCalculation(self):
self.pixelSpacing = numpy.array(self.inputImage.GetSpacing()[::-1])
self.maskArray = (sitk.GetArrayFromImage(self.inputMask) == self.label)
self.imageArray = self._applyBinning(self.imageArray)
pass
def update_focus(self, focus):
mask = (self.imageArray == focus) & (self.maskArray)
mask = mask * 1
cpif = sitk.ConstantPadImageFilter()
padding = numpy.tile(1, 3)
try:
cpif.SetPadLowerBound(padding)
cpif.SetPadUpperBound(padding)
except TypeError:
# newer versions of SITK/python want a tuple or list
cpif.SetPadLowerBound(padding.tolist())
cpif.SetPadUpperBound(padding.tolist())
mask_itk = sitk.GetImageFromArray(mask)
mask_itk.SetSpacing(self.pixelSpacing)
mask = sitk.GetArrayFromImage(cpif.Execute(mask_itk))
self.labelledVoxelCoordinates = numpy.where(mask != 0)
Np = len(self.labelledVoxelCoordinates[0])
self.SurfaceArea, self.Volume, self.diameters = cShape.calculate_coefficients(mask, self.pixelSpacing)
# self.Volume = self.pixelSpacing[0] * self.pixelSpacing[1] * self.pixelSpacing[2] * Np
# if self.Volume == 0:
# self.Volume = 0.001
# if self.SurfaceArea == 0:
# self.SurfaceArea = 0.001
# Compute eigenvalues and -vectors
if self.Volume != 0:
coordinates = numpy.array(self.labelledVoxelCoordinates, dtype='int').transpose((1, 0))
physicalCoordinates = coordinates * self.pixelSpacing[None, :]
try:
physicalCoordinates -= numpy.mean(physicalCoordinates, axis=0) # Centered at 0
except Exception:
pass
physicalCoordinates /= numpy.sqrt(Np)
covariance = numpy.dot(physicalCoordinates.T.copy(), physicalCoordinates)
self.eigenValues = | numpy.linalg.eigvals(covariance) | numpy.linalg.eigvals |
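      # Illustrative note (added, not part of the original module): PyRadiomics builds
      # its axis-length features from the sorted eigenvalues of this covariance matrix,
      # e.g. (with eigenvalues sorted in ascending order)
      #   MajorAxisLength = 4 * sqrt(eigenValues[2])
      #   MinorAxisLength = 4 * sqrt(eigenValues[1])
      #   LeastAxisLength = 4 * sqrt(eigenValues[0])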
import numpy as np
import copy
import logging
from prettytable import PrettyTable
from utils_for_wp import np_filter
from wp_reid_dataset import WPReID
class DataBank(object):
def __init__(self, minframes):
data_dict = WPReID().get_dict()
self.images_dir_list = copy.deepcopy(data_dict['dir'])
self.gps_info = data_dict['gps']
self.trajectory_info = data_dict['trajectory']
self.test_info, self.probe_index, self.gallery_index, self.junk_index = self._preprocess(data_dict, minframes)
self.test_person_num = np.unique(self.test_info[:, 0]).size
self.test_image_num = np.sum(self.test_info[:, 4])
self.test_frames_len_min = np.min(self.test_info[:, 4])
self.test_frames_len_max = np.max(self.test_info[:, 4])
self.test_cam_num = np.unique(self.test_info[:, 1]).size
def _get_raw_data_info(self, data_dict):
print(data_dict['info'])
probe_info = np.asarray(data_dict['probe'], np.int64)
gallery_info = np.asarray(data_dict['gallery'], np.int64)
return probe_info, gallery_info
def _check(self, test_info, probe_info, gallery_info):
assert np.unique(test_info[:, 2]).size == test_info.shape[0]
if self.minframes is not None:
probe_info_new = []
probe_info_drop = []
for probe_i in range(probe_info.shape[0]):
data_info = probe_info[probe_i]
p_id = data_info[0]
p_cam_id = data_info[1]
g_info = np_filter(gallery_info, [p_id])
g_cam_id = np.unique(g_info[:, 1])
if np.setdiff1d(g_cam_id, np.asarray([p_cam_id])).size == 0: # there is no tracklet of this person in the gallery set with different camera id.
probe_info_drop.append(data_info)
else:
probe_info_new.append(data_info)
print('After drop videos less than: test {:2d} frames, check cam number'.format(self.minframes))
if len(probe_info_drop) > 0:
for drop_info in probe_info_drop:
print('No related gallery track with different camera id. Drop probe ' + str(drop_info))
probe_info = np.stack(probe_info_new)
test_info = self._merge_to_test(probe_info, gallery_info)
else:
print('All probe track have related gallery track with different camera id.')
assert np.sum(test_info[:, 3] - test_info[:, 2] - test_info[:, 4]) == 0
assert np.sum(probe_info[:, 3] - probe_info[:, 2] - probe_info[:, 4]) == 0
assert np.sum(gallery_info[:, 3] - gallery_info[:, 2] - gallery_info[:, 4]) == 0
test_id = np.unique(test_info[:, 0])
probe_id = np.unique(probe_info[:, 0])
gallery_id = np.unique(gallery_info[:, 0])
assert -1 not in set(test_id) # junk id set to be -1, it should have been removed.
assert np.setdiff1d(probe_id, gallery_id).size == 0
assert set(test_id) == set(probe_id).union(set(gallery_id))
for probe_i in range(probe_info.shape[0]):
data_info = probe_info[probe_i]
p_id = data_info[0]
p_cam_id = data_info[1]
g_info = np_filter(gallery_info, [p_id])
g_cam_id = np.unique(g_info[:, 1])
if not np.setdiff1d(g_cam_id, np.asarray([p_cam_id])).size > 0:
print('All gallery trackets have the same camera id with probe tracklet for ID: ' + str(p_id))
assert np.unique(test_info[:, 2]).size == np.unique(np.concatenate((probe_info, gallery_info))[:, 2]).size
assert np.unique(test_info[:, 2]).size == test_info.shape[0]
assert np.unique(probe_info[:, 2]).size == probe_info.shape[0]
assert np.unique(gallery_info[:, 2]).size == gallery_info.shape[0]
return test_info, probe_info
@staticmethod
def _get_index(rawset, subset):
index = []
for i_probe in range(subset.shape[0]):
begin = subset[i_probe, 2]
temp_index = np.where(rawset[:, 2] == begin)[0]
assert temp_index.size == 1
temp_index = temp_index[0]
index.append(temp_index)
index = np.asarray(index, dtype=np.int64)
return index
def _merge_to_test(self, probe_info, gallery_info):
begin_idx_box = gallery_info[:, 2].tolist()
temp_info = []
for probe_i in range(probe_info.shape[0]):
probe_i_info = probe_info[probe_i]
if probe_i_info[2] not in begin_idx_box:
temp_info.append(probe_i_info)
begin_idx_box.append(probe_i_info[2])
if len(temp_info) == 0:
return gallery_info.copy()
else:
temp_info = np.asarray(temp_info, np.int64)
test_info = np.concatenate((temp_info, gallery_info), axis=0)
return test_info
def _preprocess(self, data_dict, minframes):
probe_info, gallery_info = self._get_raw_data_info(data_dict)
test_info = self._merge_to_test(probe_info, gallery_info)
if test_info[:, 4].max() > 1:
self.is_image_dataset = False
self.minframes = minframes
else:
self.is_image_dataset = True
self.minframes = None
self._print_info(test_info, probe_info, gallery_info, 'Raw')
if self.minframes is not None:
test_info = test_info[test_info[:, 4] >= self.minframes]
probe_info = probe_info[probe_info[:, 4] >= self.minframes]
gallery_info = gallery_info[gallery_info[:, 4] >= self.minframes]
test_info, probe_info = self._check(test_info, probe_info, gallery_info)
if self.minframes is not None:
self._print_info(test_info, probe_info, gallery_info, 'After Drop')
probe_idx = DataBank._get_index(test_info, probe_info)
gallery_idx = DataBank._get_index(test_info, gallery_info)
junk_idx = np.where(test_info[:, 0] == -1)[0]
return test_info, probe_idx, gallery_idx, junk_idx
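    # Note (added): each info row appears to be laid out as
    #   [person_id, cam_id, begin_frame_idx, end_frame_idx, num_frames],
    # which is why the checks above assert col3 - col2 - col4 == 0 and why junk
    # tracks are flagged with person_id == -1.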
def _print_info(self, test_info, probe_info, gallery_info, extra_info: str):
GalleryInds = np.unique(gallery_info[:, 0])
probeInds = np.unique(probe_info[:, 0])
print('Gallery ID diff Probe ID: %s' % np.setdiff1d(GalleryInds, probeInds))
table = PrettyTable([extra_info, 'Test', 'Probe', 'Gallery'])
table.add_row(['#ID',
np.unique(test_info[:, 0]).size,
np.unique(probe_info[:, 0]).size,
np.unique(gallery_info[:, 0]).size])
table.add_row(['#Track',
test_info.shape[0],
probe_info.shape[0],
gallery_info.shape[0]])
table.add_row(['#Image',
np.sum(test_info[:, 4]),
np.sum(probe_info[:, 4]),
np.sum(gallery_info[:, 4])])
table.add_row(['#Cam',
np.unique(test_info[:, 1]).size,
np.unique(probe_info[:, 1]).size,
np.unique(gallery_info[:, 1]).size])
table.add_row(['MaxLen',
np.max(test_info[:, 4]),
np.max(probe_info[:, 4]),
np.max(gallery_info[:, 4])])
table.add_row(['MinLen',
np.min(test_info[:, 4]),
np.min(probe_info[:, 4]),
| np.min(gallery_info[:, 4]) | numpy.min |
"""
Plot gas, tar, char, water, and water vapor from primary and secondary
reactions based on Blasi / Chan / Liden kinetic schemes for biomass pyrolysis.
This combined scheme is referred to as the Cpc 2016 kinetic scheme. A similar
scheme but without water reaction was proposed in Papadikis 2010 paper.
References:
Blasi, 1993. Combustion Science and Technology, 90, pp 315–340.
<NAME>, Krieger, 1985. Fuel, 64, pp 1505–1513.
<NAME>, Scott, 1988. Chem. Eng. Comm., 65, pp 207-221.
<NAME>, 2010. Fuel Processing Technology, 91, pp 68–79.
"""
import numpy as np
import matplotlib.pyplot as py
# Parameters
# ------------------------------------------------------------------------------
T = 773 # temperature for rate constants, K
mc = 0.20 # moisture content as weight fraction, (-)
dt = 0.01 # time step, delta t
tmax = 25 # max time, s
t = np.linspace(0, tmax, num=int(tmax/dt))    # time vector (num must be an int)
nt = len(t) # total number of time steps
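# Illustrative sketch (added, not part of the original script): every rate constant
# in the scheme below follows the Arrhenius form K = A*exp(-E/(R*T)), with A in 1/s,
# E in kJ/mol and R = 0.008314 kJ/(mol*K).
def arrhenius(A, E, T, R=0.008314):
    """Arrhenius rate constant (1/s) for pre-factor A (1/s) and activation energy E (kJ/mol)."""
    return A * np.exp(-E / (R * T))
# e.g. the wood -> gas constant at T = 773 K: arrhenius(1.3e8, 140, 773) ~ 0.045 1/s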
# Function for Cpc 2016 Kinetic Scheme
# ------------------------------------------------------------------------------
def cpc(wood, gas, tar, char, water, vapor, T, dt, s=1):
"""
Primary and secondary kinetic reactions for Cpc 2016 scheme based on
Blasi 1993, Chan 1985, and Liden 1988 kinetics. Same scheme as presented in
Papadikis 2010 but with the addition of the water reaction.
Parameters
----------
wood = wood concentration, kg/m^3
gas = gas concentation, kg/m^3
tar = tar concentation, kg/m^3
char = char concentation, kg/m^3
water = water concentration based on moisture content, kg/m^3
vapor = water vapor concentration, kg/m^3
T = temperature, K
dt = time step, s
s = 1 primary reactions only, 2 primary and secondary reactions
Returns
-------
nwood = new wood concentration, kg/m^3
ngas = new gas concentration, kg/m^3
ntar = new tar concentration, kg/m^3
nchar = new char concentration, kg/m^3
nwater = new water concentration, kg/m^3
nvapor = new water vapor concentration, kg/m^3
"""
# A = pre-factor (1/s) and E = activation energy (kJ/mol)
A1 = 1.3e8; E1 = 140 # wood -> gas from Chan 1985
A2 = 2.0e8; E2 = 133 # wood -> tar from Chan 1985
A3 = 1.08e7; E3 = 121 # wood -> char from Chan 1985
A4 = 4.28e6; E4 = 107.5 # tar -> gas from Liden 1988
A5 = 1.0e6; E5 = 108 # tar -> char from Blasi 1993
Aw = 5.13e6; Ew = 87.9 # water -> water vapor from Chan 1985
R = 0.008314 # universal gas constant, kJ/mol*K
# reaction rate constant for each reaction, 1/s
K1 = A1 * np.exp(-E1 / (R * T)) # wood -> gas
K2 = A2 * | np.exp(-E2 / (R * T)) | numpy.exp |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
ab = np.array(np.loadtxt('ab_plot.txt'))
c = np.array(np.loadtxt('c_plot.txt'))
fu= np.array(np.loadtxt('fu_plot.txt'))
#fu= np.array(np.loadtxt('fu50e3_plot.txt'))
fo = np.array(np.loadtxt('fo_plot.txt'))
x_all_1 = np.insert(c.T[0], [0],ab.T[0])#, ra_cepfo.degree)
x_all_2 = np.insert(x_all_1, [0], fo.T[0])
x_all = np.insert(x_all_2, [0], fu.T[0])
y_all_1 = np.insert(c.T[1], [0],ab.T[1])#, ra_cepfo.degree)
y_all_2 = np.insert(y_all_1, [0], fo.T[1])
y_all = np.insert(y_all_2, [0], fu.T[1])
z_all_1 = | np.insert(c.T[2], [0],ab.T[2]) | numpy.insert |
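# Note (added): the chained np.insert calls above are equivalent to a single
# concatenation in the same order, e.g.
#   x_all = np.concatenate([fu.T[0], fo.T[0], ab.T[0], c.T[0]])
#   y_all = np.concatenate([fu.T[1], fo.T[1], ab.T[1], c.T[1]])
# and likewise for the z column.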
'''
Common methods for beamtools package
Created Fri May 12
@author: cpkmanchee
'''
import inspect

import numpy as np
from beamtools.file_formats import file_formats
import dill
__all__ = ['saveObj','loadObj','normalize','rmbg','gaussian','sech2','lorentzian',
'gaussian2D','rk4','moments','d4sigma','roi','alias_dict']
class Func:
def __init__(self, value=None, index=None):
self.val = value
self.ind = index
def at(self,x):
return np.interp(x, self.ind, self.val)
def diff(self):
self.gradient = np.gradient(self.val)/np.gradient(self.ind)
def diff_at(self,x):
return np.interp(x,self.ind,self.gradient)
class FitResult():
def __init__(self, ffunc, ftype, popt, pcov=0, indep_var='time', bgform='constant'):
self.ffunc = ffunc
self.ftype = ftype
self.popt = popt
self.pcov = pcov
self.iv = indep_var
self.bgform = bgform
def subs(self,x):
return self.ffunc(x,*self.popt)
def get_args(self):
return inspect.getargspec(self.ffunc)
class DataObj(dict):
def __init__(self,d):
self.__dict__ = d
def fields(self):
return self.__dict__.keys()
def properties(self):
[print(k,v) for k,v in file_formats[self.filetype].items()]
return
def saveObj(obj, filename):
with open(filename, 'wb') as output:
dill.dump(obj, output, -1)
def loadObj(filename):
with open(filename, 'rb') as input:
obj = dill.load(input)
return obj
def normalize(f, offset=0, method='normal'):
'''Normalize array of data. Optional offset.
'''
norm = (f-f.min())/(f.max()-f.min()) + offset
if method.lower() in ['area']:
norm = norm/np.sum(norm)
return norm
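# Illustrative usage (added): for y = np.array([1.0, 3.0, 5.0]),
#   normalize(y)                      -> array([0. , 0.5, 1. ])
#   normalize(y, method='area').sum() -> 1.0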
def rmbg(data, fit=None, form='constant'):
'''Removes background from data
data = [x,y]
if sending poly fit params: p[0]*x**(N-1) + ... + p[N-1]
return --> y - background
'''
    if fit is None:
        # estimate background from given form; unpack the data first
        x, y = data
if form.lower() in alias_dict['constant']:
p = min(y)
elif form.lower() in alias_dict['linear']:
p = np.linalg.solve([[1,x[0]],[1,x[-1]]], [y[0],y[-1]])
p = np.flipud(p)
elif form.lower() in alias_dict['quadratic']:
index = | np.argmin(y) | numpy.argmin |
import os.path
import pickle
import shutil
import tempfile
import unittest
import pcl
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
_data = [(i, 2 * i, 3 * i + 0.2) for i in range(5)]
_DATA = """0.0, 0.0, 0.2;
1.0, 2.0, 3.2;
2.0, 4.0, 6.2;
3.0, 6.0, 9.2;
4.0, 8.0, 12.2"""
class TestListIO(unittest.TestCase):
def setUp(self):
self.p = pcl.PointCloud(_data)
def testFromList(self):
for i, d in enumerate(_data):
pt = self.p[i]
assert np.allclose(pt, _data[i])
def testToList(self):
l = self.p.to_list()
assert np.allclose(l, _data)
class TestNumpyIO(unittest.TestCase):
def setUp(self):
self.a = np.array(np.mat(_DATA, dtype=np.float32))
self.p = pcl.PointCloud(self.a)
def testFromNumpy(self):
for i, d in enumerate(_data):
pt = self.p[i]
assert np.allclose(pt, _data[i])
def testToNumpy(self):
a = self.p.to_array()
self.assertTrue(np.alltrue(a == self.a))
def test_asarray(self):
p = pcl.PointCloud(self.p) # copy
# old0 = p[0]
a = np.asarray(p) # view
a[:] += 6
assert_array_almost_equal(p[0], a[0])
# Regression test: deleting a second view would previously
# reset the view count to zero.
b = np.asarray(p)
del b
self.assertRaises(ValueError, p.resize, 2 * len(p))
def test_pickle(self):
"""Test pickle support."""
# In this testcase because picking reduces to pickling NumPy arrays.
s = pickle.dumps(self.p)
p = pickle.loads(s)
self.assertTrue(np.all(self.a == p.to_array()))
# copy the output of seg
SEGDATA = """ 0.352222 -0.151883 2;
-0.106395 -0.397406 1;
-0.473106 0.292602 1;
-0.731898 0.667105 -2;
0.441304 -0.734766 1;
0.854581 -0.0361733 1;
-0.4607 -0.277468 4;
-0.916762 0.183749 1;
0.968809 0.512055 1;
-0.998983 -0.463871 1;
0.691785 0.716053 1;
0.525135 -0.523004 1;
0.439387 0.56706 1;
0.905417 -0.579787 1;
0.898706 -0.504929 1"""
SEGINLIERS = """-0.106395 -0.397406 1;
-0.473106 0.292602 1;
0.441304 -0.734766 1;
0.854581 -0.0361733 1;
-0.916762 0.183749 1;
0.968809 0.512055 1;
-0.998983 -0.463871 1;
0.691785 0.716053 1;
0.525135 -0.523004 1;
0.439387 0.56706 1;
0.905417 -0.579787 1;
0.898706 -0.504929 1"""
SEGINLIERSIDX = [1, 2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14]
SEGCOEFF = [0.0, 0.0, 1.0, -1.0]
class TestSegmentPlane(unittest.TestCase):
def setUp(self):
self.a = np.array(np.mat(SEGDATA, dtype=np.float32))
self.p = pcl.PointCloud()
self.p.from_array(self.a)
def testLoad(self):
npts = self.a.shape[0]
self.assertEqual(npts, self.p.size)
self.assertEqual(npts, self.p.width)
self.assertEqual(1, self.p.height)
def testSegmentPlaneObject(self):
seg = self.p.make_segmenter()
seg.set_optimize_coefficients(True)
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_distance_threshold(0.01)
indices, model = seg.segment()
self.assertListEqual(indices, SEGINLIERSIDX)
self.assertListEqual(model, SEGCOEFF)
def test_pcd_read():
TMPL = """# .PCD v.7 - Point Cloud Data file format
VERSION .7
FIELDS x y z
SIZE 4 4 4
TYPE F F F
COUNT 1 1 1
WIDTH %(npts)d
HEIGHT 1
VIEWPOINT 0.1 0 0.5 0 1 0 0
POINTS %(npts)d
DATA ascii
%(data)s"""
a = np.array(np.mat(SEGDATA, dtype=np.float32))
npts = a.shape[0]
tmp_file = tempfile.mkstemp(suffix='.pcd')[1]
with open(tmp_file, "w") as f:
f.write(TMPL % {"npts": npts, "data": SEGDATA.replace(";", "")})
p = pcl.load(tmp_file)
assert p.width == npts
assert p.height == 1
for i, row in enumerate(a):
pt = np.array(p[i])
ssd = sum((row - pt) ** 2)
assert ssd < 1e-6
assert_array_equal(p.sensor_orientation,
np.array([0, 1, 0, 0], dtype=np.float32))
assert_array_equal(p.sensor_origin,
np.array([.1, 0, .5, 0], dtype=np.float32))
def test_copy():
a = np.random.randn(100, 3).astype(np.float32)
p1 = pcl.PointCloud(a)
p2 = pcl.PointCloud(p1)
assert_array_equal(p2.to_array(), a)
SEGCYLMOD = [0.0552167, 0.0547035, 0.757707,
-0.0270852, -4.41026, -2.88995, 0.0387603]
SEGCYLIN = 11462
class TestSegmentCylinder(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "table_scene_mug_stereo_textured_noplane.pcd")
def testSegment(self):
seg = self.p.make_segmenter_normals(50)
seg.set_optimize_coefficients(True)
seg.set_model_type(pcl.SACMODEL_CYLINDER)
seg.set_method_type(pcl.SAC_RANSAC)
seg.set_normal_distance_weight(0.1)
seg.set_max_iterations(10000)
seg.set_distance_threshold(0.05)
seg.set_radius_limits(0, 0.1)
indices, model = seg.segment()
self.assertEqual(len(indices), SEGCYLIN)
# npexp = np.array(SEGCYLMOD)
# npmod = np.array(model)
# ssd = sum((npexp - npmod) ** 2)
# self.assertLess(ssd, 1e-6)
class TestSave(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "table_scene_mug_stereo_textured_noplane.pcd")
self.tmpdir = tempfile.mkdtemp(suffix='pcl-test')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testSave(self):
for ext in ["pcd", "ply"]:
d = os.path.join(self.tmpdir, "foo." + ext)
pcl.save(self.p, d)
p = pcl.load(d)
self.assertEqual(self.p.size, p.size)
class TestFilter(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "flydracyl.pcd")
def testFilter(self):
mls = self.p.make_moving_least_squares()
mls.set_search_radius(0.5)
mls.set_polynomial_order(2)
mls.set_polynomial_fit(True)
f = mls.process()
# new instance is returned
self.assertNotEqual(self.p, f)
# mls filter retains the same number of points
self.assertEqual(self.p.size, f.size)
class TestExtract(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "flydracyl.pcd")
def testExtractPos(self):
p2 = self.p.extract([1, 2, 3], False)
# new instance is returned
self.assertNotEqual(self.p, p2)
self.assertEqual(p2.size, 3)
def testExtractNeg(self):
p2 = self.p.extract([1, 2, 3], True)
self.assertNotEqual(self.p, p2)
self.assertEqual(p2.size, self.p.size - 3)
class TestExceptions(unittest.TestCase):
def setUp(self):
self.p = pcl.PointCloud(np.arange(9, dtype=np.float32).reshape(3, 3))
def testIndex(self):
self.assertRaises(IndexError, self.p.__getitem__, self.p.size)
self.assertRaises(Exception, self.p.get_point, self.p.size, 1)
def testResize(self):
# XXX MemoryError isn't actually the prettiest exception for a
# negative argument. Don't hesitate to change this test to reflect
# better exceptions.
self.assertRaises(MemoryError, self.p.resize, -1)
class TestSegmenterNormal(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "table_scene_mug_stereo_textured_noplane.pcd")
def _tpos(self, c):
self.assertEqual(c.size, 22745)
self.assertEqual(c.width, 22745)
self.assertEqual(c.height, 1)
self.assertTrue(c.is_dense)
def _tneg(self, c):
self.assertEqual(c.size, 1015)
self.assertEqual(c.width, 1015)
self.assertEqual(c.height, 1)
self.assertTrue(c.is_dense)
def testFilterPos(self):
fil = self.p.make_statistical_outlier_filter()
fil.set_mean_k(50)
self.assertEqual(fil.mean_k, 50)
fil.set_std_dev_mul_thresh(1.0)
self.assertEqual(fil.stddev_mul_thresh, 1.0)
c = fil.filter()
self._tpos(c)
def testFilterNeg(self):
fil = self.p.make_statistical_outlier_filter()
fil.set_mean_k(50)
fil.set_std_dev_mul_thresh(1.0)
self.assertEqual(fil.negative, False)
fil.set_negative(True)
self.assertEqual(fil.negative, True)
c = fil.filter()
self._tneg(c)
def testFilterPosNeg(self):
fil = self.p.make_statistical_outlier_filter()
fil.set_mean_k(50)
fil.set_std_dev_mul_thresh(1.0)
c = fil.filter()
self._tpos(c)
fil.set_negative(True)
c = fil.filter()
self._tneg(c)
class TestVoxelGridFilter(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests" + os.path.sep + "table_scene_mug_stereo_textured_noplane.pcd")
def testFilter(self):
fil = self.p.make_voxel_grid_filter()
fil.set_leaf_size(0.01, 0.01, 0.01)
c = fil.filter()
self.assertTrue(c.size < self.p.size)
self.assertEqual(c.size, 719)
class TestPassthroughFilter(unittest.TestCase):
def setUp(self):
self.p = pcl.load("tests/table_scene_mug_stereo_textured_noplane.pcd")
def testFilter(self):
fil = self.p.make_passthrough_filter()
fil.set_filter_field_name("z")
fil.set_filter_limits(0, 0.75)
c = fil.filter()
self.assertTrue(c.size < self.p.size)
self.assertEqual(c.size, 7751)
def testFilterBoth(self):
total = self.p.size
fil = self.p.make_passthrough_filter()
fil.set_filter_field_name("z")
fil.set_filter_limits(0, 0.75)
front = fil.filter().size
fil.set_filter_limits(0.75, 100)
back = fil.filter().size
self.assertEqual(total, front + back)
class TestKdTree(unittest.TestCase):
def setUp(self):
rng = | np.random.RandomState(42) | numpy.random.RandomState |
#Only run if CountyHospitalCombined.csv has not been made yet; the script will take a while to gather geolocation data because of API rate limits.
#This python script combines 2 of the data sets (Hospitals.csv and co-est2019-alldata.csv)
# into one county wise dataset (CountyHospitalCombined.csv)
#<NAME>
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
sys.path.append('..')
import lib
Hospitals = lib.loadHospitals()
USCountyPopData = lib.loadCountyPopEst()
#the most important information in Hospitals is COUNTY and BEDS
# this can be used to create a dataset of total hospital beds per county
# other interesting columns is TRAUMA and HELIPAD
hospitalCounties = Hospitals['COUNTY']#Only 1604 counties present
hospitalStates = Hospitals['STATE']#we also need states to specify which county
hospitalBeds = Hospitals['BEDS']#NaNs are stored as -999 and need to be replaced; 662 NaNs
hospitalTrauma = Hospitals['TRAUMA']#many changes need to be made before data is usable
#^actually seems like most entries are not there so this may be useless information
hospitalHelipad = Hospitals['HELIPAD']#ready off the bat
hospitalOwner = Hospitals['OWNER']#GOVERNMENT, NON-PROFIT, PROPRIETARY
#print(hospitalBeds.value_counts())
#plt.hist(HospitalCounties)
#plt.hist(HospitalBeds)
#plt.hist(HospitalTrauma)
#plt.hist(HospitalHelipad)
#create a list of all counties
#start with USCountyPopData[CTYNAME] but filter out the numbers for the states
countyMask = (USCountyPopData['SUMLEV']==50).to_numpy()#filter to county-level rows (SUMLEV == 50)
countiesNames = USCountyPopData['CTYNAME'].iloc[countyMask]#one issue here is that some states have counties with the same names
statesNames = USCountyPopData['STNAME'].iloc[countyMask]#so lets store state names also
countiesPops = USCountyPopData['POPESTIMATE2019'].iloc[countyMask]
hospitalStatesExp = []
for stateCode in hospitalStates:
hospitalStatesExp.append(lib.stateDict[stateCode])
hospitalStatesExp = pd.Series(hospitalStatesExp)
#have to remove Palau, American Samoa, Northern Mariana Islands, US Virgin Islands, Guam, and Puerto Rico
hospitalStatesExpMask = (~hospitalStatesExp.isin(['Palau', 'American Samoa', 'Northern Mariana Islands', 'U.S. Virgin Islands', 'Guam', 'Puerto Rico'])).to_numpy()
hospitalStatesExp = hospitalStatesExp.iloc[hospitalStatesExpMask]
hospitalCounties = hospitalCounties.iloc[hospitalStatesExpMask]
hospitalBeds = pd.Series(hospitalBeds.iloc[hospitalStatesExpMask], dtype=float)  # np.float is deprecated; use builtin float
#hospitalTrauma = hospitalTrauma.iloc[hospitalStatesExpMask] decided info was not worth effort
hospitalHelipad = hospitalHelipad.iloc[hospitalStatesExpMask]
hospitalOwner = hospitalOwner.iloc[hospitalStatesExpMask]
#create hospitalOwnerInts to NON-PROFIT, PROPRIETARY, and GOVERNMENT per county
nonProfHOint = np.zeros(hospitalOwner.shape, dtype=np.intc)
privateHOint = np.zeros(hospitalOwner.shape, dtype=np.intc)
governmHOint = np.zeros(hospitalOwner.shape, dtype=np.intc)
for n, owner in enumerate(hospitalOwner):
if('NON-PROFIT' in owner):
nonProfHOint[n] += 1
elif('PROPRIETARY' in owner):
privateHOint[n] += 1
elif('GOVERNMENT' in owner):
governmHOint[n] += 1
nonProfHOint = pd.Series(nonProfHOint)
privateHOint = pd.Series(privateHOint)
governmHOint = pd.Series(governmHOint)
#turn -999 to NaNs in HospitalBeds
hospitalBeds = hospitalBeds.where(hospitalBeds >= 0)
#turn Ys to 1 and Ns to 0
hospitalHelipadInt = np.zeros(hospitalHelipad.shape, dtype=np.intc)
for (n, hosHeli) in enumerate(hospitalHelipad):
if (hosHeli=='Y'):
hospitalHelipadInt[n] += 1
hospitalHelipadInt = pd.Series(hospitalHelipadInt)
people = np.sum(countiesPops.to_numpy())
beds = np.nansum(hospitalBeds.to_numpy())
helipads = np.sum(hospitalHelipadInt.to_numpy())
#interesting totals
#remove the County/Borough/Municipality/Census Area/Parish suffix from countiesNames and capitalize
countiesHolder=np.array(countiesNames, dtype=str)
for n, county in enumerate(countiesNames):
if(county.endswith(' County')):
county = county[:-7]
if(county.endswith(' Borough')):
county = county[:-8]
if(county.endswith(' Municipality')):
county = county[:-13]
if(county.endswith(' Census Area')):
county = county[:-12]
if(county.endswith(' Parish')):
county = county[:-7]
countiesHolder[n] = county.upper()
county=countiesHolder[204]
state=statesNames[204]
#^LA for testing
#Abstract hospitalOwner to NON-PROFIT, PROPRIETARY, and GOVERNMENT per county
nonProfHperCounty = np.zeros(countiesNames.shape, dtype=np.intc)
privateHperCounty = np.zeros(countiesNames.shape, dtype=np.intc)
governmHperCounty = np.zeros(countiesNames.shape, dtype=np.intc)
#empty lists to be filled
bedsPerCounty = np.zeros(countiesNames.shape, dtype=np.double)
helipadsPerCounty = np.zeros(countiesNames.shape, dtype=np.intc)
#Loop for fetching county wise data from Hospitals
#O(n^2)
for n, (county, state) in enumerate(zip(countiesHolder, statesNames.to_numpy())):
countiesMask=(hospitalCounties==county).to_numpy()
statesMask=(hospitalStatesExp==state).to_numpy()
stateAndCountyMask = countiesMask&statesMask
bedsPerCounty[n] += np.nansum(hospitalBeds.iloc[stateAndCountyMask].to_numpy())
helipadsPerCounty[n] += np.nansum(hospitalHelipadInt.iloc[stateAndCountyMask].to_numpy())
nonProfHperCounty[n] += np.sum(nonProfHOint.iloc[stateAndCountyMask].to_numpy())
privateHperCounty[n] += np.sum(privateHOint.iloc[stateAndCountyMask].to_numpy())
governmHperCounty[n] += np.sum(governmHOint.iloc[stateAndCountyMask].to_numpy())
#bedsPerCounty = pd.Series(bedsPerCounty)
#helipadsPerCounty = pd.Series(helipadsPerCounty)
#nonProfHperCounty = pd.Series(nonProfHperCounty)
#privateHperCounty = pd.Series(privateHperCounty)
#governmHperCounty = pd.Series(governmHperCounty)
#countiesNames_partial = countiesNames.iloc[:1]
#statesNames_partial = statesNames.iloc[:1]
#this takes more than an hour because of api limits
countiesLocs = lib.countyState_to_LatLong(countiesNames, statesNames)
countiesLat = np.zeros(countiesNames.shape, dtype=float)
countiesLon = | np.zeros(countiesNames.shape, dtype=float) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 22:17:33 2021
@author: sjfre
"""
#%% Import & Data Setup - MUST RUN
import numpy as np
import matplotlib.pyplot as plt
from numpy import genfromtxt
import pandas as pd
from datetime import date
from datetime import datetime as dt
from scipy import interpolate
path='/Users/sjfre/Documents/DataAnalysis/Class Files/Lab 1/'
fname='weekly_in_situ_co2_mlo.csv'
data=genfromtxt(path+fname,delimiter=',',skip_header=44)
df=pd.read_csv(path+fname, sep=',',header=44)
#%% Figure 1: Year Data + Linear & Quadratic Fit
CO2=[]
TIME=[]
for i in df.values[:,0]:
a=date.fromisoformat(i)
day2=dt(a.year,a.month,a.day)
day0=dt(a.year,1,1)
days=day2-day0
Fraction_year=(days.days)/365.
TIME.append(a.year + Fraction_year)
for i in df.values[:,1]:
CO2.append(i)
plt.plot(TIME, CO2, '-b')
fit = np.polyfit(TIME, CO2, 2, full=True)
lfit = np.polyfit(TIME, CO2, 1, full=True)
pred = np.poly1d(fit[0])
lin = np.poly1d(lfit[0])
plt.plot(TIME, pred(TIME),'-r')
plt.plot(TIME, lin(TIME),'-g')
plt.title("Year Data With Fit")
plt.legend(['Year', 'Quadratic Fit', 'Linear Fit'])
#%% Figure 2: Year Data De-Trended
plt.figure()
co2D = CO2 - pred(TIME)
plt.plot(TIME, co2D, '-r')
plt.title("Year Data, De-Trended")
#%% Figure 3: Fractional Year Cycles + Mean
plt.figure()
TIME_1=[]
for i in df.values[:,0]:
a=date.fromisoformat(i)
day2=dt(a.year,a.month,a.day)
day0=dt(a.year,1,1)
days=day2-day0
Fraction_year=(days.days)/365.
TIME_1.append(Fraction_year)
plt.plot(TIME_1, co2D, 'or')
bin_size = 0.02
season_bin = np.arange(0,1.01, bin_size)
mean = []
TIME_1 = np.asarray(TIME_1)  # convert to an array so the vectorized comparisons below work
for i in season_bin:
    res = np.where(np.logical_and(TIME_1 >= i-bin_size/2., TIME_1 <= i+bin_size/2.))
mean.append(np.mean(co2D[res]))
plt.plot(season_bin, mean, '-g')
plt.title("Fractional Year Cycles")
plt.legend(['Fractional Year Values', 'Mean'])
#%% Figure 4: Mean Mapped to Original Years
plt.figure()
inter = interpolate.interp1d(season_bin, mean)
season = inter(TIME_1)
plt.plot(TIME, season, '-g')
plt.title("Mean Mapped to Original Years")
#%% Figure 5: Year Data Offset from Mean
plt.figure()
plt.plot(TIME, co2D - season, 'og')
plt.title("Year Data Relative to Mean")
#%% Stationary Tests
from statsmodels.tsa.stattools import adfuller
print('\nADF stationary test for the original data')
result = adfuller(CO2)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[4].items():
print('\t%s: %.3f' % (key, value))
# With a p-value of 0.9968 the ADF test fails to reject the null hypothesis of a
# unit root, so the original series is treated as non-stationary.
#%% Histograms
plt.figure()
import seaborn as sns
sns.histplot(data=CO2, kde=True,bins=50)
plt.xlabel('CO2 Concentration (ppb)',size=25)
plt.ylabel('Density',size=25)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title("CO2 Original",size=20)
plt.figure()
sns.histplot(data=co2D, kde=True,bins=50)
plt.xlabel('CO2 Concentration (ppb)',size=25)
plt.ylabel('Density',size=25)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.title("CO2 De-Trend and De-Season",size=20)
#%% Gaussian Noise
from random import gauss, seed
seed(1)
y1 = []
for i in range(1000):
value = gauss(0,1)
y1.append(value)
y2 = []
for i in range(10000):
value = gauss(0,1)
y2.append(value)
plt.figure()
sns.histplot(data=y1, kde=True,bins=100)
plt.title("1000 Sample")
plt.figure()
sns.histplot(data=y2, kde=True,bins=100)
plt.title("10000 Sample")
#%% FFT of Noise
from scipy.fft import fft
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
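# Note (added): smooth() is a plain boxcar average; with mode='same' the first and
# last ~box_pts/2 samples are attenuated because the window runs off the ends,
# e.g. smooth(np.ones(100), 10)[:5] is below 1.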
plt.figure()
y2_g = fft(y2)
N_g = len(y2)
T = 1./52
x2_g = np.linspace(0., 1./(2.*T), N_g//2)
plt.loglog(x2_g[1:N_g//2], 2.0/N_g * np.abs(y2_g[1:N_g//2]), '-g')
plt.loglog(x2_g[1:N_g//2], smooth(2.0/N_g * np.abs(y2_g[1:N_g//2]),10), '-r')
#%% Smoothing CO2 Data
plt.figure()
co2D_s = smooth(co2D - season, 110)
plt.plot(TIME, co2D - season, '-y')
plt.plot(TIME, co2D_s, '-r')
#%% FFT of CO2
plt.figure()
yf = fft(co2D - season)
yf_s = fft(co2D_s)
N = len(co2D)
xf = np.linspace(0., 1./(2.*T), N//2)
plt.loglog(x2_g[1:N_g//2], 2.0/N_g * np.abs(y2_g[1:N_g//2]), '-g')
plt.loglog(xf[1:N//2], 2.0/N * np.abs(yf[1:N//2]), '-b')
plt.loglog(xf[1:N//2], 2.0/N * | np.abs(yf_s[1:N//2]) | numpy.abs |
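# Note (added): with weekly sampling T = 1/52 yr, the spectra above extend up to the
# Nyquist frequency 1/(2*T) = 26 cycles/yr; any remaining annual cycle shows up as a
# peak near 1 cycle/yr.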
coords = [[[0,9],[5,9]],
[[8,0],[0,8]],
[[9,4],[3,4]],
[[2,2],[2,1]],
[[7,0],[7,4]],
[[6,4],[2,0]],
[[0,9],[2,9]],
[[3,4],[1,4]],
[[0,0],[8,8]],
[[5,5],[8,2]]]
coords = [[[284,294], [733,743]],
[[625,347], [653,375]],
[[561,848], [561,181]],
[[836,102], [836,339]],
[[946,941], [22,17]],
[[18,954], [956,16]],
[[370,142], [370,700]],
[[990,966], [677,966]],
[[366,603], [366,465]],
[[728,942], [57,271]],
[[615,493], [847,493]],
[[584,281], [301,281]],
[[125,356], [301,180]],
[[941,569], [555,183]],
[[151,116], [509,116]],
[[41,18], [841,818]],
[[627,670], [627,630]],
[[965,472], [965,100]],
[[93,404], [330,641]],
[[475,963], [475,514]],
[[389,389], [389,326]],
[[842,565], [842,576]],
[[454,700], [650,700]],
[[73,810], [73,319]],
[[450,212], [450,284]],
[[316,392], [316,697]],
[[915,592], [578,592]],
[[622,485], [434,485]],
[[109,853], [952,10]],
[[305,73], [305,222]],
[[27,489], [157,489]],
[[191,979], [867,979]],
[[527,329], [527,292]],
[[301,645], [301,162]],
[[639,730], [176,730]],
[[46,964], [46,458]],
[[727,422], [435,714]],
[[28,552], [404,552]],
[[33,108], [33,21]],
[[227,249], [327,249]],
[[414,903], [784,903]],
[[69,422], [888,422]],
[[422,924], [103,605]],
[[793,353], [450,10]],
[[714,682], [714,972]],
[[201,745], [410,745]],
[[408,713], [408,847]],
[[174,842], [818,198]],
[[863,353], [775,353]],
[[199,780], [670,780]],
[[877,947], [340,410]],
[[163,202], [163,91]],
[[955,919], [955,585]],
[[836,271], [533,271]],
[[258,366], [728,836]],
[[582,749], [582,12]],
[[80,40], [80,704]],
[[287,213], [287,635]],
[[390,546], [390,194]],
[[837,511], [538,810]],
[[473,281], [902,281]],
[[851,865], [731,745]],
[[918,59], [445,532]],
[[796,215], [796,248]],
[[875,111], [604,111]],
[[660,805], [538,805]],
[[507,850], [145,850]],
[[585,861], [585,52]],
[[426,74], [700,348]],
[[206,405], [529,405]],
[[418,333], [418,17]],
[[368,457], [33,792]],
[[186,81], [957,852]],
[[505,283], [113,283]],
[[20,878], [462,878]],
[[750,237], [69,918]],
[[15,280], [358,623]],
[[798,981], [500,683]],
[[965,970], [22,970]],
[[950,970], [148,970]],
[[660,392], [660,884]],
[[862,405], [862,527]],
[[801,283], [801,361]],
[[71,837], [136,837]],
[[651,438], [945,144]],
[[524,607], [614,517]],
[[348,955], [138,955]],
[[957,164], [404,717]],
[[531,581], [454,504]],
[[710,185], [710,271]],
[[822,86], [822,966]],
[[745,233], [490,488]],
[[350,823], [663,823]],
[[824,67], [447,444]],
[[846,667], [796,617]],
[[666,24], [666,906]],
[[640,39], [640,145]],
[[654,481], [985,481]],
[[581,894], [416,729]],
[[443,11], [697,11]],
[[318,627], [799,146]],
[[113,78], [891,856]],
[[181,149], [179,151]],
[[451,74], [451,262]],
[[458,726], [314,726]],
[[218,662], [533,662]],
[[965,108], [527,108]],
[[782,481], [896,367]],
[[557,927], [557,938]],
[[506,242], [941,677]],
[[948,778], [948,629]],
[[567,816], [567,956]],
[[323,773], [323,364]],
[[864,980], [864,12]],
[[611,699], [611,886]],
[[613,392], [901,104]],
[[528,905], [156,905]],
[[632,206], [798,40]],
[[338,237], [919,818]],
[[256,889], [11,644]],
[[835,52], [55,832]],
[[464,144], [322,144]],
[[254,747], [254,509]],
[[866,892], [866,916]],
[[827,946], [30,149]],
[[899,84], [177,806]],
[[134,634], [357,634]],
[[781,492], [244,492]],
[[817,762], [817,976]],
[[818,749], [818,860]],
[[262,480], [263,480]],
[[409,576], [409,698]],
[[242,151], [981,890]],
[[149,519], [149,557]],
[[42,990], [42,930]],
[[687,974], [50,337]],
[[758,382], [465,382]],
[[760,861], [760,934]],
[[17,835], [17,915]],
[[645,923], [645,648]],
[[702,116], [72,746]],
[[153,162], [955,964]],
[[185,101], [918,834]],
[[554,179], [554,353]],
[[879,673], [879,949]],
[[368,13], [368,512]],
[[582,105], [591,114]],
[[146,291], [600,745]],
[[609,538], [930,538]],
[[320,604], [320,146]],
[[566,698], [443,575]],
[[167,708], [844,31]],
[[712,630], [712,421]],
[[912,930], [64,82]],
[[980,931], [87,38]],
[[23,893], [888,28]],
[[640,435], [676,435]],
[[701,516], [190,516]],
[[684,145], [62,767]],
[[127,471], [91,435]],
[[685,197], [78,197]],
[[103,493], [103,522]],
[[309,986], [309,850]],
[[938,270], [938,300]],
[[295,72], [354,72]],
[[948,889], [948,455]],
[[254,733], [254,175]],
[[95,329], [942,329]],
[[19,672], [19,445]],
[[206,807], [206,934]],
[[886,961], [886,690]],
[[117,386], [117,292]],
[[199,59], [668,528]],
[[299,263], [299,878]],
[[28,295], [638,905]],
[[10,140], [276,406]],
[[279,526], [921,526]],
[[485,128], [856,499]],
[[418,398], [186,398]],
[[296,577], [296,521]],
[[514,261], [10,765]],
[[691,673], [776,758]],
[[131,430], [152,430]],
[[858,85], [62,85]],
[[394,846], [270,970]],
[[827,913], [827,376]],
[[634,669], [910,669]],
[[12,53], [945,986]],
[[782,467], [782,421]],
[[159,832], [109,832]],
[[793,807], [79,93]],
[[120,584], [356,584]],
[[645,16], [645,355]],
[[526,685], [217,376]],
[[296,305], [296,929]],
[[954,144], [954,839]],
[[748,88], [103,733]],
[[523,804], [473,754]],
[[524,316], [524,756]],
[[696,183], [912,183]],
[[288,564], [55,797]],
[[568,103], [568,348]],
[[468,626], [682,412]],
[[163,163], [961,961]],
[[762,824], [27,89]],
[[623,625], [32,34]],
[[865,343], [490,718]],
[[259,458], [259,33]],
[[944,660], [944,176]],
[[781,804], [826,759]],
[[15,702], [15,553]],
[[403,310], [918,825]],
[[438,734], [835,734]],
[[825,13], [825,245]],
[[129,611], [370,611]],
[[49,939], [172,939]],
[[687,906], [687,532]],
[[629,482], [273,126]],
[[727,218], [424,218]],
[[447,451], [233,451]],
[[142,779], [813,779]],
[[527,27], [527,804]],
[[482,55], [482,200]],
[[39,264], [806,264]],
[[884,636], [458,636]],
[[467,121], [199,389]],
[[856,925], [856,666]],
[[666,359], [378,359]],
[[11,946], [705,946]],
[[491,281], [940,730]],
[[86,112], [918,944]],
[[974,807], [974,707]],
[[445,67], [914,536]],
[[953,394], [953,822]],
[[468,398], [157,87]],
[[231,620], [231,646]],
[[979,869], [979,911]],
[[450,330], [450,79]],
[[675,659], [617,659]],
[[66,181], [66,723]],
[[181,406], [181,192]],
[[908,334], [908,526]],
[[254,891], [282,891]],
[[777,791], [127,141]],
[[469,58], [694,58]],
[[954,957], [566,569]],
[[957,957], [123,123]],
[[741,359], [741,986]],
[[763,526], [763,101]],
[[857,427], [600,170]],
[[527,756], [490,719]],
[[625,249], [397,249]],
[[798,702], [712,702]],
[[868,75], [868,853]],
[[332,296], [332,629]],
[[211,829], [100,940]],
[[12,139], [12,218]],
[[655,978], [655,242]],
[[99,852], [855,96]],
[[486,267], [486,855]],
[[474,90], [474,244]],
[[948,491], [186,491]],
[[896,59], [278,677]],
[[295,732], [629,732]],
[[860,936], [860,556]],
[[143,790], [143,26]],
[[371,847], [395,847]],
[[739,301], [739,44]],
[[384,716], [748,716]],
[[848,423], [848,923]],
[[855,23], [218,660]],
[[381,805], [381,438]],
[[451,610], [91,610]],
[[906,957], [191,957]],
[[118,675], [169,675]],
[[836,818], [95,818]],
[[368,945], [825,488]],
[[165,299], [899,299]],
[[392,327], [926,861]],
[[663,16], [131,548]],
[[630,302], [888,302]],
[[206,869], [206,331]],
[[979,413], [979,204]],
[[894,860], [62,28]],
[[444,897], [962,379]],
[[550,158], [550,885]],
[[845,736], [811,736]],
[[846,857], [12,857]],
[[981,730], [981,154]],
[[694,835], [88,835]],
[[21,101], [21,385]],
[[19,960], [964,15]],
[[283,721], [450,721]],
[[59,136], [758,835]],
[[287,313], [719,313]],
[[471,252], [849,630]],
[[682,189], [168,189]],
[[10,921], [774,157]],
[[884,598], [884,540]],
[[207,615], [207,443]],
[[627,408], [67,408]],
[[285,36], [285,792]],
[[116,585], [254,585]],
[[183,86], [183,702]],
[[220,138], [868,138]],
[[833,68], [286,615]],
[[367,534], [766,534]],
[[907,514], [621,228]],
[[133,593], [133,581]],
[[164,727], [768,123]],
[[566,227], [566,555]],
[[983,988], [105,110]],
[[620,177], [620,821]],
[[612,413], [612,176]],
[[168,889], [168,210]],
[[871,487], [559,175]],
[[399,870], [761,870]],
[[236,976], [582,630]],
[[699,216], [699,887]],
[[153,745], [790,745]],
[[444,749], [444,257]],
[[808,165], [939,165]],
[[546,525], [95,976]],
[[583,179], [373,389]],
[[235,816], [840,816]],
[[744,89], [832,89]],
[[425,317], [465,357]],
[[267,235], [114,82]],
[[887,59], [572,374]],
[[808,237], [808,626]],
[[431,352], [400,383]],
[[815,376], [815,905]],
[[249,218], [989,958]],
[[120,435], [357,198]],
[[807,551], [490,234]],
[[910,524], [910,725]],
[[802,304], [447,659]],
[[789,228], [678,339]],
[[229,322], [52,322]],
[[658,393], [506,393]],
[[378,438], [378,569]],
[[163,981], [473,671]],
[[537,984], [935,586]],
[[58,945], [966,37]],
[[132,696], [565,263]],
[[136,813], [136,284]],
[[606,656], [298,348]],
[[533,572], [673,712]],
[[872,912], [301,341]],
[[16,287], [16,613]],
[[571,541], [980,950]],
[[117,495], [35,495]],
[[85,79], [682,676]],
[[425,431], [117,739]],
[[982,984], [10,12]],
[[28,75], [431,478]],
[[259,529], [259,436]],
[[762,267], [170,859]],
[[323,135], [929,741]],
[[81,238], [561,718]],
[[128,213], [876,961]],
[[649,466], [649,540]],
[[715,863], [119,863]],
[[830,624], [794,660]],
[[123,968], [977,114]],
[[489,466], [489,811]],
[[27,10], [980,963]],
[[255,732], [255,484]],
[[574,829], [431,829]],
[[548,743], [22,217]],
[[903,297], [903,763]],
[[684,774], [64,154]],
[[260,823], [683,823]],
[[422,211], [422,826]],
[[10,196], [988,196]],
[[108,802], [15,802]],
[[104,70], [104,452]],
[[885,59], [885,36]],
[[68,854], [68,774]],
[[731,935], [731,718]],
[[657,986], [617,986]],
[[732,292], [732,32]],
[[841,56], [841,83]],
[[74,108], [862,896]],
[[654,895], [323,895]],
[[374,952], [374,217]],
[[90,723], [750,63]],
[[246,89], [911,754]],
[[453,301], [755,301]],
[[983,988], [23,28]],
[[81,705], [133,757]],
[[752,743], [752,397]],
[[53,243], [449,639]],
[[451,811], [451,187]],
[[26,672], [26,699]],
[[254,861], [943,861]],
[[643,740], [643,966]],
[[486,655], [149,318]],
[[375,146], [375,973]],
[[76,293], [103,293]],
[[246,398], [246,248]],
[[324,392], [595,121]],
[[130,577], [131,577]],
[[380,623], [549,454]],
[[224,181], [985,942]],
[[310,223], [310,594]],
[[23,982], [23,738]],
[[19,858], [832,858]],
[[726,531], [726,578]],
[[730,433], [196,433]],
[[606,599], [242,599]],
[[444,832], [444,238]],
[[198,870], [47,870]],
[[944,473], [795,473]],
[[737,386], [178,945]],
[[328,902], [328,644]],
[[422,851], [567,851]],
[[674,781], [215,781]],
[[920,757], [302,757]],
[[225,932], [640,517]],
[[359,337], [791,337]],
[[935,430], [935,262]],
[[772,850], [280,358]],
[[175,829], [175,451]],
[[938,204], [234,908]],
[[253,749], [308,749]],
[[704,458], [468,458]],
[[222,95], [743,616]],
[[968,840], [123,840]],
[[491,619], [491,889]],
[[979,580], [979,459]],
[[901,193], [171,923]],
[[246,155], [246,680]],
[[711,755], [247,755]],
[[671,734], [475,734]],
[[803,783], [129,109]],
[[145,890], [920,115]],
[[463,521], [463,700]],
[[782,99], [782,311]],
[[547,467], [630,467]],
[[14,88], [795,869]],
[[653,899], [653,90]],
[[488,874], [488,570]],
[[93,879], [645,327]],
[[320,658], [40,938]],
[[611,246], [611,22]],
[[258,935], [258,829]],
[[931,436], [931,263]],
[[252,460], [252,461]],
[[490,382], [965,382]],
[[242,89], [242,617]],
[[271,111], [595,435]],
[[462,706], [242,486]],
[[557,328], [747,328]],
[[486,99], [486,333]],
[[156,40], [488,372]],
[[323,482], [138,297]],
[[595,539], [812,756]],
[[923,861], [377,315]],
[[934,952], [256,274]],
[[314,777], [314,12]],
[[508,47], [508,144]],
[[888,807], [701,807]],
[[745,774], [878,907]],
[[740,716], [740,215]],
[[62,43], [62,12]],
[[571,196], [454,196]],
[[568,107], [408,107]],
[[549,676], [404,676]],
[[595,573], [595,970]],
[[148,168], [193,123]],
[[763,71], [759,71]],
[[797,64], [307,64]],
[[959,984], [32,57]],
[[457,562], [634,562]],
[[127,521], [601,47]],
[[112,296], [112,120]],
[[148,755], [451,755]],
[[636,494], [870,494]],
[[910,242], [945,277]],
[[912,911], [912,892]],
[[759,815], [759,314]],
[[391,285], [391,959]],
[[455,460], [182,460]],
[[112,78], [112,385]],
[[842,179], [842,592]],
[[236,424], [421,424]],
[[508,907], [30,907]],
[[637,219], [34,822]],
[[503,375], [503,205]],
[[570,533], [626,533]],
[[658,11], [658,94]],
[[179,286], [326,433]],
[[918,214], [200,932]],
[[339,887], [81,887]],
[[794,91], [50,835]],
[[225,356], [225,261]],
[[80,160], [80,335]],
[[148,64], [847,763]],
[[595,393], [941,393]]]
field_size = 1000
field = [[0 for i in range(field_size)] for i in range(field_size)]
count = 0
import numpy as np
for coord_pair in coords:
start = coord_pair[0]
end = coord_pair[1]
if start[1] != end[1] and start[0] != end[0]:
continue
    steps = max(1, max(abs(start[0] - end[0]), abs(start[1] - end[1])))  # renamed so the builtin range is not shadowed
    x_coords = np.linspace(start[0], end[0], steps+1, dtype=int)
    y_coords = np.linspace(start[1], end[1], steps+1, dtype=int)
coords = | np.array([x_coords, y_coords]) | numpy.array |
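    # A minimal sketch (added) of how the rasterized points could then be
    # accumulated and the overlapping cells counted, assuming the loop above:
    #   for x, y in zip(x_coords, y_coords):
    #       field[y][x] += 1
    #   overlaps = sum(cell >= 2 for row in field for cell in row)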
"""
Tests for conformers.py.
"""
import numpy as np
import unittest
from rdkit import Chem
from vs_utils.utils.rdkit_utils import conformers
class TestConformerGenerator(unittest.TestCase):
"""
Tests for ConformerGenerator.
"""
def setUp(self):
"""
Set up tests.
"""
aspirin_smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O aspirin'
self.mol = Chem.MolFromSmiles(aspirin_smiles.split()[0])
self.mol.SetProp('_Name', 'aspirin')
assert self.mol.GetNumConformers() == 0
self.engine = conformers.ConformerGenerator()
def test_generate_conformers(self):
"""
Generate molecule conformers using default parameters.
"""
mol = self.engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
# check that molecule names are retained
assert self.mol.GetProp('_Name') == mol.GetProp('_Name')
def test_mmff94_minimization(self):
"""
Generate conformers and minimize with MMFF94 force field.
"""
engine = conformers.ConformerGenerator(force_field='mmff94')
mol = engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
def test_mmff94s_minimization(self):
"""
Generate conformers and minimize with MMFF94s force field.
"""
engine = conformers.ConformerGenerator(force_field='mmff94s')
mol = engine.generate_conformers(self.mol)
assert mol.GetNumConformers() > 0
def test_embed_molecule(self):
"""
Test ConformerGenerator.embed_molecule.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
def test_minimize_conformers(self):
"""
Test ConformerGenerator.minimize_conformers.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
start = self.engine.get_conformer_energies(mol)
self.engine.minimize_conformers(mol)
finish = self.engine.get_conformer_energies(mol)
# check that all minimized energies are lower
assert np.all(start > finish), (start, finish)
def test_get_conformer_energies(self):
"""
Test ConformerGenerator.get_conformer_energies.
"""
mol = self.engine.embed_molecule(self.mol)
assert mol.GetNumConformers() > 0
energies = self.engine.get_conformer_energies(mol)
# check that the number of energies matches the number of
# conformers
assert len(energies) == mol.GetNumConformers()
def test_prune_conformers(self):
"""
Test ConformerGenerator.prune_conformers.
"""
engine = conformers.ConformerGenerator(max_conformers=10)
mol = engine.embed_molecule(self.mol)
# check that there is more than one conformer
assert mol.GetNumConformers() > 1
engine.minimize_conformers(mol)
energies = engine.get_conformer_energies(mol)
pruned = engine.prune_conformers(mol)
pruned_energies = engine.get_conformer_energies(pruned)
# check that the number of conformers is not to large
assert pruned.GetNumConformers() <= engine.max_conformers
# check that the number of conformers has not increased
assert pruned.GetNumConformers() <= mol.GetNumConformers()
# check that lowest energy conformer was selected
assert np.allclose(min(energies), min(pruned_energies))
# check that pruned energies are taken from the original set
for energy in pruned_energies:
assert np.allclose(min(np.fabs(energies - energy)), 0)
# check that conformers are in order of increasing energy
sort = np.argsort(pruned_energies)
assert np.array_equal(sort, np.arange(len(pruned_energies))), sort
def test_get_conformer_rmsd(self):
"""
Test ConformerGenerator.get_conformer_rmsd.
"""
engine = conformers.ConformerGenerator(max_conformers=10)
mol = engine.embed_molecule(self.mol)
# check that there is more than one conformer
assert mol.GetNumConformers() > 1
rmsd = engine.get_conformer_rmsd(mol)
# check for a valid distance matrix
assert rmsd.shape[0] == rmsd.shape[1] == mol.GetNumConformers()
assert np.allclose( | np.diag(rmsd) | numpy.diag |
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This file defines the functions used to build **badlands** meshes and surface grids.
"""
import os
import time
import numpy as np
from scipy.interpolate import griddata
if 'READTHEDOCS' not in os.environ:
from badlands import (partitionTIN, FVmethod, elevationTIN, raster2TIN, waveSed,
eroMesh, strataMesh, isoFlex, stratiWedge, carbMesh, forceSim)
def construct_mesh(input, filename, verbose=False):
"""
The following function takes parsed values from the XML input file to:
* build model grids & meshes,
* initialise Finite Volume discretisation,
* define the partitioning when parallelisation is enabled.
Args:
input: class containing XML input file parameters.
filename: (str) this is a string containing the path to the regular grid file.
verbose : (bool) when :code:`True`, output additional debug information (default: :code:`False`).
Returns
-------
recGrid
class describing the regular grid characteristics.
FVmesh
class describing the finite volume mesh.
force
class describing the forcing parameters.
tMesh
class describing the TIN mesh.
lGIDs
numpy 1D array containing the node indices.
fixIDs
numpy 1D array containing the fixed node indices.
inGIDs
numpy 1D array containing the node indices inside the mesh.
totPts
total number of points in the mesh.
elevation
numpy array containing the elevations for the domain.
cumdiff
cumulative total erosion/deposition changes
cumhill
cumulative hillslope erosion/deposition changes
cumfail
cumulative failure induced erosion/deposition changes
cumflex
cumulative changes induced by flexural isostasy
strata
stratigraphic class parameters
mapero
underlying erodibility map characteristics
tinFlex
class describing the flexural TIN mesh.
flex
class describing the flexural isostasy functions.
wave
class describing the wave functions.
straTIN
class describing the stratigraphic TIN mesh.
carbTIN
class describing the carbonate TIN mesh.
"""
cumflex = None
flex = None
wave = None
tinFlex = None
strata = None
mapero = None
# Get DEM regular grid and create Badlands TIN.
recGrid = raster2TIN.raster2TIN(filename, areaDelFactor=input.Afactor)
fixIDs = recGrid.boundsPt + recGrid.edgesPt
force = forceSim.forceSim(input.seafile, input.seapos, input.rainMap,
input.rainTime, input.rainVal, input.orographic, input.orographiclin,
input.rbgd, input.rmin, input.rmax, input.rzmax, input.windx,
input.windy, input.tauc, input.tauf, input.nm,
input.cw, input.hw, input.ortime, input.tectFile,
input.tectTime, recGrid.regX, recGrid.regY, input.riverPos,
input.riverTime, input.riverQws, input.riverRck, input.riverNb,
input.rockNb, input.tDisplay, input.carbValSp1, input.carbValSp2,
input.carbTime)
if input.disp3d:
force.time3d = input.time3d
if input.merge3d == 0. or input.merge3d > recGrid.resEdges:
force.merge3d = input.Afactor * recGrid.resEdges * 0.5
else:
force.merge3d = input.merge3d
# Partition the TIN
walltime = time.clock()
FVmesh = FVmethod.FVmethod(recGrid.tinMesh['vertices'], recGrid.tinMesh['triangles'],
recGrid.tinMesh['edges'])
# Perform partitioning by equivalent domain splitting
partitionIDs, RowProc, ColProc = partitionTIN.simple(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1])
FVmesh.partIDs = partitionIDs
# Get each partition global node ID
inGIDs = np.where(partitionIDs == 0)[0]
# Build Finite Volume discretisation
# Define overlapping partitions
lGIDs, localTIN = partitionTIN.overlap(recGrid.tinMesh['vertices'][:, 0], recGrid.tinMesh['vertices'][:, 1],
RowProc, ColProc, 2*recGrid.resEdges, verbose)
# Set parameters of the finite volume mesh
tMesh = FVmethod.FVmethod(localTIN['vertices'], localTIN['triangles'], localTIN['edges'])
# Define Finite Volume parameters
walltime = time.clock()
totPts = len(recGrid.tinMesh['vertices'][:, 0])
FVmesh.neighbours = np.zeros((totPts, 20), dtype=np.int32, order='F')
FVmesh.neighbours.fill(-2)
FVmesh.edge_length = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.vor_edges = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.control_volumes = np.zeros(totPts, dtype=np.float)
# Compute Finite Volume parameters
tGIDs, tNgbh, tEdgs, tVors, tVols = tMesh.construct_FV(inGIDs, lGIDs, totPts,
recGrid.resEdges*input.Afactor, verbose)
FVmesh.neighbours[tGIDs,:tMesh.maxNgbh] = tNgbh
FVmesh.edge_length[tGIDs,:tMesh.maxNgbh] = tEdgs
FVmesh.vor_edges[tGIDs,:tMesh.maxNgbh] = tVors
FVmesh.control_volumes[tGIDs] = tVols
if verbose:
print(" - FV mesh ", time.clock() - walltime)
# Define TIN parameters
if input.flexure:
elevation, cumdiff, cumhill, cumfail, cumflex, inIDs, parentIDs = _define_TINparams(totPts, input, FVmesh, recGrid, verbose)
else:
elevation, cumdiff, cumhill, cumfail, inIDs, parentIDs = _define_TINparams(totPts, input, FVmesh, recGrid, verbose)
# Build stratigraphic and erodibility meshes
if ((input.laytime and input.laytime > 0) and
(input.erolays and input.erolays >= 0)):
strata, mapero = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
elif (input.laytime and input.laytime > 0):
strata = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
elif (input.erolays and input.erolays >= 0):
mapero = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
# Set default to no rain
force.update_force_TIN(FVmesh.node_coords[:,:2])
# Flexural isostasy initialisation
if input.flexure:
flex, tinFlex, cumflex = _init_flexure(FVmesh, input, recGrid, force, elevation,
cumdiff, cumflex, totPts, verbose)
# Wavesed grid initialisation
if input.waveSed:
ref_elev = get_reference_elevation(input,recGrid,elevation)
wave = _init_wavesed(input,ref_elev, recGrid, force, verbose)
wave.build_tree(FVmesh.node_coords[:,:2])
# Stratigraphic TIN initialisation
if input.rockNb > 0:
layNb = int((input.tEnd - input.tStart)/input.laytime)+2
bPts = recGrid.boundsPt
ePts = recGrid.edgesPt
if input.restart:
straTIN = stratiWedge.stratiWedge(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.actlay, input.outDir, input.strath5file,
input.rockNb, recGrid.regX, recGrid.regY, elevation, input.rockCk, cumdiff,
input.rfolder, input.rstep)
else:
straTIN = stratiWedge.stratiWedge(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.actlay, input.outDir, input.strath5file,
input.rockNb, recGrid.regX, recGrid.regY, elevation, input.rockCk)
else:
straTIN = None
# Stratigraphic grid in case of carbonate and/or pelagic growth functions
if input.carbonate:
layNb = int((input.tEnd - input.tStart)/input.tDisplay)+2
bPts = recGrid.boundsPt
ePts = recGrid.edgesPt
if input.carbonate2:
nbSed = 3
else:
nbSed = 2
if input.restart:
carbTIN = carbMesh.carbMesh(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.outDir, input.strath5file, input.baseMap, nbSed,
recGrid.regX, recGrid.regY, elevation, input.rfolder, input.rstep)
else:
carbTIN = carbMesh.carbMesh(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.outDir, input.strath5file, input.baseMap, nbSed,
recGrid.regX, recGrid.regY, elevation)
else:
carbTIN = None
return recGrid, FVmesh, force, tMesh, lGIDs, fixIDs, \
inIDs, parentIDs, inGIDs, totPts, elevation, cumdiff, \
cumhill, cumfail, cumflex, strata, mapero, tinFlex, flex, wave, \
straTIN, carbTIN
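# Illustrative call sketch (not part of badlands itself): unpacking the tuple
# returned by construct_mesh. The DEM file name is a placeholder and `xml_input`
# stands for the parsed XML settings object this module expects.
def _example_construct_mesh(xml_input, dem_file="regular_grid.csv"):
    (recGrid, FVmesh, force, tMesh, lGIDs, fixIDs,
     inIDs, parentIDs, inGIDs, totPts, elevation, cumdiff,
     cumhill, cumfail, cumflex, strata, mapero, tinFlex, flex, wave,
     straTIN, carbTIN) = construct_mesh(xml_input, dem_file, verbose=True)
    return recGrid, FVmesh, elevation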
def reconstruct_mesh(recGrid, input, verbose=False):
"""
The following function is used after 3D displacements to:
* rebuild model grids & meshes,
* reinitialise Finite Volume discretisation,
* redefine the partitioning when parallelisation is enabled.
Args:
recGrid: class describing the regular grid characteristics.
input: class containing XML input file parameters.
verbose : (bool) when :code:`True`, output additional debug information (default: :code:`False`).
Returns
-------
FVmesh
class describing the finite volume mesh.
tMesh
class describing the TIN mesh.
lGIDs
numpy 1D array containing the node indices.
inIDs
numpy 1D array containing the local node indices inside the mesh.
inGIDs
numpy 1D array containing the node indices inside the mesh.
totPts
total number of points in the mesh.
"""
walltime = time.clock()
FVmesh = FVmethod.FVmethod(recGrid.tinMesh['vertices'], recGrid.tinMesh['triangles'],
recGrid.tinMesh['edges'])
# Perform partitioning by equivalent domain splitting
partitionIDs, RowProc, ColProc = partitionTIN.simple(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1])
FVmesh.partIDs = partitionIDs
# Get each partition global node ID
inGIDs = np.where(partitionIDs == 0)[0]
if verbose:
print(" - partition TIN amongst processors ", time.clock() - walltime)
# Define overlapping partitions
walltime = time.clock()
lGIDs, localTIN = partitionTIN.overlap(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1],
RowProc, ColProc, 2*recGrid.resEdges,
verbose)
# Set parameters of the finite volume mesh
tMesh = FVmethod.FVmethod(localTIN['vertices'], localTIN['triangles'], localTIN['edges'])
# Define Finite Volume parameters
totPts = len(recGrid.tinMesh['vertices'][:, 0])
FVmesh.neighbours = np.zeros((totPts, 20), dtype=np.int32, order='F')
FVmesh.neighbours.fill(-2)
FVmesh.edge_length = np.zeros((totPts, 20), dtype=np.float, order='F')
# docking.py
#import necessary modules.
import krpc
import time
import numpy as np
def acquire_target(conn,vessel,target):
# conn is a krpc.connect() object, vessel is a vessel object, target is a string.
# vessel_list is a list of all vessels currently in use.
vessel_list = conn.space_center.vessels
# n is the number of vessels currently in use.
n = len(vessel_list)
# the loop cycles through all vessels in the list, and if the vessel's name matches the target string provided, it sets it as the target vessel.
for i in range(0,n):
vessel_name = vessel_list[i].name
if vessel_name == target:
conn.space_center.target_vessel = vessel_list[i]
return None
def orbit_finder(conn,vessel):
# IMPORTANT: The more circular the vessel's initial orbit is, the more accurate the orbit finder will be.
# conn is a krpc.connect() object, vessel is a vessel object.
while True:
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# finds the period of the current orbit.
vessel_period = vessel.orbit.period
# creates an array of times ranging from 0 to the vessel's period to be used to test where best to fire engines for a Hohmann transfer.
times = np.linspace(0,vessel_period,1000)
# retrieves the current time.
current_time = conn.space_center.ut
# retrieves the vessel's current speed.
current_v = vessel.orbit.speed
#retrieves the current orbital radius.
vessel_radius = vessel.orbit.radius
# the Hohmann transfer orbit used to intercept the target will have a semi-major axis of the average of the original vessel orbit and the target orbit.
needed_a = (vessel_radius + target.orbit.radius)/2
# retrieves the gravitational constant.
G = conn.space_center.g
# retrieves the mass of the body the vessel is orbiting.
M = vessel.orbit.body.mass
# plugs in the previously defined variables into the vis-viva equation to find the needed velocity.
needed_v = np.sqrt(G*M*(2/vessel_radius - 1/needed_a))
# finds the delta V needed for the maneuver.
delta_v = needed_v - current_v
# the program loops over each time in the times array and creates a maneuver node at that time with the needed delta V.
# Using the orbit created by the node, it then checks how close the target and vessel will be when the vessel is at apoapsis (since that is when the vessel will cross the target's orbit).
# If the distance is close enough, the node is returned to be used as a variable in later programs.
# If the distance is not close enough, the node is deleted and the program moves on to the next time.
for i in times:
maneuver_time = current_time + i
node = vessel.control.add_node(maneuver_time, prograde=delta_v)
possible_orbit = node.orbit
time_to_pred_apoapsis = possible_orbit.period / 2
node_position = np.array(node.position(vessel.orbit.body.reference_frame))
node_unit_vector = np.array([node_position[0]/np.linalg.norm(node_position),node_position[1]/np.linalg.norm(node_position),node_position[2]/np.linalg.norm(node_position)])
possible_apoapsis = possible_orbit.apoapsis
vessel_position_at_apoapsis = possible_apoapsis * -1 * node_unit_vector
target_position_at_apoapsis = target.orbit.position_at(maneuver_time+time_to_pred_apoapsis,target.orbit.body.reference_frame)
dist_vector = [vessel_position_at_apoapsis[0] - target_position_at_apoapsis[0],vessel_position_at_apoapsis[1] - target_position_at_apoapsis[1],vessel_position_at_apoapsis[2] - target_position_at_apoapsis[2]]
dist = np.linalg.norm(dist_vector)
if dist < 1700:
return node
else:
vessel.control.remove_nodes()
# If no maneuvers result in a close enough distance, the program warps the vessel forward in time, and then restarts the search.
# Since the vessel has fast-forwarded, new times will be available to check.
conn.space_center.rails_warp_factor = 4
time.sleep(3)
conn.space_center.rails_warp_factor = 0
def first_slowdown(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# initializes a reference frame centered on the target. This makes it easy to measure how far the vessel is from the target and to get the velocity relative to the target.
target_ref = target.orbital_reference_frame
# sets the autopilot reference frame to the target-centered reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# returns the velocity of the vessel relative to the target as a tuple.
velocity_vector = vessel.flight(target_ref).velocity
# using the velocity vector, it sets the direction the autopilot should point the vessel to retrograde.
vessel.auto_pilot.target_direction = (-velocity_vector[0],-velocity_vector[1],-velocity_vector[2])
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
# when the vessel is 15000 meters away, the engines turn on to slow the vessel relative to the target.
# once the vessel's relative speed is less than 40 m/s, the engines turn off and this function exits.
if current_distance < 15000:
conn.space_center.rails_warp_factor = 0
vessel.control.throttle = 1
if vessel.flight(target_ref).speed < 40:
vessel.control.throttle = 0
return None
# when the distance is greater than 30000 meters, the vessel warps to save time. Then it goes back to normal time flow.
elif current_distance > 30000:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def zeroing_inclination(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# sets the autopilot reference frame to the vessel's orbital reference frame (so that the normal/anti-normal directions are basis vectors).
vessel.auto_pilot.reference_frame = vessel.orbital_reference_frame
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# finds the relative incline of the target (in radians).
incline = vessel.orbit.relative_inclination(target)
# finds the angular separation from the periapsis to the ascending node (in radians).
an_location = vessel.orbit.true_anomaly_at_an(target)
# finds the angular separation between the vessel and the periapsis (in radians).
vessel_location = vessel.orbit.true_anomaly
# when the incline is small enough, the engines should turn off and the function will exit.
if abs(incline) < 0.0005:
vessel.control.throttle = 0
return None
# if the inclination is positive, the vessel points in the anti-normal direction. If the inclination is negative, the vessel points in the normal direction.
if incline > 0:
vessel.auto_pilot.target_direction = (0,0,-1)
else:
vessel.auto_pilot.target_direction = (0,0,1)
# if the angular separation of the ascending node and the vessel is low, the engines should fire. Otherwise, the engines should not fire.
if abs(an_location-vessel_location) < 0.05:
vessel.control.throttle = 1
else:
vessel.control.throttle = 0
# controls if the vessel should warp forward in time depending on how close the ascending node and the vessel are (to save time).
if abs(an_location-vessel_location) > np.pi / 12:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def execute_transfer_burn(conn,vessel,node):
# conn is a krpc.connect() object, vessel is a vessel object, and node is a node object.
# arrived is a boolean that will ensure that if the vessel gets to the maneuver node, it will not stop the engines until the maneuver is complete.
arrived = False
while True:
# sets the reference frame to the vessel's orbital reference frame so that the velocity vector is a basis vector.
vessel.auto_pilot.reference_frame = vessel.orbital_reference_frame
# engages the autopilot.
vessel.auto_pilot.engage()
# sets the target direction to prograde.
vessel.auto_pilot.target_direction = (0,1,0)
# if the vessel has ever been within 20 seconds of the node, fire the engines.
if node.time_to < 20 or arrived == True:
arrived = True
vessel.control.throttle = 1
# once the vessel orbit's apoapsis is higher than the node orbit's apoapsis, the burn is complete and the engines turn off, the node is deleted, and this function exits.
if node.orbit.apoapsis_altitude < vessel.orbit.apoapsis_altitude:
vessel.control.throttle = 0
node.remove()
return None
# determines if the vessel should warp forward in time to save time.
if node.time_to > 400:
conn.space_center.rails_warp_factor = 4
else:
conn.space_center.rails_warp_factor = 0
def second_slowdown(conn,vessel):
# conn is a krpc.connect() object, vessel is a vessel object.
# retrieves the target vessel as a vessel object.
target = conn.space_center.target_vessel
# initializes a reference frame centered on the target. This makes it easy to measure how far the vessel is from the target and to get the velocity relative to the target.
target_ref = target.orbital_reference_frame
# sets the autopilot reference frame to the target-centered reference frame.
vessel.auto_pilot.reference_frame = target_ref
# engages the autopilot.
vessel.auto_pilot.engage()
while True:
# returns the velocity of the vessel relative to the target as a tuple.
velocity_vector = vessel.flight(target_ref).velocity
# using the velocity vector, it sets the direction the autopilot should point the vessel to prograde.
vessel.auto_pilot.target_direction = (velocity_vector[0],velocity_vector[1],velocity_vector[2])
# waits until the vessel is pointed in the right direction.
vessel.auto_pilot.wait()
# retrieves the current time.
current_time = conn.space_center.ut
# returns the position of the vessel relative to the target as a tuple.
current_position = vessel.orbit.position_at(current_time,target_ref)
# finds the current distance by finding the magnitude of the current position.
current_distance = np.linalg.norm(current_position)
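# Illustrative end-to-end sketch (not part of the functions above): one plausible
# call order for a rendezvous, assuming a running kRPC server and a target vessel
# named 'Station'; the connection name and target name are placeholders.
def _example_docking_sequence():
    conn = krpc.connect(name='docking')
    vessel = conn.space_center.active_vessel
    acquire_target(conn, vessel, 'Station')
    zeroing_inclination(conn, vessel)
    node = orbit_finder(conn, vessel)
    execute_transfer_burn(conn, vessel, node)
    first_slowdown(conn, vessel)
    second_slowdown(conn, vessel)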
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from frispy import EOM
class TestEOM(TestCase):
def setUp(self):
super().setUp()
# Coordinates for the default
self.phi = 0
self.theta = 0
self.vel = np.array([1, 0, 0])
self.ang_vel = np.array([0, 0, 62])
self.kwargs = {
"area": 0.058556, # m^2
"I_zz": 0.002352, # kg*m^2
"I_xx": 0.001219, # kg*m^2
"mass": 0.175, # kg
}
def test_smoke(self):
eom = EOM(**self.kwargs)
assert eom is not None
def test_eom_has_properties(self):
eom = EOM(**self.kwargs)
assert hasattr(eom, "model")
assert hasattr(eom, "environment")
def test_compute_forces(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
for f in ["F_lift", "F_drag", "F_grav", "F_total", "Acc"]:
assert f in result
assert result[f].shape == (3,)
assert result[f].dtype == np.float
def test_F_drag_direction(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
assert result["F_drag"][0] < 0 # backwards
assert result["F_drag"][1] == 0
assert result["F_drag"][2] == 0
def test_F_lift_cross_component(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
assert result["F_lift"][1] == 0 # from cross product
def test_F_grav_direction(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
assert result["F_grav"][0] == 0
assert result["F_grav"][1] == 0
assert result["F_grav"][2] < 0 # downwards
def test_F_total_Acc_relation(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
assert all(result["F_total"] == result["Acc"] * eom.mass)
def test_compute_torques_smoke(self):
eom = EOM(**self.kwargs)
result = eom.compute_forces(self.phi, self.theta, self.vel, self.ang_vel)
result = eom.compute_torques(self.vel, result)
assert "torque_amplitude" in result
assert isinstance(result["torque_amplitude"], float)
for t in ["T_x_lab", "T_y_lab", "T_x", "T_y", "T_z", "T"]:
assert t in result
assert result[t].shape == (3,)
assert result[t].dtype == np.float
def test_compute_derivatives_smoke(self):
eom = EOM(**self.kwargs)
coords = np.array([0, 0, 1, 10, 0, 0, 0, 0, 0, 0, 0, 62])
der = eom.compute_derivatives(0, coords)
assert der.shape == (12,)
assert der.dtype == np.float
def test_rotation_matrix(self):
def trig_functions(phi, theta):
return np.sin(phi), np.cos(phi), np.sin(theta), np.cos(theta)
"""
This module contains our thermodynamic calculations. Calculation of pressure, fugacity coefficient, and max density are handled by an Eos object so that these functions can be used with any EOS. The thermodynamics module contains a series of wrappers to handle the inputs and outputs of these functions.
"""
import numpy as np
from scipy import interpolate
import scipy.optimize as spo
from scipy.ndimage.filters import gaussian_filter1d
import copy
import logging
import despasito.utils.general_toolbox as gtb
from despasito import fundamental_constants as constants
logger = logging.getLogger(__name__)
def pressure_vs_volume_arrays(
T,
xi,
Eos,
min_density_fraction=(1.0 / 500000.0),
density_increment=5.0,
max_volume_increment=1.0e-4,
pressure_min=100,
maxiter=25,
multfactor=2,
extended_npts=20,
max_density=None,
density_max_opts={},
**kwargs
):
r"""
Output arrays with specific volume and pressure arrays calculated from the given EOS.
This function is fundamental to every calculation; its options are passed down from the higher-level calculations with the keyword variable ``density_opts``.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
min_density_fraction : float, Optional, default=(1.0/500000.0)
Fraction of the maximum density that sets the minimum density of the density array (i.e. the minimum density equals this fraction times the maximum density). The minimum density is the reciprocal of the maximum specific volume used to calculate the roots.
density_increment : float, Optional, default=5.0
The increment between density values in the density array.
max_volume_increment : float, Optional, default=1.0E-4
Maximum increment between specific volume array values. After conversion from density to specific volume, the increment values are compared to this value.
pressure_min : float, Optional, default=100
Ensure pressure curve reaches down to this value
multfactor : int, Optional, default=2
Multiplication factor to extend range
extended_npts : int, Optional, default=20
Number of points in extended range
maxiter : int, Optional, default=25
Number of times to multiply range by to obtain full pressure vs. specific volume curve
max_density : float, Optional, default=None
[mol/:math:`m^3`] Maximum molar density defined, if default of None is used then the Eos object method, density_max is used.
density_max_opts : dict, Optional, default={}
Keyword arguments for density_max method for EOS object
Returns
-------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_arrays' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.any(np.isnan(xi)):
raise ValueError("Given mole fractions are NaN")
if isinstance(xi, list):
xi = np.array(xi)
# estimate the maximum density based on the hard sphere packing fraction, part of EOS
if not max_density:
max_density = Eos.density_max(xi, T, **density_max_opts)
elif gtb.isiterable(max_density):
logger.error(
" Maxrho should be type float. Given value: {}".format(max_density)
)
max_density = max_density[0]
if max_density > 1e5:
raise ValueError(
"Max density of {} mol/m^3 is not feasible, check parameters.".format(
max_density
)
)
# min rho is a fraction of max rho, such that minrho << rhogassat
minrho = max_density * min_density_fraction
# list of densities for P,rho and P,v
if (max_density - minrho) < density_increment:
raise ValueError(
"Density range, {}, is less than increment, {}. Check parameters used in Eos.density_max().".format(
(max_density - minrho), density_increment
)
)
rholist = np.arange(minrho, max_density, density_increment)
# check rholist to see when the spacing
vspace = (1.0 / rholist[:-1]) - (1.0 / rholist[1:])
if np.amax(vspace) > max_volume_increment:
vspaceswitch = np.where(vspace > max_volume_increment)[0][-1]
rholist_2 = (
1.0
/ np.arange(
1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment
)[::-1]
)
rholist = np.append(rholist_2, rholist[vspaceswitch + 2 :])
# compute Pressures (Plist) for rholist
Plist = Eos.pressure(rholist, T, xi)
# Make sure enough of the pressure curve is obtained
for i in range(maxiter):
if Plist[0] > pressure_min:
rhotmp = np.linspace(rholist[0] / 2, rholist[0], extended_npts)[:-1]
Ptmp = Eos.pressure(rhotmp, T, xi)
Plist = np.append(Ptmp, Plist)
rholist = np.append(rhotmp, rholist)
else:
break
# Flip Plist and rholist arrays
Plist = Plist[:][::-1]
rholist = rholist[:][::-1]
vlist = 1.0 / rholist
return vlist, Plist
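# Minimal usage sketch (illustrative only): `Eos` is assumed to be an already
# constructed despasito EOS object; the temperature is an arbitrary example value.
def _example_pressure_curve(Eos, T=300.0):
    xi = np.array([1.0])  # pure component
    vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos)
    return vlist, Plist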
def pressure_vs_volume_spline(vlist, Plist):
r"""
Fit arrays of specific volume and pressure values to a cubic Univariate Spline.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Returns
-------
Pvspline : obj
Function object of pressure vs. specific volume
roots : list
List of specific volume roots. Subtract a system pressure from the output of Pvsrho to find density of vapor and/or liquid densities.
extrema : list
List of specific volume values corresponding to local minima and maxima.
"""
# Larger sigma value
Psmoothed = gaussian_filter1d(Plist, sigma=1.0e-2)
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed)
roots = Pvspline.roots().tolist()
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed, k=4)
extrema = Pvspline.derivative().roots().tolist()
if extrema:
if len(extrema) > 2:
extrema = extrema[0:2]
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if np.any(np.isnan(Plist)):
roots = [np.nan]
return Pvspline, roots, extrema
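# Sketch of how the spline output is typically consumed (mirrors the calls made in
# the calculation functions below): shifting the pressure array by a trial pressure P
# makes the spline roots the candidate liquid and vapor specific volumes.
def _example_volume_roots(vlist, Plist, P):
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - P)
    liquid_volume = roots[0] if roots else np.nan   # smallest-volume root
    vapor_volume = roots[-1] if roots else np.nan   # largest-volume root
    return liquid_volume, vapor_volume, extrema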
def pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=[], **kwargs):
r"""
Plot pressure vs. specific volume.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Pvspline : obj
Function object of pressure vs. specific volume
markers : list, Optional, default=[]
List of plot markers used in plot
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_plot' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(vlist, Plist, label="Orig.")
plt.plot(vlist, Pvspline(vlist), label="Smoothed")
plt.plot([vlist[0], vlist[-1]], [0, 0], "k")
for k in range(len(markers)):
plt.plot([markers[k], markers[k]], [min(Plist), max(Plist)], "k")
plt.xlabel("Specific Volume [$m^3$/mol]"), plt.ylabel("Pressure [Pa]")
# plt.ylim(min(Plist)/2,np.abs(min(Plist))/2)
plt.legend(loc="best")
plt.tight_layout()
plt.show()
except Exception:
logger.error("Matplotlib package is not installed, could not plot")
def calc_saturation_properties(
T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs
):
r"""
Computes the saturated pressure, gas and liquid densities for a single component system.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
tol : float, Optional, default=1e-6
Tolerance to accept pressure value
Pconverged : float, Optional, default=1.0
If the pressure is negative (under tension), we search from a value just above vacuum
Returns
-------
Psat : float
[Pa] Saturation pressure given system information
rhov : float
[mol/:math:`m^3`] Density of vapor at saturation pressure
rhol : float
[mol/:math:`m^3`] Density of liquid at saturation pressure
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_saturation_properties' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.count_nonzero(xi) != 1:
if np.count_nonzero(xi > 0.1) != 1:
raise ValueError(
"Multiple components have compositions greater than 10%, check code for source"
)
else:
ind = np.where((xi > 0.1) == True)[0]
raise ValueError(
"Multiple components have compositions greater than 0. Do you mean to obtain the saturation pressure of {} with a mole fraction of {}?".format(
Eos.beads[ind], xi[ind]
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
if not extrema or len(extrema) < 2 or np.any(np.isnan(roots)):
logger.warning(" The component is above its critical point")
Psat, rhol, rhov = np.nan, np.nan, np.nan
else:
ind_Pmin1 = np.argwhere(np.diff(Plist) > 0)[0][0]
ind_Pmax1 = np.argmax(Plist[ind_Pmin1:]) + ind_Pmin1
Pmaxsearch = Plist[ind_Pmax1]
Pminsearch = max(Pconverged, np.amin(Plist[ind_Pmin1:ind_Pmax1]))
# Using computed Psat find the roots in the maxwell construction to give liquid (first root) and vapor (last root) densities
Psat = spo.minimize_scalar(
objective_saturation_pressure,
args=(Plist, vlist),
bounds=(Pminsearch, Pmaxsearch),
method="bounded",
)
Psat = Psat.x
obj_value = objective_saturation_pressure(Psat, Plist, vlist)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - Psat)
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if obj_value < tol:
logger.debug(
" Psat found: {} Pa, obj value: {}, with {} roots and {} extrema".format(
Psat, obj_value, np.size(roots), np.size(extrema)
)
)
if len(roots) == 2:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:] - Psat, 1)
vroot = -yroot / slope
if vroot < 0.0:
vroot = np.finfo(float).eps
rho_tmp = spo.minimize(
pressure_spline_error,
1.0 / vroot,
args=(Psat, T, xi, Eos),
bounds=[(1.0 / (vroot * 1e2), 1.0 / (1.1 * roots[-1]))],
)
roots = np.append(roots, [1.0 / rho_tmp.x])
rhol = 1.0 / roots[0]
rhov = 1.0 / roots[2]
else:
logger.warning(
" Psat NOT found: {} Pa, obj value: {}, consider decreasing 'pressure_min' option in density_opts".format(
Psat, obj_value
)
)
Psat, rhol, rhov = np.nan, np.nan, np.nan
tmpv, _, _ = calc_vapor_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
tmpl, _, _ = calc_liquid_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
logger.debug(" phiv: {}, phil: {}".format(tmpv, tmpl))
return Psat, rhol, rhov
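# Usage sketch (illustrative only): saturation properties of a pure component
# described by an assumed, already-constructed `Eos` object at an example temperature.
def _example_saturation(Eos, T=300.0):
    xi = np.array([1.0])
    Psat, rhol, rhov = calc_saturation_properties(T, xi, Eos)
    return Psat, rhol, rhov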
def objective_saturation_pressure(shift, Pv, vlist):
r"""
Objective function used to calculate the saturation pressure.
Parameters
----------
shift : float
[Pa] Guess in Psat value used to translate the pressure vs. specific volume curve
Pv : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
vlist : numpy.ndarray
[mol/:math:`m^3`] Specific volume array. Length depends on values in density_opts passed to :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
obj_value : float
Output of the objective function: the sum of the (positive) area between the first two roots and the (negative) area between the second and third roots, quantity squared.
"""
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Pv - shift)
if len(roots) >= 3:
a = Pvspline.integral(roots[0], roots[1])
b = Pvspline.integral(roots[1], roots[2])
elif len(roots) == 2:
a = Pvspline.integral(roots[0], roots[1])
# If the curve hasn't decayed to 0 yet, estimate the remaining area as a triangle. This isn't super accurate but we are just using the saturation pressure to get started.
slope, yroot = np.polyfit(vlist[-4:], Pv[-4:] - shift, 1)
b = (
Pvspline.integral(roots[1], vlist[-1])
+ (Pv[-1] - shift) * (-yroot / slope - vlist[-1]) / 2
)
# raise ValueError("Pressure curve only has two roots. If the curve hasn't fully decayed, either increase maximum specific volume or decrease 'pressure_min' in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`.")
elif np.any(np.isnan(roots)):
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing pressure."
)
else:
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing min_density_fraction"
)
# pressure_vs_volume_plot(vlist, Pv-shift, Pvspline, markers=extrema)
return (a + b) ** 2
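# Note on the construction above (illustrative, for the case where three roots
# v0 < v1 < v2 of P(v) - Psat = 0 exist): the returned quantity is
#     obj(Psat) = ( integral_{v0}^{v1} [P(v) - Psat] dv + integral_{v1}^{v2} [P(v) - Psat] dv )**2
# which vanishes when the positive and negative lobes of the loop enclose equal
# areas, i.e. the Maxwell equal-area condition that defines the saturation pressure.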
def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes vapor density under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
rhov : float
[mol/:math:`m^3`] Density of vapor at system pressure
flag : int
A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_vapor_density' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Plist = Plist - P
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
logger.debug(" Find rhov: P {} Pa, roots {} m^3/mol".format(P, roots))
flag_NoOpt = False
l_roots = len(roots)
if np.any(np.isnan(roots)):
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
elif l_roots == 0:
if Pvspline(1 / vlist[-1]) < 0:
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vlist[0],
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x
if not len(extrema):
flag = 2
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
flag = 1
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
except Exception:
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
T, xi, Eos.density_max(xi, T, maxpack=0.99)
)
)
flag_NoOpt = True
elif min(Plist) + P > 0:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else:
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
flag = 3
rho_tmp = np.nan
elif l_roots == 1:
if not len(extrema):
flag = 2
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
elif len(extrema) > 1:
flag = 0
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
elif l_roots == 2:
if (Pvspline(roots[0]) + P) < 0.0:
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: This T and yi, {} {}, combination produces a liquid under tension at this pressure".format(
T, xi
)
)
else:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else: # 3 roots
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure.".format(
T, xi
)
)
rho_tmp = 1.0 / roots[2]
flag = 0
if flag in [0, 2]: # vapor or critical fluid
tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
if rho_tmp * 1.01 > Eos.density_max(xi, T, maxpack=0.99):
tmp[1] = Eos.density_max(xi, T, maxpack=0.99)
if (
pressure_spline_error(tmp[0], P, T, xi, Eos)
* pressure_spline_error(tmp[1], P, T, xi, Eos)
) < 0:
rho_tmp = spo.brentq(
pressure_spline_error,
tmp[0],
tmp[1],
args=(P, T, xi, Eos),
rtol=0.0000001,
)
else:
if Plist[0] < 0:
logger.warning(
" Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
tmp
)
)
elif not flag_NoOpt:
rho_tmp = spo.least_squares(
pressure_spline_error,
rho_tmp,
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x
logger.debug(" Vapor Density: {} mol/m^3, flag {}".format(rho_tmp, flag))
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
# Flag: 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
return rho_tmp, flag
def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes liquid density under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
rhol : float
[mol/:math:`m^3`] Density of liquid at system pressure
flag : int
A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_liquid_density' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
# Get roots and local minima and maxima
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Plist = Plist - P
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
logger.debug(" Find rhol: P {} Pa, roots {} m^3/mol".format(P, str(roots)))
flag_NoOpt = False
if extrema:
if len(extrema) == 1:
logger.warning(
" One extrema at {}, assume weird minima behavior. Check your parameters.".format(
1 / extrema[0]
)
)
# Assess roots, what is the liquid density
l_roots = len(roots)
if np.any(np.isnan(roots)):
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
elif l_roots == 0:
if Pvspline(1 / vlist[-1]):
try:
bounds = (1 / vlist[0], Eos.density_max(xi, T, maxpack=0.99))
rho_tmp = spo.least_squares(
pressure_spline_error,
np.mean(bounds),
args=(P, T, xi, Eos),
bounds=bounds,
)
rho_tmp = rho_tmp.x
if not len(extrema):
flag = 2
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
flag = 1
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
except Exception:
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
T, xi, Eos.density_max(xi, T, maxpack=0.99)
)
)
flag_NoOpt = True
elif min(Plist) + P > 0:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1.0 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else:
flag = 3
logger.error(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
str(T), str(xi)
)
)
rho_tmp = np.nan
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
elif l_roots == 2: # 2 roots
if (Pvspline(roots[0]) + P) < 0.0:
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: This T and xi, {} {}, combination produces a liquid under tension at this pressure".format(
T, xi
)
)
else: # There should be three roots, but the values of specific volume don't go far enough to pick up the last one
flag = 1
rho_tmp = 1.0 / roots[0]
elif l_roots == 1: # 1 root
if not len(extrema):
flag = 2
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
elif len(extrema) > 1:
flag = 0
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else: # 3 roots
rho_tmp = 1.0 / roots[0]
flag = 1
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
if flag in [1, 2]: # liquid or critical fluid
tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
P_tmp = [
pressure_spline_error(tmp[0], P, T, xi, Eos),
pressure_spline_error(tmp[1], P, T, xi, Eos),
]
if (P_tmp[0] * P_tmp[1]) < 0:
rho_tmp = spo.brentq(
pressure_spline_error, tmp[0], tmp[1], args=(P, T, xi, Eos), rtol=1e-7
)
else:
if P_tmp[0] < 0:
logger.warning(
" Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
tmp
)
)
elif not flag_NoOpt:
rho_tmp = spo.least_squares(
pressure_spline_error,
rho_tmp,
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x[0]
logger.debug(" Liquid Density: {} mol/m^3, flag {}".format(rho_tmp, flag))
# Flag: 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true
return rho_tmp, flag
def pressure_spline_error(rho, Pset, T, xi, Eos):
"""
Calculate difference between set point pressure and computed pressure for a given density.
Used to ensure an accurate value from the EOS rather than an estimate from a spline.
Parameters
----------
rho : float
[mol/:math:`m^3`] Density of system
Pset : float
[Pa] Guess in pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
Returns
-------
pressure_spline_error : float
[Pa] Difference in set pressure and predicted pressure given system conditions.
"""
Pguess = Eos.pressure(rho, T, xi)
return Pguess - Pset
def calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts={}, **kwargs):
r"""
Computes vapor fugacity coefficient under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
yi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
phiv : float
Fugacity coefficient of vapor at system pressure
rhov : float
[mol/:math:`m^3`] Density of vapor at system pressure
flag : int
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_vapor_fugacity_coefficient' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
rhov, flagv = calc_vapor_density(P, T, yi, Eos, density_opts)
if flagv == 4:
phiv = np.ones_like(yi)
rhov = 0.0
logger.info(" rhov set to 0.")
elif flagv == 3:
phiv = np.array([np.nan, np.nan])
else:
phiv = Eos.fugacity_coefficient(P, rhov, yi, T)
return phiv, rhov, flagv
def calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes liquid fugacity coefficient under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
phil : float
Fugacity coefficient of liquid at system pressure
rhol : float
[mol/:math:`m^3`] Density of liquid at system pressure
flag : int
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true.
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_liquid_fugacity_coefficient' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
rhol, flagl = calc_liquid_density(P, T, xi, Eos, density_opts)
if flagl == 3:
phil = np.array([np.nan, np.nan])
else:
phil = Eos.fugacity_coefficient(P, rhol, xi, T)
return phil, rhol, flagl
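# Illustrative sketch combining the two wrappers above: distribution coefficients
# K_i = phil_i / phiv_i at fixed T and P, assuming both phases can be found for the
# supplied compositions.
def _example_K_factors(P, T, xi, yi, Eos):
    phiv, rhov, flagv = calc_vapor_fugacity_coefficient(P, T, yi, Eos)
    phil, rhol, flagl = calc_liquid_fugacity_coefficient(P, T, xi, Eos)
    return phil / phiv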
def calc_new_mole_fractions(phase_1_mole_fraction, phil, phiv, phase=None):
r"""
Calculate the alternative phase composition given the composition and fugacity coefficients of one phase, and the fugacity coefficients of the target phase.
Parameters
----------
phase_1_mole_fraction : numpy.ndarray
Mole fraction of each component, sum(mole fraction) must equal 1.0
phil : float
Fugacity coefficient of liquid at system pressure
phiv : float
Fugacity coefficient of vapor at system pressure
phase : str, default=None
Use either 'vapor' or 'liquid' to define the mole fraction **being computed**. The default of None raises an error so that the user must explicitly specify the phase.
Returns
-------
phase_2_mole_fraction : numpy.ndarray
Mole fraction of each component computed from fugacity coefficients; sum(xi) equals 1.0 when the solution is found, but the values may not sum to 1.0 during intermediate steps of an equilibrium calculation (e.g. bubble point).
"""
if phase == None or phase not in ["vapor", "liquid"]:
raise ValueError(
"The user must specify the desired mole fraction as either 'vapor' or 'liquid'."
)
if np.sum(phase_1_mole_fraction) != 1.0:
raise ValueError("Given mole fractions must add up to one.")
if np.any(np.isnan(phiv)):
raise ValueError("Vapor fugacity coefficients should not be NaN")
if np.any(np.isnan(phil)):
raise ValueError("Liquid fugacity coefficients should not be NaN")
phase_2_mole_fraction = np.zeros(len(phase_1_mole_fraction))
ind = np.where(phase_1_mole_fraction != 0.0)[0]
if phase == "vapor":
for i in ind:
phase_2_mole_fraction[i] = phase_1_mole_fraction[i] * phil[i] / phiv[i]
elif phase == "liquid":
for i in ind:
phase_2_mole_fraction[i] = phase_1_mole_fraction[i] * phiv[i] / phil[i]
return phase_2_mole_fraction
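# Worked example with hypothetical numbers (not taken from any real system): for an
# equimolar liquid, xi = [0.5, 0.5], with assumed phil = [0.2, 1.5] and phiv = [0.9, 0.95],
# the vapor-phase estimate is y_i = x_i * phil_i / phiv_i:
#     y = [0.5 * 0.2 / 0.9, 0.5 * 1.5 / 0.95] ~ [0.111, 0.789]
# The outer equilibrium solver is responsible for renormalizing this estimate.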
def equilibrium_objective(phase_1_mole_fraction, phil, phiv, phase=None):
r"""
Computes the objective value used to determine equilibrium between phases. sum(phase_1_mole_fraction * phase_1_phi / phase_2_phi ) - 1.0, where `phase` is phase 2.
Parameters
----------
phase_1_mole_fraction : numpy.ndarray
Mole fraction of each component, sum(mole fraction) must equal 1.0
phil : float
Fugacity coefficient of liquid at system pressure
phiv : float
Fugacity coefficient of vapor at system pressure
phase : str, default=None
Use either 'vapor' or 'liquid' to define the mole fraction **being computed**. The default of None raises an error so that the user must explicitly specify the phase.
Returns
-------
objective_value : numpy.ndarray
Objective value indicating how close to equilibrium we are
"""
if phase == None or phase not in ["vapor", "liquid"]:
raise ValueError(
"The user must specify the desired mole fraction as either 'vapor' or 'liquid'."
)
if np.sum(phase_1_mole_fraction) != 1.0:
raise ValueError("Given mole fractions must add up to one.")
if np.any(np.isnan(phiv)):
raise ValueError("Vapor fugacity coefficients should not be NaN")
if np.any(np.isnan(phil)):
raise ValueError("Liquid fugacity coefficients should not be NaN")
if phase == "vapor":
objective_value = float((np.nansum(phase_1_mole_fraction * phil / phiv)) - 1.0)
elif phase == "liquid":
objective_value = float((np.nansum(phase_1_mole_fraction * phiv / phil)) - 1.0)
return objective_value
def _clean_plot_data(x_old, y_old):
r"""
Sort the independent variable, remove duplicate values, and pull the corresponding entries from the dependent array.
Parameters
----------
x_old : numpy.ndarray
Original independent variable
y_old : numpy.ndarray
Original dependent variable
Returns
-------
x_new : numpy.ndarray
New independent variable
y_new : numpy.ndarray
New dependent variable
"""
x_new = np.sort(np.array(list(set(x_old))))
y_new = np.array([y_old[np.where(np.array(x_old) == x)[0][0]] for x in x_new])
return x_new, y_new
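# Worked example: x_old = [3, 1, 3, 2] and y_old = [30, 10, 31, 20] return
#     x_new = [1, 2, 3]
#     y_new = [10, 20, 30]
# For the duplicated x value, the y value at its first occurrence in x_old is kept.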
def calc_Prange_xi(
T,
xi,
yi,
Eos,
density_opts={},
Pmin=None,
Pmax=None,
maxiter=200,
mole_fraction_options={},
ptol=1e-2,
xytol=0.01,
maxfactor=2,
minfactor=0.5,
Pmin_allowed=100,
**kwargs
):
r"""
Obtain minimum and maximum pressure values for bubble point calculation.
The liquid mole fraction is fixed, and the pressure bounds are chosen so that the objective function evaluates with opposite signs at the two values.
Parameters
----------
T : float
Temperature of the system [K]
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
yi : numpy.ndarray
Vapor mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
maxiter : float, Optional, default=200
Maximum number of iterations in both the loop to find Pmin and the loop to find Pmax
Pmin : float, Optional, default=None
[Pa] Minimum pressure of the search range; if None, it is estimated from the local minimum of the liquid pressure curve and falls back to 1000 Pa.
Pmax : float, Optional, default=None
[Pa] Maximum pressure of the search range; if None and no local minima or maxima are identified for the liquid composition at this temperature, 100000 Pa is used as an initial estimate.
Pmin_allowed : float, Optional, default=100
Minimum allowed pressure in search, before looking for a super critical fluid
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
ptol : float, Optional, default=1e-2
If two iterations in the search for the maximum pressure are within this tolerance, the search is discontinued
xytol : float, Optional, default=0.01
If the sum of absolute relative differences between the vapor and liquid mole fractions is less than this total, the pressure is assumed to be supercritical and the maximum pressure is sought at a lower value.
maxfactor : float, Optional, default=2
Factor to multiply by the pressure if it is too low (produces liquid or positive objective value). Not used if an unfeasible maximum pressure is found to bound the problem (critical for NaN result).
minfactor : float, Optional, default=0.5
Factor to multiply by the minimum pressure if it is too high (produces critical value).
Returns
-------
Prange : list
List of min and max pressure range
Pguess : float
An interpolated guess in the equilibrium pressure from Prange
"""
if len(kwargs) > 0:
logger.debug(
"'calc_Prange_xi' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _yi_global
# Guess a range from Pmin to the local max of the liquid curve
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
flag_hard_min = False
if Pmin != None:
flag_hard_min = True
if gtb.isiterable(Pmin):
Pmin = Pmin[0]
elif len(extrema):
Pmin = min(Pvspline(extrema))
if Pmin < 0:
Pmin = 1e3
else:
Pmin = 1e3
flag_hard_max = False
if Pmax != None:
flag_hard_max = True
if gtb.isiterable(Pmax):
Pmax = Pmax[0]
elif len(extrema):
Pmax = max(Pvspline(extrema))
else:
Pmax = 1e5
if Pmax < Pmin:
Pmax = Pmin * maxfactor
Prange = np.array([Pmin, Pmax])
#################### Find Minimum Pressure and Objective Function Value ###############
# Root of min from liquid curve is absolute minimum
ObjRange = np.zeros(2)
yi_range = yi
flag_max = False
flag_min = False
flag_critical = False
flag_liquid = False
flag_vapor = False
p = Prange[0]
for z in range(maxiter):
# Liquid properties
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
p, T, xi, Eos, density_opts=density_opts
)
if any(np.isnan(phil)):
logger.error("Estimated minimum pressure is too high.")
flag_max = True
flag_liquid = True
ObjRange[1] = np.inf
Prange[1] = p
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = minfactor * p
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
continue
if flagl in [1, 2]: # 'liquid' phase is as expected
# Calculate vapor phase properties and obj value
yi_range, phiv_min, flagv_min = calc_vapor_composition(
yi_range,
xi,
phil,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(xi, phil, phiv_min, phase="vapor")
if np.any(np.isnan(yi_range)):
logger.info("Estimated minimum pressure produces NaN")
flag_max = True
flag_liquid = True
Prange[1] = p
ObjRange[1] = obj
phiv_max, flagv_max = phiv_min, flagv_min
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# If within tolerance of liquid mole fraction
elif np.sum(np.abs(xi - yi_range) / xi) < xytol and flagv_min == 2:
logger.info(
"Estimated minimum pressure reproduces xi: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if (
flag_max or flag_hard_max
) and flag_liquid: # If a liquid phase exists at a higher pressure, this must bound the lower pressure
flag_min = True
ObjRange[0] = obj
Prange[0] = p
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
if np.abs(Prange[1] - Prange[0]) < ptol:
flag_critical = True
flag_max = False
ObjRange = [np.inf, np.inf]
Prange = [Pmin, Pmax]
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * Pmin
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
elif (
flag_min or flag_hard_min
) and flag_vapor: # If the 'liquid' phase is vapor at a lower pressure, this must bound the upper pressure
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
elif (
flag_critical
): # Couldn't find phase by lowering pressure, now raise it
ObjRange[0] = obj
Prange[0] = p
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * p
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else:
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = minfactor * p
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
if p < Pmin_allowed: # Below the allowed minimum pressure without finding a phase, search upward instead
flag_critical = True
flag_max = False
ObjRange = [np.inf, np.inf]
Prange = [Pmin, Pmax]
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * Pmin
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
# If 'vapor' phase is liquid or unattainable
elif flagv_min not in [0, 2, 4]:
logger.info(
"Estimated minimum pressure produces liquid: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if flag_hard_min and p <= Pmin:
flag_critical = True
if flag_max:
flag_max = False
flag_liquid = True
if flag_critical: # Looking for a super critical fluid
Prange[0] = p
ObjRange[0] = obj
flag_min = True
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * maxfactor
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else: # Looking for a vapor
Prange[1] = p
ObjRange[1] = obj
flag_max = True
phiv_max, flagv_max = phiv_min, flagv_min
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
# Found minimum pressure!
elif obj > 0:
logger.info(
"Found estimated minimum pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
Prange[0] = p
ObjRange[0] = obj
break
elif obj < 0:
logger.info(
"Estimated minimum pressure too high: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
flag_liquid = True
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
else:
raise ValueError(
"This shouldn't happen: xi {}, phil {}, flagl {}, yi {}, phiv {}, flagv {}, obj {}, flags: {} {} {}".format(
xi,
phil,
flagl,
yi_range,
phiv_min,
flagv_min,
obj,
flag_min,
flag_max,
flag_critical,
)
)
else:
logger.info(
"Estimated minimum pressure produced vapor as a 'liquid' phase: {}, Range {}".format(
p, Prange
)
)
flag_vapor = True
flag_min = True
Prange[0] = p
ObjRange[0] = np.nan
if flag_max or flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = maxfactor * Prange[0]
if (
(flag_hard_min or flag_min)
and (flag_hard_max or flag_max)
and (p < Prange[0] or p > Prange[1])
):
# if (p < Prange[0] and Prange[0] != Prange[1]) or (flag_max and p > Prange[1]):
p = (Prange[1] - Prange[0]) / 1 + Prange[0]
if p <= 0.0:
raise ValueError(
"Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}".format(
p, xi, T
)
)
if flag_hard_min and Pmin == p:
raise ValueError(
"In searching for the minimum pressure, the range {}, converged without a solution".format(
Prange
)
)
if z == maxiter - 1:
raise ValueError(
"Maximum Number of Iterations Reached: Proper minimum pressure for liquid density could not be found"
)
# A flag value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
#################### Find Maximum Pressure and Objective Function Value ###############
# Be sure guess in upper bound is larger than lower bound
if Prange[1] <= Prange[0]:
Prange[1] = Prange[0] * maxfactor
ObjRange[1] = 0.0
flag_min = (
False
) # Signals that the objective value starts to increase again and we must go back
p = Prange[1]
Parray = [Prange[1]]
ObjArray = [ObjRange[1]]
for z in range(maxiter):
# Liquid properties
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
p, T, xi, Eos, density_opts=density_opts
)
if any(np.isnan(phil)):
logger.info(
"Liquid fugacity coefficient should not be NaN, pressure could be too high."
)
flag_max = True
Prange[1] = p
ObjRange[1] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
continue
# Calculate vapor phase properties and obj value
yi_range, phiv_max, flagv_max = calc_vapor_composition(
yi_range,
xi,
phil,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(xi, phil, phiv_max, phase="vapor")
# If 'vapor' phase is a liquid
if flagv_max not in [0, 2, 4] or np.any(np.isnan(yi_range)):
logger.info(
"New Maximum Pressure: {} isn't vapor, flag={}, Obj Func: {}, Range {}".format(
p, flagv_max, obj, Prange
)
)
if flag_critical: # looking for critical fluid
Prange[0] = p
ObjRange[0] = obj
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * maxfactor
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else: # Looking for vapor phase
flag_max = True
Prange[1] = p
ObjRange[1] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# If 'liquid' composition is reproduced
elif np.sum(np.abs(xi - yi_range) / xi) < xytol: # If within xytol of the liquid mole fraction
logger.info(
"Estimated Maximum Pressure Reproduces xi: {}, Obj. Func: {}".format(
p, obj
)
)
flag_max = True
ObjRange[1] = obj
Prange[1] = p
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# Suitable objective value found
elif obj < 0:
logger.info(
"New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(
p, flagv_max, obj, Prange
)
)
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_min = False
break
else:
Parray.append(p)
ObjArray.append(obj)
# In an objective value "well"
if (z > 0 and ObjArray[-1] > 1.1 * ObjArray[-2]) or flag_min:
if not flag_min:
flag_min = True
Prange[1] = p
ObjRange[1] = obj
logger.info(
"Maximum Pressure (if it exists) between Pressure: {} and Obj Range: {}".format(
Prange, ObjRange
)
)
P0 = np.mean(Prange)
scale_factor = 10 ** (np.ceil(np.log10(P0)))
args = (xi, T, Eos, density_opts, mole_fraction_options, scale_factor)
p = gtb.solve_root(
lambda x, xi, T, Eos, density_opts, mole_fraction_options, scale_factor: objective_bubble_pressure(
x * scale_factor,
xi,
T,
Eos,
density_opts,
mole_fraction_options,
),
args=args,
x0=P0 / scale_factor,
method="TNC",
bounds=Prange / scale_factor,
)
p = p[0] * scale_factor
obj = objective_bubble_pressure(
p,
xi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if p < 0:
parray = np.linspace(Prange[0], Prange[1], 20)
obj_array = []
for ptmp in parray:
obj_tmp = objective_dew_pressure(
ptmp,
yi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
obj_array.append(obj_tmp)
spline = interpolate.Akima1DInterpolator(parray, obj_array)
p_min = spline.derivative().roots()
if len(p_min) > 1:
obj_tmp = []
for p_min_tmp in p_min:
obj_tmp.append(
objective_bubble_pressure(
p_min_tmp, xi, T, Eos, density_opts=density_opts
)
)
p_min = p_min[obj_tmp == np.nanmin(obj_tmp)]
elif len(p_min) == 0:
logger.error(
"Could not find minimum in pressure range:\n Pressure: {}\n Obj Value: {}".format(
parray, obj_array
)
)
p = p_min
obj = objective_bubble_pressure(
p, xi, T, Eos, density_opts=density_opts
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if obj > 0:
Prange[1] = p
ObjRange[1] = obj
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_min = False
else:
logger.error(
"Could not find maximum in pressure range:\n Pressure range {} best {}\n Obj Value range {} best {}".format(
Prange, p, ObjRange, obj
)
)
break
elif flag_max:
logger.info(
"New Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
Prange[0] = p
ObjRange[0] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
logger.info(
"New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if not flag_hard_max:
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
if flag_hard_max:
p = (Prange[1] - Prange[0]) * np.random.rand(1)[0] + Prange[0]
else:
p = np.nanmax([-intercept / slope, maxfactor * Prange[1]])
if p <= 0.0:
raise ValueError(
"Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}".format(
p, xi, T
)
)
if np.abs(Prange[1] - Prange[0]) < ptol:
raise ValueError(
"In searching for the minimum pressure, the range {}, converged without a solution".format(
Prange
)
)
if z == maxiter - 1 or flag_min:
if flag_min:
logger.error(
"Cannot reach objective value of zero. Final Pressure: {}, Obj. Func: {}".format(
p, obj
)
)
else:
logger.error(
"Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
)
Prange = np.array([np.nan, np.nan])
Pguess = np.nan
else:
logger.info(
"[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))
)
logger.info("Initial guess in pressure: {} Pa".format(Pguess))
_yi_global = yi_range
return Prange, Pguess
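# Illustrative sketch (added for clarity; `Eos` is an EOS object assumed to be built
# elsewhere in the package): the bracket returned by calc_Prange_xi is intended to
# feed the outer pressure solver for bubble-point calculations, mirroring the
# solve_root call made in calc_dew_pressure further below. Roughly:
#     Prange, Pestimate = calc_Prange_xi(T, xi, yi, Eos)
#     P = gtb.solve_root(
#         objective_bubble_pressure, args=(xi, T, Eos, {}, {}),
#         x0=Pestimate, method="bisect", bounds=Prange,
#     )
# Prange brackets a sign change of the bubble-point objective, so a bounded scalar
# root method can start from the interpolated guess Pestimate.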
def calc_Prange_yi(
T,
xi,
yi,
Eos,
density_opts={},
mole_fraction_options={},
Pmin=None,
Pmax=None,
Pmin_allowed=100,
maxiter=200,
ptol=1e-2,
xytol=0.01,
maxfactor=2,
minfactor=0.5,
**kwargs
):
r"""
Obtain min and max pressure values.
The vapor mole fraction is set, and the objective function evaluated at the returned minimum and maximum pressures is of opposite sign.
Parameters
----------
T : float
Temperature of the system [K]
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
maxiter : float, Optional, default=200
Maximum number of iterations in both the loop to find Pmin and the loop to find Pmax
Pmin : float, Optional, default=1000.0
[Pa] Minimum pressure in pressure range that restricts searched space. Used if local minimum isn't available for pressure curve for vapor composition.
Pmax : float, Optional, default=100000
If no local minima or maxima are identified for the vapor composition at this temperature, this value is used as an initial estimate of the maximum pressure range.
Pmin_allowed : float, Optional, default=100
Minimum allowed pressure in search, before looking for a super critical fluid
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
ptol : float, Optional, default=1e-2
If two iterations in the search for the maximum pressure are within this tolerance, the search is discontinued
xytol : float, Optional, default=0.01
If the sum of the absolute relative differences between the vapor and liquid mole fractions is less than this total, the pressure is assumed to be supercritical and the maximum pressure is sought at a lower value.
maxfactor : float, Optional, default=2
Factor to multiply by the pressure if it is too low (produces liquid or positive objective value). Not used if an unfeasible maximum pressure is found to bound the problem (critical for NaN result).
minfactor : float, Optional, default=0.5
Factor to multiply by the minimum pressure if it is too high (produces critical value).
Returns
-------
Prange : list
List of min and max pressure range
Pguess : float
An interpolated guess in the equilibrium pressure from Prange
"""
if len(kwargs) > 0:
logger.debug(
"'calc_Prange_yi' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _xi_global
# Guess a range from Pmin to the local max of the liquid curve
vlist, Plist = pressure_vs_volume_arrays(T, yi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
# Calculate the highest pressure possible
flag_hard_min = False
if Pmin is not None:
flag_hard_min = True
if gtb.isiterable(Pmin):
Pmin = Pmin[0]
elif len(extrema):
Pmin = min(Pvspline(extrema))
if Pmin < 0:
Pmin = 1e3
else:
Pmin = 1e3
flag_hard_max = False
if Pmax is not None:
flag_hard_max = True
if gtb.isiterable(Pmax):
Pmax = Pmax[0]
elif len(extrema):
Pmax = max(Pvspline(extrema))
else:
Pmax = 1e5
if Pmax < Pmin:
Pmax = Pmin * maxfactor
Prange = np.array([Pmin, Pmax])
ObjRange = np.zeros(2)
xi_range = xi
#################### Find Minimum Pressure and Objective Function Value ###############
flag_min = False
flag_max = False
flag_critical = False
flag_vapor = False
p = Prange[0]
for z in range(maxiter):
# Vapor properties
phiv, _, flagv = calc_vapor_fugacity_coefficient(
p, T, yi, Eos, density_opts=density_opts
)
if any(np.isnan(phiv)):
logger.error("Estimated minimum pressure is too high.")
flag_max = True
ObjRange[1] = np.inf
Prange[1] = p
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = minfactor * p
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
continue
if flagv in [0, 2, 4]:
# Calculate the liquid phase properties
xi_range, phil_min, flagl_min = calc_liquid_composition(
xi_range,
yi,
phiv,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(yi, phil_min, phiv, phase="liquid")
if np.any(np.isnan(xi_range)):
logger.info("Estimated Minimum Pressure produces NaN")
flag_max = True
flag_vapor = True
Prange[1] = p
ObjRange[1] = obj
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
elif (
np.sum(np.abs(yi - xi_range) / yi) < xytol and flagl_min == 2
): # If within xytol of the vapor mole fraction
logger.info(
"Estimated Minimum Pressure Reproduces yi: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if (
flag_critical
): # Couldn't find phase by lowering pressure, now raise it
ObjRange[0] = obj
Prange[0] = p
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * p
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else:
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phil_max, flagl_max = phil_min, flagl_min
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = minfactor * p
if p < Pmin_allowed: # Below the allowed minimum pressure without finding a phase, search upward instead
flag_critical = True
flag_max = False
ObjRange = [np.inf, np.inf]
Prange = [Pmin, Pmax]
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * Pmin
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
elif obj < 0:
Prange[0] = p
ObjRange[0] = obj
logger.info(
"Obtained estimated Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
break
elif obj > 0:
flag_max = True
logger.info(
"Estimated Minimum Pressure too High: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
ObjRange[1] = obj
Prange[1] = p
phil_max, flagl_max = phil_min, flagl_min
p = (Prange[1] - Prange[0]) * minfactor + Prange[0]
else:
logger.info(
"Estimated Minimum Pressure Produced Liquid instead of Vapor Phase: {}, Range {}".format(
p, Prange
)
)
if flag_hard_min and p <= Pmin:
flag_critical = True
if flag_max:
flag_max = False
if flag_critical: # Looking for a super critical fluid
Prange[0] = p
ObjRange[0] = obj
flag_min = True
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * maxfactor
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else: # Looking for a vapor
Prange[1] = p
ObjRange[1] = obj
flag_max = True
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
if Prange[0] > Prange[1]:
if flag_max and not flag_min and not flag_hard_min:
Prange[0] = minfactor * Prange[1]
ObjRange[0] = ObjRange[1]
elif not flag_hard_max:
Prange[1] = maxfactor * Prange[0]
ObjRange[1] = ObjRange[0]
else:
raise ValueError("Pmin should never be greater than Pmax")
if (
(flag_max or flag_hard_max)
and (flag_min or flag_hard_min)
and not Prange[0] <= p <= Prange[1]
):
p = (Prange[1] - Prange[0]) * np.random.rand(1)[0] + Prange[0]
if flag_hard_min and Pmin == p:
raise ValueError(
"In searching for the minimum pressure, the range {}, converged without a solution".format(
Prange
)
)
if p <= 0.0:
raise ValueError(
"Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}, results in a supercritical value without a coexistent fluid.".format(
p, xi, T
)
)
if z == maxiter - 1:
raise ValueError(
"Maximum Number of Iterations Reached: Proper minimum pressure for liquid density could not be found"
)
# Be sure guess in pressure is larger than lower bound
if Prange[1] <= Prange[0]:
Prange[1] = Prange[0] * 1.1
if z == 0:
ObjRange[1] = 0.0
## Check Pmax
flag_sol = False
flag_vapor = False
flag_min = False
p = Prange[1]
Parray = [Prange[1]]
ObjArray = [ObjRange[1]]
for z in range(maxiter):
# Calculate objective value
phiv, _, flagv = calc_vapor_fugacity_coefficient(
p, T, yi, Eos, density_opts=density_opts
)
xi_range, phil, flagl = calc_liquid_composition(
xi_range,
yi,
phiv,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(yi, phil, phiv, phase="liquid")
if z == 0:
ObjRange[1] = obj
if flagv not in [0, 2, 4]: # Ensure vapor is produced
flag_vapor = True
Prange[1] = p
ObjRange[1] = obj
logger.info(
"New Max Pressure: {} doesn't produce vapor, flag={}, Obj Func: {}, Range {}".format(
Prange[1], flagv, ObjRange[1], Prange
)
)
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
elif obj > 0: # Check pressure range
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
logger.info(
"New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(
Prange[1], flagv, ObjRange[1], Prange
)
)
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_sol = True
flag_min = False
break
elif flag_vapor:
Prange[0] = p
ObjRange[0] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
logger.info(
"New Max Pressure: {}, Obj. Func: {}, Range {}".format(
Prange[0], ObjRange[0], Prange
)
)
else:
Parray.append(p)
ObjArray.append(obj)
# In an objective value "well"
if (z > 0 and ObjArray[-1] < 1.1 * ObjArray[-2]) or flag_min:
if not flag_min:
flag_min = True
Prange[1] = p
ObjRange[1] = obj
logger.info(
"Maximum Pressure (if it exists) between Pressure: {} and Obj Range: {}".format(
Prange, ObjRange
)
)
P0 = np.mean(Prange)
scale_factor = 10 ** (np.ceil(np.log10(P0)))
args = (yi, T, Eos, density_opts, mole_fraction_options, scale_factor)
p = gtb.solve_root(
lambda x, yi, T, Eos, density_opts, mole_fraction_options, scale_factor: -objective_dew_pressure(
x * scale_factor,
yi,
T,
Eos,
density_opts,
mole_fraction_options,
),
args=args,
x0=P0 / scale_factor,
method="TNC",
bounds=Prange / scale_factor,
)
p = p[0] * scale_factor
obj = objective_dew_pressure(
p,
yi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if p < 0:
parray = np.linspace(Prange[0], Prange[1], 20)
obj_array = []
for ptmp in parray:
obj_tmp = objective_dew_pressure(
ptmp,
yi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
obj_array.append(obj_tmp)
spline = interpolate.Akima1DInterpolator(parray, obj_array)
p_min = spline.derivative().roots()
if len(p_min) > 1:
obj_tmp = []
for p_min_tmp in p_min:
obj_tmp.append(
objective_bubble_pressure(
p_min_tmp, xi, T, Eos, density_opts=density_opts
)
)
p_min = p_min[obj_tmp == np.nanmin(obj_tmp)]
elif len(p_min) == 0:
logger.error(
"Could not find minimum in pressure range:\n Pressure: {}\n Obj Value: {}".format(
parray, obj_array
)
)
p = p_min
obj = objective_bubble_pressure(
p, xi, T, Eos, density_opts=density_opts
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if obj > 0:
Prange[1] = p
ObjRange[1] = obj
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_min = False
else:
logger.error(
"Could not find maximum in pressure range:\n Pressure range {} best {}\n Obj Value range {} best {}".format(
Prange, p, ObjRange, obj
)
)
break
elif flag_hard_max:
logger.info(
"New Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
Prange[0] = p
ObjRange[0] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
logger.info(
"New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if not flag_hard_max:
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
p = np.nanmax([-intercept / slope, maxfactor * Prange[1]])
if z == maxiter - 1 or flag_min:
if flag_min:
logger.error(
"Cannot reach objective value of zero. Final Pressure: {}, Obj. Func: {}".format(
p, obj
)
)
else:
logger.error(
"Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
)
Prange = np.array([np.nan, np.nan])
Pguess = np.nan
elif flag_sol:
logger.info(
"[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))
)
logger.info("Initial guess in pressure: {} Pa".format(Pguess))
else:
logger.error(
"Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
)
_xi_global = xi_range
return Prange, Pguess
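# Note (added for clarity): in both calc_Prange_xi and calc_Prange_yi the returned
# Pguess is the zero of the secant line through the bracketing points
# (Prange[0], ObjRange[0]) and (Prange[1], ObjRange[1]). For example, with
# Prange = [1e5, 2e5] Pa and ObjRange = [0.4, -0.2]:
#     slope = (-0.2 - 0.4) / (2e5 - 1e5)   # -6e-6 per Pa
#     intercept = -0.2 - slope * 2e5       # 1.0
#     Pguess = -intercept / slope          # ~1.67e5 Pa, where the secant crosses zero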
def calc_vapor_composition(
yi,
xi,
phil,
P,
T,
Eos,
density_opts={},
maxiter=50,
tol=1e-6,
tol_trivial=0.05,
**kwargs
):
r"""
Find vapor mole fraction given pressure, liquid mole fraction, and temperature.
Objective function is the sum of the "mole numbers" predicted by the computed fugacity coefficients. Note that by "mole number" we mean that the prediction will only sum to one when the correct pressure is chosen in the outer loop. In this inner loop, we seek a mole fraction that reproduces itself in the prediction; until it does, the new "mole numbers" are normalized into mole fractions and used as the next guess.
In the case that a guess doesn't produce a gas or critical fluid, we use another function to produce a new guess.
Parameters
----------
yi : numpy.ndarray
Guess in vapor mole fraction of each component, sum(yi) should equal 1.0
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
phil : float
Fugacity coefficient of liquid at system pressure
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
maxiter : int, Optional, default=50
Maximum number of iteration for both the outer pressure and inner vapor mole fraction loops
tol : float, Optional, default=1e-6
Tolerance in sum of predicted yi "mole numbers"
tol_trivial : float, Optional, default=0.05
If the vapor and liquid mole fractions are within this tolerance, search for a different composition
kwargs : dict, Optional
Keyword arguments for :func:`~despasito.thermodynamics.calc.find_new_yi`
Returns
-------
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
phiv : float
Fugacity coefficient of vapor at system pressure
flag : int
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
"""
if np.any(np.isnan(phil)):
raise ValueError(
"Cannot obtain vapor mole fraction with fugacity coefficients of NaN"
)
global _yi_global
yi_total = [np.sum(yi)]
yi /= np.sum(yi)
flag_check_vapor = True # Make sure we only search for vapor compositions once
flag_trivial_sol = (
True
) # Make sure we only try to find alternative to trivial solution once
logger.info(" Solve yi: P {}, T {}, xi {}, phil {}".format(P, T, xi, phil))
for z in range(maxiter):
yi_tmp = yi / np.sum(yi)
# Try yi
phiv, _, flagv = calc_vapor_fugacity_coefficient(
P, T, yi_tmp, Eos, density_opts=density_opts
)
if (
any(np.isnan(phiv)) or flagv == 1
) and flag_check_vapor: # If vapor density doesn't exist
flag_check_vapor = False
if all(yi_tmp != 0.0) and len(yi_tmp) == 2:
logger.debug(" Composition doesn't produce a vapor, let's find one!")
yi_tmp = find_new_yi(
P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
)
flag_trivial_sol = False
if np.any(np.isnan(yi_tmp)):
phiv, _, flagv = [np.nan, np.nan, 3]
yinew = yi_tmp
break
else:
phiv, _, flagv = calc_vapor_fugacity_coefficient(
P, T, yi_tmp, Eos, density_opts=density_opts
)
yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
else:
logger.debug(
" Composition doesn't produce a vapor, we need a function to search compositions for more than two components."
)
yinew = yi
elif np.sum(np.abs(xi - yi_tmp) / xi) < tol_trivial and flag_trivial_sol:
flag_trivial_sol = False
if all(yi_tmp != 0.0) and len(yi_tmp) == 2:
logger.debug(
" Composition produces trivial solution, let's find a different one!"
)
yi_tmp = find_new_yi(
P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
)
flag_check_vapor = False
else:
logger.debug(
" Composition produces trivial solution, using random guess to reset"
)
yi_tmp = np.random.rand(len(yi_tmp))
yi_tmp /= np.sum(yi_tmp)
if np.any(np.isnan(yi_tmp)):
phiv, _, flagv = [np.nan, np.nan, 3]
yinew = yi_tmp
break
else:
phiv, _, flagv = calc_vapor_fugacity_coefficient(
P, T, yi_tmp, Eos, density_opts=density_opts
)
yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
else:
yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
yinew[np.isnan(yinew)] = 0.0
yi2 = yinew / np.sum(yinew)
phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
P, T, yi2, Eos, density_opts=density_opts
)
if any(np.isnan(phiv)):
phiv = np.nan
logger.error(
"Fugacity coefficient of vapor should not be NaN, pressure could be too high."
)
# Check for bouncing between values
if len(yi_total) > 3:
tmp1 = np.abs(np.sum(yinew) - yi_total[-2]) + np.abs(
yi_total[-1] - yi_total[-3]
)
if tmp1 < np.abs(np.sum(yinew) - yi_total[-1]) and flagv != flagv2:
logger.debug(
" Composition bouncing between values, let's find the answer!"
)
bounds = np.sort([yi_tmp[0], yi2[0]])
yi2, obj = bracket_bounding_yi(
P, T, phil, xi, Eos, bounds=bounds, density_opts=density_opts
)
phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
P, T, yi2, Eos, density_opts=density_opts
)
_yi_global = yi2
logger.info(
" Inner Loop Final (from bracketing bouncing values) yi: {}, Final Error on Smallest Fraction: {}".format(
yi2, obj
)
)
break
logger.debug(
" yi guess {}, yi calc {}, phiv {}, flag {}".format(
yi_tmp, yinew, phiv, flagv
)
)
logger.debug(
" Old yi_total: {}, New yi_total: {}, Change: {}".format(
yi_total[-1], np.sum(yinew), np.sum(yinew) - yi_total[-1]
)
)
# Check convergence
if abs(np.sum(yinew) - yi_total[-1]) < tol:
ind_tmp = np.where(yi_tmp == min(yi_tmp[yi_tmp > 0]))[0]
if np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp] < tol:
_yi_global = yi2
logger.info(
" Inner Loop Final yi: {}, Final Error on Smallest Fraction: {}%".format(
yi2,
np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp] * 100,
)
)
break
if z < maxiter - 1:
yi_total.append(np.sum(yinew))
yi = yinew
## If yi wasn't found in defined number of iterations
ind_tmp = np.where(yi_tmp == min(yi_tmp[yi_tmp > 0.0]))[0]
if flagv == 3:
yi2 = yinew / np.sum(yinew)
logger.info(" Could not converged mole fraction")
phiv2 = np.full(len(yi_tmp), np.nan)
flagv2 = np.nan
elif z == maxiter - 1:
yi2 = yinew / np.sum(yinew)
tmp = np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp]
logger.warning(
" More than {} iterations needed. Error in Smallest Fraction: {}%".format(
maxiter, tmp * 100
)
)
if tmp > 0.1: # If difference is greater than 10%
yinew = find_new_yi(
P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
)
yi2 = yinew / np.sum(yinew)
y1 = spo.least_squares(
objective_find_yi,
yi2[0],
bounds=(0.0, 1.0),
args=(P, T, phil, xi, Eos, density_opts),
)
yi = y1.x[0]
yi2 = np.array([yi, 1 - yi])
phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
P, T, yi2, Eos, density_opts=density_opts
)
obj = objective_find_yi(yi2, P, T, phil, xi, Eos, density_opts=density_opts)
logger.warning(
" Find yi with root algorithm, yi {}, obj {}".format(yi2, obj)
)
if obj > tol:
logger.error("Could not converge mole fraction")
phiv2 = np.full(len(yi_tmp), np.nan)
flagv2 = 3
return yi2, phiv2, flagv2
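# Illustrative sketch (added for clarity; the exact form of calc_new_mole_fractions
# is inferred from objective_find_yi below): the inner loop above is a successive-
# substitution update with K-factors Ki = phil_i / phiv_i. Each pass roughly does
#     ni = xi * phil / phiv          # unnormalized "mole numbers" for the vapor phase
#     yi_next = ni / np.sum(ni)      # normalized composition used as the next guess
# and convergence is declared once np.sum(ni) stops changing between iterations and
# the smallest mole fraction reproduces itself within `tol`.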
def calc_liquid_composition(
xi,
yi,
phiv,
P,
T,
Eos,
density_opts={},
maxiter=20,
tol=1e-6,
tol_trivial=0.05,
**kwargs
):
r"""
Find liquid mole fraction given pressure, vapor mole fraction, and temperature.
Objective function is the sum of the "mole numbers" predicted by the computed fugacity coefficients. Note that by "mole number" we mean that the prediction will only sum to one when the correct pressure is chosen in the outer loop. In this inner loop, we seek a mole fraction that reproduces itself in the prediction; until it does, the new "mole numbers" are normalized into mole fractions and used as the next guess.
In the case that a guess doesn't produce a liquid or critical fluid, we use another function to produce a new guess.
Parameters
----------
xi : numpy.ndarray
Guess in liquid mole fraction of each component, sum(xi) should equal 1.0
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
phiv : float
Fugacity coefficient of vapor at system pressure
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
maxiter : int, Optional, default=20
Maximum number of iteration for both the outer pressure and inner vapor mole fraction loops
tol : float, Optional, default=1e-6
Tolerance in sum of predicted xi "mole numbers"
tol_trivial : float, Optional, default=0.05
If the vapor and liquid mole fractions are within this tolerance, search for a different composition
kwargs : dict, Optional
Optional keywords for :func:`~despasito.thermodynamics.calc.find_new_xi`
Returns
-------
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
phil : float
Fugacity coefficient of liquid at system pressure
flag : int
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true
"""
global _xi_global
if np.any(np.isnan(phiv)):
raise ValueError(
"Cannot obtain liquid mole fraction with fugacity coefficients of NaN"
)
xi /= np.sum(xi)
xi_total = [np.sum(xi)]
flag_check_liquid = True # Make sure we only search for liquid compositions once
flag_trivial_sol = (
True
) # Make sure we only try to find alternative to trivial solution once
logger.info(" Solve xi: P {}, T {}, yi {}, phiv {}".format(P, T, yi, phiv))
for z in range(maxiter):
xi_tmp = xi / np.sum(xi)
# Try xi
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi_tmp, Eos, density_opts=density_opts
)
if (any(np.isnan(phil)) or flagl in [0, 4]) and flag_check_liquid:
flag_check_liquid = False
if all(xi_tmp != 0.0) and len(xi_tmp) == 2:
logger.debug(
" Composition doesn't produce a liquid, let's find one!"
)
xi_tmp = find_new_xi(
P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
)
flag_trivial_sol = False
if np.any(np.isnan(xi_tmp)):
phil, rhol, flagl = [np.nan, np.nan, 3]
xinew = xi_tmp
break
else:
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi_tmp, Eos, density_opts=density_opts
)
xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
else:
logger.debug(
" Composition doesn't produce a liquid, we need a function to search compositions for more than two components."
)
xinew = xi
elif np.sum(np.abs(yi - xi_tmp) / yi) < tol_trivial and flag_trivial_sol:
flag_trivial_sol = False
if all(xi_tmp != 0.0) and len(xi_tmp) == 2:
logger.debug(
" Composition produces trivial solution, let's find a different one!"
)
xi_tmp = find_new_xi(
P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
)
flag_check_liquid = False
else:
logger.debug(
" Composition produces trivial solution, using random guess to reset"
)
xi_tmp = np.random.rand(len(xi_tmp))
xi_tmp /= np.sum(xi_tmp)
if np.any(np.isnan(xi_tmp)):
phil, rhol, flagl = [np.nan, np.nan, 3]
xinew = xi_tmp
break
else:
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi_tmp, Eos, density_opts=density_opts
)
xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
else:
xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
xinew[np.isnan(xinew)] = 0.0
logger.debug(
" xi guess {}, xi calc {}, phil {}".format(
xi_tmp, xinew / np.sum(xinew), phil
)
)
logger.debug(
" Old xi_total: {}, New xi_total: {}, Change: {}".format(
xi_total[-1], np.sum(xinew), np.sum(xinew) - xi_total[-1]
)
)
# Check convergence
if abs(np.sum(xinew) - xi_total[-1]) < tol:
ind_tmp = np.where(xi_tmp == min(xi_tmp[xi_tmp > 0]))[0]
xi2 = xinew / np.sum(xinew)
if np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp] < tol:
_xi_global = xi2
logger.info(
" Inner Loop Final xi: {}, Final Error on Smallest Fraction: {}%".format(
xi2,
np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp] * 100,
)
)
break
if z < maxiter - 1:
xi_total.append(np.sum(xinew))
xi = xinew
xi2 = xinew / np.sum(xinew)
ind_tmp = np.where(xi_tmp == min(xi_tmp[xi_tmp > 0]))[0]
if z == maxiter - 1:
tmp = np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp]
logger.warning(
" More than {} iterations needed. Error in Smallest Fraction: {} %%".format(
maxiter, tmp * 100
)
)
if tmp > 0.1: # If difference is greater than 10%
xinew = find_new_xi(
P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
)
xinew = spo.least_squares(
objective_find_xi,
xinew[0],
bounds=(0.0, 1.0),
args=(P, T, phiv, yi, Eos, density_opts),
)
xi = xinew.x[0]
xi_tmp = np.array([xi, 1 - xi])
obj = objective_find_xi(xi_tmp, P, T, phiv, yi, Eos, density_opts=density_opts)
logger.warning(
" Find xi with root algorithm, xi {}, obj {}".format(xi_tmp, obj)
)
return xi_tmp, phil, flagl
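# Illustrative sketch (added for clarity, mirroring the note after
# calc_vapor_composition): the liquid-phase update uses the inverse K-factor, so each
# pass is roughly
#     ni = yi * phiv / phil          # unnormalized "mole numbers" for the liquid phase
#     xi_next = ni / np.sum(ni)      # normalized composition used as the next guess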
def find_new_yi(
P, T, phil, xi, Eos, bounds=(0.01, 0.99), npoints=30, density_opts={}, **kwargs
):
r"""
Search vapor mole fraction combinations for a new estimate that produces a vapor density.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phil : float
Fugacity coefficient of liquid at system pressure
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
bounds : tuple, Optional, default=(0.01, 0.99)
These bounds dictate the lower and upper boundary for the first component in a binary system.
npoints : int, Optional, default=30
Number of points to test between the bounds.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
"""
if len(kwargs) > 0:
logger.debug(
" 'find_new_yi' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
yi_ext = np.linspace(bounds[0], bounds[1], npoints) # Guess for yi
obj_ext = np.zeros(len(yi_ext))
flag_ext = np.zeros(len(yi_ext))
for i, yi in enumerate(yi_ext):
yi = np.array([yi, 1 - yi])
obj, flagv = objective_find_yi(
yi, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
)
flag_ext[i] = flagv
obj_ext[i] = obj
tmp = np.count_nonzero(~np.isnan(obj_ext))
logger.debug(" Number of valid mole fractions: {}".format(tmp))
if tmp == 0:
yi_final = np.nan
obj_final = np.nan
else:
# Remove any NaN
obj_tmp = obj_ext[~np.isnan(obj_ext)]
yi_tmp = yi_ext[~np.isnan(obj_ext)]
# Fit spline
spline = interpolate.Akima1DInterpolator(yi_tmp, obj_tmp)
yi_min = spline.derivative().roots()
if len(yi_min) > 1:
# Remove local maxima
yi_concav = spline.derivative(nu=2)(yi_min)
yi_min = [yi_min[i] for i in range(len(yi_min)) if yi_concav[i] > 0.0]
# Add end points if relevant
if len(yi_tmp) > 1:
if obj_tmp[0] < obj_tmp[1]:
yi_min.insert(0, yi_tmp[0])
if obj_tmp[-1] < obj_tmp[-2]:
yi_min.append(yi_tmp[-1])
yi_min = np.array(yi_min)
## Remove trivial solution
obj_trivial = np.abs(yi_min - xi[0]) / xi[0]
ind = np.where(obj_trivial == min(obj_trivial))[0][0]
logger.debug(
" Found multiple minima: {}, discard {} as trivial solution".format(
yi_min, yi_min[ind]
)
)
# Remove liquid roots
yi_min = np.array([yi_min[ii] for ii in range(len(yi_min)) if ii != ind])
if len(yi_min) > 1:
lyi = len(yi_min)
obj_tmp2 = np.zeros(lyi)
flagv_tmp2 = np.zeros(lyi)
for ii in range(lyi):
obj_tmp2[ii], flagv_tmp2[ii] = objective_find_yi(
yi_min[ii],
P,
T,
phil,
xi,
Eos,
density_opts=density_opts,
return_flag=True,
)
yi_tmp2 = [
yi_min[ii] for ii in range(len(yi_min)) if flagv_tmp2[ii] != 1
]
if len(yi_tmp2):
obj_tmp2 = [
obj_tmp2[ii]
for ii in range(len(obj_tmp2))
if flagv_tmp2[ii] != 1
]
yi_min = [yi_tmp2[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]]
else:
yi_min = [yi_min[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]]
if not len(yi_min):
# Choose values with lowest objective function
ind = np.where(np.abs(obj_tmp) == min(np.abs(obj_tmp)))[0][0]
obj_final = obj_tmp[ind]
yi_final = yi_tmp[ind]
else:
yi_final = yi_min[0]
obj_final = spline(yi_min[0])
logger.debug(" Found new guess in yi: {}, Obj: {}".format(yi_final, obj_final))
if not gtb.isiterable(yi_final):
yi_final = np.array([yi_final, 1 - yi_final])
return yi_final
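# Condensed restatement of find_new_yi (added for clarity): the search scans the
# binary composition on a coarse grid, fits an Akima spline to the objective, keeps
# interior minima (derivative roots with positive curvature), and discards the root
# closest to the trivial solution y1 ~ x1. Roughly:
#     y_grid = np.linspace(0.01, 0.99, 30)
#     obj = [objective_find_yi(np.array([y, 1.0 - y]), P, T, phil, xi, Eos) for y in y_grid]
#     spline = interpolate.Akima1DInterpolator(y_grid, obj)
#     candidates = spline.derivative().roots()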
def bracket_bounding_yi(
P,
T,
phil,
xi,
Eos,
bounds=(0.01, 0.99),
maxiter=50,
tol=1e-7,
density_opts={},
**kwargs
):
r"""
Search binary vapor mole fraction combinations for a new estimate that produces a vapor density.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phil : float
Fugacity coefficient of liquid at system pressure
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
bounds : tuple, Optional, default=(0.01, 0.99)
These bounds dictate the lower and upper boundary for the first component in a binary system.
maxiter : int, Optional, default=50
Maximum number of iterations
tol : float, Optional, default=1e-7
Tolerance to quit search for yi
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
flag : int
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_saturation_properties' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.size(bounds) != 2:
raise ValueError("Given bounds on y1 must be of length two.")
bounds = np.array(bounds)
obj_bounds = np.zeros(2)
flag_bounds = np.zeros(2)
obj_bounds[0], flag_bounds[0] = objective_find_yi(
bounds[0], P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
)
obj_bounds[1], flag_bounds[1] = objective_find_yi(
bounds[1], P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
)
if flag_bounds[0] == flag_bounds[1]:
logger.error(
" Both mole fractions have flag, {}, continue seeking convergence".format(
flag_bounds[0]
)
)
y1 = bounds[1]
flagv = flag_bounds[1]
else:
flag_high_vapor = False
for i in np.arange(maxiter):
y1 = np.mean(bounds)
obj, flagv = objective_find_yi(
y1, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
)
if not flag_high_vapor:
ind = np.where(flag_bounds == flagv)[0][0]
if flagv == 0 and obj > 1 / tol:
flag_high_vapor = True
bounds[0], obj_bounds[0], flag_bounds[0] = (
bounds[ind],
obj_bounds[ind],
flag_bounds[ind],
)
ind = 1
else:
if obj < obj_bounds[0]:
ind = 0
else:
ind = 1
bounds[ind], obj_bounds[ind], flag_bounds[ind] = y1, obj, flagv
logger.debug(
" Bouncing mole fraction new bounds: {}, obj: {}, flag: {}".format(
bounds, obj_bounds, flag_bounds
)
)
# Check convergence
if np.abs(bounds[1] - bounds[0]) < tol:
break
ind_array = np.where(flag_bounds == 0)[0]
if np.size(ind_array) == 1:
ind = ind_array[0]
else:
ind = np.where(obj_bounds == np.min(obj_bounds))[0][0]
y1, flagv = bounds[ind], flag_bounds[ind]
if i == maxiter - 1:
logger.debug(
" Bouncing mole fraction, max iterations ended with, y1={}, flagv={}".format(
y1, flagv
)
)
else:
logger.debug(
" Bouncing mole fractions converged to y1={}, flagv={}".format(y1, flagv)
)
return np.array([y1, 1 - y1]), flagv
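# Note (added for clarity): bracket_bounding_yi bisects on the phase flag rather than
# on the sign of the objective. The two bounds are expected to carry different flags;
# each midpoint replaces the bound sharing its flag, shrinking the interval onto the
# composition where a vapor root appears. A minimal call on a binary system:
#     yi, flagv = bracket_bounding_yi(P, T, phil, xi, Eos, bounds=(0.01, 0.99))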
def objective_find_yi(yi, P, T, phil, xi, Eos, density_opts={}, return_flag=False):
r"""
Objective function for solving for stable vapor mole fraction.
Parameters
----------
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phil : float
Fugacity coefficient of liquid at system pressure
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
return_flag : bool, Optional, default=False
If True, the objective value and flagv are returned; otherwise, only the objective value is returned
Returns
-------
obj : numpy.ndarray
Objective function for solving for vapor mole fractions
flag : int, Optional
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed. Only returned when `return_flag` is True
"""
if isinstance(yi, float) or np.size(yi) == 1:
if gtb.isiterable(yi):
yi = np.array([yi[0], 1 - yi[0]])
else:
yi = np.array([yi, 1 - yi])
elif isinstance(yi, list):
yi = np.array(yi)
yi /= np.sum(yi)
phiv, _, flagv = calc_vapor_fugacity_coefficient(
P, T, yi, Eos, density_opts=density_opts
)
yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
yi2 = yinew / np.sum(yinew)
if np.any(np.isnan(yi2)):
obj = np.nan
else:
phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
P, T, yi2, Eos, density_opts=density_opts
)
obj = np.sum(np.abs(yinew - xi * phil / phiv2))
logger.debug(
" Guess yi: {}, calc yi: {}, diff={}, flagv {}".format(yi, yi2, obj, flagv)
)
if return_flag:
return obj, flagv
else:
return obj
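# Note (added for clarity): objective_find_yi measures self-consistency of a trial
# vapor composition; it approaches zero when the composition predicted from the
# fugacity-coefficient ratio reproduces the trial value. A scalar first mole fraction
# is accepted and expanded to a binary composition, e.g.
#     obj = objective_find_yi(0.3, P, T, phil, xi, Eos)   # evaluated at yi = [0.3, 0.7]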
def find_new_xi(
P, T, phiv, yi, Eos, density_opts={}, bounds=(0.001, 0.999), npoints=30, **kwargs
):
r"""
Search liquid mole fraction combinations for a new estimate that produces a liquid density.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phiv : float
Fugacity coefficient of vapor at system pressure
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
bounds : tuple, Optional, default=(0.001, 0.999)
These bounds dictate the lower and upper boundary for the first component in a binary system.
npoints : int, Optional, default=30
Number of points to test between the bounds.
Returns
-------
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
"""
if len(kwargs) > 0:
logger.debug(
" 'find_new_xi' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
xi_ext = np.linspace(bounds[0], bounds[1], npoints) # Guess for xi
obj_ext = np.zeros(len(xi_ext))
flag_ext = np.zeros(len(xi_ext))
for i, xi in enumerate(xi_ext):
xi = np.array([xi, 1 - xi])
obj, flagl = objective_find_xi(
xi, P, T, phiv, yi, Eos, density_opts=density_opts, return_flag=True
)
flag_ext[i] = flagl
obj_ext[i] = obj
tmp = np.count_nonzero(~np.isnan(obj_ext))
logger.debug(" Number of valid mole fractions: {}".format(tmp))
if tmp == 0:
xi_final = np.nan
obj_final = np.nan
else:
# Remove any NaN
obj_tmp = obj_ext[~np.isnan(obj_ext)]
xi_tmp = xi_ext[~np.isnan(obj_ext)]
spline = interpolate.Akima1DInterpolator(xi_tmp, obj_tmp)
xi_min = spline.derivative().roots()
if len(xi_min) > 1:
# Remove local maxima
xi_concav = spline.derivative(nu=2)(xi_min)
xi_min = [xi_min[i] for i in range(len(xi_min)) if xi_concav[i] > 0.0]
# Add end points if relevant
if len(xi_tmp) > 1:
if obj_tmp[0] < obj_tmp[1]:
xi_min.insert(0, xi_tmp[0])
if obj_tmp[-1] < obj_tmp[-2]:
xi_min.append(xi_tmp[-1])
xi_min = np.array(xi_min)
# Remove trivial solution
obj_trivial = np.abs(xi_min - yi[0]) / yi[0]
ind = np.where(obj_trivial == min(obj_trivial))[0][0]
logger.debug(
" Found multiple minima: {}, discard {} as trivial solution".format(
xi_min, xi_min[ind]
)
)
xi_min = np.array([xi_min[ii] for ii in range(len(xi_min)) if ii != ind])
if not len(xi_min):
# Choose values with lowest objective function
ind = np.where(np.abs(obj_tmp) == min(np.abs(obj_tmp)))[0][0]
obj_final = obj_tmp[ind]
xi_final = xi_tmp[ind]
else:
xi_final = xi_min[0]
obj_final = spline(xi_min[0])
logger.debug(" Found new guess in xi: {}, Obj: {}".format(xi_final, obj_final))
if not gtb.isiterable(xi_final):
xi_final = np.array([xi_final, 1 - xi_final])
return xi_final
def objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts={}, return_flag=False):
r"""
Objective function for solving for stable liquid mole fraction.
Parameters
----------
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phiv : float
Fugacity coefficient of vapor at system pressure
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
return_flag : bool, Optional, default=False
If True, the objective value and flagl are returned; otherwise, only the objective value is returned
Returns
-------
obj : numpy.ndarray
Objective function for solving for liquid mole fractions
flag : int, Optional
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed. Only returned when `return_flag` is True
"""
if isinstance(xi, float) or len(xi) == 1:
if gtb.isiterable(xi):
xi = np.array([xi[0], 1 - xi[0]])
else:
xi = np.array([xi, 1 - xi])
elif isinstance(xi, list):
xi = np.array(xi)
xi /= np.sum(xi)
phil, _, flagl = calc_liquid_fugacity_coefficient(
P, T, xi, Eos, density_opts=density_opts
)
xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
xi2 = xinew / np.sum(xinew)
if np.any(np.isnan(xi2)):
obj = np.nan
else:
phil2, _, flagl2 = calc_liquid_fugacity_coefficient(
P, T, xi2, Eos, density_opts=density_opts
)
obj = np.sum(np.abs(xinew - xi * phiv / phil2))
logger.debug(
" Guess xi: {}, calc xi: {}, diff={}, flagl {}".format(xi, xi2, obj, flagl)
)
if return_flag:
return obj, flagl
else:
return obj
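# Note (added for clarity): objective_find_xi is the liquid-side counterpart of
# objective_find_yi; calc_liquid_composition falls back to it through
# scipy.optimize.least_squares when direct substitution stalls, roughly as
#     result = spo.least_squares(objective_find_xi, 0.5, bounds=(0.0, 1.0),
#                                args=(P, T, phiv, yi, Eos, {}))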
def objective_bubble_pressure(
P, xi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs
):
r"""
Objective function used to search pressure values and solve outer loop of constant temperature bubble point calculations.
Parameters
----------
P : float
[Pa] Guess in pressure of the system
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
Returns
-------
obj_value : float
:math:`\sum\frac{x_{i}\phi_{l}}{\phi_v}-1`
"""
if len(kwargs) > 0:
logger.debug(
"'objective_bubble_pressure' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _yi_global
if P < 0:
return 10.0
logger.info("P Guess: {} Pa".format(P))
# find liquid density
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi, Eos, density_opts=density_opts
)
yinew, phiv, flagv = calc_vapor_composition(
_yi_global,
xi,
phil,
P,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
_yi_global = yinew / np.sum(yinew)
# given final yi recompute
phiv, rhov, flagv = calc_vapor_fugacity_coefficient(
P, T, _yi_global, Eos, density_opts=density_opts
)
Pv_test = Eos.pressure(rhov, T, _yi_global)
obj_value = equilibrium_objective(xi, phil, phiv, phase="vapor")
logger.info("Obj Func: {}, Pset: {}, Pcalc: {}".format(obj_value, P, Pv_test[0]))
return obj_value
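# Note (added for clarity): objective_bubble_pressure is the scalar residual handed to
# the outer pressure solver. For a well-behaved mixture it is positive below the bubble
# pressure (sum(xi*K) > 1) and negative above it, which is the obj > 0 / obj < 0
# bracket sought by calc_Prange_xi, e.g.
#     obj_low = objective_bubble_pressure(Prange[0], xi, T, Eos)    # expected > 0
#     obj_high = objective_bubble_pressure(Prange[1], xi, T, Eos)   # expected < 0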
def objective_dew_pressure(
P, yi, T, Eos, density_opts={}, mole_fraction_options={}, **kwargs
):
r"""
Objective function used to search pressure values and solve outer loop of constant temperature dew point calculations.
Parameters
----------
P : float
[Pa] Guess in pressure of the system
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
Returns
-------
obj_value : float
:math:`\sum\frac{y_{i}\phi_v}{\phi_l}-1`
"""
if len(kwargs) > 0:
logger.debug(
"'objective_dew_pressure' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _xi_global
if P < 0:
return 10.0
logger.info("P Guess: {} Pa".format(P))
# find liquid density
phiv, rhov, flagv = calc_vapor_fugacity_coefficient(
P, T, yi, Eos, density_opts=density_opts
)
xinew, phil, flagl = calc_liquid_composition(
_xi_global,
yi,
phiv,
P,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
_xi_global = xinew / np.sum(xinew)
# given final yi recompute
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, _xi_global, Eos, density_opts=density_opts
)
Pv_test = Eos.pressure(rhol, T, _xi_global)
obj_value = equilibrium_objective(yi, phil, phiv, phase="liquid")
logger.info("Obj Func: {}, Pset: {}, Pcalc: {}".format(obj_value, P, Pv_test[0]))
return obj_value
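# Note (added for clarity): objective_dew_pressure mirrors the bubble-point residual
# with the phases swapped; sum(yi*phiv/phil) - 1 is typically negative below the dew
# pressure and positive above it, matching the obj < 0 / obj > 0 bracket sought by
# calc_Prange_yi, e.g.
#     obj_low = objective_dew_pressure(Prange[0], yi, T, Eos)    # expected < 0
#     obj_high = objective_dew_pressure(Prange[1], yi, T, Eos)   # expected > 0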
def calc_dew_pressure(
yi,
T,
Eos,
density_opts={},
mole_fraction_options={},
Pguess=None,
method="bisect",
pressure_options={},
Psat_set=1e7,
**kwargs
):
r"""
Calculate dew point mole fraction and pressure given system vapor mole fraction and temperature.
Parameters
----------
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
Pguess : float, Optional, default=None
[Pa] Guess the system pressure at the dew point. A value of None will force an estimation based on the saturation pressure of each component.
Psat_set : float, Optional, default=1e+7
[Pa] Set the saturation pressure if the pure component is above the critical point in these conditions
method : str, Optional, default="bisect"
Choose the method used to solve the dew point calculation
pressure_options : dict, Optional, default={}
Options used in the given method, "method", to solve the outer loop in the solving algorithm
kwargs
Keyword arguments for :func:`~despasito.thermodynamics.calc.calc_saturation_properties`
Returns
-------
P : float
[Pa] Pressure of the system
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
flagl : int
Flag identifying the fluid type for the liquid mole fractions; the expected value is liquid (1). A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true
flagv : int
Flag identifying the fluid type for the vapor mole fractions; the expected value is vapor (0). A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
obj : float
Objective function value
"""
if len(kwargs) > 0:
logger.debug(
"'calc_dew_pressure' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _xi_global
# Estimate pure component vapor pressures
Psat = np.zeros_like(yi)
for i in range(np.size(yi)):
yi_tmp = np.zeros_like(yi)
yi_tmp[i] = 1.0
Psat[i], _, _ = calc_saturation_properties(
T, yi_tmp, Eos, density_opts=density_opts, **kwargs
)
if np.isnan(Psat[i]):
Psat[i] = Psat_set
logger.warning(
"Component, {}, is above its critical point. Psat is assumed to be {}.".format(
i + 1, Psat[i]
)
)
# Estimate initial pressure
if Pguess is None:
P = 1.0 / np.sum(yi / Psat)
else:
P = Pguess
# Estimate initial xi
if "_xi_global" not in globals() or any(np.isnan(_xi_global)):
_xi_global = P * (yi / Psat)
_xi_global /= np.sum(_xi_global)
_xi_global = copy.deepcopy(_xi_global)
logger.info("Guess xi in calc_dew_pressure with Psat: {}".format(_xi_global))
xi = _xi_global
Prange, Pestimate = calc_Prange_yi(
T,
xi,
yi,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
**kwargs
)
if np.any(np.isnan(Prange)):
raise ValueError(
"Neither a suitable pressure range nor a guess in pressure could be found or was given."
)
else:
if Pguess is not None:
if Pguess > Prange[1] or Pguess < Prange[0]:
logger.warning(
"Given guess in pressure, {}, is outside of the identified pressure range, {}. Using estimated pressure, {}.".format(
Pguess, Prange, Pestimate
)
)
P = Pestimate
else:
logger.warning(
"Using given guess in pressure, {}, that is inside identified pressure range.".format(
Pguess
)
)
P = Pguess
else:
P = Pestimate
P = gtb.solve_root(
objective_dew_pressure,
args=(yi, T, Eos, density_opts, mole_fraction_options),
x0=P,
method=method,
bounds=Prange,
options=pressure_options,
)
# find vapor density and fugacity
phiv, rhov, flagv = calc_vapor_fugacity_coefficient(
P, T, yi, Eos, density_opts=density_opts
)
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi, Eos, density_opts=density_opts
)
if "tol" in mole_fraction_options:
if mole_fraction_options["tol"] > 1e-10:
mole_fraction_options["tol"] = 1e-10
obj = objective_dew_pressure(
P,
yi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
logger.info(
"Final Output: Obj {}, P {} Pa, flagl {}, xi {}".format(
obj, P, flagl, _xi_global
)
)
return P, xi, flagl, flagv, obj
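# Hedged usage sketch (not part of the original module). Assuming `eos` is an
# EOS object built elsewhere with despasito and that the positional signature
# is (yi, T, Eos, ...), a dew-point call might look like:
#
#     yi = np.array([0.4, 0.6])        # vapor mole fractions
#     P, xi, flagl, flagv, obj = calc_dew_pressure(yi, 320.0, eos)
#
# The returned xi is the equilibrium liquid composition at the dew pressure P.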
def calc_bubble_pressure(
xi,
T,
Eos,
density_opts={},
mole_fraction_options={},
Pguess=None,
Psat_set=1e7,
method="bisect",
pressure_options={},
**kwargs
):
r"""
Calculate bubble point mole fraction and pressure given system liquid mole fraction and temperature.
Parameters
----------
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
T : float
[K] Temperature of the system
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
Pguess : float, Optional, default=None
[Pa] Guess the system pressure at the bubble point. A value of None will force an estimation based on the saturation pressure of each component.
Psat_set : float, Optional, default=1e+7
[Pa] Set the saturation pressure if the pure component is above the critical point in these conditions
method : str, Optional, default="bisect"
Choose the method used to solve the bubble point calculation
pressure_options : dict, Optional, default={}
Options used in the given method, ``method``, to solve the outer loop in the solving algorithm
kwargs
Keyword arguments for :func:`~despasito.thermodynamics.calc.calc_saturation_properties`
Returns
-------
P : float
[Pa] Pressure of the system
yi : numpy.ndarray
Mole fraction of each component, sum(yi) should equal 1.0
flagv : int
Flag identifying the fluid type for the vapor mole fractions; the expected value is vapor, 0. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
flagl : int
Flag identifying the fluid type for the liquid mole fractions; the expected value is liquid, 1. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true
obj : float
Objective function value
"""
if len(kwargs) > 0:
logger.debug(
"'calc_bubble_pressure' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _yi_global
Psat = np.zeros_like(xi)
for i in range(np.size(xi)):
xi_tmp = np.zeros_like(xi)
xi_tmp[i] = 1.0
Psat[i], _, _ = calc_saturation_properties(
T, xi_tmp, Eos, density_opts=density_opts, **kwargs
)
if np.isnan(Psat[i]):
Psat[i] = Psat_set
logger.warning(
"Component, {}, is above its critical point. Psat is assumed to be {}.".format(
i + 1, Psat[i]
)
)
# Estimate initial pressure
if Pguess is None:
P = 1.0 / np.sum(xi / Psat)
else:
P = Pguess
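# Estimate initial yi via a Raoult's-law style relation: y_i ~ x_i * Psat_i / P,
# normalized below so that sum(y_i) = 1.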
if "_yi_global" not in globals() or any(np.isnan(_yi_global)):
_yi_global = xi * Psat / P
_yi_global /= np.nansum(_yi_global)
_yi_global = copy.deepcopy(_yi_global)
logger.info("Guess yi in calc_bubble_pressure with Psat: {}".format(_yi_global))
yi = _yi_global
Prange, Pestimate = calc_Prange_xi(
T,
xi,
yi,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
**kwargs
)
if np.any(np.isnan(Prange)):
raise ValueError(
"A suitable pressure range could not be found and no valid pressure guess was given."
)
else:
if Pguess is not None:
if Pguess > Prange[1] or Pguess < Prange[0]:
logger.warning(
"Given guess in pressure, {}, is outside of the identified pressure range, {}. Using estimated pressure, {}.".format(
Pguess, Prange, Pestimate
)
)
P = Pestimate
else:
logger.warning(
"Using given guess in pressure, {}, that is inside the identified pressure range.".format(
Pguess
)
)
P = Pguess
else:
P = Pestimate
P = gtb.solve_root(
objective_bubble_pressure,
args=(xi, T, Eos, density_opts, mole_fraction_options),
x0=P,
method=method,
bounds=Prange,
options=pressure_options,
)
# find liquid density and fugacity
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
P, T, xi, Eos, density_opts=density_opts
)
phiv, rhov, flagv = calc_vapor_fugacity_coefficient(
P, T, yi, Eos, density_opts=density_opts
)
if "tol" in mole_fraction_options:
if mole_fraction_options["tol"] > 1e-10:
mole_fraction_options["tol"] = 1e-10
obj = objective_bubble_pressure(
P,
xi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
logger.info(
"Final Output: Obj {}, P {} Pa, flagv {}, yi {}".format(
obj, P, flagv, _yi_global
)
)
return P, _yi_global, flagv, flagl, obj
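# Hedged consistency sketch (not part of the original module). For a binary
# mixture, one rough sanity check is that the bubble and dew calculations are
# approximate inverses of each other, e.g.
#
#     P_bub, yi, flagv, flagl, _ = calc_bubble_pressure(np.array([0.3, 0.7]), 320.0, eos)
#     P_dew, xi, _, _, _ = calc_dew_pressure(yi, 320.0, eos)
#     # xi should be close to [0.3, 0.7] and P_dew close to P_bub
#
# Here `eos` is assumed to be an EOS object constructed elsewhere with despasito.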
def hildebrand_solubility(
rhol, xi, T, Eos, dT=0.1, tol=1e-4, density_opts={}, **kwargs
):
r"""
Calculate the solubility parameter based on temperature and composition.
This function is based on the method described in *Calculation of Solubility Parameter Using Perturbed-Chain SAFT and Cubic-Plus-Association Equations of State*, Ind. Eng. Chem. Res. 2008, 47, 9663-9669.
Parameters
----------
rhol : float
Liquid molar density [mol/:math:`m^3`]
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
T : float
Temperature of the system [K]
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
dT : float, Optional, default=0.1
Change in temperature used in calculating the derivative with central difference method
tol : float, Optional, default=1e-4
This cutoff value evaluates the extent to which the integrand of the calculation has decayed. If the last value of the array is greater than tol, the remaining area is estimated as a triangle, with the intercept estimated from a linear fit through the previous four points.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
delta : float
Solubility parameter [:math:`Pa^{1/2}`], the square root of the cohesive energy density (cohesive energy per molar volume)
"""
if len(kwargs) > 0:
logger.debug(
"'hildebrand_solubility' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
R = constants.Nav * constants.kb
RT = T * R
if gtb.isiterable(rhol):
logger.info("rhol should be a float, not {}".format(rhol))
# Find dZdT
vlist, Plist1 = pressure_vs_volume_arrays(
T - dT, xi, Eos, **density_opts, max_density=rhol
)
vlist2, Plist2 = pressure_vs_volume_arrays(
T + dT, xi, Eos, **density_opts, max_density=rhol
)
vlist, Plist = pressure_vs_volume_arrays(
T, xi, Eos, **density_opts, max_density=rhol
)
if any(vlist != vlist2):
logger.error("Dependent variable vectors must be identical.")
int_tmp = (Plist2 - Plist1) / (2 * dT) / R - Plist / (RT)
integrand_list = gaussian_filter1d(int_tmp, sigma=0.1)
# Calculate U_res
integrand_spline = interpolate.InterpolatedUnivariateSpline(
vlist, integrand_list, ext=1
)
U_res = -RT * integrand_spline.integral(1 / rhol, vlist[-1])
# Check if function converged before taking integral, if not, correct area
if integrand_list[-1] > tol:
slope, yroot = | np.polyfit(vlist[-4:], integrand_list[-4:], 1) | numpy.polyfit |
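# Hedged sketch of the tail correction described in the docstring (the rest of
# this function is not shown above). The last four points of integrand_list are
# fit with a line; its x-intercept gives the volume where the integrand is
# extrapolated to reach zero, and the leftover area is treated as a triangle:
#
#     v_root = -yroot / slope                              # x-intercept of the fit
#     tail_area = 0.5 * (v_root - vlist[-1]) * integrand_list[-1]
#     # tail_area would then be folded into U_res with the same -RT scaling
#
# The names v_root and tail_area are illustrative, not from the source.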
import os
import sys
import numpy as np
import scipy
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import cv2
from PIL import Image
from skimage import io
from skimage.transform import resize
import matplotlib.pyplot as plt
sys.path.append('../')
from model.models import CRNet
from config.cfg import cfg
def viz(img_path, model=CRNet()):
model = model.float()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
model.load_state_dict(torch.load('./model/crnet.pth'))
model = model.to(device)
model.eval()
image = resize(io.imread(img_path), (224, 224), mode='constant')
image[:, :, 0] -= np.mean(image[:, :, 0])
image[:, :, 1] -= np.mean(image[:, :, 1])
image[:, :, 2] -= np.mean(image[:, :, 2])
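# Reorder the image from HWC (skimage) to CHW (PyTorch channels-first) layout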
image = | np.transpose(image, [2, 0, 1]) | numpy.transpose |
import os
import numpy as np
import time
import argparse
import sys
from math import ceil
from random import Random
import time
import random
import torch
import torch.distributed as dist
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.functional as F
from torch.multiprocessing import Process
import torchvision
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import torchvision.models as models
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import datetime
import LocalSGD as optim
import util_v4 as util
from comm_helpers import SyncAllreduce, SyncAllreduce_1, SyncAllreduce_2
import os
from scipy.io import loadmat
import json
from scipy import io
from dataset.cifar import get_cifar10, get_emnist, get_svhn
from torch.optim.lr_scheduler import LambdaLR
import math
parser = argparse.ArgumentParser(description='CIFAR-10 baseline')
parser.add_argument('--name','-n',
default="default",
type=str,
help='experiment name, used for saving results')
parser.add_argument('--backend',
default="nccl",
type=str,
help='distributed communication backend (e.g. nccl or gloo)')
parser.add_argument('--GPU_list',
default='0',
type=str,
help='gpu list')
parser.add_argument('--dataset',
default="cifar10",
type=str,
help='dataset name')
parser.add_argument('--model',
default="res_gn",
type=str,
help='neural network model')
parser.add_argument('--alpha',
default=0.2,
type=float,
help='alpha')
parser.add_argument('--gmf',
default=0,
type=float,
help='global momentum factor')
parser.add_argument('--lr',
default=0.1,
type=float,
help='learning rate')
parser.add_argument('--basicLabelRatio',
default=0.4,
type=float,
help='basicLabelRatio')
parser.add_argument('--bs',
default=64,
type=int,
help='batch size on each worker')
parser.add_argument('--epoch',
default=300,
type=int,
help='total epoch')
parser.add_argument('--cp',
default=8,
type=int,
help='communication period / work per clock')
parser.add_argument('--print_freq',
default=100,
type=int,
help='print info frequency')
parser.add_argument('--rank',
default=0,
type=int,
help='the rank of worker')
parser.add_argument('--size',
default=8,
type=int,
help='number of workers')
parser.add_argument('--seed',
default=1,
type=int,
help='random seed')
parser.add_argument('--num_comm_ue',
default=11,
type=int,
help='communication user number')
parser.add_argument('--iid',
default=1,
type=int,
help='iid')
parser.add_argument('--class_per_device',
default=1,
type=int,
help='class_per_device')
parser.add_argument('--labeled',
default=0,
type=int,
help='labeled all data')
parser.add_argument('--H',
default=0,
type=int,
help='whether use hierarchical method')
parser.add_argument('--save', '-s',
action='store_true',
help='whether save the training results')
parser.add_argument('--ip_address',
default="10.129.2.142",
type=str,
help='ip_address')
parser.add_argument('--master_port',
default="29021",
type=str,
help='master port')
parser.add_argument('--experiment_name',
default="Major1_setting1",
type=str,
help='name of this experiment')
parser.add_argument('--k-img', default=65536, type=int, ### 65536
help='number of examples')
parser.add_argument('--num_data_server', default=1000, type=int,
help='number of samples in server')
parser.add_argument('--num-data-server', default=1000, type=int,
help='number of labeled examples in server')
parser.add_argument('--num-devices', default=10, type=int,
help='num of devices')
args = parser.parse_args()
def get_cosine_schedule_with_warmup(optimizer,
num_warmup_steps,
num_training_steps,
num_cycles=7./16.,
last_epoch=-1,lr_weight=1):
def _lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
no_progress = float(current_step - num_warmup_steps) / \
float(max(1, num_training_steps - num_warmup_steps))
num_cycles = 7.0/16.0*(1024*1024 - num_warmup_steps)/(1024*200 - num_warmup_steps)
return max(0.00000, math.cos(math.pi * num_cycles * no_progress))
return LambdaLR(optimizer, _lr_lambda, last_epoch)
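# Hedged usage note (not from the original source): the returned LambdaLR scales
# the optimizer's base lr by a factor that ramps linearly from 0 to 1 over
# num_warmup_steps and then decays along the cosine term, e.g.
#
#     scheduler = get_cosine_schedule_with_warmup(optimizer,
#                                                 5 * iters_per_epoch,
#                                                 300 * iters_per_epoch)
#     # call scheduler.step() once after each optimizer.step()
#
# iters_per_epoch is a placeholder name; the training code below uses args.iteration.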
######### Assign Ranks to different GPUs
GRU_list = [i for i in args.GPU_list]
if args.H:
increase_tmp = args.size//len(GRU_list)
else:
increase_tmp = (args.size+1)//len(GRU_list)
ranks_list = np.arange(0,args.size).tolist()
rank_group = []
for rank_id in range(len(GRU_list)):
if rank_id == len(GRU_list)-1:
ranks = ranks_list[rank_id*increase_tmp:]
else:
ranks = ranks_list[rank_id*increase_tmp:(rank_id+1)*increase_tmp]
rank_group.append(ranks)
for group_id in range(len(GRU_list)):
if args.rank in set(rank_group[group_id]):
os.environ["CUDA_VISIBLE_DEVICES"] = GRU_list[group_id]
break
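# Worked example of the assignment above (illustrative, not from the source):
# with args.size=8, args.H=1 and --GPU_list '01', increase_tmp = 8 // 2 = 4, so
# ranks 0-3 are pinned to GPU '0' and ranks 4-7 to GPU '1'; each process then
# only sees its assigned GPU through CUDA_VISIBLE_DEVICES.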
device = 'cuda' if torch.cuda.is_available() else 'cpu'
DATASET_GETTERS = {'cifar10': get_cifar10, 'emnist': get_emnist, 'svhn':get_svhn}
### generate the index of the server dataset and the device dataset
if args.iid:
path_device_idxs = f'{args.dataset}_post_data/iid/{args.size - 1 - args.H}_{args.num_data_server}'
else:
path_device_idxs = f'{args.dataset}_post_data/noniid/{args.size - 1 - args.H}_{args.num_data_server}_{args.class_per_device}_{args.basicLabelRatio}'
if args.dataset == 'emnist':
if args.iid:
path_device_idxs = f'{args.dataset}_post_data/iid/{47}_{args.num_data_server}'
else:
path_device_idxs = f'{args.dataset}_post_data/noniid/{47}_{args.num_data_server}_{args.class_per_device}_{args.basicLabelRatio}'
device_ids = np.load(path_device_idxs + '/device_idxs' + '.npy', allow_pickle=True).item()
server_idxs = np.load(path_device_idxs + '/server_idxs' + '.npy', allow_pickle=True).item()
device_ids = device_ids['device_idxs']
server_idxs = server_idxs['server_idxs']
if args.num_comm_ue < args.size - 1 - args.H:
ue_list_epoches = np.load(path_device_idxs + '/ue_list_epoch' + '.npy', allow_pickle=True).item()
ue_list_epoches = ue_list_epoches['ue_list_epoch']
else:
ue_list_epoches = []
print('get dataset')
labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset](
'./data', args.k_img, args.k_img * len(device_ids), device_ids, server_idxs)
print('get dataset, done')
train_sampler = RandomSampler
labeled_trainloader = DataLoader(
labeled_dataset,
sampler=train_sampler(labeled_dataset),
batch_size=args.bs,
num_workers=0,
drop_last=True)
unlabeled_trainloader_list = []
for id in range(len(unlabeled_dataset)):
unlabeled_trainloader = DataLoader(
unlabeled_dataset[id],
sampler=train_sampler(unlabeled_dataset[id]),
batch_size=args.bs,
num_workers=0,
drop_last=True)
unlabeled_trainloader_list.append(unlabeled_trainloader)
test_loader = DataLoader(test_dataset,
batch_size=args.bs,
shuffle=False)
print(args)
def run(rank, size, G):
# initiate experiments folder
save_path = './results_v0/'
if not os.path.exists(save_path):
os.makedirs(save_path)
folder_name = save_path+args.name+'/'
if rank == 0 and os.path.isdir(folder_name)==False and args.save:
os.makedirs(folder_name)
dist.barrier()
# seed for reproducibility
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# load datasets
if args.H:
if args.dataset == 'emnist':
labeled_set = [0,48,49,50,51]
if rank in set(labeled_set):
train_loader = labeled_trainloader
else:
train_loader = unlabeled_trainloader_list[rank - 1]
else:
if rank == 0 or rank == args.size -1:
train_loader = labeled_trainloader
else:
train_loader = unlabeled_trainloader_list[rank - 1]
else:
if rank == 0:
train_loader = labeled_trainloader
else:
train_loader = unlabeled_trainloader_list[rank - 1]
# define neural nets model, criterion, and optimizer
model = util.select_model(args.model, args).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(),
lr=args.lr,
alpha=args.alpha,
gmf=args.gmf,
size=size,
momentum=0.9,
nesterov = True,
weight_decay=1e-4)
args.iteration = args.k_img // args.bs
total_steps = 1024 * args.iteration
# total_steps = args.epoch * args.iteration
warmup_epoch = 5
if args.dataset == 'emnist':
warmup_epoch = 0
total_steps = args.epoch * args.iteration
scheduler = get_cosine_schedule_with_warmup(
optimizer, warmup_epoch * args.iteration, total_steps,lr_weight=1)
batch_meter = util.Meter(ptag='Time')
comm_meter = util.Meter(ptag='Time')
best_test_accuracy = 0
req = None
acc_list = []
print('Now train the model')
for epoch in range(args.epoch):
if rank == 0:
begin_time = time.time()
train(rank, model, criterion, optimizer,scheduler, batch_meter, comm_meter,
train_loader, epoch, device, ue_list_epoches, G)
### test the server model
if rank == 0:
test_acc = evaluate(model, test_loader)
acc_list.append(round(test_acc, 2))
print('test acc',epoch, test_acc,time.time() - begin_time)
if args.H:
filename = f"./results_v0/{args.experiment_name}_{args.dataset}_iid{args.iid}_UE{args.size - 1}_{args.basicLabelRatio}_{args.model}_bs{args.bs}_H1_cp{args.cp}.txt"
else:
filename = f"./results_v0/{args.experiment_name}_{args.dataset}_iid{args.iid}_UE{args.size - 1 - args.H}_{args.basicLabelRatio}_comUE{args.num_comm_ue}_{args.model}_bs{args.bs}_H0_cp{args.cp}.txt"
if filename:
with open(filename, 'w') as f:
json.dump(acc_list, f)
path_checkpoint = f"./checkpoint/{args.experiment_name}/"
if not os.path.exists(path_checkpoint):
os.makedirs(path_checkpoint)
torch.save({'epoch': epoch,'model_state_dict': model.state_dict()}, path_checkpoint+f'{rank}_weights.pth')
def evaluate(model, test_loader):
model.eval()
top1 = util.AverageMeter()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data = data.cuda(non_blocking = True)
target = target.cuda(non_blocking = True)
outputs = model(data)
acc1 = util.comp_accuracy(outputs, target)
top1.update(acc1[0].item(), data.size(0))
return top1.avg
def train(rank, model, criterion, optimizer,scheduler, batch_meter, comm_meter,
loader, epoch, device, ue_list_epoches, G):
model.train()
top1 = util.Meter(ptag='Prec@1')
iter_time = time.time()
if args.H:
if args.dataset == 'emnist':
group1 = [0] + np.arange(1, 11).tolist()
group2 = [48] + np.arange(11, 21).tolist()
group3 = [49] + | np.arange(21, 31) | numpy.arange |
# -*- coding: utf-8 -*-
""" Lots of functions for drawing and plotting visiony things """
# TODO: New naming scheme
# viz_<funcname> should clear everything. The current axes and fig: clf, cla.
#   Will add annotations.
# interact_<funcname> should clear everything and start user interactions.
# show_<funcname> should always clear the current axes, but not the fig: cla.
#   Might add annotations.
# plot_<funcname> should not clear the axes or figure. More useful for graphs.
# draw_<funcname> same as plot for now. More useful for images.
import logging
import itertools as it
import utool as ut # NOQA
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError as ex:
ut.printex(
ex,
'try pip install mpl_toolkits.axes_grid1 or something. idk yet',
iswarning=False,
)
raise
# import colorsys
import pylab
import warnings
import numpy as np
from os.path import relpath
try:
import cv2
except ImportError as ex:
print('ERROR PLOTTOOL CANNOT IMPORT CV2')
print(ex)
from wbia.plottool import mpl_keypoint as mpl_kp
from wbia.plottool import color_funcs as color_fns
from wbia.plottool import custom_constants
from wbia.plottool import custom_figure
from wbia.plottool import fig_presenter
DEBUG = False
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def is_texmode():
return mpl.rcParams['text.usetex']
# Bring over moved functions that still have dependants elsewhere
TAU = np.pi * 2
distinct_colors = color_fns.distinct_colors
lighten_rgb = color_fns.lighten_rgb
to_base255 = color_fns.to_base255
DARKEN = ut.get_argval(
'--darken', type_=float, default=(0.7 if ut.get_argflag('--darken') else None)
)
# logger.info('DARKEN = %r' % (DARKEN,))
all_figures_bring_to_front = fig_presenter.all_figures_bring_to_front
all_figures_tile = fig_presenter.all_figures_tile
close_all_figures = fig_presenter.close_all_figures
close_figure = fig_presenter.close_figure
iup = fig_presenter.iup
iupdate = fig_presenter.iupdate
present = fig_presenter.present
reset = fig_presenter.reset
update = fig_presenter.update
ORANGE = custom_constants.ORANGE
RED = custom_constants.RED
GREEN = custom_constants.GREEN
BLUE = custom_constants.BLUE
YELLOW = custom_constants.YELLOW
BLACK = custom_constants.BLACK
WHITE = custom_constants.WHITE
GRAY = custom_constants.GRAY
LIGHTGRAY = custom_constants.LIGHTGRAY
DEEP_PINK = custom_constants.DEEP_PINK
PINK = custom_constants.PINK
FALSE_RED = custom_constants.FALSE_RED
TRUE_GREEN = custom_constants.TRUE_GREEN
TRUE_BLUE = custom_constants.TRUE_BLUE
DARK_GREEN = custom_constants.DARK_GREEN
DARK_BLUE = custom_constants.DARK_BLUE
DARK_RED = custom_constants.DARK_RED
DARK_ORANGE = custom_constants.DARK_ORANGE
DARK_YELLOW = custom_constants.DARK_YELLOW
PURPLE = custom_constants.PURPLE
LIGHT_BLUE = custom_constants.LIGHT_BLUE
UNKNOWN_PURP = custom_constants.UNKNOWN_PURP
TRUE = TRUE_BLUE
FALSE = FALSE_RED
figure = custom_figure.figure
gca = custom_figure.gca
gcf = custom_figure.gcf
get_fig = custom_figure.get_fig
save_figure = custom_figure.save_figure
set_figtitle = custom_figure.set_figtitle
set_title = custom_figure.set_title
set_xlabel = custom_figure.set_xlabel
set_xticks = custom_figure.set_xticks
set_ylabel = custom_figure.set_ylabel
set_yticks = custom_figure.set_yticks
VERBOSE = ut.get_argflag(('--verbose-df2', '--verb-pt'))
# ================
# GLOBALS
# ================
TMP_mevent = None
plotWidget = None
def show_was_requested():
"""
returns True if --show is specified on the commandline or you are in
IPython (and presumably want some sort of interaction)
"""
return not ut.get_argflag(('--noshow')) and (
ut.get_argflag(('--show', '--save')) or ut.inIPython()
)
# return ut.show_was_requested()
class OffsetImage2(mpl.offsetbox.OffsetBox):
"""
TODO: If this works reapply to mpl
"""
def __init__(
self,
arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
mpl.offsetbox.OffsetBox.__init__(self)
self._dpi_cor = dpi_cor
self.image = mpl.offsetbox.BboxImage(
bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
def set_data(self, arr):
self._data = | np.asarray(arr) | numpy.asarray |
"""
Constraint functions for grasp sampling
Author: <NAME>
"""
from abc import ABCMeta, abstractmethod
import numpy as np
class GraspConstraintFn(object):
"""
Abstract constraint functions for grasp sampling.
"""
__metaclass__ = ABCMeta
def __init__(self, config):
# set params
self._config = config
def __call__(self, grasp):
"""
Evaluates whether or not a grasp is valid.
Parameters
----------
grasp : :obj:`Grasp2D`
grasp to evaluate
Returns
-------
bool
True if the grasp satisfies constraints, False otherwise
"""
return self.satisfies_constraints(grasp)
@abstractmethod
def satisfies_constraints(self, grasp):
"""
Evaluates whether or not a grasp is valid.
Parameters
----------
grasp : :obj:`Grasp2D`
grasp to evaluate
Returns
-------
bool
True if the grasp satisfies constraints, False otherwise
"""
pass
class DiscreteApproachGraspConstraintFn(GraspConstraintFn):
"""
Constrains the grasp approach direction into a discrete set of
angles from the world z direction.
"""
def __init__(self, config):
# init superclass
GraspConstraintFn.__init__(self, config)
self._max_approach_angle = self._config['max_approach_angle']
self._angular_tolerance = self._config['angular_tolerance']
self._angular_step = self._config['angular_step']
self._T_camera_world = self._config['camera_pose']
def satisfies_constraints(self, grasp):
"""
Evaluates whether or not a grasp is valid by evaluating the
angle between the approach axis and the world z direction.
Parameters
----------
grasp : :obj:`Grasp2D`
grasp to evaluate
Returns
-------
bool
True if the grasp satisfies constraints, False otherwise
"""
# find grasp angle in world coordinates
axis_world = self._T_camera_world.rotation.dot(grasp.approach_axis)
angle = np.arccos(-axis_world[2])
# check closest available angle
available_angles = np.array([0.0])
if self._angular_step > 0:
available_angles = np.arange(start=0.0,
stop=self._max_approach_angle,
step=self._angular_step)
diff = np.abs(available_angles - angle)
angle_index = | np.argmin(diff) | numpy.argmin |
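# Hedged numeric sketch of the check above (not part of the original module):
# for an approach axis whose angle from the downward world direction is 0.35 rad,
# with max_approach_angle=0.5 and angular_step=0.1 the available angles are
# [0.0, 0.1, 0.2, 0.3, 0.4]; np.argmin picks 0.3, and the grasp presumably passes
# only if |0.35 - 0.3| is within the configured angular_tolerance.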
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
#%%
#import sys
import mne
#import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
#import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
#import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from pandas import DataFrame
from sklearn import linear_model
import statsmodels.api as sm
#import csv
os.chdir(os.path.join("D:\\", "git","BrainTools","projects","NLR_MEG"))
from plotit3 import plotit3
from plotsig3 import plotsig3
from plotit2 import plotit2
from plotsig2 import plotsig2
from plotcorr3 import plotcorr3
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = 'D://subjects'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = os.path.join("D:\\","NLR_MEG")
os.chdir(raw_dir)
import seaborn as sns
sns.set(style="darkgrid")
#%%
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
session1 = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
subs2 = ['NLR_102_RS','NLR_110_HH','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF', # 162, 201 only had the second session
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM', # 'NLR_170_GM': no EOG channel
'NLR_180_ZD','NLR_201_GS',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420', 'NLR_HB275','NLR_GB355']
session2 = ['102_rs160815','110_hh160809','145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828','nlr_hb275170828','nlr_gb355170907']
subIndex1 = np.nonzero(np.in1d(subs,subs2))[0]
subIndex2 = np.empty([1,len(subIndex1)],dtype=int)[0]
for i in range(0,len(subIndex1)):
subIndex2[i] = np.nonzero(np.in1d(subs2,subs[subIndex1[i]]))[0]
twre_index = [87,93,108,66,116,85,110,71,84,92,87,86,63,81,60,55,71,63,68,67,64,127,79,
73,59,84,79,91,57,67,77,57,80,53,72,58,85,79,116,117,107,78,66,101,67]
twre_index = np.array(twre_index)
brs = [87,102,108,78,122,91,121,77,91,93,93,88,75,90,66,59,81,84,81,72,71,121,
81,75,66,90,93,101,56,78,83,69,88,60,88,73,82,81,115,127,124,88,68,110,96]
brs = np.array(brs)
twre_index1 = twre_index[subIndex1]
twre_index2_all = [90,76,94,115,
85,75,82,64,75,
63,83,77,84,75,
68,79,
62,90,105,75,71,
69,83,76,62,73,94]
twre_index2_all = np.array(twre_index2_all)
twre_index2 = twre_index2_all[subIndex2]
brs1 = brs[subIndex1]
brs2_all = [98,88,102,110,99,91,88,79,105,86,81,88,89,77,83,81,86,98,116,104,86,90,91,97,57,99,102]
brs2_all = np.array(brs2_all)
brs2 = brs2_all[subIndex2]
twre_diff = np.subtract(twre_index2,twre_index1)
brs_diff = np.subtract(brs2,brs1)
swe_raw = [62, 76, 74, 42, 75, 67, 76, 21, 54, 35, 21, 61, 45, 48, 17, 11, 70, 19, 10, 57,
12, 86, 53, 51, 13, 28, 54, 25, 27, 10, 66, 18, 18, 20, 37, 23, 17, 36, 79, 82,
74, 64, 42, 78, 35]
swe_raw = np.array(swe_raw)
lwid = [49,60,60,51,62,54,65,23,44,35,31,52,44,39,27,30,57,33,24,48,19,66,45,
43,22,33,51,36,35,25,55,34,26,26,39,27,24,29,61,71,65,56,36,62,51]
lwid = np.array(lwid)
rf = [88,103,95,67,120,85,108,71,91,87,88,76,76,93,60,40,86,61,66,81,59,130,93,85,49,76,90,96,42,64,74,49,84,56,
76,61,80,89,111,120,132,88,65,102,72]
rf = np.array(rf)
age = [125.6885, 132.9501, 122.0434, 138.4349, 97.6347, 138.1420, 108.2457, 98.0631, 105.8147, 89.9132,
87.6465, 131.8660, 123.7174, 95.959, 112.416, 133.8042, 152.4639, 103.4823, 89.8475, 138.4020,
93.8568, 117.0814, 123.6202, 122.9304, 109.1656, 90.6058,
111.9593,86.0381,147.2063,95.8699,148.0802,122.5896,88.7162,123.0495,110.6645,105.3069,88.9143,95.2879,106.2852,
122.2915,114.4389,136.1496,128.6246,137.9216,122.7528]
age = np.divide(age, 12)
wasi_vocab = [51,62,52,39,80,59,56,np.nan,52,47,64,44,49,48,55,53,44,44,53,45,62,
76,45,55,48,56,41,43,40,52,54,50,62,67,59,48,60,60,62,79,74,44,49,50,60]
wasi_mr = [47,64,44,58,60,51,56,np.nan,56,43,37,37,51,55,36,33,52,48,49,41,51,
56,56,53,42,41,46,51,34,51,50,51,55,53,44,44,47,59,66,74,65,53,54,47,60]
n_subjects = len(subs)
c_table = ( (0.6510, 0.8078, 0.8902), # Blue, Green, Red, Orange, Purple, yellow
(0.1216, 0.4706, 0.7059),
(0.6980, 0.8745, 0.5412),
(0.2000, 0.6275, 0.1725),
(0.9843, 0.6039, 0.6000),
(0.8902, 0.1020, 0.1098),
(0.9922, 0.7490, 0.4353),
(1.0000, 0.4980, 0),
(0.7922, 0.6980, 0.8392),
(0.4157, 0.2392, 0.6039),
(1.0000, 1.0000, 0.6000),
(0.6941, 0.3490, 0.1569))
fname_data = op.join(raw_dir, 'session1_data_loose_depth8_normal.npy')
#%%
"""
Here we load the data for Session 1
"""
t0 = time.time()
os.chdir(raw_dir)
X13 = np.load(fname_data)
orig_times = np.load('session1_times.npy')
tstep = np.load('session1_tstep.npy')
n_epochs = np.load('session1_n_averages.npy')
tmin = -0.1
""" Downsample the data """
ss = 3 # was originally 2
sample = np.arange(0,len(orig_times),ss)
sRate = 600 / ss
times = orig_times[sample]
tstep = ss*tstep
X11 = X13[:,sample,:,:]
del X13
X11 = np.abs(X11)
print("\n\nElasped time: %0.2d mins %0.2d secs\n\n" % (divmod(time.time()-t0, 60)))
#%%
""" Grouping subjects """
reading_thresh = 80
m1 = np.logical_and(np.transpose(twre_index) > reading_thresh, np.transpose(age) <= 13)
m2 = np.logical_and(np.transpose(twre_index) <= reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(brs) >= reading_thresh, np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(brs) < reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(swe_raw) >= np.median(swe_raw), np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(swe_raw) < np.median(swe_raw), np.transpose(age) <= 13)
orig_twre = twre_index
orig_age = age
orig_swe = swe_raw
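# Exclude subjects whose mean number of retained epochs across conditions is below 40.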
m3 = np.mean(n_epochs,axis=1) < 40
m1[np.where(m3)] = False
m2[np.where(m3)] = False
twre_index = twre_index[np.where(~m3)[0]]
age = age[np.where(~m3)[0]]
#swe_raw = swe_raw[np.where(~m3)[0]]
good_readers = np.where(m1)[0]
poor_readers = np.where(m2)[0]
a1 = np.transpose(age) > np.mean(age)
a2 = np.logical_not(a1)
a1[np.where(m3)] = False
a2[np.where(m3)] = False
old_readers = np.where(a1)[0]
young_readers = np.where(a2)[0]
#wasi_vocab_G = [wasi_vocab[i] for i in good_readers]
#wasi_vocab_P = [wasi_vocab[i] for i in poor_readers]
#wasi_mr_G = [wasi_mr[i] for i in good_readers]
#wasi_mr_P = [wasi_mr[i] for i in poor_readers]
#age_G = [orig_age[i] for i in good_readers]
#age_P = [orig_age[i] for i in poor_readers]
#twre_G = [orig_twre[i] for i in good_readers]
#twre_P = [orig_twre[i] for i in poor_readers]
#
#n,p = stats.ttest_ind(wasi_vocab_G,wasi_vocab_P,nan_policy='omit')
#n,p = stats.ttest_ind(wasi_mr_G,wasi_mr_P,nan_policy='omit')
#n,p = stats.ttest_ind(age_G,age_P,nan_policy='omit')
#n,p = stats.ttest_ind(twre_G,twre_P,nan_policy='omit')
all_subject = []
all_subject.extend(good_readers)
all_subject.extend(poor_readers)
all_subject.sort()
fs_vertices = [np.arange(10242)] * 2
n_epoch = np.empty((45,4))
n_epoch[:,0] = [np.int(n_epochs[i,0]) for i in range(0,45)]
n_epoch[:,1] = [np.int(n_epochs[i,3]) for i in range(0,45)]
n_epoch[:,2] = [np.int(n_epochs[i,5]) for i in range(0,45)]
n_epoch[:,3] = [np.int(n_epochs[i,8]) for i in range(0,45)]
removal = np.sum(60 - n_epoch, axis = 1)
a = [removal[i] for i in zip(good_readers)]
b = [removal[i] for i in zip(poor_readers)]
c = [removal[i] for i in zip(all_subject)]
d = [removal[i] for i in zip(young_readers)]
e = [removal[i] for i in zip(old_readers)]
stats.ttest_ind(a,b)
stats.ttest_ind(d,e)
stats.pearsonr(c,age)
stats.pearsonr(c,twre_index)
figureDir = '%s/figures' % raw_dir
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(age, c, deg=1)
ax.plot(age, fit[0] * age + fit[1], color=[0,0,0])
ax.plot(age, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Age')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_age.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_age.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(twre_index, c, deg=1)
ax.plot(twre_index, fit[0] * twre_index + fit[1], color=[0,0,0])
ax.plot(twre_index, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Reading skill')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_reading.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_reading.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir) #regexp=aparc_label_name
#aparc_label_name = 'PHT_ROI'#'_IP'#'IFSp_ROI'#'STSvp_ROI'#'STSdp_ROI'#'PH_ROI'#'TE2p_ROI' #'SFL_ROI' #'IFSp_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc.a2009s',surf_name='white',
subjects_dir=fs_dir) #, regexp=aparc_label_name)
#%%
#TE2p_mask_lh = mne.Label.get_vertices_used(TE2p_label[0])
#TE2p_mask_rh = mne.Label.get_vertices_used(TE2p_label[1])
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
TE2a_label_lh = [label for label in labels if label.name == 'L_TE2a_ROI-lh'][0]
TE2a_label_rh = [label for label in labels if label.name == 'R_TE2a_ROI-rh'][0]
TF_label_lh = [label for label in labels if label.name == 'L_TF_ROI-lh'][0]
TF_label_rh = [label for label in labels if label.name == 'R_TF_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
a8C_label_lh = [label for label in labels if label.name == 'L_8C_ROI-lh'][0]
a8C_label_rh = [label for label in labels if label.name == 'R_8C_ROI-rh'][0]
p946v_label_lh = [label for label in labels if label.name == 'L_p9-46v_ROI-lh'][0]
p946v_label_rh = [label for label in labels if label.name == 'R_p9-46v_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFSa_label_lh = [label for label in labels if label.name == 'L_IFSa_ROI-lh'][0]
IFSa_label_rh = [label for label in labels if label.name == 'R_IFSa_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
a43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
a43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
a9_46v_lh = [label for label in labels if label.name == 'L_a9-46v_ROI-lh'][0]
a9_46v_rh = [label for label in labels if label.name == 'R_a9-46v_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
STSva_label_lh = [label for label in labels if label.name == 'L_STSva_ROI-lh'][0]
STSva_label_rh = [label for label in labels if label.name == 'R_STSva_ROI-rh'][0]
STSda_label_lh = [label for label in labels if label.name == 'L_STSda_ROI-lh'][0]
STSda_label_rh = [label for label in labels if label.name == 'R_STSda_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
TPOJ2_label_lh = [label for label in labels if label.name == 'L_TPOJ2_ROI-lh'][0]
TPOJ2_label_rh = [label for label in labels if label.name == 'R_TPOJ2_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
V4_label_lh = [label for label in labels if label.name == 'L_V4_ROI-lh'][0]
V4_label_rh = [label for label in labels if label.name == 'R_V4_ROI-rh'][0]
LIPd_label_lh = [label for label in labels if label.name == 'L_LIPd_ROI-lh'][0]
LIPd_label_rh = [label for label in labels if label.name == 'R_LIPd_ROI-rh'][0]
LIPv_label_lh = [label for label in labels if label.name == 'L_LIPv_ROI-lh'][0]
LIPv_label_rh = [label for label in labels if label.name == 'R_LIPv_ROI-rh'][0]
IPS1_label_lh = [label for label in labels if label.name == 'L_IPS1_ROI-lh'][0]
IPS1_label_rh = [label for label in labels if label.name == 'R_IPS1_ROI-rh'][0]
_7Am_label_lh = [label for label in labels if label.name == 'L_7Am_ROI-lh'][0]
_7Am_label_rh = [label for label in labels if label.name == 'R_7Am_ROI-rh'][0]
VIP_label_lh = [label for label in labels if label.name == 'L_VIP_ROI-lh'][0]
VIP_label_rh = [label for label in labels if label.name == 'R_VIP_ROI-rh'][0]
_7AL_label_lh = [label for label in labels if label.name == 'L_7AL_ROI-lh'][0]
_7AL_label_rh = [label for label in labels if label.name == 'R_7AL_ROI-rh'][0]
PBelt_label_lh = [label for label in labels if label.name == 'L_PBelt_ROI-lh'][0]
PBelt_label_rh = [label for label in labels if label.name == 'R_PBelt_ROI-rh'][0]
PSL_label_lh = [label for label in labels if label.name == 'L_PSL_ROI-lh'][0]
PSL_label_rh = [label for label in labels if label.name == 'R_PSL_ROI-rh'][0]
LBelt_label_lh = [label for label in labels if label.name == 'L_LBelt_ROI-lh'][0]
LBelt_label_rh = [label for label in labels if label.name == 'R_LBelt_ROI-rh'][0]
A1_label_lh = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
A1_label_rh = [label for label in labels if label.name == 'R_A1_ROI-rh'][0]
MBelt_label_lh = [label for label in labels if label.name == 'L_MBelt_ROI-lh'][0]
MBelt_label_rh = [label for label in labels if label.name == 'R_MBelt_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
A4_label_lh = [label for label in labels if label.name == 'L_A4_ROI-lh'][0]
A4_label_rh = [label for label in labels if label.name == 'R_A4_ROI-rh'][0]
PFcm_label_lh = [label for label in labels if label.name == 'L_PFcm_ROI-lh'][0]
PFcm_label_rh = [label for label in labels if label.name == 'R_PFcm_ROI-rh'][0]
PFm_label_lh = [label for label in labels if label.name == 'L_PFm_ROI-lh'][0]
PFm_label_rh = [label for label in labels if label.name == 'R_PFm_ROI-rh'][0]
_4_label_lh = [label for label in labels if label.name == 'L_4_ROI-lh'][0]
_4_label_rh = [label for label in labels if label.name == 'R_4_ROI-rh'][0]
_1_label_lh = [label for label in labels if label.name == 'L_1_ROI-lh'][0]
_1_label_rh = [label for label in labels if label.name == 'R_1_ROI-rh'][0]
_2_label_lh = [label for label in labels if label.name == 'L_2_ROI-lh'][0]
_2_label_rh = [label for label in labels if label.name == 'R_2_ROI-rh'][0]
_3a_label_lh = [label for label in labels if label.name == 'L_3a_ROI-lh'][0]
_3a_label_rh = [label for label in labels if label.name == 'R_3a_ROI-rh'][0]
_3b_label_lh = [label for label in labels if label.name == 'L_3b_ROI-lh'][0]
_3b_label_rh = [label for label in labels if label.name == 'R_3b_ROI-rh'][0]
_43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
_43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
_6r_label_lh = [label for label in labels if label.name == 'L_6r_ROI-lh'][0]
_6r_label_rh = [label for label in labels if label.name == 'R_6r_ROI-rh'][0]
OP1_label_lh = [label for label in labels if label.name == 'L_OP1_ROI-lh'][0]
OP1_label_rh = [label for label in labels if label.name == 'R_OP1_ROI-rh'][0]
OP23_label_lh = [label for label in labels if label.name == 'L_OP2-3_ROI-lh'][0]
OP23_label_rh = [label for label in labels if label.name == 'R_OP2-3_ROI-rh'][0]
OP4_label_lh = [label for label in labels if label.name == 'L_OP4_ROI-lh'][0]
OP4_label_rh = [label for label in labels if label.name == 'R_OP4_ROI-rh'][0]
PFop_label_lh = [label for label in labels if label.name == 'L_PFop_ROI-lh'][0]
PFop_label_rh = [label for label in labels if label.name == 'R_PFop_ROI-rh'][0]
A5_label_lh = [label for label in labels if label.name == 'L_A5_ROI-lh'][0]
A5_label_rh = [label for label in labels if label.name == 'R_A5_ROI-rh'][0]
STV_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
STV_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
PF_label_lh = [label for label in labels if label.name == 'L_PF_ROI-lh'][0]
PF_label_rh = [label for label in labels if label.name == 'R_PF_ROI-rh'][0]
PFt_label_lh = [label for label in labels if label.name == 'L_PFt_ROI-lh'][0]
PFt_label_rh = [label for label in labels if label.name == 'R_PFt_ROI-rh'][0]
p47r_label_lh = [label for label in labels if label.name == 'L_p47r_ROI-lh'][0]
p47r_label_rh = [label for label in labels if label.name == 'R_p47r_ROI-rh'][0]
FOP5_label_lh = [label for label in labels if label.name == 'L_FOP5_ROI-lh'][0]
FOP5_label_rh = [label for label in labels if label.name == 'R_FOP5_ROI-rh'][0]
FOP4_label_lh = [label for label in labels if label.name == 'L_FOP4_ROI-lh'][0]
FOP4_label_rh = [label for label in labels if label.name == 'R_FOP4_ROI-rh'][0]
FOP3_label_lh = [label for label in labels if label.name == 'L_FOP3_ROI-lh'][0]
FOP3_label_rh = [label for label in labels if label.name == 'R_FOP3_ROI-rh'][0]
FOP2_label_lh = [label for label in labels if label.name == 'L_FOP2_ROI-lh'][0]
FOP2_label_rh = [label for label in labels if label.name == 'R_FOP2_ROI-rh'][0]
Ig_label_lh = [label for label in labels if label.name == 'L_Ig_ROI-lh'][0]
Ig_label_rh = [label for label in labels if label.name == 'R_Ig_ROI-rh'][0]
AVI_label_lh = [label for label in labels if label.name == 'L_AVI_ROI-lh'][0]
AVI_label_rh = [label for label in labels if label.name == 'R_AVI_ROI-rh'][0]
_47l_label_lh = [label for label in labels if label.name == 'L_47l_ROI-lh'][0]
_47l_label_rh = [label for label in labels if label.name == 'R_47l_ROI-rh'][0]
temp1_label_lh = [label for label in anat_label if label.name == 'Pole_occipital-lh'][0]
#temp1_label_rh = [label for label in anat_label if label.name == 'parsopercularis-rh'][0]
temp2_label_lh = [label for label in anat_label if label.name == 'S_occipital_ant-lh'][0]
#temp2_label_rh = [label for label in anat_label if label.name == 'parsorbitalis-rh'][0]
temp3_label_lh = [label for label in anat_label if label.name == 'G_and_S_occipital_inf-lh'][0]
#temp3_label_rh = [label for label in anat_label if label.name == 'parstriangularis-rh'][0]
temp4_label_lh = [label for label in anat_label if label.name == 'S_calcarine-lh'][0]
#temp4_label_rh = [label for label in anat_label if label.name == 'precentral-rh'][0]
#%%
""" Lexical task: Word - Noise """
data11 = X11[:,:,all_subject,5] - X11[:,:,all_subject,8]
data11 = np.transpose(data11,[2,1,0])
data11_good = X11[:,:,good_readers,5] - X11[:,:,good_readers,8]
data11_good = np.transpose(data11_good,[2,1,0])
data11_poor = X11[:,:,poor_readers,5] - X11[:,:,poor_readers,8]
data11_poor = np.transpose(data11_poor,[2,1,0])
""" Dot task: Word - Noise """
data12 = X11[:,:,all_subject,0] - X11[:,:,all_subject,3]
data12 = np.transpose(data12,[2,1,0])
data12_good = X11[:,:,good_readers,0] - X11[:,:,good_readers,3]
data12_good = np.transpose(data12_good,[2,1,0])
data12_poor = X11[:,:,poor_readers,0] - X11[:,:,poor_readers,3]
data12_poor = np.transpose(data12_poor,[2,1,0])
""" Lexical task: High contrast - Low contrast """
#data12 = X11[:,31:65,all_subject,5] - X11[:,31:65,all_subject,7]
#data12 = np.transpose(data12,[2,1,0])
#data12[:,:,medial_vertices] = 0.
#%%
""" Spatio-temporal clustering: session 1 Lexical task"""
t0 = time.time()
print("\n\n Start time: %s \n\n" % time.ctime())
p_threshold = 0.05
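# Convert the two-tailed p-value threshold into the corresponding t threshold
# (n_subjects - 1 degrees of freedom) used for cluster formation.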
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
s_space = mne.grade_to_tris(5)
# Left hemisphere
s_space_lh = s_space[s_space[:,0] < 10242]
#connectivity = mne.spatial_tris_connectivity(s_space_lh, remap_vertices = True)
connectivity = mne.spatial_tris_connectivity(s_space)
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data11[:,:,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < p_threshold)[0]
#fsave_vertices = [np.arange(10242), np.array([], int)]
fsave_vertices = [np.arange(10242), np.arange(10242)]
#fsave_vertices = [np.arange(10242), np.array([], int)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\n Elasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
#%%
""" Just source estimates """
stat_fun = partial(mne.stats.ttest_1samp_no_p)
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(good_readers) - 1)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data12_good[:,:,:])), fs_vertices, tmin, tstep, subject='fsaverage')
brain3_1 = temp3.plot(hemi='both', subjects_dir=fs_dir, views = 'lat', initial_time=0.35, #['lat','ven','med']
clim=dict(kind='value', lims=[1.7, t_threshold, 3.5]))#clim=dict(kind='value', lims=[2, t_threshold, 7]), size=(800,800))
#%%
""" Spatio-temporal clustering: session 1 Dot task"""
dur_thresh = 100
t0 = time.time()
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.permutation_cluster_1samp_test(data12[:,166:199,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
fsave_vertices = [np.arange(10242), np.arange(10242)]
dot_stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
brain3 = dot_stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[0, 10, 50]),background='white',foreground='black')
#%%
""" ROI definition """
dur_thresh = 100
"""
plot(self, subject=None, surface='inflated', hemi='lh', colormap='auto',
time_label='auto', smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto', cortex='classic', size=800, background='black',
foreground='white', initial_time=None, time_unit='s')
"""
brain1 = stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
""" Sort out vertices here """
#temp_frontal_label_l = mne.Label(FOP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP4_label_lh.pos, \
# values= FOP4_label_lh.values)
#
#brain1.add_label(temp_frontal_label_l, borders=True, color=c_table[8])
#
#lh_label = stc_all_cluster_vis.in_label(temp_frontal_label_l)
#data = lh_label.data
#lh_label.data[data < dur_thresh] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = stc_all_cluster_vis.in_label(temp_labels)
#frontal_vertices_l = temp.vertices[0]
#
#new_label = mne.Label(frontal_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color=c_table[8])
""" Done """
os.chdir('figures')
#brain1.save_image('Lexical_LH_STClustering.pdf', antialiased=True)
#brain1.save_image('Lexical_LH_STClustering.png', antialiased=True)
os.chdir('..')
brain1.add_label(A1_label_lh, borders=True, color=[0,0,0]) # Show A1
temp_auditory_label_l = mne.Label(A4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A4_label_lh.pos,values= A4_label_lh.values) + \
mne.Label(A5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A5_label_lh.pos,values= A5_label_lh.values) + \
mne.Label(STSdp_label_lh.vertices, hemi='lh',name=u'sts_l',pos=STSdp_label_lh.pos,values= STSdp_label_lh.values)+ \
mne.Label(TPOJ1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=TPOJ1_label_lh.pos,values= TPOJ1_label_lh.values)+ \
mne.Label(PBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PBelt_label_lh.pos,values= PBelt_label_lh.values)+ \
mne.Label(LBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=LBelt_label_lh.pos,values= LBelt_label_lh.values)
#brain1.add_label(temp_auditory_label_l, borders=True, color=c_table[2])
lh_label = stc_all_cluster_vis.in_label(temp_auditory_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
stg_vertices_l = temp.vertices[0]
new_label = mne.Label(stg_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[1])
#brain1.remove_labels()
temp_auditory2_label_l = mne.Label(PFcm_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFcm_label_lh.pos,values= PFcm_label_lh.values) + \
mne.Label(RI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=RI_label_lh.pos,values= RI_label_lh.values)+ \
mne.Label(PF_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PF_label_lh.pos,values= PF_label_lh.values)
#brain1.add_label(temp_auditory2_label_l, borders=True, color=c_table[0])
lh_label = stc_all_cluster_vis.in_label(temp_auditory2_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
tpj_vertices_l = temp.vertices[0]
tpj_vertices_l = np.sort(np.concatenate((tpj_vertices_l, \
[16, 2051, 2677, 2678, 2679, 5042, 8296, 8297, 8299, 8722, 8723, 9376])))
new_label = mne.Label(tpj_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[0])
#brain1.add_label(_1_label_lh, borders=True, color=c_table[4])
temp_motor_label_l = mne.Label(_3a_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3a_label_lh.pos,values= _3a_label_lh.values) + \
mne.Label(_3b_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3b_label_lh.pos,values= _3b_label_lh.values) + \
mne.Label(_4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_4_label_lh.pos,values= _4_label_lh.values) + \
mne.Label(_1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_1_label_lh.pos,values= _1_label_lh.values)
#brain1.add_label(temp_motor_label_l, borders=True, color=c_table[4])
lh_label = stc_all_cluster_vis.in_label(temp_motor_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
motor_vertices_l = temp.vertices[0]
new_label = mne.Label(motor_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[4])
temp_broca_label_l = \
mne.Label(a44_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a44_label_lh.pos,values= a44_label_lh.values) + \
mne.Label(a45_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a45_label_lh.pos,values= a45_label_lh.values) + \
mne.Label(AVI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=AVI_label_lh.pos,values= AVI_label_lh.values) + \
mne.Label(FOP5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP5_label_lh.pos,values= FOP5_label_lh.values) + \
mne.Label(_47l_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_47l_label_lh.pos,values= _47l_label_lh.values)
#brain1.add_label(temp_broca_label_l, borders=True, color=c_table[6])
lh_label = stc_all_cluster_vis.in_label(temp_broca_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
broca_vertices_l = temp.vertices[0]
broca_vertices_l = np.sort(np.concatenate((broca_vertices_l,[1187,3107,3108,3109,6745,7690,7691])))
new_label = mne.Label(broca_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[6])
temp_sylvian_label_l = mne.Label(OP23_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP23_label_lh.pos,values= OP23_label_lh.values) + \
mne.Label(Ig_label_lh.vertices, hemi='lh',name=u'sts_l',pos=Ig_label_lh.pos,values= Ig_label_lh.values) + \
mne.Label(OP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP4_label_lh.pos,values= OP4_label_lh.values) + \
mne.Label(OP1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP1_label_lh.pos,values= OP1_label_lh.values) + \
mne.Label(FOP2_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP2_label_lh.pos,values= FOP2_label_lh.values) + \
mne.Label(_6r_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_6r_label_lh.pos,values= _6r_label_lh.values)
#brain1.add_label(temp_sylvian_label_l, borders=True, color=c_table[8])
lh_label = stc_all_cluster_vis.in_label(temp_sylvian_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
sylvian_vertices_l = temp.vertices[0]
sylvian_vertices_l = np.sort(np.concatenate((sylvian_vertices_l,[905,1892,2825,2526,4157,4158,4159,6239,8290,8293,9194,9203])))
new_label = mne.Label(sylvian_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[8])
# right hemisphere
#brain2 = stc_all_cluster_vis.plot(
# hemi='rh', views='lateral', subjects_dir=fs_dir,
# time_label='Duration significant (ms)', size=(800, 800),
# smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
#
#stg_vertices_r = A5_label_rh.vertices
#stg_vertices_r = np.sort([2001,2002,2419,2420,2421,2418,2754,2417,13075,13076,13077,13078,\
# 13079,13080,13081,12069,12070,12071,12072])
#new_label = mne.Label(stg_vertices_r, hemi='rh')
#brain2.add_label(new_label, borders=True, color=c_table[5])
#
#os.chdir('figures')
#brain2.save_image('RH_STClustering.pdf', antialiased=True)
#brain2.save_image('RH_STClustering.png', antialiased=True)
#os.chdir('..')
# V1
#lh_label = dot_stc_all_cluster_vis.in_label(V1_label_lh)
#data = lh_label.data
#lh_label.data[data < 50] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = dot_stc_all_cluster_vis.in_label(temp_labels)
#tV1_vertices_l = temp.vertices[0]
#new_label = mne.Label(tV1_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color='r')
#
#M = np.mean(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1)
#errM = np.std(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
#t0 = time.time()
#plotit2(times, M, errM, 5, 0, yMin=0, yMax=2.7, subject = 'all')
#plotsig2(times,nReps,X, 5, 0, all_subject, boot_pVal)
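# Save the ROI vertex indices (fsaverage, left hemisphere plus right STG) so the analysis
# cells below can reload them with np.load. stg_vertices_l and stg_vertices_r are assumed
# to have been defined in earlier cells.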
np.save('STG_Vert', stg_vertices_l)
np.save('IFG_Vert', broca_vertices_l)
np.save('TPJ_Vert', tpj_vertices_l)
np.save('Motor_Vert', motor_vertices_l)
np.save('Sylvian_Vert', sylvian_vertices_l)
np.save('STG_Vert_r', stg_vertices_r)
#%%
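# Bootstrap parameters used by plotsig2 throughout: number of resamples and significance level.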
figureDir = '%s/figures' % raw_dir
nReps = 3000
boot_pVal = 0.05
#%%
""" Left STG: Word vs. Noise """
stg_vertices_l = np.load('STG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
MM = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
diffScore = np.mean((MM[:,:,5]-MM[:,:,8]), axis = 1)
diffScore2 = np.mean((MM[:,:,0]-MM[:,:,3]), axis = 1)
del temp1
plt.figure()
plt.clf()
plt.plot(times, diffScore)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Lexical task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, diffScore2)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Fixation task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[stg_vertices_l,:,:,:],axis=0)
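# X: [time, subject, condition] array of ROI-averaged responses, used by plotsig2 for the
# bootstrap significance tests.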
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[stg_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
""" Plot """
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t1 = 300
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+100],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4.5])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('STG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('STG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Equivalence test"""
import statsmodels.stats.weightstats  # submodule import needed if the commented ttost_paired call below is re-enabled
sstep = 10
p = np.empty((int(len(range(0,800,sstep))),1))
lower_p = np.empty((int(len(range(0,800,sstep))),1))
upper_p = np.empty((int(len(range(0,800,sstep))),1))
for tt, ttime in zip(range(0, len(range(0,800,sstep))),range(0,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t_window1_dot = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_fix = temp_meg
err = 0.8 * np.std(temp_meg_lex - temp_meg_fix)
# p[tt], a, b = statsmodels.stats.weightstats.ttost_paired(temp_meg_lex, temp_meg_fix, err, -err)
xx, lower_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,-err)
xx, upper_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,err)
p[tt] = max(lower_p[tt], upper_p[tt])*2
plt.figure()
plt.clf()
plt.plot(range(0,800,sstep), p)
plt.plot([0, 800],[0.05,0.05],'--')
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('P-value from the equivalence test')
os.chdir(figureDir)
plt.savefig('STG_equivalence.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_equivalence.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#tempa = np.random.normal(100,5,(1,100))
#tempb = np.random.normal(10,5,(1,100))
#err = 0.8*5
#tempp, fjsdk, fjdskl = statsmodels.stats.weightstats.ttost_paired(tempa[0], tempb[0], err, -err)
#xxx, xxxx = stats.ttest_rel(tempa[0],tempb[0])
#%%
"""Correlation over time"""
sstep = 10
tstart = 0
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+10],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
# word and scramble conditions use the same sliding window (t_window1)
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('STG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
del M, M1, M2
#%%
""" Broca """
broca_vertices_l = np.load('IFG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
plt.figure()
plt.clf()
plt.plot(times, M[:,5]-M[:,8])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Lexical task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, M[:,0]-M[:,3])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Fixation task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[broca_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
"""Plot"""
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-2,3])
os.chdir('figures')
plt.savefig('IFG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
#t1 = 400
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4])
os.chdir('figures')
plt.savefig('IFG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('IFG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('IFG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Correlation over time"""
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
sstep = 10
tstart = 200
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+50],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
# word and scramble conditions use the same sliding window (t_window1)
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('IFG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" TPJ """
tpj_vertices_l = np.load('TPJ_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[tpj_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: TPJ')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: TPJ')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: TPJ')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: TPJ')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: TPJ')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: TPJ')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[tpj_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[tpj_vertices_l,:,:,:],axis=0)
del temp1, temp2
t_window1 = np.multiply(np.divide(np.add([400,500],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
os.chdir('figures')
plt.savefig('TPJ_corr_lexical.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_corr_lexical.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
np.corrcoef(temp_read,temp_meg)
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
""" Correlation: Dot task """
t_window1_dot = np.multiply(np.divide(np.add([300,400],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
os.chdir('figures')
plt.savefig('TPJ_corr_dot.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_corr_dot.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
np.corrcoef(temp_read,temp_meg)
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
""" Task effects: Word response in lexical vs. dot task """
t0 = time.time()
task1 = 0
task2 = 5
temp2_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task2], axis = 0)
temp2_poor = np.mean(M2[np.int(t_window1[0]):np.int(t_window1[1]),:,task2], axis = 0)
temp3_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task1], axis = 0)
temp3_poor = np.mean(M2[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task1], axis = 0)
temp2 = np.concatenate((temp2_good,temp2_poor)) # lexical
temp3 = np.concatenate((temp3_good,temp3_poor)) # dot
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp3, temp2, deg=1)
ax.plot(temp3, fit[0] * temp3 + fit[1], color=[0,0,0])
ax.plot(temp3_poor, temp2_poor, 'o', markerfacecolor=[.5,.5,.5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=[.5,.5,.5], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([0, 7])
plt.xlim([0, 7])
r, p = stats.pearsonr(temp3,temp2)
print('TPJ: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('TPJ_lexical_dot.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_dot.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
del M, M1, M2
#%%
""" Motor """
M = np.mean(np.mean(tX11[motor_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(tX11[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[motor_vertices_l,:,:,:],axis=0)
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 0, 3, yMin=0, yMax=2.3, subject = 'all')
plotsig2(times,nReps,X, 0, 3, all_subject, boot_pVal)
C = np.mean(X11[motor_vertices_l,:,:,0],axis=0) - np.mean(X11[motor_vertices_l,:,:,3],axis=0)
#corr = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.5,np.str(times[np.where(corr == np.max(corr))[0][0]]))
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.4,np.str(np.max(corr)))
os.chdir(figureDir)
plt.savefig('Motor_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 0, 3, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 0, 3, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 0, 3, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 0, 3, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 5, 8, yMin=0, yMax=2.3, subject = 'all')
plotsig2(times,nReps,X, 5, 8, all_subject, boot_pVal)
#C = np.mean(X11[motor_vertices_l,:,:,5],axis=0) - np.mean(X11[motor_vertices_l,:,:,8],axis=0)
#corr2 = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.5,np.str(times[np.where(corr2 == np.max(corr2))[0][0]]))
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.4,np.str(np.max(corr2)))
os.chdir(figureDir)
plt.savefig('Motor_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 5, 8, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 5, 8, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 5, 8, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 5, 8, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
#%%
""" Sylvian """
M = np.mean(np.mean(tX11[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(tX11[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[sylvian_vertices_l,:,:,:],axis=0)
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 0, 3, yMin=0, yMax=2.7, subject = 'all')
plotsig2(times,nReps,X, 0, 3, all_subject, boot_pVal)
#C = np.mean(X11[sylvian_vertices_l,:,:,0],axis=0) - np.mean(X11[sylvian_vertices_l,:,:,3],axis=0)
#corr = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.5,np.str(times[np.where(corr == np.max(corr))[0][0]]))
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.4,np.str(np.max(corr)))
os.chdir(figureDir)
plt.savefig('Sylvian_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 0, 3, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 0, 3, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 0, 3, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 0, 3, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 5, 8, yMin=0, yMax=2.7, subject = 'all')
plotsig2(times,nReps,X, 5, 8, all_subject, boot_pVal)
#C = np.mean(X11[sylvian_vertices_l,:,:,5],axis=0) - np.mean(X11[sylvian_vertices_l,:,:,8],axis=0)
#corr2 = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.5,np.str(times[np.where(corr2 == np.max(corr2))[0][0]]))
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.4,np.str(np.max(corr2)))
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 5, 8, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 5, 8, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 5, 8, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 5, 8, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print "\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60))
#%%
""" Making bar plots """
t_window1 = np.multiply(np.divide(np.add([300,600],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
dot_window1 = np.multiply(np.divide(np.add([300,600],[100,100]),1000.), sRate)
dot_window1 = [np.int(i) for i in dot_window1]
t_window2 = np.multiply(np.divide(np.add([600,700],[100,100]),1000.), sRate)
t_window2 = [np.int(i) for i in t_window2]
dot_early = np.multiply(np.divide(np.add([300,400],[100,100]),1000.), sRate)
dot_early = [np.int(i) for i in dot_early]
dot_late = np.multiply(np.divide(np.add([500,600],[100,100]),1000.), sRate)
dot_late = [np.int(i) for i in dot_late]
#temp_vertices = broca_vertices_l
temp_vertices = stg_vertices_l
# AUD 1
# Lexical task
task = 5
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[temp_vertices,:,:,:],axis=0)
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
medNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+1], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
medNoise1_good_err = np.std(medNoise1_good) / np.sqrt(len(medNoise1_good))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[temp_vertices,:,:,:],axis=0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
medNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task+1], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
medNoise1_poor_err = np.std(medNoise1_poor) / np.sqrt(len(medNoise1_poor))
#lowNoise2_poor = np.mean(M2[t_window2[0]:t_window2[1],:,task], axis = 0) - np.mean(M2[t_window2[0]:t_window2[1],:,task+3], axis = 0)
#lowNoise2_poor_err = np.std(lowNoise2_poor) / np.sqrt(len(lowNoise2_poor))
#medNoise2_poor = np.mean(M2[t_window2[0]:t_window2[1],:,task+1], axis = 0) - np.mean(M2[t_window2[0]:t_window2[1],:,task+3], axis = 0)
#medNoise2_poor_err = np.std(medNoise2_poor) / np.sqrt(len(medNoise2_poor))
del temp2
# Dot task
task = 0
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[temp_vertices,:,:,:],axis=0)
dot_lowNoise1_good = np.mean(M1[dot_window1[0]:dot_window1[1],:,task], axis = 0) - np.mean(M1[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_lowNoise1_good_err = np.std(dot_lowNoise1_good) / np.sqrt(len(dot_lowNoise1_good))
dot_medNoise1_good = np.mean(M1[dot_window1[0]:dot_window1[1],:,task+1], axis = 0) - np.mean(M1[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_medNoise1_good_err = np.std(dot_medNoise1_good) / np.sqrt(len(dot_medNoise1_good))
dot_lowNoise2_early_good = np.mean(M1[dot_early[0]:dot_early[1],:,task], axis = 0) - np.mean(M1[dot_early[0]:dot_early[1],:,task+3], axis = 0)
dot_lowNoise2_early_good_err = np.std(dot_lowNoise2_early_good) / np.sqrt(len(dot_lowNoise2_early_good))
dot_lowNoise2_late_good = np.mean(M1[dot_late[0]:dot_late[1],:,task], axis = 0) - np.mean(M1[dot_late[0]:dot_late[1],:,task+3], axis = 0)
dot_lowNoise2_late_good_err = np.std(dot_lowNoise2_late_good) / np.sqrt(len(dot_lowNoise2_late_good))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[temp_vertices,:,:,:],axis=0)
dot_lowNoise1_poor = np.mean(M2[dot_window1[0]:dot_window1[1],:,task], axis = 0) - np.mean(M2[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_lowNoise1_poor_err = np.std(dot_lowNoise1_poor) / np.sqrt(len(dot_lowNoise1_poor))
dot_medNoise1_poor = np.mean(M2[dot_window1[0]:dot_window1[1],:,task+1], axis = 0) - np.mean(M2[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_medNoise1_poor_err = np.std(dot_medNoise1_poor) / np.sqrt(len(dot_medNoise1_poor))
dot_lowNoise2_early_poor = np.mean(M2[dot_early[0]:dot_early[1],:,task], axis = 0) - np.mean(M2[dot_early[0]:dot_early[1],:,task+3], axis = 0)
dot_lowNoise2_early_poor_err = np.std(dot_lowNoise2_early_poor) / np.sqrt(len(dot_lowNoise2_early_poor))
dot_lowNoise2_late_poor = np.mean(M2[dot_late[0]:dot_late[1],:,task], axis = 0) - np.mean(M2[dot_late[0]:dot_late[1],:,task+3], axis = 0)
dot_lowNoise2_late_poor_err = np.std(dot_lowNoise2_late_poor) / np.sqrt(len(dot_lowNoise2_late_poor))
del temp2
"""
Correlation
"""
aaa = np.array(subs)
temp_meg1 = np.concatenate((dot_lowNoise1_good,dot_lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
temp_brs = np.concatenate((brs[good_readers],brs[poor_readers]))
temp_rf = np.concatenate((rf[good_readers],rf[poor_readers]))
temp_raw = np.concatenate((orig_swe[good_readers],orig_swe[poor_readers]))
temp_age = np.concatenate((orig_age[good_readers],orig_age[poor_readers]))
#temp_read = temp_raw
#temp_id = np.where(temp_meg>4.5)[0]
#temp_meg = np.concatenate((temp_meg[0:temp_id], temp_meg[temp_id+1:len(temp_meg)]))
#temp_read = np.concatenate((temp_read[0:temp_id], temp_read[temp_id+1:len(temp_read)]))
plt.figure(20)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg1, temp_read, deg=1)
ax.plot(temp_meg1, fit[0] * temp_meg1 + fit[1], color=[0,0,0])
#fit = np.polyfit(dot_lowNoise1_good, orig_twre[good_readers], deg=1)
#ax.plot(dot_lowNoise1_good, fit[0] * dot_lowNoise1_good + fit[1], color=c_table[5])
ax.plot(temp_meg1, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(dot_lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=c_table[5], markeredgecolor=[1,1,1], markersize=10)
#plt.xlim([-1,4])
#for i, txt in enumerate(temp_age):
# ax.annotate(temp_age[i], (temp_meg1[i], temp_read[i]))
#plt.ylim([-1,6])
#plt.xlim([50,130])
np.corrcoef(temp_read,temp_meg1)
r, p = stats.pearsonr(temp_read,temp_meg1)
print('dot(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],dot_lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],dot_lowNoise1_good)
print('dot(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],dot_lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],dot_lowNoise1_poor)
print('dot(poor): correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Lexical task """
temp_meg2 = np.concatenate((lowNoise1_good,lowNoise1_poor))
plt.figure(21)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg2, temp_read, deg=1)
ax.plot(temp_meg2, fit[0] * temp_meg2 + fit[1], color=[0,0,0])
#fit = np.polyfit(lowNoise1_good, orig_twre[good_readers], deg=1)
#ax.plot(lowNoise1_good, fit[0] * lowNoise1_good + fit[1], color=c_table[5])
ax.plot(temp_meg2, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=c_table[5], markeredgecolor=[1,1,1], markersize=10)
#plt.xlim([-1,4])
np.corrcoef(temp_read,temp_meg2)
r, p = stats.pearsonr(temp_read,temp_meg2)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_lexical.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lexical.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Dot vs. Lexical """
plt.figure(22)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg2, temp_meg1, deg=1)
ax.plot(temp_meg2, fit[0] * temp_meg2 + fit[1], color=[0,0,0])
#fit = np.polyfit(lowNoise1_good, orig_twre[good_readers], deg=1)
#ax.plot(lowNoise1_good, fit[0] * lowNoise1_good + fit[1], color=c_table[5])
ax.plot(temp_meg2, temp_meg1, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=c_table[5], markeredgecolor=[1,1,1], markersize=10)
#plt.xlim([-1,4])
#plt.ylim([-1,4])
np.corrcoef(temp_meg1,temp_meg2)
r, p = stats.pearsonr(temp_meg1,temp_meg2)
print('Dot_Lexical: correlation = %.4f, p = %.4f' %(r, p))
#np.corrcoef(orig_twre[good_readers],lowNoise1_good)
#r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
#print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
#np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
#r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
#print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot_lexical.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_lexical.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp_meg3 = np.concatenate((dot_lowNoise2_early_good,dot_lowNoise2_early_poor))
plt.figure(23)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg3, temp_read, deg=1)
ax.plot(temp_meg3, fit[0] * temp_meg3 + fit[1], color=[0,0,0])
ax.plot(temp_meg3, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
np.corrcoef(temp_read,temp_meg3)
r, p = stats.pearsonr(temp_read,temp_meg3)
print('dot(all)_early: correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot_early.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_early.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp_meg4 = np.concatenate((dot_lowNoise2_late_good,dot_lowNoise2_late_poor))
plt.figure(23)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg4, temp_read, deg=1)
ax.plot(temp_meg4, fit[0] * temp_meg4 + fit[1], color=[0,0,0])
ax.plot(temp_meg4, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
np.corrcoef(temp_read,temp_meg4)
r, p = stats.pearsonr(temp_read,temp_meg4)
print('dot(all)_late: correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot_late.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_late.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure(24)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg4, temp_meg3, deg=1)
ax.plot(temp_meg4, fit[0] * temp_meg4 + fit[1], color=[0,0,0])
ax.plot(temp_meg4, temp_meg3, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
np.corrcoef(temp_meg3,temp_meg4)
r, p = stats.pearsonr(temp_meg3,temp_meg4)
print('dot(all)_early_late: correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot_early_late.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_early_late.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure(25)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_age, temp_meg1, deg=1)
ax.plot(temp_age, fit[0] * temp_age + fit[1], color=[0,0,0])
ax.plot(temp_age, temp_meg1, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
np.corrcoef(temp_meg1,temp_age)
r, p = stats.pearsonr(temp_meg1,temp_age)
print('dot(all)_age: correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_dot_age.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_age.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure(26)
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_age, temp_meg2, deg=1)
ax.plot(temp_age, fit[0] * temp_age + fit[1], color=[0,0,0])
ax.plot(temp_age, temp_meg2, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
np.corrcoef(temp_meg2,temp_age)
r, p = stats.pearsonr(temp_meg2,temp_age)
print('lexical(all)_age: correlation = %.4f, p = %.4f' %(r, p))
os.chdir('figures')
plt.savefig('STG_corr_lexical_age.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lexical_age.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Right STG: Word vs. Noise """
stg_vertices_r = np.load('STG_Vert_r.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[stg_vertices_r,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_r,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[stg_vertices_r,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_r,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[stg_vertices_r,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[stg_vertices_r,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_r,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[stg_vertices_r,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_r,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[stg_vertices_r,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('STGr_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STGr_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
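# M1/M2 keep the subject dimension (no across-subject average here) so that
# each subject's word-minus-noise contrast can be correlated with the reading
# measures.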
M1 = np.mean(temp1[stg_vertices_r,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[stg_vertices_r,:,:,:],axis=0)
del temp1, temp2
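# Convert the 400-500 ms analysis window (shifted by 100 ms) from milliseconds
# into sample indices using the sampling rate sRate.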
t_window1 = np.multiply(np.divide(np.add([400,500],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
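# The polyfit/scatter/pearsonr/savefig pattern above is repeated for every
# contrast; a small helper like the following (hypothetical, not part of the
# original script) captures the same steps in one place:
def _corr_plot(fignum, meg, read, label, fname):
    # Scatter per-subject MEG contrasts against a behavioral score, overlay a
    # least-squares line, report the Pearson correlation, and save the figure.
    plt.figure(fignum)
    plt.clf()
    ax = plt.subplot()
    fit = np.polyfit(meg, read, deg=1)
    ax.plot(meg, fit[0] * meg + fit[1], color=[0, 0, 0])
    ax.plot(meg, read, 'o', markerfacecolor=[.5, .5, .5],
            markeredgecolor=[1, 1, 1], markersize=10)
    r, p = stats.pearsonr(read, meg)
    print('%s: correlation = %.4f, p = %.4f' % (label, r, p))
    os.chdir('figures')
    plt.savefig(fname + '.png', dpi=600, format='png')
    plt.savefig(fname + '.pdf', dpi=600, format='pdf')
    os.chdir('..')
    return r, p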
""" Based on torchvision.models.resnet
This is basically resnet50, just to get you started.
"""
import logging
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import LambdaLR
import pugh_torch as pt
from pugh_torch.models import resnet50
from pugh_torch.modules import conv3x3, conv1x1
from pugh_torch.modules.meta import BatchLinear
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy
import albumentations as A
from albumentations.pytorch import ToTensorV2
import cv2
from scipy.interpolate import griddata
import numpy as np
from pathlib import Path
from math import sqrt
from dataset import ImageNetSample, SingleImageDataset, unnormalize_coords
from callbacks import RasterMontageCallback, LinearHistogramCallback
log = logging.getLogger(__name__)
this_file_path = Path(__file__).resolve()
this_file_dir = this_file_path.parent
class HyperHead(nn.Module):
"""For the multi-heads in a HyperNetwork"""
def __init__(self, f_in, hypo_in, hypo_out):
super().__init__()
self.hypo_in = hypo_in
self.hypo_out = hypo_out
self.weight_linear = nn.Linear(f_in, hypo_in * hypo_out)
self.bias_linear = nn.Linear(f_in, hypo_out)
self._hyper_weight_init(self.weight_linear)
self._hyper_bias_init(self.bias_linear)
def _hyper_weight_init(self, m):
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")
m.weight.data = m.weight.data / 1.0e2
with torch.no_grad():
m.bias.uniform_(-1 / self.hypo_in, 1 / self.hypo_in)
def _hyper_bias_init(self, m):
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")
m.weight.data = m.weight.data / 1.0e2
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(m.weight)
with torch.no_grad():
m.bias.uniform_(-1 / fan_in, 1 / fan_in)
def forward(self, x):
batch = x.shape[0]
weight = self.weight_linear(x)
weight = weight.reshape(batch, self.hypo_out, self.hypo_in)
bias = self.bias_linear(x)
return weight, bias
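# Shape sketch: for embeddings x of shape (B, f_in), HyperHead(f_in, hypo_in,
# hypo_out)(x) returns a weight of shape (B, hypo_out, hypo_in) and a bias of
# shape (B, hypo_out), i.e. one predicted linear layer per batch element.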
class HyperNetwork(nn.Module):
def __init__(
self,
input,
hidden,
hypo_network,
activation="relu",
):
"""
        input : int
            Number of input features from the embedding
hidden : list
List of ints, hidden layer nodes for hyper network
hypo_network : nn.Sequential
Sequential hypo_network
"""
super().__init__()
self.activation = pt.modules.Activation(activation)
self.layers = nn.ModuleList()
if hidden:
self.layers.append(nn.Linear(input, hidden[0]))
self.layers.append(self.activation)
for h_in, h_out in zip(hidden, hidden[1:]):
self.layers.append(nn.Linear(h_in, h_out))
self.layers.append(self.activation)
num_encoding = hidden[-1]
else:
num_encoding = input
# Create all the heads to predict the hyponetwork parameters
self.heads = nn.ModuleList()
for module in hypo_network:
if not isinstance(module, nn.Linear):
continue
f_out, f_in = module.weight.shape
self.heads.append(HyperHead(num_encoding, f_in, f_out))
def forward(self, x):
# Run it through the hidden layers
for layer in self.layers:
x = layer(x)
# Run it through the heads
outputs = []
for head in self.heads:
outputs.append(head(x))
# unpack the outputs
weights = [x[0] for x in outputs]
biases = [x[1] for x in outputs]
return weights, biases
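# The returned `weights` and `biases` are lists with one entry per nn.Linear
# layer of the hypo-network, each entry carrying a leading batch dimension, so
# every item in the batch gets its own predicted set of SIREN parameters.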
class SIREN(nn.Sequential):
def __init__(self, layers, bias=True):
"""
Parameters
----------
        layers : list
            List of integers giving the number of nodes per layer, where
            layers[0] is the number of input features and layers[-1] is the
            number of output features.
"""
modules = []
modules.append(BatchLinear(layers[0], layers[1], bias=bias))
modules.append(pt.modules.Activation("sine", modules[-1], first=True))
# hidden layers
for f_in, f_out in zip(layers[1:-2], layers[2:-1]):
modules.append(BatchLinear(f_in, f_out, bias=bias))
modules.append(pt.modules.Activation("sine", modules[-1]))
modules.append(BatchLinear(layers[-2], layers[-1]))
super().__init__(*modules)
def forward(self, input, weights=None, biases=None):
bl_count = 0
if weights is None or biases is None:
assert weights is None and biases is None
return super().forward(input)
for module in self:
if isinstance(module, BatchLinear):
input = module(input, weights[bl_count], biases[bl_count])
bl_count += 1
else:
input = module(input)
return input
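# When `weights`/`biases` are supplied (e.g. predicted by the HyperNetwork),
# each BatchLinear layer consumes its corresponding external parameters;
# otherwise the SIREN falls back to its own learned parameters via the plain
# nn.Sequential forward pass.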
class HyperSIRENPTL(pt.LightningModule):
"""Trainable network that contains 3 main components:
1. Encoder - any CNN for feature extraction. Here it's ResNet50 and
it produces a 256 element feature vector
2. HyperNetwork - A FC network that predicts weights and biases
for a SIREN network.
    3. SIREN - Technically in this use case, this has no learnable parameters
because it uses the output of the HyperNetwork.
End goal is to produce better SIREN initializations for learning
coordinate->image mappings faster.
"""
def __init__(
self,
*,
cfg=None,
encoder={},
hyper={},
siren={},
learning_rate=0.002,
optimizer="adamw",
optimizer_kwargs={},
):
super().__init__()
self.save_hyperparameters()
self.cfg = cfg
self.encoder_cfg = encoder
self.hyper_cfg = hyper
self.siren_cfg = siren
self.learning_rate = learning_rate
self.optimizer = optimizer
self.optimizer_kwargs = optimizer_kwargs
embedding_size = encoder.get("size", 256)
self.encoder_net = resnet50(pretrained=True, num_classes=embedding_size)
self.encoder_activation = pt.modules.Activation(hyper.get("activation", "relu"))
siren_nodes = [2, *siren.get("layers", [128] * 5), 3]
self.siren_net = SIREN(siren_nodes)
hyper_hidden = hyper.get("layers", [256])
self.hyper_net = HyperNetwork(embedding_size, hyper_hidden, self.siren_net)
self.siren_loss_fn = pt.losses.get_functional_loss(
siren.get("loss", "mse_loss")
)
def forward(self, imgs, coords=None):
embedding = self.encoder_net(imgs) # (B, embedded_feat)
siren_weights, siren_biases = self.hyper_net(
self.encoder_activation(embedding)
) # (B, long_flattened_params)
if coords is None:
return embedding, siren_weights, siren_biases
pred = self.siren_net(coords, siren_weights, siren_biases)
return embedding, siren_weights, siren_biases, pred
def _log_common(self, split, logits, target, loss):
self.log(f"{split}_loss", loss, prog_bar=True)
def _log_loss(self, split, pred, target):
# Makes it easier to directly compare techniques that have a different
# loss function
loss = F.mse_loss(pred, target)
self.log(f"{split}_mse_loss", loss, prog_bar=True)
return loss
def training_step(self, batch, batch_nb):
coords, rgb_vals, imgs = batch
batch_size = coords.shape[0]
embedding, siren_weights, siren_biases, pred = self(imgs, coords)
self._log_loss("train", pred, rgb_vals)
self.last_logits = pred
loss = 0
siren_loss = self.siren_loss_fn(pred, rgb_vals)
loss += siren_loss
# Regularization encourages a gaussian prior on embedding from context encoder
if self.encoder_cfg.get("loss_weight"):
embedding_reg = (
self.encoder_cfg["loss_weight"] * (embedding * embedding).mean()
)
loss += embedding_reg
# Regularization encourages a lower frequency representation of the image
        # Not sure I believe that, but it's what the paper says.
# if self.hyper_cfg.get("loss_weight"):
# n_params = sum([w.shape[-1] * w.shape[-2] for w in siren_weights])
# cum_mag = sum([torch.sum(w * w, dim=(-1, -2)) for w in siren_weights])
# hyper_reg = self.hyper_cfg["loss_weight"] * (cum_mag / n_params).mean()
# loss += hyper_reg
# The variance of each predicted layers should be approximately equal to
# initialization for well behaved training and to avoid vanishing
# gradients.
# First Layer: np.sqrt(6 / num_input) / self.frequency,
# This would be similar to:
# = sqrt(2/3) / (self.frequency * sqrt(num_input))
# Rest: m.weight.uniform_(-1 / num_input, 1 / num_input)
if self.hyper_cfg.get("loss_weight"):
hyper_reg = 0
w = siren_weights[0]
fan_in = w.shape[-1]
# Empirically, the trained network had just under twice this std
expected_std_first = torch.tensor(1 / (3 * fan_in)).to(w.device)
actual_std_first = torch.std(w)
actual_mean_first = torch.mean(w)
hyper_loss_std_layer_0 = F.mse_loss(expected_std_first, actual_std_first)
hyper_reg += hyper_loss_std_layer_0
hyper_loss_mean_layer_0 = (
actual_mean_first * actual_mean_first
) # Maybe these should be weighted.
hyper_reg += hyper_loss_mean_layer_0
self.log("hyper_loss_std_layer_0", hyper_loss_std_layer_0)
self.log("hyper_loss_mean_layer_0", hyper_loss_mean_layer_0)
for i, w in enumerate(siren_weights[1:]):
fan_in = w.shape[-1]
                # Assumes the w0 = 30 frequency factor.
                # TODO: maybe multiply this std by 2; empirically, trained
                # networks had roughly twice this std.
expected_std = torch.tensor(sqrt(6) / 3 / (30 * sqrt(fan_in))).to(
w.device
)
actual_std = torch.std(w)
actual_mean = torch.mean(w)
hyper_reg_loss_std = F.mse_loss(expected_std, actual_std)
hyper_reg_loss_mean = (
actual_mean * actual_mean
) # Maybe these should be weighted.
self.log(f"hyper_loss_std_layer_{i}", hyper_reg_loss_std)
self.log(f"hyper_loss_mean_layer_{i}", hyper_reg_loss_mean)
hyper_reg += hyper_reg_loss_std
hyper_reg += hyper_reg_loss_mean
self.log("hyper_reg", hyper_reg)
loss += hyper_reg
self._log_common("train", pred, rgb_vals, loss)
return siren_loss
def validation_step(self, batch, batch_nb):
coords, rgb_vals, imgs = batch
embedding, siren_weights, siren_biases, pred = self(imgs, coords)
loss = self._log_loss("val", pred, rgb_vals)
self.log("val_loss", loss)
return loss
def configure_optimizers(self):
optimizers = []
schedulers = []
optimizers.append(
pt.optimizers.get_optimizer(getattr(self, "optimizer", "adamw"))(
self.parameters(),
lr=self.learning_rate,
**getattr(self, "optimizer_kwargs", {}),
),
)
schedulers.append(
{
"scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizers[0], patience=2
),
"monitor": "val_loss",
}
)
log.info(
f"Using default pugh_torch optimizers {optimizers} and schedulers {schedulers}"
)
return optimizers, schedulers
def configure_callbacks(self):
"""Moves trainer callback declaration into the model so the same
training script can be shared across experiments.
This is not standard pytorch-lightning
Returns
-------
callbacks : list
List of callback objects to initialize the Trainer object with.
"""
callbacks = [
RasterMontageCallback(rgb_transform="imagenet", logging_batch_interval=200)
]
return callbacks
def train_dataloader(self):
transform = A.Compose(
[
A.Resize(256, 256),
A.RandomCrop(*self.cfg.dataset["shape"]),
A.HorizontalFlip(),
A.Normalize(
mean=[0.485, 0.456, 0.406], # this is RGB order.
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
]
)
dataset = ImageNetSample(
split="train", transform=transform, num_sample=self.cfg.dataset.num_sample
)
loader = DataLoader(
dataset,
shuffle=True,
pin_memory=self.cfg.dataset.pin_memory,
num_workers=self.cfg.dataset.num_workers,
batch_size=self.cfg.dataset.batch_size,
)
return loader
def val_dataloader(self):
transform = A.Compose(
[
A.Resize(256, 256),
A.RandomCrop(*self.cfg.dataset["shape"]),
A.Normalize(
mean=[0.485, 0.456, 0.406], # this is RGB order.
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
]
)
dataset = ImageNetSample(
split="val", transform=transform, num_sample=self.cfg.dataset.num_sample
)
n_exemplar = len(dataset)
step = n_exemplar // self.cfg.dataset.num_val_subset
        indices = list(range(0, n_exemplar, step))
dataset = torch.utils.data.Subset(dataset, indices)
loader = DataLoader(
dataset,
shuffle=False,
pin_memory=self.cfg.dataset.pin_memory,
num_workers=self.cfg.dataset.num_workers,
batch_size=self.cfg.dataset.batch_size,
)
return loader
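# Illustrative usage sketch (not part of the original module; the shapes and
# coordinate convention are assumptions based on the dataset and model above):
#   model = HyperSIRENPTL(cfg=cfg)
#   imgs:   (B, 3, H, W) normalized images
#   coords: (B, N, 2) sampled pixel coordinates
#   embedding, weights, biases, pred = model(imgs, coords)  # pred: (B, N, 3) RGB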
def rasterize(model, weights=None, biases=None, shape=(224, 224)):
"""Rasterize an entire image from a trained siren network
Parameters
----------
model : nn.Module
Uninitialized SIREN network
weights : list of torch.Tensor
Must have the same number of layers as model. Each layer has a batch dimension.
biases : list of torch.Tensor
Must have the same number of layers as model. Each layer has a batch dimension.
shape : tuple
Output (H,W) resolution
"""
model = model.eval()
ny, nx = shape
# (X, Y)
    meshgrid = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
"""Test the pandas wrapper class."""
import numpy as np
import numpy.testing as nt
import pandas as pd
import pandas.testing as pt
import pytest
from scipy import sparse
import sklearn.pipeline as pline
from sklearn.preprocessing import FunctionTransformer, MinMaxScaler
import src.preprocessing as pp
@pytest.fixture
def data():
data = {
        'f1': np.array([100, 110, 98, 1500, 30]),
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the array editor.
"""
# Standard library imports
import os
import sys
try:
from unittest.mock import Mock, ANY
except ImportError:
from mock import Mock, ANY # Python 2
# Third party imports
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from qtpy.QtCore import Qt
from flaky import flaky
# Local imports
from spyder.plugins.variableexplorer.widgets.arrayeditor import ArrayEditor, ArrayModel
# =============================================================================
# Utility functions
# =============================================================================
def launch_arrayeditor(data, title="", xlabels=None, ylabels=None):
"""Helper routine to launch an arrayeditor and return its result."""
dlg = ArrayEditor()
assert dlg.setup_and_check(data, title, xlabels=xlabels, ylabels=ylabels)
dlg.show()
dlg.accept() # trigger slot connected to OK button
return dlg.get_value()
def setup_arrayeditor(qbot, data, title="", xlabels=None, ylabels=None):
    """Set up an arrayeditor."""
dlg = ArrayEditor()
dlg.setup_and_check(data, title, xlabels=xlabels, ylabels=ylabels)
dlg.show()
qbot.addWidget(dlg)
return dlg
# =============================================================================
# Tests
# =============================================================================
def test_object_arrays(qtbot):
"""Test that object arrays are working properly."""
arr = np.array([u'a', 1, [2]], dtype=object)
assert_array_equal(arr, launch_arrayeditor(arr, "object array"))
def test_object_arrays_display(qtbot):
"""
Test that value_to_display is being used to display the values of
object arrays.
"""
arr = np.array([[np.array([1, 2])], 2], dtype=object)
dlg = setup_arrayeditor(qtbot, arr)
idx = dlg.arraywidget.model.index(0, 0)
assert u'[Numpy array]' == dlg.arraywidget.model.data(idx)
def test_type_errors(qtbot):
"""
Verify that we don't get a TypeError for certain structured arrays.
Fixes spyder-ide/spyder#5254.
"""
arr = np.ones(2, dtype=[('X', 'f8', (2,10)), ('S', 'S10')])
dlg = setup_arrayeditor(qtbot, arr)
qtbot.keyClick(dlg.arraywidget.view, Qt.Key_Down, modifier=Qt.ShiftModifier)
contents = dlg.arraywidget.model.get_value(dlg.arraywidget.model.index(0, 0))
assert_array_equal(contents, np.ones(10))
def test_arrayeditor_format(qtbot):
"""Changes the format of the array and validates its selected content."""
arr = np.array([1, 2, 3], dtype=np.float32)
dlg = setup_arrayeditor(qtbot, arr, "test array float32")
qtbot.keyClick(dlg.arraywidget.view, Qt.Key_Down, modifier=Qt.ShiftModifier)
qtbot.keyClick(dlg.arraywidget.view, Qt.Key_Down, modifier=Qt.ShiftModifier)
contents = dlg.arraywidget.view._sel_to_text(dlg.arraywidget.view.selectedIndexes())
assert contents == "1\n2\n"
dlg.arraywidget.view.model().set_format("%.18e")
assert dlg.arraywidget.view.model().get_format() == "%.18e"
qtbot.keyClick(dlg.arraywidget.view, Qt.Key_Down, modifier=Qt.ShiftModifier)
qtbot.keyClick(dlg.arraywidget.view, Qt.Key_Down, modifier=Qt.ShiftModifier)
contents = dlg.arraywidget.view._sel_to_text(dlg.arraywidget.view.selectedIndexes())
assert contents == "1.000000000000000000e+00\n2.000000000000000000e+00\n"
def test_arrayeditor_with_string_array(qtbot):
arr = np.array(["kjrekrjkejr"])
assert arr == launch_arrayeditor(arr, "string array")
def test_arrayeditor_with_unicode_array(qtbot):
arr = np.array([u"ñññéáíó"])
assert arr == launch_arrayeditor(arr, "unicode array")
def test_arrayeditor_with_masked_array(qtbot):
arr = np.ma.array([[1, 0], [1, 0]], mask=[[True, False], [False, False]])
assert_array_equal(arr, launch_arrayeditor(arr, "masked array"))
def test_arrayeditor_with_record_array(qtbot):
arr = np.zeros((2, 2), {'names': ('red', 'green', 'blue'),
'formats': (np.float32, np.float32, np.float32)})
assert_array_equal(arr, launch_arrayeditor(arr, "record array"))
@pytest.mark.skipif(not os.name == 'nt', reason="It segfaults sometimes on Linux")
def test_arrayeditor_with_record_array_with_titles(qtbot):
arr = np.array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'),
(('title 2', 'y'), '>f4')])
assert_array_equal(arr, launch_arrayeditor(arr, "record array with titles"))
def test_arrayeditor_with_float_array(qtbot):
arr = np.random.rand(5, 5)
assert_array_equal(arr, launch_arrayeditor(arr, "float array",
xlabels=['a', 'b', 'c', 'd', 'e']))
def test_arrayeditor_with_complex_array(qtbot):
arr = np.round(np.random.rand(5, 5)*10)+\
np.round(np.random.rand(5, 5)*10)*1j
assert_array_equal(arr, launch_arrayeditor(arr, "complex array",
xlabels=np.linspace(-12, 12, 5),
ylabels=np.linspace(-12, 12, 5)))
def test_arrayeditor_with_bool_array(qtbot):
arr_in = np.array([True, False, True])
arr_out = launch_arrayeditor(arr_in, "bool array")
assert arr_in is arr_out
def test_arrayeditor_with_int8_array(qtbot):
arr = np.array([1, 2, 3], dtype="int8")
assert_array_equal(arr, launch_arrayeditor(arr, "int array"))
def test_arrayeditor_with_float16_array(qtbot):
arr = np.zeros((5,5), dtype=np.float16)
assert_array_equal(arr, launch_arrayeditor(arr, "float16 array"))
def test_arrayeditor_with_3d_array(qtbot):
arr = np.zeros((3,3,4))
arr[0,0,0]=1
arr[0,0,1]=2
arr[0,0,2]=3
assert_array_equal(arr, launch_arrayeditor(arr, "3D array"))
def test_arrayeditor_with_empty_3d_array(qtbot):
arr = np.zeros((0, 10, 2))
assert_array_equal(arr, launch_arrayeditor(arr, "3D array"))
arr = np.zeros((1, 10, 2))
assert_array_equal(arr, launch_arrayeditor(arr, "3D array"))
def test_arrayeditor_edit_1d_array(qtbot):
exp_arr = np.array([1, 0, 2, 3, 4])
    arr = np.arange(0, 5)
from __future__ import print_function, division
import configargparse #pip install configargparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import PIL
import PIL.Image
print(torch.__version__)
from TorchClassifier.Datasetutil.Visutil import imshow, vistestresult
from TorchClassifier.Datasetutil.Torchdatasetutil import loadTorchdataset
from TorchClassifier.myTorchModels.TorchCNNmodels import createTorchCNNmodel
# from TFClassifier.Datasetutil.TFdatasetutil import loadTFdataset #loadtfds, loadkerasdataset, loadimagefolderdataset
# from TFClassifier.myTFmodels.CNNsimplemodels import createCNNsimplemodel
# from TFClassifier.Datasetutil.Visutil import plot25images, plot9imagesfromtfdataset, plot_history
# from TFClassifier.myTFmodels.optimizer_factory import build_learning_rate, setupTensorboardWriterforLR
model = None
device = None
# import logger
parser = configargparse.ArgParser(description='myTorchClassify')
parser.add_argument('--data_name', type=str, default='CIFAR10',
help='data name: hymenoptera_data, CIFAR10, flower_photos')
parser.add_argument('--data_type', default='torchvisiondataset', choices=['trainvalfolder', 'traintestfolder', 'torchvisiondataset'],
help='the type of data')
parser.add_argument('--data_path', type=str, default='./../ImageClassificationData',
help='path to get data') #/Developer/MyRepo/ImageClassificationData
parser.add_argument('--img_height', type=int, default=28,
help='resize to img height, 224')
parser.add_argument('--img_width', type=int, default=28,
help='resize to img width, 224')
parser.add_argument('--save_path', type=str, default='./outputs/',
help='path to save the model')
# network
parser.add_argument('--model_name', default='mobilenet', choices=['mobilenet', 'mlpmodel1', 'lenet', 'resnetmodel1', 'vggmodel1', 'cnnmodel1'],
help='the network')
parser.add_argument('--arch', default='Pytorch', choices=['Tensorflow', 'Pytorch'],
help='Model Name, default: Pytorch.')
parser.add_argument('--learningratename', default='warmupexpdecay', choices=['fixedstep', 'fixed', 'warmupexpdecay'],
help='learning rate name')
parser.add_argument('--optimizer', default='Adam', choices=['SGD', 'Adam'],
help='select the optimizer')
parser.add_argument('--batchsize', type=int, default=32,
help='batch size')
parser.add_argument('--epochs', type=int, default=15,
help='epochs')
parser.add_argument('--GPU', type=bool, default=True,
help='use GPU')
parser.add_argument('--TPU', type=bool, default=False,
help='use TPU')
parser.add_argument('--MIXED_PRECISION', type=bool, default=False,
help='use MIXED_PRECISION')
parser.add_argument('--TAG', default='0915',
help='setup the experimental TAG to differentiate different running results')
parser.add_argument('--reproducible', type=bool, default=False,
help='get reproducible results we can set the random seed for Python, Numpy and PyTorch')
args = parser.parse_args()
def test_model(model, dataloaders, class_names, criterion, batch_size):
numclasses = len(class_names)
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(numclasses))
class_total = list(0. for i in range(numclasses))
model.eval()
if 'test' in dataloaders.keys():
test_loader=dataloaders['test']
else:
print("test dataset not available")
return
# iterate over test data
bathindex = 0
for data, target in test_loader:
bathindex = bathindex +1
# move tensors to GPU if CUDA is available
# if train_on_gpu:
# data, target = data.cuda(), target.cuda()
data = data.to(device)
target = target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
outputs = model(data)
if type(outputs) is tuple: #model may output multiple tensors as tuple
outputs, _ = outputs
# calculate the batch loss
loss = criterion(outputs, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(outputs, 1)
# compare predictions to true label
correct_tensor = pred.eq(target.data.view_as(pred))
train_on_gpu = torch.cuda.is_available()
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
# calculate test accuracy for each object class
for i in range(batch_size):
if i<len(target.data):#the actual batch size of the last batch is smaller than the batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(numclasses):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
class_names[i], 100 * class_correct[i] / class_total[i],
                np.sum(class_correct[i]), np.sum(class_total[i])))
# -*- coding: utf-8 -*-
import numpy as np
import json
import torch.utils.data as data
import os
import torch
import h5py
import torch.nn.functional as F
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
class VideoDataSet(data.Dataset):
def __init__(self, opt, subset="train", mode="train"):
self.temporal_scale = opt["temporal_scale"]
self.input_feat_dim = opt['input_feat_dim']
self.temporal_gap = 1. / self.temporal_scale
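        # THUMOS-style split: the 'val' videos are used for training and the
        # 'test' videos for evaluation, hence the remapping below.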
self.subset = 'val' if ('train' in subset) else 'test'
self.mode = mode
self.feature_path = opt["feature_path"]
self.gap = opt['stitch_gap']
self.short_ratio = opt['short_ratio']
self.video_anno = opt['video_anno']
self.thumos_classes = opt["thumos_classes"]
if self.mode == 'train':
if 'val' in self.subset:
self.video_windows = load_json('./Utils/video_win_val.json')
elif 'test' in self.subset:
self.video_windows = load_json('./Utils/video_win_test.json')
elif self.mode == 'inference':
self.video_windows = load_json('./Utils/video_win_infer.json')
self._getDatasetDict()
self.anchor_xmin = [self.temporal_gap * i for i in range(self.temporal_scale)]
self.anchor_xmax = [self.temporal_gap * i for i in range(1, self.temporal_scale + 1)]
def _getDatasetDict(self):
anno_database = load_json(self.video_anno)['database']
self.video_dict = {}
class_list = []
for video_name, video_info in anno_database.items():
video_subset = 'val' if video_info['subset'] == 'train' else 'test'
for item in video_info['annotations']:
class_list.append(item['label'])
item['segment'][0] = float(item['segment'][0])
item['segment'][1] = float(item['segment'][1])
if self.subset in video_subset:
self.video_dict[video_name] = video_info
self.video_list = [win['v_name'] for win in self.video_windows]
if os.path.exists(self.thumos_classes):
with open(self.thumos_classes, 'r') as f:
self.classes = json.load(f)
else:
class_list = list(set(class_list))
class_list = sorted(class_list)
self.classes = {'Background': 0}
for i,cls in enumerate(class_list):
self.classes[cls] = i + 1
with open(self.thumos_classes, 'w') as f:
f.write(json.dumps(self.classes))
def __getitem__(self, index):
if self.mode == "train":
video_data, match_score_action, match_score_start, match_score_end, gt_bbox, num_gt, num_frms = self._get_train_data_label(index)
return video_data, match_score_action, match_score_start, match_score_end, gt_bbox, num_gt, num_frms
else:
video_data, num_frms = self._get_train_data_label(index)
return index, video_data, num_frms #, match_score_action, gt_iou_map
def _get_train_data_label(self, index):
# General data
video_name = self.video_windows[index]['v_name']
w_start = int(self.video_windows[index]['w_start'])
w_end = int(self.video_windows[index]['w_end'])
fps_org = self.video_windows[index]['fps']
num_frms_win = w_end - w_start + 1
# Get video feature
rgb_features = h5py.File(os.path.join(self.feature_path, 'rgb_' + self.subset + '.h5'), 'r')
rgb_data = rgb_features[video_name][:]
rgb_data = torch.Tensor(rgb_data)
rgb_data = torch.transpose(rgb_data, 0, 1)
flow_features = h5py.File(os.path.join(self.feature_path, 'flow_' + self.subset + '.h5'), 'r')
flow_data = flow_features[video_name][:]
flow_data = torch.Tensor(flow_data)
flow_data = torch.transpose(flow_data, 0, 1)
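        # Long windows keep their original sampling; short windows are stitched
        # together with a temporally rescaled copy of themselves (see
        # _get_train_data_label_stitch) so the input still spans temporal_scale
        # frames.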
if num_frms_win > self.temporal_scale * self.short_ratio:
return self._get_train_data_label_org(rgb_data, flow_data, video_name, w_start, w_end, fps_org)
else:
return self._get_train_data_label_stitch(rgb_data, flow_data, video_name, w_start, w_end, fps_org)
def _get_train_data_label_stitch(self, rgb_data, flow_data, video_name, w_start, w_end, fps_org):
num_frms1 = w_end - w_start + 1
video_data = torch.zeros(self.input_feat_dim, self.temporal_scale)
# Left part: original length
rgb_data1 = rgb_data[:, w_start: w_end+1]
flow_data1 = flow_data[:, w_start: w_end+1]
video_data[:, :num_frms1] = torch.cat((rgb_data1, flow_data1), dim=0)
# Right part: rescaled length
num_frms2 = self.temporal_scale - num_frms1 - self.gap
rgb_data2 = F.interpolate(rgb_data1[None,:,:], size=num_frms2, mode='linear', align_corners=True).squeeze(0)
flow_data2 = F.interpolate(flow_data1[None,:,:], size=num_frms2, mode='linear', align_corners=True).squeeze(0)
video_data[:, -num_frms2:] = torch.cat((rgb_data2, flow_data2), dim=0)
# Get annotations
video_info = self.video_dict[video_name]
video_labels_org = video_info['annotations']
if self.mode == 'train':
video_labels_frm = []
for label in video_labels_org:
label_start_frm =label['segment'][0] * fps_org
label_end_frm = label['segment'][1] * fps_org
if round(label_start_frm) >= w_start and round(label_end_frm) <= w_end:
label_frm = {}
label_frm['segment'] = []
label_frm['segment'].append(label_start_frm - w_start)
label_frm['segment'].append(label_end_frm - w_start)
label_frm['label'] = label['label']
video_labels_frm.append(label_frm)
# Get gt_iou_map
gt_bbox = []
for j in range(len(video_labels_frm)):
tmp_info = video_labels_frm[j]
tmp_start_f = max(min(num_frms1-1, round(tmp_info['segment'][0] )), 0)
tmp_end_f = max(min(num_frms1-1, round(tmp_info['segment'][1] )), 0)
tmp_start = tmp_start_f / self.temporal_scale
tmp_end = tmp_end_f / self.temporal_scale
tmp_class = self.classes[tmp_info['label']]
gt_bbox.append([tmp_start, tmp_end, tmp_class])
for j in range(len(video_labels_frm)):
tmp_info = video_labels_frm[j]
tmp_start_f = max(min(num_frms2-1, round(tmp_info['segment'][0] / num_frms1 * num_frms2)), 0) + num_frms1 + self.gap
tmp_end_f = max(min(num_frms2-1, round(tmp_info['segment'][1] / num_frms1 * num_frms2)), 0) + num_frms1 + self.gap
tmp_start = tmp_start_f / self.temporal_scale
tmp_end = tmp_end_f / self.temporal_scale
tmp_class = self.classes[tmp_info['label']]
gt_bbox.append([tmp_start, tmp_end, tmp_class])
# Get actionness scores
match_score_action = [0] * self.temporal_scale
for bbox in gt_bbox:
left_frm = max(round(bbox[0] * self.temporal_scale), 0)
right_frm = min(round(bbox[1] * self.temporal_scale), self.temporal_scale-1)
match_score_action[left_frm:right_frm+1] = [bbox[2]] * (right_frm + 1 - left_frm)
match_score_action = torch.Tensor(match_score_action)
####################################################################################################
# generate R_s and R_e
gt_bbox = np.array(gt_bbox)
if gt_bbox.shape[0] == 0:
print(gt_bbox.shape)
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_len_small = 3 * self.temporal_gap
gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
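            # Each start/end region spans gt_len_small (3 temporal bins) centred
            # on the ground-truth boundary; the IoA of every anchor bin with
            # these regions gives the boundary match scores computed below.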
#####################################################################################################
##########################################################################################################
# calculate the ioa for all timestamp
match_score_start = []
for jdx in range(len(self.anchor_xmin)):
match_score_start.append(np.max(
self._ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(self.anchor_xmin)):
match_score_end.append(np.max(
self._ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
match_score_start = torch.Tensor(match_score_start)
match_score_end = torch.Tensor(match_score_end)
############################################################################################################
max_num_box = 50
gt_bbox = torch.tensor(gt_bbox, dtype=torch.float32)
gt_bbox_padding = gt_bbox.new(max_num_box, gt_bbox.size(1)).zero_()
num_gt = min(gt_bbox.size(0), max_num_box)
gt_bbox_padding[:num_gt, :] = gt_bbox[:num_gt]
return video_data, match_score_action, match_score_start, match_score_end, gt_bbox_padding, num_gt, num_frms1
else:
return video_data, num_frms1
def _get_train_data_label_org(self, rgb_data_org, flow_data_org, video_name, w_start, w_end, fps):
video_data = torch.zeros(self.input_feat_dim, self.temporal_scale)
rgb_data = rgb_data_org[:, w_start: w_end+1]
flow_data = flow_data_org[:, w_start: w_end+1]
num_frms = min(rgb_data.shape[-1], self.temporal_scale)
video_data[:, :num_frms] = torch.cat((rgb_data[:, :num_frms], flow_data[:,:num_frms]), dim=0)
if self.mode == 'train':
# Get annotations
video_info = self.video_dict[video_name]
video_labels = video_info['annotations']
num_frms_org = rgb_data_org.shape[-1]
video_labels_frm = []
for label in video_labels:
label_start_frm = max(0, round(label['segment'][0] * fps))
label_end_frm = min(round(label['segment'][1] * fps), num_frms_org - 1)
if label_start_frm >= w_start and label_end_frm <= w_end:
label_frm = {}
label_frm['segment'] = []
label_frm['segment'].append(label_start_frm - w_start)
label_frm['segment'].append(label_end_frm - w_start)
label_frm['label'] = label['label']
video_labels_frm.append(label_frm)
# Get gt_iou_map
gt_bbox = []
for j in range(len(video_labels_frm)):
tmp_info = video_labels_frm[j]
tmp_start_f = max(min(num_frms-1, tmp_info['segment'][0]), 0)
tmp_end_f = max(min(num_frms-1, tmp_info['segment'][1]), 0)
tmp_start = tmp_start_f / self.temporal_scale
tmp_end = tmp_end_f / self.temporal_scale
tmp_class = self.classes[tmp_info['label']]
gt_bbox.append([tmp_start, tmp_end, tmp_class])
# Get actionness scores
match_score_action = [0] * self.temporal_scale
for bbox in gt_bbox:
left_frm = max(round(bbox[0] * self.temporal_scale), 0)
right_frm = min(round(bbox[1] * self.temporal_scale), self.temporal_scale-1)
match_score_action[left_frm:right_frm+1] = [bbox[2]] * (right_frm + 1 - left_frm)
match_score_action = torch.Tensor(match_score_action)
####################################################################################################
# generate R_s and R_e
gt_bbox = np.array(gt_bbox)
if gt_bbox.shape[0] == 0:
print(gt_bbox.shape)
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_len_small = 3 * self.temporal_gap
            gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
from typing import Union, Sequence
import astro_dynamo.model
import astro_dynamo.snap
import astro_dynamo.targets
import matplotlib.figure
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.axes import SubplotBase
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
def plot_snap_projections(model: Union[
astro_dynamo.model.DynamicalModel, astro_dynamo.snap.SnapShot],
axs: Sequence[SubplotBase] = None,
plotmax: float = 10.,
vmin: float = 1e-5,
vmax: float = 1e-2,
cmap: Union[
str, matplotlib.colors.Colormap] = plt.cm.get_cmap(
'nipy_spectral')) -> matplotlib.figure.Figure:
"""Plot the projections of a DynamicalModel or SnapShot into the three axes in axs if supplied, otherwise draws
a new figure. Distances will be physical if a model with a physical distance scale is supplied."""
if isinstance(model, astro_dynamo.model.MilkyWayModel):
snap, d_scale = model.snap.cpu(), model.d_scale.cpu()
elif isinstance(model, astro_dynamo.model.DynamicalModel):
snap, d_scale = model.snap, 1.0
    elif isinstance(model, astro_dynamo.snap.SnapShot):
snap, d_scale = model, 1.0
else:
        raise TypeError("Expected a DynamicalModel or SnapShot to plot")
if axs is None:
        f, axs = plt.subplots(3, 1, sharex='col')
else:
f = axs.flatten()[0].figure
x = snap.x.cpu() * d_scale
y = snap.y.cpu() * d_scale
z = snap.z.cpu() * d_scale
m = snap.masses.detach().cpu()
projections = ((x, y), (y, z), (x, z))
projection_labels = (('x', 'y'), ('y', 'z'), ('x', 'z'))
for ax, projection, projection_label in zip(axs.flatten(), projections,
projection_labels):
ax.hexbin(projection[0], projection[1], C=m, bins='log',
extent=(-plotmax, plotmax, -plotmax, plotmax),
reduce_C_function=np.sum,
vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_xlabel(projection_label[0])
ax.set_ylabel(projection_label[1])
ax.set_xlim(-plotmax, plotmax)
ax.set_ylim(-plotmax, plotmax)
return f
def plot_disk_kinematics(model: astro_dynamo.model.DynamicalModel,
axs: Sequence[
SubplotBase] = None) -> matplotlib.figure.Figure:
"""Plots the azimuthally averaged kinematics, both mean and sigma in R_cyl, z, phi directions of a model.
The model must contain a disk kinematics target as this is what id plotted.
If supplied plots into the first 2 axes supplied in axs, otherwise creates a new figure."""
if axs is None:
f, axs = plt.subplots(2, 1, sharex='col')
else:
f = axs[0].figure
try:
disk_kinematics_obj = next(target for target in model.targets
if isinstance(target,
astro_dynamo.targets.DiskKinematics))
    except StopIteration:
        raise TypeError("Couldn't find a DiskKinematics target in the model.")
kin_model = disk_kinematics_obj(model).detach().cpu()
r = disk_kinematics_obj.rmid.cpu()
for linestyle, kin, kin_err in zip(('-', '-.'),
(kin_model,),
(None,)):
if kin_err is None:
axs[0].plot(r, kin[1, :].detach().cpu(), 'r', linestyle=linestyle,
label='sig vphi')
axs[0].plot(r, kin[3, :].detach().cpu(), 'g', linestyle=linestyle,
label='sig vr')
axs[0].plot(r, kin[5, :].detach().cpu(), 'b', linestyle=linestyle,
label='sig vz')
else:
axs[0].errorbar(r, kin[1, :].detach().cpu().numpy(),
yerr=kin_err[1, :].cpu().numpy(),
fmt='o', color='r', ecolor='r')
axs[0].errorbar(r, kin[3, :].detach().cpu().numpy(),
yerr=kin_err[3, :].cpu().numpy(),
fmt='o', color='g', ecolor='g')
axs[0].errorbar(r, kin[5, :].detach().cpu().numpy(),
yerr=kin_err[5, :].cpu().numpy(),
fmt='o', color='b', ecolor='b')
for linestyle, kin, kin_err in zip(('-', '-.'),
(kin_model,),
(None,)):
if kin_err is None:
axs[1].plot(r, kin[0, :].detach().cpu(), 'r', linestyle=linestyle,
label='mean vphi')
axs[1].plot(r, kin[2, :].detach().cpu(), 'g', linestyle=linestyle,
label='mean vr')
axs[1].plot(r, kin[4, :].detach().cpu(), 'b', linestyle=linestyle,
label='mean vz')
else:
axs[1].errorbar(r, kin[0, :].detach().cpu().numpy(),
yerr=kin_err[0, :].cpu().numpy(),
fmt='o', color='r', ecolor='r')
axs[1].errorbar(r, kin[2, :].detach().cpu().numpy(),
yerr=kin_err[2, :].cpu().numpy(),
fmt='o', color='g', ecolor='g')
axs[1].errorbar(r, kin[4, :].detach().cpu().numpy(),
yerr=kin_err[4, :].cpu().numpy(),
fmt='o', color='b', ecolor='b')
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5))
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5))
axs[0].set_ylim(0, 0.3 * model.v_scale.cpu())
axs[1].set_ylim(-0.1 * model.v_scale.cpu(), 1.5 * model.v_scale.cpu())
axs[1].set_xlabel('r [kpc]')
axs[0].set_ylabel('[km/s]')
axs[1].set_ylabel('[km/s]')
f.subplots_adjust(hspace=0)
f.tight_layout()
return f
def plot_surface_density_profile(model: astro_dynamo.model.DynamicalModel,
ax: SubplotBase = None,
target_values: torch.Tensor = None) -> SubplotBase:
"""Plots the azimuthally averaged surface density of a model.
The model must contain a SurfaceDensity target to be plotted.
If supplied plots into axis ax, otherwise creates a new figure."""
    if ax is None:
        f, ax = plt.subplots(1, 1)
try:
surface_density_obj = next(target for target in model.targets
if type(target) == astro_dynamo.targets.SurfaceDensity)
    except StopIteration:
raise TypeError("Couldn't find a SurfaceDensity target in the model.")
ax.semilogy(surface_density_obj.rmid.cpu(),
surface_density_obj(model).detach().cpu(), label='Model')
if target_values is not None:
ax.semilogy(surface_density_obj.rmid.cpu(),
target_values.detach().cpu(), label='Target')
ax.set_xlabel('r')
ax.set_ylabel(r'$\Sigma$')
ax.set_ylim(1, 1e4)
ax.legend()
return ax
def plot_disk_density_twod(model: astro_dynamo.model.DynamicalModel,
                           axs: Sequence[SubplotBase] = None,
                           target_values: torch.Tensor = None) -> matplotlib.figure.Figure:
    """Plots the two-dimensional disk density of a model.
    The model must contain a DoubleExponentialDisk target as this is what is plotted.
    If supplied plots into the axes in axs, otherwise creates a new figure."""
if axs is None:
ncol = 2 if target_values is not None else 1
f, axs = plt.subplots(2, ncol, sharex=True, sharey=True, squeeze=False)
else:
f = axs[0].figure
try:
disk_density_obj = next(target for target in model.targets
if type(target) == astro_dynamo.targets.DoubleExponentialDisk)
    except StopIteration:
raise TypeError(
"Couldn't find a DoubleExponentialDisk target in the model.")
model_disk_density = disk_density_obj(model).detach()
model_disk_density_normed = model_disk_density / model_disk_density.sum(
dim=1).unsqueeze(1)
data = model_disk_density.t().log10().cpu().numpy()
    levels = np.max(data) + np.arange(-10, 1)
import os
import glob
import pickle
import pcl
import torch
import torch.utils.data
import torch.nn as nn
import numpy as np
# global configurations:
from autolab_core import YamlConfig
from dexnet.grasping import GpgGraspSampler
from dexnet.grasping import RobotGripper
home_dir = os.environ['HOME']
yaml_config = YamlConfig(home_dir + "/Projects/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/Projects/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSampler(gripper, yaml_config)
class PointGraspDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
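    # check_square returns the indices of the points in points_g that fall
    # inside the one-pixel cube (corner offsets of +/-0.5) centred on `point`;
    # an empty result means the cell is unoccupied.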
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
            print("WARNING : the number of input points seems to be only one; it is not possible to do learning on"
                  " such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
For the GPD baseline; only input_chann values of 3 or 12 are supported.
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
import numpy as np
import scipy as sp
import scipy.linalg
import scipy.signal
import matplotlib as mpl
import matplotlib.pyplot as plt
from AeroDB import *
class ZENITH(object):
def __init__(self, aerodb, x0=None, rho=None):
self.aerodb = aerodb
self.x0 = x0
self.rho = rho
self.x_l = [self.x0]
self.t_l = [0]
@property
def aerodb(self):
return self._aerodb
@aerodb.setter
def aerodb(self, aerodb):
self._aerodb = aerodb
@property
def x(self):
return self._x
@x.setter
def x(self, x):
self._x = x
@property
def xE(self):
R = 6378.15e3
x1, x2, x3 = self.pos
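# Map the launch-frame position onto Earth coordinates (assuming x1/x3 are
# horizontal offsets and x2 is altitude above the launch site): the first and
# third components are great-circle arc lengths along the surface and the
# second is altitude above a spherical Earth of radius R.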
xE = np.array([
[R*np.arctan(np.sqrt(x1**2 + x3**2)/(R+x2))*(x1/np.sqrt(x1**2 + x3**2))],
[np.sqrt((R+x2)**2 + x1**2 + x3**2) - R],
[R*np.arctan(np.sqrt(x1**2 + x3**2)/(R+x2))*(x3/np.sqrt(x1**2 + x3**2))]
])
return xE
@property
def pos(self):
return self.x[:3]
@property
def vel(self):
return self.x[3:6]
@property
def ang(self):
return self.x[6:9]
@property
def angvel(self):
return self.x[9:]
@property
def x0(self):
return self._x0
@x0.setter
def x0(self, x0):
if x0 is None:
x0 = np.zeros((12,1))
else:
pass
try:
x0.shape
except AttributeError:
raise TypeError("Initial state x0 must be a numpy array.")
else:
if x0.shape[0] != 1:
x0 = x0.reshape((x0.shape[0],1))
else:
pass
if x0.shape[0] != 12:
raise ValueError("Initial state x0 must a full state (12-row) vector.")
else:
self.x = x0
self._x0 = x0
def set_rho(self, rho):
if rho is None:
self._rho = lambda h : 1.225
elif callable(rho):
self._rho = rho
elif isinstance(rho, float):
self._rho = lambda h : rho
else:
raise TypeError("Invalid rho type")
def set_I(self, I, t_I):
Idot = np.gradient(I, t_I)
self._Ifunct = lambda t : np.interp(t, t_I, I)
self._Idotfunct = lambda t : np.interp(t, t_I, Idot)
def get_I(self, t):
return self._Ifunct(t)
def get_Idot(self, t):
return self._Idotfunct(t)
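# Illustrative usage (placeholder values, `zen` being a ZENITH instance):
# store an inertia time history so the dynamics can interpolate I and dI/dt
# at arbitrary solver times.
#   zen.set_I(I=np.array([1.2, 1.1, 1.0]), t_I=np.array([0.0, 5.0, 10.0]))
#   zen.get_I(2.5), zen.get_Idot(2.5)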
def set_Ip(self, Ip, t_Ip):
Ipdot = np.gradient(Ip, t_Ip)
self._Ipfunct = lambda t : np.interp(t, t_Ip, Ip)
from sklearn.cluster import MeanShift
from scipy.optimize import minimize
from ase import Atom
import numpy as np
from random import random
def min_dist(pos, host):
"""minimum distance from position to a host atom :param pos: vector x,y,z
position :param host: host atoms object :return: float, minimum distance
Args:
pos:
host:
"""
dummy_atom = Atom('H', position=pos)
dummy_host = host + dummy_atom
return(min(dummy_host.get_distances(-1, [i for i in range(len(host))], mic=True)))
def avg_dist(pos, host):
"""average distance from position to all host atoms :param pos: vector x,y,z
position :param host: host atoms object :return: float, average distance
Args:
pos:
host:
"""
dummy_atom = Atom('H', position=pos)
dummy_host = host + dummy_atom
return(np.average(dummy_host.get_distances(-1, [i for i in range(len(host))], mic=True)))
def find_void(pos, host):
"""finds nearest empty region in host :param pos: vector x,y,z position
:param host: host atoms object :return: vector position of center of empty
void
Args:
pos:
host:
"""
guess = pos
func = lambda pos: -1*min_dist(pos, host) # 1 param function for scipy.minimize
ans = minimize(func, guess)
return(ans.x)
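# Illustrative usage (hypothetical host structure): locate the empty region
# nearest to a trial position inside a periodic framework.
#   from ase.build import bulk
#   host = bulk('Cu', 'fcc', a=3.6).repeat((2, 2, 2))
#   void_center = find_void([1.0, 1.0, 1.0], host)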
def sphere_sample(radius, num_pts=None):
"""generates random positions on the surface of a sphere of certain radius
:param radius: radius of sphere surface to sample :param num_pts: number of
points to try :return: list of x,y,z positions on surface of sphere
Args:
radius:
num_pts:
"""
if num_pts == None:
num_pts = 500
vect_list = []
for i in range(num_pts):
x, y, z = [2*random()-1 for i in range(3)]
# select points inside of unit sphere
if x**2 + y**2 + z**2 > 1.0:
pass
else:
unit_vec = [x, y, z] / np.linalg.norm([x, y, z])
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 11:13:37 2021
@author: dv516
"""
import numpy as np
import pickle
import pyro
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(1)
from algorithms.PyBobyqa_wrapped.Wrapper_for_pybobyqa import PyBobyqaWrapper
from algorithms.Bayesian_opt_Pyro.utilities_full import BayesOpt
from algorithms.nesterov_random.nesterov_random import nesterov_random
from algorithms.simplex.simplex_method import simplex_method
from algorithms.CUATRO.CUATRO import CUATRO
from algorithms.Finite_differences.Finite_differences import finite_Diff_Newton
from algorithms.Finite_differences.Finite_differences import Adam_optimizer
from algorithms.Finite_differences.Finite_differences import BFGS_optimizer
from algorithms.SQSnobfit_wrapped.Wrapper_for_SQSnobfit import SQSnobFitWrapper
from algorithms.DIRECT_wrapped.Wrapper_for_Direct import DIRECTWrapper
from test_functions import quadratic_constrained
import matplotlib.pyplot as plt
def trust_fig(oracle, bounds):
N = 200
lim = 2
x = np.linspace(-lim, lim, N)
y = np.linspace(-lim, lim, N)
X,Y = np.meshgrid(x, y)
Z = oracle.sample_obj(X,Y)
constr = oracle.sample_constr(X,Y)
levels_list = np.logspace(-2, 1.5, 10)
fig = plt.figure(figsize = (6,4))
ax = fig.add_subplot()
ax.contour(X,Y,Z*constr, levels = levels_list)
ax.plot([bounds[0,0], bounds[0, 1]], [bounds[1,0], bounds[1, 0]], c = 'k')
ax.plot([bounds[0,0], bounds[0, 1]], [bounds[1,1], bounds[1, 1]], c = 'k')
ax.plot([bounds[0,0], bounds[0, 0]], [bounds[1,0], bounds[1, 1]], c = 'k')
ax.plot([bounds[0,1], bounds[0, 1]], [bounds[1,0], bounds[1, 1]], c = 'k')
return ax, fig
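# trust_fig draws log-spaced contours of the objective masked by feasibility
# and outlines the rectangular variable bounds; callers overlay optimizer
# trajectories on the returned axes.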
def average_from_list(solutions_list):
N = len(solutions_list)
f_best_all = np.zeros((N, 100))
for i in range(N):
f_best = np.array(solutions_list[i]['f_best_so_far'])
x_ind = np.array(solutions_list[i]['samples_at_iteration'])
for j in range(100):
ind = np.where(x_ind <= j+1)
if len(ind[0]) == 0:
f_best_all[i, j] = f_best[0]
else:
f_best_all[i, j] = f_best[ind][-1]
f_median = np.median(f_best_all, axis = 0)
# f_av = np.average(f_best_all, axis = 0)
# f_std = np.std(f_best_all, axis = 0)
f_min = np.min(f_best_all, axis = 0)
f_max = np.max(f_best_all, axis = 0)
return f_best_all, f_median, f_min, f_max
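# average_from_list aligns each run's best-so-far objective onto a common
# 1..100 function-evaluation grid (holding the last recorded value between
# evaluations) and returns the per-evaluation median, minimum and maximum
# across the repeated runs.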
class RB:
def __init__(self, objective, ineq = []):
self.obj = objective ; self.ieq = ineq
def sample_obj(self, x, y):
return self.obj(x, y)
def sample_constr(self, x, y):
if self.ieq == []:
if (type(x) == float) or (type(x) == int):
return 1
else:
return np.ones(len(x))
elif (type(x) == float) or (type(x) == int):
temporary = [int(g(x, y)) for g in self.ieq]
return np.product(np.array(temporary))
else:
temporary = [g(x, y).astype(int) for g in self.ieq]
return np.product(np.array(temporary), axis = 0)
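# Example usage (mirrors the construction further below): wrap an objective
# and its inequality so both can be evaluated on a plotting meshgrid.
#   oracle = RB(lambda x, y: x**2 + 10*y**2 + x*y,
#               ineq=[lambda x, y: 1 - x - y <= 0])
#   Z = oracle.sample_obj(X, Y) * oracle.sample_constr(X, Y)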
def fix_starting_points(complete_list, x0, init_out, only_starting_point = False):
if only_starting_point:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
complete_list[i] = dict_out
else:
for i in range(len(complete_list)):
dict_out = complete_list[i]
f_arr = dict_out['f_best_so_far']
N_eval = len(f_arr)
g_arr = dict_out['g_best_so_far']
dict_out['x_best_so_far'][0] = np.array(x0)
dict_out['f_best_so_far'][0] = init_out[0]
dict_out['g_best_so_far'][0] = np.array(init_out[1])
for j in range(1, N_eval):
if (g_arr[j] > 1e-3).any() or (init_out[0] < f_arr[j]):
dict_out['x_best_so_far'][j] = np.array(x0)
dict_out['f_best_so_far'][j] = init_out[0]
dict_out['g_best_so_far'][j] = np.array(init_out[1])
complete_list[i] = dict_out
return complete_list
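# fix_starting_points back-fills the shared starting point into every run's
# history; unless only_starting_point is set, it also resets any later
# incumbent that is infeasible (any g > 1e-3) or worse than the initial
# objective, so all methods are compared from the same baseline.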
def Problem_quadratic(x):
f = quadratic_constrained.quadratic_f
g = quadratic_constrained.quadratic_g
return f(x), [g(x)]
bounds = np.array([[-1.5,1.5],[-1.5,1.5]])
# x0 = np.array([1, 1])
x0 = np.array([-0.5, 1.5])
max_f_eval = 100
max_it = 100
initial_output = Problem_quadratic(x0)
quadraticTight_pybobyqa = PyBobyqaWrapper().solve(Problem_quadratic, x0, bounds=bounds.T, \
maxfun= max_f_eval, constraints=2, \
seek_global_minimum= True, \
objfun_has_noise=False)
# print(x0)
N = 10
quadraticTight_Nest_list = []
for i in range(N):
rnd_seed = i
quadratic_Nest = nesterov_random(Problem_quadratic, x0, bounds, max_iter = 100, \
constraints = 1, rnd_seed = i, alpha = 1e-3, mu = 1e-2, \
max_f_eval = max_f_eval)
quadraticTight_Nest_list.append(quadratic_Nest)
print('10 Nesterov iterations completed')
# print(x0)
N = 10
quadraticTight_simplex_list = []
for i in range(N):
rnd_seed = i
quadratic_simplex = simplex_method(Problem_quadratic, x0, bounds, max_iter = 100, \
constraints = 1, rnd_seed = i, max_f_eval = max_f_eval)
quadraticTight_simplex_list.append(quadratic_simplex)
print('10 simplex iterations completed')
quadraticTight_FiniteDiff = finite_Diff_Newton(Problem_quadratic, x0, bounds = bounds, \
con_weight = 100)
quadraticTight_BFGS = BFGS_optimizer(Problem_quadratic, x0, bounds = bounds, \
con_weight = 100)
quadraticTight_Adam = Adam_optimizer(Problem_quadratic, x0, method = 'forward', \
bounds = bounds, alpha = 0.4, \
beta1 = 0.2, beta2 = 0.1, \
max_f_eval = 100, con_weight = 100)
N_min_s = 15
init_radius = 2
method = 'Discrimination'
N = 10
quadraticTight_CUATRO_global_list = []
for i in range(N):
rnd_seed = i
quadratic_CUATRO_global = CUATRO(Problem_quadratic, x0, init_radius, bounds = bounds, \
N_min_samples = N_min_s, tolerance = 1e-10,\
beta_red = 0.9, rnd = rnd_seed, method = 'global', \
constr_handling = method)
quadraticTight_CUATRO_global_list.append(quadratic_CUATRO_global)
print('10 CUATRO global iterations completed')
N_min_s = 6
init_radius = 0.5
method = 'Fitting'
N = 10
quadraticTight_CUATRO_local_list = []
for i in range(N):
rnd_seed = i
quadratic_CUATRO_local = CUATRO(Problem_quadratic, x0, init_radius, bounds = bounds, \
N_min_samples = N_min_s, tolerance = 1e-10,\
beta_red = 0.9, rnd = rnd_seed, method = 'local', \
constr_handling = method)
quadraticTight_CUATRO_local_list.append(quadratic_CUATRO_local)
print('10 CUATRO local iterations completed')
N = 10
quadraticTight_SQSnobFit_list = []
for i in range(N):
quadratic_SQSnobFit = SQSnobFitWrapper().solve(Problem_quadratic, x0, bounds, \
maxfun = max_f_eval, constraints=2)
quadraticTight_SQSnobFit_list.append(quadratic_SQSnobFit)
print('10 SnobFit iterations completed')
N = 10
quadraticTight_DIRECT_list = []
quadratic_DIRECT_f = lambda x, grad: Problem_quadratic(x)
for i in range(N):
quadratic_DIRECT = DIRECTWrapper().solve(quadratic_DIRECT_f, x0, bounds, \
maxfun = max_f_eval, constraints=2)
quadraticTight_DIRECT_list.append(quadratic_DIRECT)
print('10 DIRECT iterations completed')
# with open('BayesQuadratic_list.pickle', 'rb') as handle:
# quadratic_Bayes_list = pickle.load(handle)
# quadratic_Bayes_list = fix_starting_points(quadratic_Bayes_list, x0, initial_output)
quadraticTight_DIRECT_list = fix_starting_points(quadraticTight_DIRECT_list, x0, initial_output)
quadraticTight_simplex_list = fix_starting_points(quadraticTight_simplex_list, x0, initial_output)
quadraticTight_pybobyqa['x_best_so_far'][0] = np.array(x0)
quadraticTight_pybobyqa['f_best_so_far'][0] = initial_output[0]
quadraticTight_pybobyqa['g_best_so_far'][0] = np.array(initial_output[1])
x_best_pyBbyqa = np.array(quadraticTight_pybobyqa['x_best_so_far'])
f_best_pyBbyqa = np.array(quadraticTight_pybobyqa['f_best_so_far'])
# x_ind_pyBbyqa = np.array(RB_pybobyqa['samples_at_iteration'])
# nbr_feval_pyBbyqa = len(RB_pybobyqa['f_store'])
x_best_finDiff = np.array(quadraticTight_FiniteDiff['x_best_so_far'])
f_best_finDiff = np.array(quadraticTight_FiniteDiff['f_best_so_far'])
x_ind_findDiff = np.array(quadraticTight_FiniteDiff['samples_at_iteration'])
x_best_BFGS = np.array(quadraticTight_BFGS['x_best_so_far'])
f_best_BFGS = np.array(quadraticTight_BFGS['f_best_so_far'])
x_ind_BFGS = np.array(quadraticTight_BFGS['samples_at_iteration'])
x_best_Adam = np.array(quadraticTight_Adam['x_best_so_far'])
f_best_Adam = np.array(quadraticTight_Adam['f_best_so_far'])
x_ind_Adam = np.array(quadraticTight_Adam['samples_at_iteration'])
plt.rcParams["font.family"] = "Times New Roman"
ft = int(15)
font = {'size': ft}
plt.rc('font', **font)
params = {'legend.fontsize': 12.5,
'legend.handlelength': 2}
plt.rcParams.update(params)
fig1 = plt.figure()
ax1 = fig1.add_subplot()
ax1.step(np.arange(len(f_best_pyBbyqa)), f_best_pyBbyqa, where = 'post', \
label = 'PyBobyqa')
ax1.step(x_ind_findDiff, f_best_finDiff, where = 'post', \
label = 'Newton Fin. Diff.')
ax1.step(x_ind_BFGS, f_best_BFGS, where = 'post', \
label = 'BFGS')
ax1.step(x_ind_Adam, f_best_Adam, where = 'post', \
label = 'Adam')
ax1.set_xlabel('Nbr. of function evaluations')
ax1.set_ylabel('Best function evaluation')
ax1.legend()
ax1.set_yscale('log')
fig1.savefig('Quadratic_plots_Tight/QuadraticTight_Deterministic_Convergence_plot.svg', format = "svg")
quadratic_f = lambda x, y: x**2 + 10*y**2 + x*y
quadratic_g = lambda x, y: 1 - x - y <= 0
oracle = RB(quadratic_f, ineq = [quadratic_g])
ax2, fig2 = trust_fig(oracle, bounds)
ax2.plot(x_best_pyBbyqa[:,0], x_best_pyBbyqa[:,1], '--x', \
label = 'PyBobyqa')
ax2.plot(x_best_finDiff[:,0], x_best_finDiff[:,1], '--x', \
label = 'Newton Fin. Diff.')
ax2.plot(x_best_BFGS[:,0], x_best_BFGS[:,1], '--x', \
label = 'BFGS')
ax2.plot(x_best_Adam[:,0], x_best_Adam[:,1], '--x', \
label = 'Adam')
ax2.set_xlabel('$x_1$')
ax2.set_ylabel('$x_2$')
ax2.legend()
ax2.set_xlim(bounds[0])
ax2.set_ylim(bounds[1])
fig2.savefig('Quadratic_plots_Tight/QuadraticTight_Deterministic_2Dspace_plot.svg', format = "svg")
fig1 = plt.figure()
ax1 = fig1.add_subplot()
ax2, fig2 = trust_fig(oracle, bounds)
for i in range(len(quadraticTight_CUATRO_global_list)):
x_best = np.array(quadraticTight_CUATRO_global_list[i]['x_best_so_far'])
f_best = np.array(quadraticTight_CUATRO_global_list[i]['f_best_so_far'])
x_ind = np.array(quadraticTight_CUATRO_global_list[i]['samples_at_iteration'])
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# REP
assert len(A._ID_dict['non-collapse']) == len(A._ID_dict['repairable'])
assert len(A._ID_dict['irreparable']) == 0
# cost
DV_COST = A._DV_dict['rec_cost']
# DS1
C_CDF = DV_COST.iloc[:, 0]
C_CDF = np.around(C_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 2500], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
C_CDF = DV_COST.iloc[:, 1]
C_CDF = np.around(C_CDF / 100., decimals=0) * 100.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 25000], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
C_CDF = DV_COST.iloc[:, 2]
C_CDF = np.around(C_CDF / 1000., decimals=0) * 1000.
vals, counts = np.unique(C_CDF, return_counts=True)
assert_allclose(vals, [0, 250000], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# time
DV_TIME = A._DV_dict['rec_time']
# DS1
T_CDF = DV_TIME.iloc[:, 0]
T_CDF = np.around(T_CDF, decimals=1)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 2.5], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS2
T_CDF = DV_TIME.iloc[:, 1]
T_CDF = np.around(T_CDF, decimals=0)
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 25], rtol=0.01)
t_prob = 0.3413445
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# DS3
T_CDF = DV_TIME.iloc[:, 2]
T_CDF = np.around(T_CDF / 10., decimals=0) * 10.
vals, counts = np.unique(T_CDF, return_counts=True)
assert_allclose(vals, [0, 250], rtol=0.01)
t_prob = 0.1586555
assert_allclose(counts / NC_count, [1. - t_prob, t_prob], rtol=0.1)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert_allclose(S[('event time', 'month')], A._TIME['month'] + 1)
assert_allclose(S[('event time', 'weekday?')], A._TIME['weekday?'])
assert_allclose(S[('event time', 'hour')], A._TIME['hour'])
assert_allclose(S[('inhabitants', '')], A._POP.iloc[:, 0])
assert SD.loc[('collapses', 'collapsed'), 'mean'] == pytest.approx(0.5,
rel=0.05)
assert SD.loc[('collapses', 'mode'), 'mean'] == 0.
assert SD.loc[('collapses', 'mode'), 'count'] == pytest.approx(5000,
rel=0.05)
assert SD.loc[('red tagged', ''), 'mean'] == pytest.approx(0.5, rel=0.05)
assert SD.loc[('red tagged', ''), 'count'] == pytest.approx(5000, rel=0.05)
for col in ['irreparable', 'cost impractical', 'time impractical']:
assert SD.loc[('reconstruction', col), 'mean'] == 0.
assert SD.loc[('reconstruction', col), 'count'] == pytest.approx(5000,
rel=0.05)
RC = deepcopy(S.loc[:, ('reconstruction', 'cost')])
RC_CDF = np.around(RC / 1000., decimals=0) * 1000.
vals, counts = np.unique(RC_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]) * 1000.)
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
RT = deepcopy(S.loc[:, ('reconstruction', 'time-parallel')])
RT_CDF = np.around(RT, decimals=0)
vals, counts = np.unique(RT_CDF, return_counts=True)
assert_allclose(vals, np.array([0, 2., 3., 25., 250., 300.]))
t_prob1 = 0.3413445 / 2.
t_prob2 = 0.1586555 / 2.
assert_allclose(counts / 10000.,
[t_prob2, t_prob1 / 2., t_prob1 / 2., t_prob1, t_prob2,
0.5], atol=0.01, rtol=0.1)
assert_allclose(S.loc[:, ('reconstruction', 'time-parallel')],
S.loc[:, ('reconstruction', 'time-sequential')])
CAS = deepcopy(S.loc[:, ('injuries', 'sev1')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.075, 0.15, 0.25, 0.3, 0.5, 1.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2, 2.5, 7, 5]) / 56., atol=0.01,
rtol=0.1)
CAS = deepcopy(S.loc[:, ('injuries', 'sev2')])
CAS_CDF = np.around(CAS, decimals=3)
vals, counts = np.unique(CAS_CDF, return_counts=True)
assert_allclose(vals, [0, 0.025, 0.05, 0.1, 2.25, 4.5, 9.])
assert_allclose(counts / 10000.,
np.array([35, 1, 3.5, 2.5, 2, 7, 5]) / 56., atol=0.01,
rtol=0.1)
def test_FEMA_P58_Assessment_EDP_uncertainty_basic():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_2.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_2.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
assert_allclose(thetas, [9.80665, 12.59198, 0.074081, 0.044932], rtol=0.02)
assert_allclose(betas, [0.25, 0.25, 0.3, 0.4], rtol=0.02)
rho = RV_EDP[0].RV_set.Rho()
rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
assert_allclose(rho, rho_target, atol=0.05)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
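# Reference value: probability that at least one story drift exceeds the 0.1
# collapse drift limit under the fitted bivariate lognormal PID distribution,
# evaluated with the closed-form orthotope integral mvn_od.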
col_target = 1.0 - mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer(
[0.3, 0.4], [0.3, 0.4]),
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000. for i in
range(8)]
DMG_1_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.1]))[
0]
DMG_2_PID = mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.1, 0.1]))[
0]
DMG_1_PFA = mvn_od(np.log([0.074081, 9.80665]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
DMG_2_PFA = mvn_od(np.log([0.074081, 12.59198]),
np.array([[1, 0.3], [0.3, 1]]) * np.outer([0.3, 0.25],
[0.3, 0.25]),
lower=np.log([1e-6, 9.80665]),
upper=np.log([0.1, np.inf]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert DMG_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert DMG_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert DMG_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021 and 1022
P_target = [
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log([0.074081, 0.044932]),
np.array([[1, 0.7], [0.7, 1]]) * np.outer([0.3, 0.4],
[0.3, 0.4]),
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2011 and 2012
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2021 and 2022
P_target = [
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, np.inf, 9.80665]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 9.80665, 9.80665]),
upper=np.log([0.1, np.inf, np.inf]))[0],
mvn_od(np.log([0.074081, 9.80665, 12.59198]),
np.array([[1.0, 0.3, 0.3], [0.3, 1.0, 0.6],
[0.3, 0.6, 1.0]]) * np.outer([0.3, 0.25, 0.25],
[0.3, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 9.80665]),
upper=np.log([0.1, 9.80665, np.inf]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(P_target, P_test, atol=0.02)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == pytest.approx(DMG_1_PID, rel=0.10)
assert RED_check[2] == pytest.approx(DMG_2_PID, rel=0.10)
assert RED_check[4] == pytest.approx(DMG_1_PFA, rel=0.10)
assert RED_check[6] == pytest.approx(DMG_2_PFA, rel=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log([0.074081, 0.044932, 9.80665, 12.59198]),
np.array(
[[1.0, 0.7, 0.3, 0.3], [0.7, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.6],
[0.3, 0.3, 0.6, 1.0]]) * np.outer(
[0.3, 0.4, 0.25, 0.25],
[0.3, 0.4, 0.25, 0.25]),
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[0.05488, 0.05488, 9.80665, 9.80665]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
def test_FEMA_P58_Assessment_EDP_uncertainty_detection_limit():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
This test differs from the basic case in having unreliable EDP values above
a certain limit - a typical feature of interstory drifts in dynamic
simulations. Such cases should not be a problem if the limits can be
estimated and they are specified as detection limits in input file.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_3.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_3.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
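# Rebuild the covariance of the log-EDPs from the fitted correlations and
# dispersions so the closed-form orthotope probabilities below can serve as
# reference values for the sampled results.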
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:, 2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_failed_analyses():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
Here we use EDP results with unique values assigned to failed analyses.
In particular, PID=1.0 and PFA=100.0 are used when an analysis fails.
These values shall be handled by detection limits of 10 and 100 for PID
and PFA, respectively.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_4.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_4.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 12.59198, 0.074081, 0.044932]
EDP_beta_target = [0.25, 0.25, 0.3, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.025)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = [
[1.0, 0.6, 0.3, 0.3],
[0.6, 1.0, 0.3, 0.3],
[0.3, 0.3, 1.0, 0.7],
[0.3, 0.3, 0.7, 1.0]]
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
assert_allclose(EDP_rho_test, EDP_rho_target, atol=0.15)
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(np.log(EDP_theta_test[2:]),
EDP_COV_test[2:,2:],
upper=np.log([0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == prob_approx(col_target, 0.03)
# DMG
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / 10000.
for i in range(8)]
DMG_1_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:,2:],
lower=np.log([0.05488, 1e-6]),
upper=np.log([0.1, 0.1]))[0]
DMG_2_PID = mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]),
upper=np.log([0.1, 0.1]))[0]
DMG_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
DMG_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0]
assert DMG_check[0] == pytest.approx(DMG_check[1], rel=0.01)
assert DMG_check[2] == pytest.approx(DMG_check[3], rel=0.01)
assert DMG_check[4] == pytest.approx(DMG_check[5], rel=0.01)
assert DMG_check[6] == pytest.approx(DMG_check[7], rel=0.01)
assert DMG_check[0] == prob_approx(DMG_1_PID, 0.03)
assert DMG_check[2] == prob_approx(DMG_2_PID, 0.03)
assert DMG_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert DMG_check[6] == prob_approx(DMG_2_PFA, 0.03)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 250., 1250.]
T_target = [0., 0.25, 1.25]
# PG 1011 and 1012
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.05488, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
]
for i in [0, 1]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 1021 and 1022
P_target = [
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 1e-6]), upper=np.log([0.1, 0.05488]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([0.05488, 0.05488]), upper=np.log([0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test[2:]), EDP_COV_test[2:, 2:],
lower=np.log([1e-6, 0.05488]), upper=np.log([0.05488, 0.1]))[0],
]
for i in [2, 3]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2011 and 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
]
for i in [4, 5]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# PG 2021 and 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, 9.80665, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 9.80665, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6]),
upper=np.log([9.80665, np.inf, 0.1, 0.1]))[0],
]
for i in [6, 7]:
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, i].values / 10., decimals=0) * 10.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(
np.around(DV_TIME.iloc[:, i].values * 100., decimals=0) / 100.,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / 10000.
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
prob_allclose(P_target, P_test, 0.04)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / 10000.).values
assert RED_check[0] == pytest.approx(RED_check[1], rel=0.01)
assert RED_check[2] == pytest.approx(RED_check[3], rel=0.01)
assert RED_check[4] == pytest.approx(RED_check[5], rel=0.01)
assert RED_check[6] == pytest.approx(RED_check[7], rel=0.01)
assert RED_check[0] == prob_approx(DMG_1_PID, 0.03)
assert RED_check[2] == prob_approx(DMG_2_PID, 0.03)
assert RED_check[4] == prob_approx(DMG_1_PFA, 0.03)
assert RED_check[6] == prob_approx(DMG_2_PFA, 0.03)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([9.80665, 9.80665, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = ((1.0 - SD.loc[('red tagged', ''), 'mean'])
* SD.loc[('red tagged', ''), 'count'] / 10000.)
assert P_no_RED_target == prob_approx(P_no_RED_test, 0.04)
def test_FEMA_P58_Assessment_EDP_uncertainty_3D():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.
In this test we look at the propagation of EDP values provided for two
different directions. (3D refers to the numerical model used for response
estimation.)
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_5.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_5.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = [9.80665, 8.65433, 12.59198, 11.11239,
0.074081, 0.063763, 0.044932, 0.036788]
EDP_beta_target = [0.25, 0.25, 0.25, 0.25, 0.3, 0.3, 0.4, 0.4]
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.array([
[1.0, 0.8, 0.6, 0.5, 0.3, 0.3, 0.3, 0.3],
[0.8, 1.0, 0.5, 0.6, 0.3, 0.3, 0.3, 0.3],
[0.6, 0.5, 1.0, 0.8, 0.3, 0.3, 0.3, 0.3],
[0.5, 0.6, 0.8, 1.0, 0.3, 0.3, 0.3, 0.3],
[0.3, 0.3, 0.3, 0.3, 1.0, 0.8, 0.7, 0.6],
[0.3, 0.3, 0.3, 0.3, 0.8, 1.0, 0.6, 0.7],
[0.3, 0.3, 0.3, 0.3, 0.7, 0.6, 1.0, 0.8],
[0.3, 0.3, 0.3, 0.3, 0.6, 0.7, 0.8, 1.0]])
large_rho_ids = np.where(EDP_rho_target >= 0.5)
small_rho_ids = np.where(EDP_rho_target < 0.5)
assert_allclose(EDP_rho_test[large_rho_ids], EDP_rho_target[large_rho_ids],
atol=0.1)
assert_allclose(EDP_rho_test[small_rho_ids], EDP_rho_target[small_rho_ids],
atol=0.2)
EDP_COV_test = EDP_rho_test * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
theta_PID = np.log(EDP_theta_target[4:])
COV_PID = EDP_COV_test[4:, 4:]
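    # Orientation for the checks below (added note): mvn_od presumably returns
    # the probability mass of a multivariate normal distribution inside the
    # orthotope defined by the lower/upper bounds. Working in log space turns
    # the fitted multivariate lognormal EDP model into a multivariate normal,
    # so each target is the probability that the EDPs fall in a given box,
    # e.g. one PID exceeding the 0.05488 fragility limit while all PIDs stay
    # below the 0.1 collapse limit.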
# COL
COL_check = A._COL.describe().T
col_target = 1.0 - mvn_od(theta_PID, COV_PID,
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
assert COL_check['mean'].values[0] == pytest.approx(col_target, rel=0.1, abs=0.05)
# DMG
realization_count = float(A._AIM_in['general']['realizations'])
DMG_check = [len(np.where(A._DMG.iloc[:, i] > 0.0)[0]) / realization_count for i in
range(8)]
DMG_1_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PID = mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([9.80665, 1e-6, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_1_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 9.80665, 1e-6, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_1_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 9.80665, 1e-6,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_2_2_PFA = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 9.80665,
1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([np.inf, np.inf, np.inf, np.inf,
0.1, 0.1, 0.1, 0.1]))[0]
DMG_ref = [DMG_1_1_PID, DMG_1_2_PID, DMG_2_1_PID, DMG_2_2_PID,
DMG_1_1_PFA, DMG_1_2_PFA, DMG_2_1_PFA, DMG_2_2_PFA]
assert_allclose(DMG_check, DMG_ref, rtol=0.10, atol=0.01)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# COST
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
C_target = [0., 249., 624., 1251., 1875.]
T_target = [0., 0.249, 0.624, 1.251, 1.875]
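    # Note on the loss checks below: the sampled costs (and times) are snapped
    # to a coarse grid with np.around, np.unique(..., return_counts=True) then
    # yields the empirical probability mass of each discrete consequence
    # value, counts below the cutoff are discarded as sampling noise, and the
    # remaining frequencies are normalized by the number of realizations
    # before being compared to the analytically derived P_target values.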
# PG 1011
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([0.05488, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 0].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 0].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1012
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 0.05488, 1e-6, 1e-6]),
upper=np.log([0.05488, 0.1, 0.05488, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 1].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 1].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1021
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.1, 0.05488, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.1, 0.1, 0.05488]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 0.05488, 1e-6]),
upper=np.log([0.05488, 0.05488, 0.1, 0.05488]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 2].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 2].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 1022
P_target = [
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log([0.1, 0.1, 0.1, 0.05488]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.1, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.1, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.1, 0.1, 0.05488, 0.1]))[0], ]),
np.sum([
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 1e-6, 0.05488, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.1, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([0.05488, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.1, 0.05488, 0.05488, 0.1]))[0],
mvn_od(theta_PID, COV_PID,
lower=np.log([1e-6, 0.05488, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.1, 0.05488, 0.1]))[0], ]),
mvn_od(theta_PID, COV_PID, lower=np.log([1e-6, 1e-6, 1e-6, 0.05488]),
upper=np.log([0.05488, 0.05488, 0.05488, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 3].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 5)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 3].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 5)]
P_test = P_test[np.where(P_test > 5)]
P_test = P_test / realization_count
assert_allclose(P_target[:-1], P_test[:4], atol=0.05)
assert_allclose(C_target[:-1], C_test[:4], rtol=0.001)
assert_allclose(T_target[:-1], T_test[:4], rtol=0.001)
# PG 2011
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 4].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 4].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2012
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 5].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 5].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target[:4], P_test[:4], atol=0.05)
assert_allclose(C_target[:4], C_test[:4], rtol=0.001)
assert_allclose(T_target[:4], T_test[:4], rtol=0.001)
# PG 2021
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 6].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 6].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# PG 2022
P_target = [
mvn_od(np.log(EDP_theta_test), EDP_COV_test,
lower=np.log([1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, 9.80665, 0.1, 0.1, 0.1, 0.1]))[0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
np.sum([
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 9.80665, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, np.inf, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[9.80665, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[np.inf, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0],
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 9.80665, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, np.inf, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[
0], ]),
mvn_od(np.log(EDP_theta_test), EDP_COV_test, lower=np.log(
[1e-6, 1e-6, 1e-6, 9.80665, 1e-6, 1e-6, 1e-6, 1e-6]),
upper=np.log(
[9.80665, 9.80665, 9.80665, np.inf, 0.1, 0.1, 0.1, 0.1]))[0],
]
C_test, P_test = np.unique(
np.around(DV_COST.iloc[:, 7].values / 3., decimals=0) * 3.,
return_counts=True)
C_test = C_test[np.where(P_test > 10)]
T_test, P_test = np.unique(np.around(DV_TIME.iloc[:, 7].values * 333.33333,
decimals=0) / 333.33333,
return_counts=True)
T_test = T_test[np.where(P_test > 10)]
P_test = P_test[np.where(P_test > 10)]
P_test = P_test / realization_count
assert_allclose(P_target, P_test, atol=0.05)
assert_allclose(C_target, C_test, rtol=0.001)
assert_allclose(T_target, T_test, rtol=0.001)
# RED TAG
RED_check = A._DV_dict['red_tag'].describe().T
RED_check = (RED_check['mean'] * RED_check['count'] / realization_count).values
assert_allclose(RED_check, DMG_ref, atol=0.02, rtol=0.10)
DMG_on = np.where(A._DMG > 0.0)[0]
RED_on = np.where(A._DV_dict['red_tag'] > 0.0)[0]
assert_allclose(DMG_on, RED_on)
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
upper=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 0.05488,
0.05488, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / realization_count
assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.03)


def test_FEMA_P58_Assessment_EDP_uncertainty_single_sample():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.

    In this test we provide only one structural response result and see if it
is properly handled as a deterministic value or a random EDP using the
additional sources of uncertainty.
"""
print()
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_6.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_6.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_theta_target = np.array(
[7.634901, 6.85613, 11.685934, 10.565554,
0.061364, 0.048515, 0.033256, 0.020352])
EDP_beta_target = EDP_theta_target * 1e-6
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
    assert RV_EDP[0].RV_set is None
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_test == 0.0
# -------------------------------------------------------------------------
# now do the same analysis, but consider additional uncertainty
# -------------------------------------------------------------------------
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
AU = A._AIM_in['general']['added_uncertainty']
AU['beta_m'] = 0.3
AU['beta_gm'] = 0.4
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
EDP_beta_target = np.sqrt((EDP_theta_target * 1e-6)**2. +
np.ones(8)*(0.3**2. + 0.4**2.))
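    # The target dispersion combines the (negligible) sample dispersion with
    # the added modeling and ground motion uncertainty in quadrature:
    # beta = sqrt(beta_sample^2 + beta_m^2 + beta_gm^2) with beta_m = 0.3 and
    # beta_gm = 0.4, matching the added_uncertainty inputs assigned above.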
assert_allclose(EDP_theta_test, EDP_theta_target, rtol=0.05)
assert_allclose(EDP_beta_test, EDP_beta_target, rtol=0.1)
    assert RV_EDP[0].RV_set is None
EDP_rho_target = np.zeros((8, 8))
np.fill_diagonal(EDP_rho_target, 1.0)
EDP_COV_test = EDP_rho_target * np.outer(EDP_beta_test, EDP_beta_test)
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
P_no_RED_target = mvn_od(np.log(EDP_theta_test), EDP_COV_test,
upper=np.log(
[9.80665, 9.80665, 9.80665, 9.80665, 0.05488,
0.05488, 0.05488, 0.05488]))[0]
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_target == pytest.approx(P_no_RED_test, abs=0.01)


def test_FEMA_P58_Assessment_EDP_uncertainty_zero_variance():
"""
Perform a loss assessment with customized inputs that focus on testing the
methods used to estimate the multivariate lognormal distribution of EDP
values. Besides the fitting, this test also evaluates the propagation of
EDP uncertainty through the analysis. Dispersions in other calculation
parameters are reduced to negligible levels. This allows us to test the
results against pre-defined reference values in spite of the randomness
involved in the calculations.

    This test simulates a scenario in which one of the EDPs is identical in
    all of the available samples. This results in zero variance in that
    dimension, and the purpose of the test is to ensure that such cases are
    handled appropriately.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_7.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_7.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())
assert np.all([rv.distribution == 'lognormal' for rv in RV_EDP])
thetas, betas = np.array([rv.theta for rv in RV_EDP]).T
EDP_theta_test = thetas
EDP_beta_test = betas
assert EDP_theta_test[4] == pytest.approx(0.061364, rel=0.05)
assert EDP_beta_test[4] < 0.061364 * 1e-3
rho = RV_EDP[0].RV_set.Rho()
EDP_rho_test = rho
EDP_rho_target = np.zeros((8, 8))
np.fill_diagonal(EDP_rho_target, 1.0)
assert_allclose(EDP_rho_test[4], EDP_rho_target[4], atol=1e-6)
# ------------------------------------------------- perform the calculation
A.define_loss_model()
A.calculate_damage()
A.calculate_losses()
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
P_no_RED_test = (1.0 - SD.loc[('red tagged', ''), 'mean']) * SD.loc[
('red tagged', ''), 'count'] / 10000.
assert P_no_RED_test == 0.0


def test_FEMA_P58_Assessment_QNT_uncertainty_independent():
"""
    Perform a loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component quantities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
randomness involved in the calculations.

    This test assumes that component quantities are independent.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_8.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_8.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# QNT
RV_QNT = list(A._QNT_dict.values())
QNT_theta_test, QNT_beta_test = np.array([rv.theta for rv in RV_QNT]).T
QNT_theta_target = np.ones(8) * 25.
QNT_beta_target = [25.0] * 4 + [0.4] * 4
assert_allclose(QNT_theta_test, QNT_theta_target, rtol=0.001)
assert_allclose(QNT_beta_test, QNT_beta_target, rtol=0.001)
for i in range(4):
assert RV_QNT[i].distribution == 'normal'
for i in range(4, 8):
assert RV_QNT[i].distribution == 'lognormal'
QNT_rho_target = [
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
QNT_rho_test = RV_QNT[0].RV_set.Rho()
assert_allclose(QNT_rho_test, QNT_rho_target, atol=0.001)
# ------------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
DMG_check = A._DMG.describe().T
mu_test = DMG_check['mean']
sig_test = DMG_check['std']
rho_test = A._DMG.corr()
mu_target_1 = 25.0 + 25.0 * norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))
sig_target_1 = np.sqrt(25.0 ** 2.0 * (
1 - norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0)) - (
norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))) ** 2.0))
mu_target_2 = np.exp(np.log(25.0) + 0.4 ** 2. / 2.)
sig_target_2 = np.sqrt(
(np.exp(0.4 ** 2.0) - 1.0) * np.exp(2 * np.log(25.0) + 0.4 ** 2.0))
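    # The targets above follow the standard moment formulas: for a normal
    # variable with mu = sigma = 25 truncated below at zero
    # (alpha = (0 - mu) / sigma = -1),
    #   E[X] = mu + sigma * lambda
    #   Var[X] = sigma^2 * (1 + alpha * lambda - lambda^2)
    # with lambda = phi(alpha) / (1 - Phi(alpha)); for a lognormal variable
    # with median 25 and log standard deviation 0.4,
    #   E[X] = exp(ln(25) + 0.4^2 / 2)
    #   Var[X] = (exp(0.4^2) - 1) * exp(2 * ln(25) + 0.4^2).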
assert_allclose(mu_test[:4], mu_target_1, rtol=0.05)
assert_allclose(mu_test[4:], mu_target_2, rtol=0.05)
assert_allclose(sig_test[:4], sig_target_1, rtol=0.05)
assert_allclose(sig_test[4:], sig_target_2, rtol=0.05)
assert_allclose(rho_test, QNT_rho_target, atol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
DV_COST = A._DV_dict['rec_cost'] / A._DMG
rho_DV_target = [
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
]
assert_allclose(DV_COST.corr(), rho_DV_target, atol=0.05)
# Uncertainty in decision variables is controlled by the correlation
# between damages
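    # The reference below comes from direct Monte Carlo: the total damaged
    # quantity in the first fragility group is the sum of four independent
    # truncated normal variables, and P_target_PID is the probability that
    # this total exceeds 90 units. Presumably the consequence functions in
    # this test assign a unit cost of 10 beyond that quantity, which is why
    # the same event shows up in the sample as a normalized cost below 10.01.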
RND = [tnorm.rvs(-1., np.inf, loc=25, scale=25, size=10000) for i in
range(4)]
RND = np.sum(RND, axis=0)
P_target_PID = np.sum(RND > 90.) / 10000.
P_test_PID = np.sum(DV_COST.iloc[:, 0] < 10.01) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.02)
RND = [np.exp(norm.rvs(loc=np.log(25.), scale=0.4, size=10000)) for i in
range(4)]
RND = np.sum(RND, axis=0)
P_target_PFA = np.sum(RND > 90.) / 10000.
P_test_PFA = np.sum(DV_COST.iloc[:, 4] < 10.01) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.02)
# the same checks can be performed for reconstruction time
DV_TIME = A._DV_dict['rec_time'] / A._DMG
assert_allclose(DV_TIME.corr(), rho_DV_target, atol=0.05)
P_test_PID = np.sum(DV_TIME.iloc[:, 0] < 0.0101) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.02)
P_test_PFA = np.sum(DV_TIME.iloc[:, 4] < 0.0101) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.02)
# injuries...
DV_INJ_dict = deepcopy(A._DV_dict['injuries'])
DV_INJ0 = (DV_INJ_dict[0] / A._DMG).describe()
DV_INJ1 = (DV_INJ_dict[1] / A._DMG).describe()
assert_allclose(DV_INJ0.loc['mean', :][:4], np.ones(4) * 0.025, rtol=0.001)
assert_allclose(DV_INJ0.loc['mean', :][4:], np.ones(4) * 0.1, rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][:4], np.ones(4) * 0.005, rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][4:], np.ones(4) * 0.02, rtol=0.001)
assert_allclose(DV_INJ0.loc['std', :], np.zeros(8), atol=1e-4)
assert_allclose(DV_INJ1.loc['std', :], np.zeros(8), atol=1e-4)
# and for red tag...
# Since every component is damaged in every realization, the red tag
# results should all be 1.0
assert_allclose(A._DV_dict['red_tag'], np.ones((10000, 8)))
# ------------------------------------------------------------------------
A.aggregate_results()
# ------------------------------------------------ check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert SD.loc[('inhabitants', ''), 'mean'] == 20.0
assert SD.loc[('inhabitants', ''), 'std'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'mean'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'std'] == 0.0
assert SD.loc[('red tagged', ''), 'mean'] == 1.0
assert SD.loc[('red tagged', ''), 'std'] == 0.0
assert np.corrcoef(S.loc[:, ('reconstruction', 'cost')],
S.loc[:, ('reconstruction', 'time-sequential')])[
0, 1] == pytest.approx(1.0)
assert_allclose(A._DV_dict['rec_cost'].sum(axis=1),
S.loc[:, ('reconstruction', 'cost')])
assert_allclose(A._DV_dict['rec_time'].sum(axis=1),
S.loc[:, ('reconstruction', 'time-sequential')])
assert_allclose(A._DV_dict['rec_time'].max(axis=1),
S.loc[:, ('reconstruction', 'time-parallel')])
assert_allclose(A._DV_dict['injuries'][0].sum(axis=1),
S.loc[:, ('injuries', 'sev1')])
assert_allclose(A._DV_dict['injuries'][1].sum(axis=1),
S.loc[:, ('injuries', 'sev2')])


def test_FEMA_P58_Assessment_QNT_uncertainty_dependencies():
"""
    Perform a loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component quantities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
randomness involved in the calculations.

    This test checks if dependencies between component quantities are handled
appropriately.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_8.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_8.out"
for dep in ['FG', 'PG', 'DIR', 'LOC']:
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A._AIM_in['dependencies']['quantities'] = dep
A.define_random_variables()
# ---------------------------------------------- check random variables
# QNT
RV_QNT = list(A._QNT_dict.values())
QNT_theta_test, QNT_beta_test = np.array([rv.theta for rv in RV_QNT]).T
QNT_theta_target = np.ones(8) * 25.
QNT_beta_target = [25.0] * 4 + [0.4] * 4
assert_allclose(QNT_theta_test, QNT_theta_target, rtol=0.001)
assert_allclose(QNT_beta_test, QNT_beta_target, rtol=0.001)
for i in range(4):
assert RV_QNT[i].distribution == 'normal'
for i in range(4, 8):
assert RV_QNT[i].distribution == 'lognormal'
if dep == 'FG':
QNT_rho_target = np.array([
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
])
elif dep == 'PG':
QNT_rho_target = np.array([
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
])
elif dep == 'DIR':
QNT_rho_target = np.array([
[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1],
])
elif dep == 'LOC':
QNT_rho_target = np.array([
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1],
])
QNT_rho_test = RV_QNT[0].RV_set.Rho()
assert_allclose(QNT_rho_test, QNT_rho_target, atol=0.001)
# ---------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# -------------------------------------------- check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
# Because the correlations are enforced after truncation, the marginals
# shall be unaffected by the correlation structure. Hence, the
# distribution of damaged quantities within a PG shall be identical in
# all dep cases.
# The specified dependencies are apparent in the correlation between
# damaged quantities in various PGs.
DMG_check = A._DMG.describe().T
mu_test = DMG_check['mean']
sig_test = DMG_check['std']
rho_test = A._DMG.corr()
mu_target_1 = 25.0 + 25.0 * norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))
sig_target_1 = np.sqrt(25.0 ** 2.0 * (
1 - norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0)) - (
norm.pdf(-1.0) / (1.0 - norm.cdf(-1.0))) ** 2.0))
mu_target_2 = np.exp(np.log(25.0) + 0.4 ** 2. / 2.)
sig_target_2 = np.sqrt(
(np.exp(0.4 ** 2.0) - 1.0) * np.exp(2 * np.log(25.0) + 0.4 ** 2.0))
assert_allclose(mu_test[:4], mu_target_1, rtol=0.05)
assert_allclose(mu_test[4:], mu_target_2, rtol=0.05)
assert_allclose(sig_test[:4], sig_target_1, rtol=0.05)
assert_allclose(sig_test[4:], sig_target_2, rtol=0.05)
assert_allclose(rho_test, QNT_rho_target, atol=0.05)
# ---------------------------------------------------------------------
A.calculate_losses()
# ---------------------------------------------- check loss calculation
DV_COST = A._DV_dict['rec_cost'] / A._DMG
        # After the DVs are normalized by the damaged quantities, the resulting
        # samples show the correlations between the DV measures (such as
        # reconstruction cost) per unit of damaged component. Because these
        # consequences are perfectly correlated among the components of a
        # fragility group by definition, the quadrants on the main diagonal
        # will follow the matrix presented below. If there are additional
        # correlations defined between component quantities in different
        # fragility groups (i.e. the off-diagonal quadrants of the rho matrix),
        # those will be preserved in the consequences. Therefore, the
        # off-diagonal quadrants need to be updated with those from
        # QNT_rho_target to get an appropriate rho_DV_target.
rho_DV_target = np.array([
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
])
rho_DV_target[:4, 4:] = QNT_rho_target[:4, 4:]
rho_DV_target[4:, :4] = QNT_rho_target[:4, 4:]
assert_allclose(DV_COST.corr(), rho_DV_target, atol=0.05)
# uncertainty in decision variables is controlled by the correlation
# between damages
P_test_PID = np.sum(DV_COST.iloc[:, 0] < 10.01) / 10000.
P_test_PFA = np.sum(DV_COST.iloc[:, 4] < 10.01) / 10000.
# the first component quantities follow a truncated multivariate normal
# distribution
mu_target_PID = mu_target_1 * 4.
sig_target_PID = np.sqrt(
sig_target_1 ** 2. * np.sum(QNT_rho_target[:4, :4]))
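        # The iteration below appears to back out the parameters of an
        # equivalent untruncated normal (mu_target_PID_b, sig_target_PID_b)
        # whose truncation at zero reproduces the target mean and standard
        # deviation of the summed damaged quantities; alpha is the
        # standardized truncation limit and the loop stops once alpha has
        # converged (or after 10 passes).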
mu_target_PID_b = mu_target_PID
sig_target_PID_b = sig_target_PID
alpha = 100.
i = 0
while (np.log(
np.abs(alpha / (mu_target_PID_b / sig_target_PID_b))) > 0.001) and (
i < 10):
alpha = -mu_target_PID_b / sig_target_PID_b
mu_target_PID_b = mu_target_PID - sig_target_PID_b * norm.pdf(
alpha) / (1.0 - norm.cdf(alpha))
sig_target_PID_b = sig_target_PID / np.sqrt(
(1.0 + alpha * norm.pdf(alpha) / (1.0 - norm.cdf(alpha))))
i += 1
xi = (90 - mu_target_PID_b) / sig_target_PID_b
P_target_PID = 1.0 - (norm.cdf(xi) - norm.cdf(alpha)) / (
1.0 - norm.cdf(alpha))
assert P_target_PID == pytest.approx(P_test_PID, rel=0.05)
# the second component quantities follow a multivariate lognormal
# distribution
mu_target_PFA = mu_target_2 * 4.
sig_target_PFA = np.sqrt(
sig_target_2 ** 2. * np.sum(QNT_rho_target[4:, 4:]))
sig_target_PFA_b = np.sqrt(
np.log(sig_target_PFA ** 2.0 / mu_target_PFA ** 2.0 + 1.0))
mu_target_PFA_b = np.log(mu_target_PFA) - sig_target_PFA_b ** 2.0 / 2.
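        # Standard lognormal moment matching: a single lognormal with
        # log-space parameters (mu_target_PFA_b, sig_target_PFA_b) is fitted
        # so that its mean and variance equal those of the sum of the four
        # correlated lognormal quantities; the exceedance probability at 90
        # units is then evaluated on that fitted distribution.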
xi = np.log(90)
P_target_PFA = 1.0 - norm.cdf(xi, loc=mu_target_PFA_b,
scale=sig_target_PFA_b)
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.05)
# the same checks can be performed for reconstruction time
DV_TIME = A._DV_dict['rec_time'] / A._DMG
assert_allclose(DV_TIME.corr(), rho_DV_target, atol=0.05)
P_test_PID = np.sum(DV_TIME.iloc[:, 0] < 0.0101) / 10000.
assert P_target_PID == pytest.approx(P_test_PID, rel=0.05)
P_test_PFA = np.sum(DV_TIME.iloc[:, 4] < 0.0101) / 10000.
assert P_target_PFA == pytest.approx(P_test_PFA, rel=0.05)
# injuries...
# Every component is damaged in every realization in this test. Once
# normalized by the quantity of components, the number of injuries
# shall be identical and unaffected by the correlation between
# component quantities.
DV_INJ_dict = deepcopy(A._DV_dict['injuries'])
DV_INJ0 = (DV_INJ_dict[0] / A._DMG).describe()
DV_INJ1 = (DV_INJ_dict[1] / A._DMG).describe()
assert_allclose(DV_INJ0.loc['mean', :][:4], np.ones(4) * 0.025,
rtol=0.001)
assert_allclose(DV_INJ0.loc['mean', :][4:], np.ones(4) * 0.1,
rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][:4], np.ones(4) * 0.005,
rtol=0.001)
assert_allclose(DV_INJ1.loc['mean', :][4:], np.ones(4) * 0.02,
rtol=0.001)
assert_allclose(DV_INJ0.loc['std', :], np.zeros(8), atol=1e-4)
assert_allclose(DV_INJ1.loc['std', :], np.zeros(8), atol=1e-4)
# and for red tag...
# since every component is damaged in every realization, the red tag
# results should all be 1.0
assert_allclose(A._DV_dict['red_tag'], np.ones((10000, 8)))
# ---------------------------------------------------------------------
A.aggregate_results()
# -------------------------------------------- check result aggregation
S = A._SUMMARY
SD = S.describe().T
assert SD.loc[('inhabitants', ''), 'mean'] == 20.0
assert SD.loc[('inhabitants', ''), 'std'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'mean'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'std'] == 0.0
assert SD.loc[('red tagged', ''), 'mean'] == 1.0
assert SD.loc[('red tagged', ''), 'std'] == 0.0
assert np.corrcoef(S.loc[:, ('reconstruction', 'cost')],
S.loc[:, ('reconstruction', 'time-sequential')])[
0, 1] == pytest.approx(1.0)
assert_allclose(A._DV_dict['rec_cost'].sum(axis=1),
S.loc[:, ('reconstruction', 'cost')])
assert_allclose(A._DV_dict['rec_time'].sum(axis=1),
S.loc[:, ('reconstruction', 'time-sequential')])
assert_allclose(A._DV_dict['rec_time'].max(axis=1),
S.loc[:, ('reconstruction', 'time-parallel')])
assert_allclose(A._DV_dict['injuries'][0].sum(axis=1),
S.loc[:, ('injuries', 'sev1')])
assert_allclose(A._DV_dict['injuries'][1].sum(axis=1),
S.loc[:, ('injuries', 'sev2')])


def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies(dep='IND'):
"""
    Perform a loss assessment with customized inputs that focus on testing the
propagation of uncertainty in component fragilities. Dispersions in other
calculation parameters are reduced to negligible levels. This allows us to
test the results against pre-defined reference values in spite of the
    randomness involved in the calculations.

    This test checks if dependencies between component fragilities (selected
    through the dep argument) are handled appropriately.
"""
print()
idx = pd.IndexSlice
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_9.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_9.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A._AIM_in['dependencies']['fragilities'] = dep
A.define_random_variables()
# ---------------------------------------------- check random variables
RV_FF = list(A._FF_dict.values())
fr_names = np.unique([rv.name[3:12] for rv in RV_FF])
fr_keys = {}
for fr_name in fr_names:
fr_list = [rv.name for rv in RV_FF if fr_name in rv.name]
fr_keys.update({fr_name: fr_list})
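    # fr_keys groups the fragility RV names by fragility group; characters
    # 3:12 of the RV name identify the group.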
dimtag_target = [4 * 2 * 3, 20 * 2 * 3 * 3, 20 * 2 * 3 * 3,
20 * 2 * 3 * 3]
theta_target = [[0.048, 0.096], [0.048, 0.072, 0.096],
[2.9419, 5.8840, 11.7680], [2.9419, 5.8840, 11.7680]]
sig_target = [[0.5, 0.25], [1.0, 0.5, 0.25], [1.0, 0.5, 0.25],
[1.0, 0.5, 0.25]]
if dep == 'IND':
rho_target = np.zeros((24, 24))
np.fill_diagonal(rho_target, 1.0)
rho_sum = 360
elif dep == 'PG':
rho_target = np.ones((24, 24))
rho_sum = 360 ** 2.
elif dep == 'DIR':
rho_target = [
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]]
rho_sum = (20 * 2 * 3) ** 2. * 3
elif dep == 'LOC':
rho_target = [
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = (20 * 3) ** 2. * (2 * 9)
elif dep in ['ATC', 'CSG']:
rho_target = [
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = (20 * 3) ** 2. * (2 * 3)
elif dep == 'DS':
rho_target = [
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.]]
rho_sum = 3 ** 2 * (20 * 2 * 3)
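    # For the larger (360 x 360) correlation matrices only the number of unit
    # entries is checked below: rho_sum = 360 for the independent case (an
    # identity matrix), 360 ** 2 for an all-ones matrix, and intermediate
    # block sums for the other dependency assumptions.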
for k, key in enumerate(sorted(fr_keys.keys())):
RV_FF_i = [A._FF_dict[rv_i] for rv_i in fr_keys[key]]
assert len(RV_FF_i) == dimtag_target[k]
FF_theta_test, FF_beta_test = np.array([rv.theta for rv in RV_FF_i]).T
if k == 0:
FF_theta_test = pd.DataFrame(
np.reshape(FF_theta_test, (12, 2))).describe()
FF_beta_test = pd.DataFrame(
np.reshape(FF_beta_test, (12, 2))).describe()
else:
FF_theta_test = pd.DataFrame(
np.reshape(FF_theta_test, (120, 3))).describe()
FF_beta_test = pd.DataFrame(
np.reshape(FF_beta_test, (120, 3))).describe()
assert_allclose(FF_theta_test.loc['mean', :].values, theta_target[k],
rtol=1e-4)
assert_allclose(FF_theta_test.loc['std', :].values,
np.zeros(np.array(theta_target[k]).shape),
atol=1e-10)
assert_allclose(FF_beta_test.loc['mean', :].values, sig_target[k],
rtol=1e-4)
assert_allclose(FF_beta_test.loc['std', :].values,
np.zeros(np.array(sig_target[k]).shape), atol=1e-10)
rho_test = RV_FF_i[0].RV_set.Rho(fr_keys[fr_names[k]])
if k == 0:
# we perform the detailed verification of rho for the first case
# only (because the others are 360x360 matrices)
assert_allclose(rho_test, rho_target)
else:
# for the other cases we check the number of ones in the matrix
assert np.sum(rho_test) == rho_sum
# ---------------------------------------------------------------------
A.define_loss_model()
A.calculate_damage()
# -------------------------------------------- check damage calculation
# COL
# there shall be no collapses
assert A._COL.describe().T['mean'].values == 0
# DMG
DMG_check = A._DMG
# start with checking the damage correlations
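    # Note on the reference matrices below: the 1.0 entries reflect the
    # prescribed dependency model, while the values of about -0.1 between
    # damage states of the same performance group are presumably the mild
    # negative correlation induced by the damage states being mutually
    # exclusive in each realization.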
for k in range(4):
DMG_corr = DMG_check.loc[:, idx[k + 1, :, :]].corr()
if k == 0:
DMG_corr = DMG_corr.iloc[:8, :8]
if dep in ['IND', 'ATC', 'CSG', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
[ 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1],
[-0.1, 1.0,-0.1, 1.0,-0.1, 1.0,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 1.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 1.0],
[ 1.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0],
[ 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 1.0],
])
if k == 1:
DMG_corr = DMG_corr.iloc[:12, :12]
if dep in ['IND', 'ATC', 'CSG', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0],
])
if k == 2:
DMG_corr = DMG_corr.iloc[:20, :20]
if dep in ['IND', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1, 1.0,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1,-0.1, 0.8, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1,-0.1, 0.5, 0.6, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1,-0.1, 0.5, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.6, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep in ['ATC', 'CSG']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.5, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 1.0, 0.5,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.5, 0.5, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
if k == 3:
DMG_corr = DMG_corr.iloc[:20, :20]
if dep in ['IND', 'DS']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.0, 0.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 1.0, 0.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.0, 0.0, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'PG':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1],
[-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1,-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1],
[-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1,-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'DIR':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1,-0.1, 0.8, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1,-0.1, 0.8, 0.7, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1,-0.1, 0.7, 0.6, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.6, 0.6,-0.1,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.5,-0.1,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep == 'LOC':
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 0.7, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
elif dep in ['ATC', 'CSG']:
DMG_corr_ref = np.array([
[ 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,-0.1,-0.1,-0.1,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 1.0, 0.8, 0.7,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.8, 1.0, 0.6,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1, 0.7, 0.6, 1.0,-0.1],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,-0.1,-0.1,-0.1,-0.1, 1.0],
])
for i in range(len(DMG_corr.index)):
for j in range(len(DMG_corr.columns)):
ref_i = DMG_corr_ref[i, j]
if ref_i != 0.0:
if ref_i > 0.0:
assert DMG_corr.iloc[i, j] > 0.97 * ref_i
else:
assert DMG_corr.iloc[i, j] < 0.0
else:
assert DMG_corr.iloc[i, j] == pytest.approx(ref_i,
abs=0.15)
# then check the distribution of damage within each performance group
EDP_list = np.array(
[[[0.080000, 0.080000], [0.080000, 0.080000], [0.040000, 0.040000]],
[[7.845320, 7.845320], [7.845320, 7.845320],
[2.942000, 2.942000]]])
fr_keys = []
for key in A._RV_dict.keys():
if 'FR' in key:
fr_keys.append(key)
for k, key in enumerate(sorted(fr_keys)):
# print(key)
RV_FR = A._RV_dict[key]
# only third of the data is unique because of the 3 stories
rel_len = int(len(RV_FR._dimension_tags) / 3)
COV_test = RV_FR.COV[:rel_len, :rel_len]
theta_test = RV_FR.theta[:rel_len]
lims = np.unique(theta_test)
ndims = len(lims)
if k in [2, 3]:
ndims += 2
if (dep in ['DS', 'IND']) or k > 1:
DMG_vals = [[[0., 5., 7.5, 12.5, 17.5, 20., 25.], [0., 25.]],
[[0., 1.5, 3., 4.5, 6., 7.5, 9., 10.5, 12., 13.5,
15.,
16.5, 18., 19.5, 21., 22.5, 24., 25.5, 27., 28.5,
30.0],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.,
11., 12., 13., 14., 15., 16., 17., 18., 19.,
20.]]]
else:
DMG_vals = [[[0., 25.], [0., 25.]],
[[0., 30.], [0., 20.]]]
DMG_vals = np.array(DMG_vals)
for story in [0, 1, 2]:
for dir_ in [0, 1]:
# print(story, dir_)
idx = pd.IndexSlice
DMG_check_FG = DMG_check.loc[:, idx[k + 1, :, :]]
DMG_check_PG = DMG_check_FG.iloc[:,
story * 2 * ndims + dir_ * ndims:story * 2 * ndims + (
dir_ + 1) * ndims]
DMG_val_test = np.unique(
np.around(DMG_check_PG.values * 10., decimals=0) / 10.,
return_counts=True)
DMG_val_test = DMG_val_test[0][DMG_val_test[1] > 10]
# only check at most the first 10 elements, because the
# higher values have extremely low likelihood
ddim = min(len(DMG_val_test), 10)
DMG_val_ref = DMG_vals[np.sign(k), dir_]
for v in DMG_val_test:
assert v in DMG_val_ref
# additional tests for mutually exclusive DS2 in FG3
if (k == 2) and (dep not in ['DS', 'IND']):
DMG_tot = [[0., 30.], [0., 20.]][dir_]
DMG_DS2_test = DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1)
# the proportion of each DS in DS2 shall follow the
# pre-assigned weights
ME_test = \
DMG_check_PG.iloc[DMG_DS2_test.values > 0].iloc[:,
[1, 2, 3]].describe().T['mean'].values / DMG_tot[-1]
assert_allclose(ME_test, [0.5, 0.3, 0.2], atol=0.01)
# the sum of DMG with correlated CSGs shall be either 0.
# or the total quantity
DMG_DS2_test = np.unique(
np.around(DMG_DS2_test * 10., decimals=0) / 10.,
return_counts=True)
DMG_DS2_test = DMG_DS2_test[0][DMG_DS2_test[1] > 10]
assert_allclose(DMG_DS2_test, DMG_tot, atol=0.01)
# additional tests for simultaneous DS2 in FG4
if (k == 3) and (dep not in ['DS', 'IND']):
DMG_tot = [30.0, 20.0][dir_]
DMG_DS2_test = DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1)
# the proportion of each DS in DS2 shall follow the
# pre-assigned weights considering replacement
SIM_test = \
DMG_check_PG.iloc[DMG_DS2_test.values > 0].iloc[:,
[1, 2, 3]].describe().T['mean'].values / DMG_tot
P_rep = 0.5 * 0.7 * 0.8
SIM_ref = np.array([0.5, 0.3, 0.2]) * (
1.0 + P_rep / (1.0 - P_rep))
assert_allclose(SIM_test, SIM_ref, atol=0.02)
# the sum of DMG with correlated CSGs shall be either
# 0. or more than the total quantity
DMG_DS2_test = DMG_DS2_test.iloc[
DMG_DS2_test.values > 0]
# Even with perfect correlation, the generated random
# samples will not be identical. Hence, one of the 20
# CSGs in FG4, very rarely will belong to a different
# DS than the rest. To avoid false negatives, we test
# the third smallest value.
assert DMG_DS2_test.sort_values().iloc[
2] >= DMG_tot * 0.99
assert np.max(DMG_DS2_test.values) > DMG_tot
# the first component has 3-1 CSGs in dir 1 and 2,
# respectively
if k == 0:
dir_len = int(rel_len * 3 / 4)
# the other components have 20-20 CSGs in dir 1 and 2,
# respectively
else:
dir_len = int(rel_len / 2)
if dir_ == 0:
theta_t = theta_test[:dir_len]
COV_t = COV_test[:dir_len, :dir_len]
else:
theta_t = theta_test[dir_len:]
COV_t = COV_test[dir_len:, dir_len:]
lim_ds1 = np.where(theta_t == lims[0])[0]
lim_ds2 = np.where(theta_t == lims[1])[0]
if k > 0:
lim_ds3 = np.where(theta_t == lims[2])[0]
ndim = len(theta_t)
EDP = EDP_list[int(k > 1), story, dir_]*1.2
DS_ref_all = []
DS_ref_any = []
DS_test_all = []
DS_test_any = []
# DS0
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=np.log(np.ones(ndim) * EDP),
upper=np.ones(ndim) * np.inf)[0])
if k == 0:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.],
axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.],
axis=0)) / 10000.)
else:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.,
DMG_check_PG.iloc[:, 3] == 0.,
DMG_check_PG.iloc[:, 4] == 0.],
axis=0)) / 10000.)
# DS1
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
lower_lim[lim_ds2] = np.log(EDP)
upper_lim[lim_ds1] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3] = np.log(EDP)
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=lower_lim, upper=upper_lim)[
0])
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
lower_lim[lim_ds2[0]] = np.log(EDP)
upper_lim[lim_ds1[0]] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3[0]] = np.log(EDP)
P_any = mvn_od(np.log(theta_t), COV_t, lower=lower_lim,
upper=upper_lim)[0]
if (dep in ['DS', 'IND']):
P_any = 1.0 - (1.0 - P_any) ** len(lim_ds1)
DS_ref_any.append(P_any)
if k == 0:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.], axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.], axis=0)) / 10000.)
else:
DS_test_all.append(np.sum(np.all(
[DMG_check_PG.iloc[:, 0] > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.,
DMG_check_PG.iloc[:, 3] == 0.,
DMG_check_PG.iloc[:, 4] == 0.], axis=0)) / 10000.)
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 0] > 0.],
axis=0)) / 10000.)
# DS2
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds2] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3] = np.log(EDP)
if k < 3:
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=lower_lim,
upper=upper_lim)[0])
else:
DS_ref_all.append(0.0)
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds2[0]] = np.log(EDP)
if k > 0:
lower_lim[lim_ds3[0]] = np.log(EDP)
P_any = mvn_od(np.log(theta_t), COV_t, lower=lower_lim,
upper=upper_lim)[0]
if (dep in ['DS', 'IND']):
P_any = 1.0 - (1.0 - P_any) ** len(lim_ds1)
DS_ref_any.append(P_any)
if k == 0:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] >
DMG_val_ref[-1] - 0.1],
axis=0)) / 10000.)
elif k == 1:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] >
DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 2] == 0.],
axis=0)) / 10000.)
elif k == 2:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, [1, 2, 3]].sum(
axis=1) > DMG_val_ref[-1] - 0.1,
DMG_check_PG.iloc[:, 4] == 0.],
axis=0)) / 10000.)
elif k == 3:
# skip this case
DS_test_all.append(0.0)
if k < 2:
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 1] > 0.],
axis=0)) / 10000.)
else:
DS_test_any.append(np.sum(np.all(
[DMG_check_PG.iloc[:, [1, 2, 3]].sum(axis=1) > 0.],
axis=0)) / 10000.)
# DS3
if k > 0:
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds3] = np.log(EDP)
DS_ref_all.append(mvn_od(np.log(theta_t), COV_t,
lower=lower_lim,
upper=upper_lim)[0])
lower_lim = -np.ones(ndim) * np.inf
upper_lim = np.ones(ndim) * np.inf
upper_lim[lim_ds3[0]] = np.log(EDP)
P_any = mvn_od(np.log(theta_t), COV_t, lower=lower_lim,
upper=upper_lim)[0]
if (dep in ['DS', 'IND']):
P_any = 1.0 - (1.0 - P_any) ** len(lim_ds1)
DS_ref_any.append(P_any)
if k == 1:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] >
DMG_val_ref[-1] - 0.1],
axis=0)) / 10000.)
else:
DS_test_all.append(
np.sum(np.all([DMG_check_PG.iloc[:, 0] == 0.,
DMG_check_PG.iloc[:, 1] == 0.,
DMG_check_PG.iloc[:, 2] == 0.,
DMG_check_PG.iloc[:, 3] == 0.,
DMG_check_PG.iloc[:, 4] >
DMG_val_ref[-1] - 0.1],
axis=0)) / 10000.)
if k == 1:
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 2] > 0.],
axis=0)) / 10000.)
else:
DS_test_any.append(np.sum(
np.all([DMG_check_PG.iloc[:, 4] > 0.],
axis=0)) / 10000.)
assert_allclose(DS_ref_all, DS_test_all, atol=0.02)
assert_allclose(DS_ref_any, DS_test_any, atol=0.02)
# ---------------------------------------------------------------------
A.calculate_losses()
# ---------------------------------------------- check loss calculation
# No additional uncertainty is introduced when it comes to losses in
# this test. The decision variables and the damaged quantities shall
# follow the same distribution and have the same correlation structure.
# The damaged quantities have already been verified, so now we use them
# as reference values for testing the decision variables.
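    # With negligible dispersion in the consequence functions, each decision variable is
    # (approximately) the damaged quantity scaled by a constant unit consequence, so
    # corr(DV) should match corr(DMG) and DV / DMG should have ~zero dispersion within a
    # damage state -- these are the two properties asserted below.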
# COST and TIME and INJ
DV_COST = A._DV_dict['rec_cost']
DV_TIME = A._DV_dict['rec_time']
DV_INJ_dict = deepcopy(A._DV_dict['injuries'])
DV_INJ0 = DV_INJ_dict[0]
DV_INJ1 = DV_INJ_dict[1]
DMG_check = A._DMG
for k in range(4):
# Start with checking the correlations...
dmg = DMG_check.loc[:, (DMG_check != 0.0).any(axis=0)]
dmg_corr = dmg.loc[:, idx[k + 1, :, :]].corr()
for dv in [DV_COST, DV_TIME, DV_INJ0, DV_INJ1]:
dv = dv.loc[:, (dv != 0.0).any(axis=0)]
dv_corr = dv.loc[:, idx[k + 1, :, :]].corr()
assert_allclose(dmg_corr.values, dv_corr.values, atol=0.001)
# then check the distribution.
# After normalizing with the damaged quantities all decision
# variables in a given DS shall have the same value.
dv = ((dv / dmg).describe().T).fillna(0.0)
assert_allclose(dv['std'], np.zeros(len(dv.index)), atol=1.0)
# red tags require special checks
for f, fg_id in enumerate(sorted(A._FG_dict.keys())):
dims = [2, 3, 5, 5][f]
# take the total quantity of each performance group
FG = A._FG_dict[fg_id]
qnt = []
for PG in FG._performance_groups:
if isinstance(PG._quantity, RandomVariable):
qnt.append((PG._quantity.samples[:dims]).flatten())
else:
qnt.append(np.ones(dims) * PG._quantity)
qnt = np.array(qnt).flatten()
# flag the samples where the damage exceeds the pre-defined limit
# for red tagging
dmg = DMG_check.loc[:, idx[FG._ID, :, :]]
red_ref = dmg > 0.489 * qnt
# collect the red tag results from the analysis
red_test = A._DV_dict['red_tag'].loc[:, idx[FG._ID, :, :]]
# compare
red_diff = (red_ref - red_test).describe().T
assert_allclose(red_diff['mean'].values, 0.)
assert_allclose(red_diff['std'].values, 0.)
# ---------------------------------------------------------------------
A.aggregate_results()
# -------------------------------------------- check result aggregation
# Aggregate results are checked in detail by other tests.
# Here we only focus on some simple checks to make sure the results
# make sense.
S = A._SUMMARY
SD = S.describe().T
assert SD.loc[('inhabitants', ''), 'mean'] == 10.0
assert SD.loc[('inhabitants', ''), 'std'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'mean'] == 0.0
assert SD.loc[('collapses', 'collapsed'), 'std'] == 0.0
assert_allclose(A._DV_dict['rec_cost'].sum(axis=1),
S.loc[:, ('reconstruction', 'cost')])
assert_allclose(A._DV_dict['rec_time'].sum(axis=1),
S.loc[:, ('reconstruction', 'time-sequential')])
assert_allclose(A._DV_dict['rec_time'].max(axis=1),
S.loc[:, ('reconstruction', 'time-parallel')])
assert_allclose(A._DV_dict['injuries'][0].sum(axis=1),
S.loc[:, ('injuries', 'sev1')])
assert_allclose(A._DV_dict['injuries'][1].sum(axis=1),
S.loc[:, ('injuries', 'sev2')])
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_PG():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('PG')
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_DIR():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('DIR')
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_LOC():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('LOC')
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_ATC():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('ATC')
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_CSG():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('CSG')
def test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies_DS():
test_FEMA_P58_Assessment_FRAG_uncertainty_dependencies('DS')
def test_FEMA_P58_Assessment_DV_uncertainty_dependencies():
"""
Perform loss assessment with customized inputs that focus on testing the
propagation of uncertainty in consequence functions and decision variables.
Dispersions in other calculation parameters are reduced to negligible
levels. This allows us to test the results against pre-defined reference
values in spite of the randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test_10.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test_10.out"
dep_list = ['IND', 'FG', 'PG', 'DIR', 'LOC', 'DS']
for d in range(7):
if d > 0:
dep_COST = dep_list[[0, 1, 2, 3, 4, 5][d - 1]]
dep_TIME = dep_list[[1, 2, 3, 4, 5, 0][d - 1]]
dep_RED = dep_list[[2, 3, 4, 5, 0, 1][d - 1]]
dep_INJ = dep_list[[3, 4, 5, 0, 1, 2][d - 1]]
else:
dep_COST = np.random.choice(dep_list)
dep_TIME = np.random.choice(dep_list)
dep_RED = np.random.choice(dep_list)
            dep_INJ = np.random.choice(dep_list)
import tkinter as tk
from tkinter import ttk
import numpy as np
from utils import *
import time
class AI_algorithms:
def __init__(self, root):
root.title("AI algorithms")
root.geometry("500x520")
root.resizable(width=False, height=False)
self.title_var = tk.StringVar(root)
self.title_var.set("Search Simulation")
title_app = tk.Label(root, textvariable=self.title_var, font=("Tahoma", 25, 'bold'))
title_app.pack(side=tk.TOP, fill=tk.X)
self.create_canvas(420, 420)
self.add_labels()
self.btn_frame = ttk.Frame(root)
self.add_buttons()
self.btn_frame.pack()
self.start = (3, 12)
self.goal = (20, 20)
self.reset_arr()
self.print_canvas()
def add_labels(self):
self.cost_var = tk.IntVar(root)
self.cost_var.set(0)
self.expanded_count = tk.IntVar(root)
self.expanded_count.set(0)
cost_lbl = tk.Label(root, text="Path Cost : ", font=("Tahoma", 10))
cost_label = tk.Label(root, textvariable=self.cost_var, font=("Tahoma", 10))
cost_lbl.pack(side=tk.LEFT, fill=tk.X)
cost_label.pack(side=tk.LEFT, fill=tk.X)
cexp_lbl = tk.Label(root, text="Nodes Expanded : ", font=("Tahoma", 10))
cexp_label = tk.Label(root, textvariable=self.expanded_count, font=("Tahoma", 10))
cexp_label.pack(side=tk.RIGHT, fill=tk.X)
cexp_lbl.pack(side=tk.RIGHT, fill=tk.X)
def fill_canvas(self, event=None):
print("Fill button clicked! The value of The dropdown is : ", self.var_alg.get())
self.clear_canvas()
if self.var_alg.get() == 'DFS':
path = self.depthFirstSearch()
elif self.var_alg.get() == 'BFS':
path = self.breadthFirstSearch()
elif self.var_alg.get() == 'UCS':
path = self.uniformCostSearch()
else:
path = self.aStarSearch()
self.print_canvas(path)
def clear_canvas(self, event=None):
self.drawing_area.create_rectangle(0, 0, self.c_width, self.c_height, fill="white")
self.reset_arr()
self.print_canvas()
self.cost_var.set(0)
self.expanded_count.set(0)
print("Cleared")
def create_canvas(self, width, height):
self.c_width = width
self.c_height = height
self.drawing_area = tk.Canvas(root,
width=self.c_width,
height=self.c_height,
bg="white")
self.drawing_area.pack()
def add_buttons(self):
self.b_fill = ttk.Button(self.btn_frame, text="Search Goal", command=self.fill_canvas)
self.b_clear = ttk.Button(self.btn_frame, text="Clear", command=self.clear_canvas)
self.var_alg = tk.StringVar(root)
self.var_alg.set("DFS")
self.options_menu = ttk.OptionMenu(self.btn_frame, self.var_alg, "DFS", "DFS", "BFS", "UCS", "A*")
self.b_fill.pack(side=tk.LEFT)
self.b_clear.pack(side=tk.LEFT)
self.options_menu.pack(side=tk.LEFT)
def reset_arr(self):
self.arr = np.copy(search_space)
self.arr[self.start[0], self.start[1]] = 0
self.arr[self.goal[0], self.goal[1]] = 0
def print_canvas(self, path=[]):
start_x = 20 * self.start[1]
start_y = 20 * self.start[0]
end_x = 20 * (self.start[1] + 1)
end_y = 20 * (self.start[0] + 1)
self.drawing_area.create_rectangle(start_x, start_y, end_x, end_y, fill="blue")
start_x = 20 * self.goal[1]
start_y = 20 * self.goal[0]
end_x = 20 * (self.goal[1] + 1)
end_y = 20 * (self.goal[0] + 1)
self.drawing_area.create_rectangle(start_x, start_y, end_x, end_y, fill="green")
if path == []:
rows, cols = self.arr.shape
colors_arr = ["#000000", "#002730", "#013e4c", "#01657c", "#00a1c6"]
for i in range(rows):
for j in range(cols):
if (i, j) != self.start and (i, j) != self.goal:
ind = int(self.arr[i][j])
start_x = 20 * j
start_y = 20 * i
end_x = 20 * (j + 1)
end_y = 20 * (i + 1)
self.drawing_area.create_rectangle(start_x, start_y, end_x, end_y, fill=colors_arr[ind])
for i, j in path:
start_x = 20 * j + 5
start_y = 20 * i + 5
end_x = 20 * (j + 1) - 5
end_y = 20 * (i + 1) - 5
self.drawing_area.create_oval(start_x, start_y, end_x, end_y, fill="red")
##Algorithms
def getStartState(self):
return self.start
def isGoalState(self, state):
if state == self.goal:
return True
else:
return False
def getSuccessors(self, state):
r, c = state
suc = []
#left
if c > 0:
cl = c - 1
s_left = (r, cl)
suc.append((s_left, [s_left], int(self.arr[r][cl]) + 1))
#right
if c < 20:
cr = c + 1
s_right = (r, cr)
suc.append((s_right, [s_right], int(self.arr[r][cr]) + 1))
#top
if r > 0:
rt = r - 1
s_top = (rt, c)
suc.append((s_top, [s_top], int(self.arr[rt][c]) + 1))
#bottom
if r < 20:
rb = r + 1
s_bot = (rb, c)
suc.append((s_bot, [s_bot], int(self.arr[rb][c]) + 1))
return suc
def update_visited(self, state):
i, j = state
start_x = 20 * j + 5
start_y = 20 * i + 5
end_x = 20 * (j + 1) - 5
end_y = 20 * (i + 1) - 5
self.drawing_area.create_oval(start_x, start_y, end_x, end_y, fill="gray")
self.drawing_area.update()
time.sleep(0.01)
def mark_visited(self, state):
r, c = state
self.arr[r, c] = -1
self.update_visited(state)
def depthFirstSearch(self):
fringe = Stack()
start = self.getStartState()
fringe.push((start, [], 0))
visited = set([])
while (fringe.isEmpty() == False):
state, path, cost = fringe.pop()
if state not in visited:
visited.add(state)
if self.isGoalState(state):
print(state, "Found GOAL")
print(path)
print(cost)
self.expanded_count.set(len(visited))
self.cost_var.set(cost)
return path
else:
for suc in self.getSuccessors(state):
suc_state, suc_dir, suc_cost = suc
fringe.push((suc_state, path + suc_dir, cost + suc_cost))
self.mark_visited(state)
return []
def breadthFirstSearch(self):
fringe = Queue()
start = self.getStartState()
fringe.push((start, [], 0))
visited = set([])
while (fringe.isEmpty() == False):
state, path, cost = fringe.pop()
if state not in visited:
visited.add(state)
if self.isGoalState(state):
print(state, "Found GOAL")
print(path)
print(cost)
self.expanded_count.set(len(visited))
self.cost_var.set(cost)
return path
else:
for suc in self.getSuccessors(state):
suc_state, suc_dir, suc_cost = suc
fringe.push((suc_state, path + suc_dir, cost + suc_cost))
self.mark_visited(state)
return []
def uniformCostSearch(self):
print("Starting UCS")
fringe = PriorityQueue()
start = self.getStartState()
fringe.push((start, [], 0), 0)
visited = set([])
while(fringe.isEmpty() == False):
state, path, cost = fringe.pop()
if state not in visited:
visited.add(state)
if self.isGoalState(state):
print(state, "Found GOAL")
print(path)
print(cost)
self.expanded_count.set(len(visited))
self.cost_var.set(cost)
return path
else:
for suc in self.getSuccessors(state):
suc_state, suc_dir, suc_cost = suc
fringe.push((suc_state, path + suc_dir, cost + suc_cost), cost + suc_cost)
self.mark_visited(state)
return []
def manhattanDist(self, state):
        # Manhattan distance from the current state to the goal
        dis = np.abs(state[0] - self.goal[0]) + np.abs(state[1] - self.goal[1])
        return dis
"""
Metrics calculation utilities.
Code of `detection_f1`, `object_dice` and `object_hausdorff` are adapted from
https://warwick.ac.uk/fac/sci/dcs/research/tia/glascontest/evaluation/evaluation_metrics_v6.zip.
"""
import functools
import torch
import numpy as np
from scipy import stats
from scipy.spatial.distance import directed_hausdorff
from skimage.measure import label
def convert_to_numpy(func):
"""Decorator for converting each argument to numpy array."""
@functools.wraps(func)
def wrapper(*args):
args = [
arg.detach().cpu().numpy() if torch.is_tensor(arg) else np.array(arg)
for arg in args
]
return func(*args)
return wrapper
def accuracy(P, G):
"""Classification accuracy.
Arguments:
P: prediction with size (B, H, W)
G: ground truth tensor with the same size as P
Returns:
accuracy: classification accuracy
"""
if torch.is_tensor(P) and torch.is_tensor(G):
return (P == G).float().mean().item()
return (np.array(P) == np.array(G)).mean()
@convert_to_numpy
def detection_f1(S, G, overlap_threshold=0.5, epsilon=1e-7):
"""F1-score for object detection.
The ground truth for each segmented object is the object in the manual annotation
that has maximum overlap with that segmented object.
A segmented glandular object that intersects with at least 50% of its ground truth
will be considered as true positive, otherwise it will be considered as false positive.
A ground truth glandular object that has no corresponding segmented object or has
less than 50% of its area overlapped by its corresponding segmented object will be
considered as false negative.
See more on https://warwick.ac.uk/fac/sci/dcs/research/tia/glascontest/evaluation/.
Arguments:
S: segmentation mask with shape (H, W)
G: ground truth mask with the same shape as S
overlap_threshold: overlap threshold for counting true positives
epsilon: numerical stability term
Returns:
f1: detection F1 score
"""
S, G = label(S), label(G)
num_S, num_G = S.max(), G.max()
if num_S == 0 and num_G == 0:
return 1
elif num_S == 0 or num_G == 0:
return 0
# matrix for identifying corresponding ground truth object in G
# for each segmented object in S (the 1st col contains labels of
# segmented objects, the 2nd col contains corresponding ground truth
# objects and the 3rd col is the true positive flags)
tp_table = np.zeros((num_S, 3))
tp_table[:, 0] = np.arange(1, num_S + 1)
for seg_idx in range(num_S):
intersect = G[S == tp_table[seg_idx, 0]]
intersect = intersect[intersect > 0]
if intersect.size > 0:
tp_table[seg_idx, 1] = stats.mode(intersect)[0]
for seg_idx in range(num_S):
if tp_table[seg_idx, 1] != 0:
seg_obj = S == tp_table[seg_idx, 0]
gt_obj = G == tp_table[seg_idx, 1]
overlap = seg_obj & gt_obj
if overlap.sum() / gt_obj.sum() > overlap_threshold:
tp_table[seg_idx, 2] = 1
    TP = np.sum(tp_table[:, 2] == 1)
import sys, os
import numpy as np
from numpy.linalg import norm
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import logging
import traceback
import timeit
import time
import math
from ast import literal_eval as make_tuple
import platform
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, proj3d
import glob
import pickle
import myFunctions as mf
import copy
from operator import itemgetter
from os.path import join
import inspect
from scipy.optimize import fsolve, fmin_tnc, least_squares, differential_evolution, minimize, fmin_l_bfgs_b, basinhopping
import myFunctions as mf
from scipy import stats
class FluidNetwork(object):
"""
Unified framework for doing the fluid simulation. At this stage, the graph used has already been reduced, i.e., each
    edge represents a segment in `segmentList` and each node represents a bifurcation. Previously, each segment may have
consisted of one or more edges. To reduce the previous graph, use the function `reduceGraph`. Also, for the sake of
consistency, the original `segmentInfoDict` has been renamed to `edgeInfoDict`, `segmentList` to `edgeList`, but
`nodeInfoDict` remains the same. Besides, the nodes and edges are now indexed by integers starting from zero for
simplicity. Use the function `convertGraph` to do the conversion.
    So the fundamental stuff that you need to have is: `edgeList`, `edgeInfoDict`, `nodeInfoDict`, `G`. These are
    necessary for all of the subsequent analysis. To perform a blood flow simulation, you need to do the following steps:
    1. Get the graph and the fundamental stuff by either creating one or loading an existing one.
2. Set c and k (used in H-W equation) for each edge by `setNetwork`.
3. Set the terminating pressures by `setTerminatingPressure`.
4. Generate H-W equations for each edge and flow conservation equations for each node by `setupFluidEquations`.
5. Solve the equations by optimization and use `computerNetworkDetail` as objective function.
The fluid simulation tries to solve the network by finding a set of pressures for each node and a set of flow rates
    for each edge such that the H-W equations and flow conservation equations are satisfied with the given set of
terminating pressures. For a binary tree structure without merges, a solution is guaranteed to exist no matter what
the terminating pressures look like. However, for the network with merges (e.g., the GBM network with CoW), it is
possible that a solution does not exist for the given set of terminating pressures. Therefore, for these cases, we
    need to check the optimization result and verify that the error in each equation is within an acceptable range.
Note that not all the functions in this class should be used. Some are just for experimental purposes!
"""
def __init__(self):
self.directory = os.path.abspath(os.path.dirname(__file__))
self.edgeList = []
self.edgeIndexList = []
self.G = nx.Graph()
self.rootNode = 0
self.edgeInfoDict = {}
self.nodeInfoDict = {}
self.nodeIndex = 0
self.edgeIndex = 0
self.spacing = 0.00040 # meter/voxel
self.eqnInfoDictList = []
self.velocityPressure = []
self.velocityPressureGroundTruth = []
self.distributeFlowEqnDict = {}
self.nodeInfoDictBefore = {}
self.nodeInfoDictAfter = {}
self.edgeInfoDictBefore = {}
self.edgeInfoDictAfter = {}
def generateNetwork(self, maxDepth=10, allowMerge=False):
"""
Generate a binary tree with random edge and node properties.
Parameters
----------
maxDepth : int, optional
            Maximum depth of the graph (depth starts from zero).
allowMerge : bool, optional
            If True, there will be a 30% chance that two edges at the same depth will merge together.
"""
G = nx.Graph()
nodeDepth, edgeDepth = 0, 0
G.add_node(0, depth=nodeDepth, depthLevel=nodeDepth, nodeIndex=self.nodeIndex, isEntryNode=True) # first node
self.nodeIndex += 1
while nodeDepth <= maxDepth - 1:
nodesAtCurrentDepth = [node for node in G.nodes() if G.node[node]['depth'] == nodeDepth]
if len(nodesAtCurrentDepth) > 2:
# Determine if merge would happen
if allowMerge:
mergeAtCurrentDepth = (np.random.rand() <= 0.3) # 30% probability TODO: this should be controlled by function arguments
else:
mergeAtCurrentDepth = False
# Merge nodes if allowed
if mergeAtCurrentDepth:
numOfMerges = 1 # TODO: this should be controlled by function arguments
nodesToMerge = np.random.choice(nodesAtCurrentDepth, 2, replace=False)
newNode = self.nodeIndex
newEdgeIndex1, newEdgeIndex2 = self.edgeIndex, self.edgeIndex + 1 # TODO: allow >2 edge merge?
G.add_edge(nodesToMerge[0], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.add_edge(nodesToMerge[1], newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex + 1, segmentIndex=self.edgeIndex + 1)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 2
for currentNode in nodesAtCurrentDepth:
numOfChildEdges = len([node for node in G[currentNode].keys() if G.node[node]['depth'] > nodeDepth])
numOfNewEdges = 2 - numOfChildEdges # TODO: allow for more child edges?
for ii in range(numOfNewEdges):
newNode = self.nodeIndex
G.add_edge(currentNode, newNode, depth=edgeDepth, segmentLevel=edgeDepth, edgeIndex=self.edgeIndex, segmentIndex=self.edgeIndex)
G.node[newNode]['depth'] = nodeDepth + 1
G.node[newNode]['depthLevel'] = nodeDepth + 1
G.node[newNode]['nodeIndex'] = self.nodeIndex
G.node[newNode]['isEntryNode'] = False
self.nodeIndex += 1
self.edgeIndex += 1
nodeDepth += 1
edgeDepth += 1
# Gather data
edgeList = [0] * self.edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
nodeInfoDict, edgeInfoDict = {}, {}
for node in G.nodes():
nodeInfoDict[node] = G.node[node]
nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[node]['coord'] = []
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeInfoDict[edgeIndex] = G[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Save
self.G = G
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def loadNetwork(self, version=2, year=2013):
"""
        Load an old version of the data files (that need to be converted).
"""
directory = self.directory
if version == 1:
filename = 'basicFilesForStructureWithCoW(year={}).pkl'.format(year)
elif version == 2:
filename = 'basicFilesForStructureWithCoW2(year={}).pkl'.format(year)
elif version == 3:
filename = 'basicFilesForStructureWithCoW3(year={}).pkl'.format(year)
elif version == 4:
filename = 'basicFilesForStructureWithCoW4(year={}).pkl'.format(year)
with open(join(directory, filename), 'rb') as f:
resultDict = pickle.load(f)
with open(join(directory, 'partitionInfo.pkl'), 'rb') as f:
partitionInfo = pickle.load(f)
with open(join(directory, 'chosenVoxelsForPartition.pkl'), 'rb') as f:
chosenVoxels = pickle.load(f)
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
resultDict['resultADANDict'] = resultADANDict
resultDict['partitionInfo'] = partitionInfo
resultDict['chosenVoxels'] = chosenVoxels
self.loadedNetwork = resultDict
def reduceGraph(self, G, segmentList, segmentIndexList):
"""
        Reduce the graph such that each node is either a terminating or a bifurcating point.
Parameters
----------
G : NetworkX graph
The graph representation of the network.
segmentList : list
A list of segments in which each segment is a simple branch.
segmentIndexList : list
A list of segment indices referring to the segments actually be used in `segmentList`.
Returns
-------
DG : NetworkX graph
The reduced graph (each edge refers to a segment).
"""
DG = nx.DiGraph()
for segmentIndex in segmentIndexList:
segment = segmentList[segmentIndex]
head, tail, secondNode = segment[0], segment[-1], segment[1]
headLevel, tailLevel = G.node[head]['depthLevel'], G.node[tail]['depthLevel']
if headLevel > tailLevel:
head, tail, secondNode = tail, head, segment[-2]
headLevel, tailLevel = tailLevel, headLevel
DG.add_path([head, tail])
for key, value in G[head][secondNode].items():
DG[head][tail][key] = value
for key, value in G.node[head].items():
DG.node[head][key] = value
for key, value in G.node[tail].items():
DG.node[tail][key] = value
return DG
def convertNetowrk(self):
"""
Convert the old version of data files into the new version used here.
"""
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
segmentIndexList = list(segmentInfoDictOld.keys())
heartLoc = (255, 251, 26) # change as needed
DG = self.reduceGraph(GOld, segmentList, segmentIndexList)
G = nx.Graph()
nodeInfoDict, edgeInfoDict = {}, {}
nodeIndex, edgeIndex = 0, 0
maxNodeDepth = np.max([DG.node[node]['depthLevel'] for node in DG.nodes()])
for currentDepth in range(maxNodeDepth + 1):
nodesAtCurrentDepth = [node for node in DG.nodes() if DG.node[node]['depthLevel'] == currentDepth]
for node in nodesAtCurrentDepth:
G.add_node(nodeIndex, depth=DG.node[node]['depthLevel'], nodeIndex=nodeIndex, coord=node)
DG.node[node]['nodeIndexHere'] = nodeIndex
if node == heartLoc:
G.node[nodeIndex]['isEntryNode'] = True
rootNode = nodeIndex
else:
G.node[nodeIndex]['isEntryNode'] = False
nodeIndex += 1
for edge in DG.edges():
depth = np.min([DG.node[edge[0]]['depthLevel'], DG.node[edge[1]]['depthLevel']])
DG[edge[0]][edge[1]]['depth'] = depth
maxEdgeDepth = np.max([DG[edge[0]][edge[1]]['depth'] for edge in DG.edges()])
for currentDepth in range(maxEdgeDepth + 1):
edgesAtCurrentDepth = [edge for edge in DG.edges() if DG[edge[0]][edge[1]]['depth'] == currentDepth]
for edge in edgesAtCurrentDepth:
G.add_edge(DG.node[edge[0]]['nodeIndexHere'], DG.node[edge[1]]['nodeIndexHere'], depth=currentDepth, edgeIndex=edgeIndex)
edgeIndex += 1
currentNodeIndex = nodeIndex
currentEdgeIndex = edgeIndex
edgeList = [[]] * edgeIndex
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
edgeList[edgeIndex] = edge
nodeIndexList = [G.node[node]['nodeIndex'] for node in G.nodes()]
edgeIndexList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in edgeList]
for node in DG.nodes():
nodeIndexHere = DG.node[node]['nodeIndexHere']
nodeInfoDict[nodeIndexHere] = DG.node[node]
nodeInfoDict[nodeIndexHere]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
nodeInfoDict[nodeIndexHere]['coord'] = []
for edge in DG.edges():
edgeIndex = G[DG.node[edge[0]]['nodeIndexHere']][DG.node[edge[1]]['nodeIndexHere']]['edgeIndex']
segmentIndex = DG[edge[0]][edge[1]]['segmentIndex']
edgeInfoDict[edgeIndex] = DG[edge[0]][edge[1]]
edgeInfoDict[edgeIndex]['length'] = DG[edge[0]][edge[1]]['pathLength'] # backward compatibility
edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# Sync between G and nodeInfoDict
for node in G.nodes():
for key, value in G.node[node].items():
nodeInfoDict[node][key] = value
# Save
self.G = G
self.edgeIndex = currentEdgeIndex
self.nodeIndex = currentNodeIndex
self.edgeList = edgeList
self.nodeIndexList = nodeIndexList
self.edgeIndexList = edgeIndexList
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.rootNode = rootNode
def adjustNetwork(self):
"""
If the network changes, recheck the correspondence between branch name and edgeIndex!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# LICA(Pre)
edgeInfoDict[0]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[0]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# LICA(Post)
edgeInfoDict[3]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[3]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Pre)
edgeInfoDict[2]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[2]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# RICA(Post)
edgeInfoDict[7]['meanRadius'] = 3.3 / (spacing * 1000) # mm->voxel
edgeInfoDict[7]['length'] = 1.5 / (spacing * 1000) # mm->voxel
# VA
# edgeInfoDict[1]['meanRadius'] = 2.0 / (spacing * 1000) # mm->voxel
edgeInfoDict[1]['length'] = 28 / (spacing * 1000) # mm->voxel
# RPCAComm
edgeInfoDict[4]['length'] = 16 / (spacing * 1000) # mm->voxel
# RMCA(first segment)
# edgeInfoDict[12]['length'] = 8 / (spacing * 1000) # mm->voxel
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def setNetwork(self, option=1, extraInfo=None):
"""
Set c and k (and possibly radius and length) for each branch.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
# Use BraVa data to set the radius and ADAN result to set the c and k
if option == 1:
minSetLength, maxSetLength = 1, 70 # mm
# Params used to fit radius to edgeLevel using the BraVa data. radius (mm) = a * np.exp(-b * edgeLevel) + c
fitResultDict = {'LMCA': {'param': [0.5569, 0.4199, 0.469]}, 'RMCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LPCA': {'param': [0.6571, 0.3252, 0.2949]}, 'RPCA': {'param': [0.7103, 0.5587, 0.3815]}, 'ACA': {'param': [0.3604, 1.0538, 0.4714]}} # new names
# fitResultDict = {'LCA': {'param': [0.5569, 0.4199, 0.469]}, 'RCA': {'param': [0.6636, 0.3115, 0.3666]}, 'LACA': {'param': [0.6571, 0.3252, 0.2949]}, 'RACA': {'param': [0.7103, 0.5587, 0.3815]}, 'PCA': {'param': [0.3604, 1.0538, 0.4714]}} # old names
a, b, c = fitResultDict['LMCA']['param']
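# Worked example (edgeLevel chosen for illustration): with the LMCA parameters
# a, b, c = 0.5569, 0.4199, 0.469 and edgeLevel = 3, the fitted radius is
#     0.5569 * np.exp(-0.4199 * 3) + 0.469 ~ 0.63 mm,
# which the loop below divides by (spacing * 1000) to convert mm to voxels.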
for edgeIndex in edgeIndexList:
edgeLevel = edgeInfoDict[edgeIndex]['depth']
radius = (a * np.exp(-b * edgeLevel) + c) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['meanRadius'] = radius # voxel
length = (np.random.rand() * (maxSetLength - minSetLength) + minSetLength) / (spacing * 1000) # voxel
edgeInfoDict[edgeIndex]['pathLength'] = length # for backward compatibility
edgeInfoDict[edgeIndex]['length'] = length # voxel
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
c = c if c > 0 else 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Only set c and k using ADAN result
elif option == 2:
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
if extraInfo is not None:
excludedEdgeIndex = itemgetter('excludedEdgeIndex')(extraInfo)
slopeCRadius, interceptCRadius = resultADANDict['slopeCRadius'], resultADANDict['interceptCRadius']
# print('slopeCRadius={}, interceptCRadius={}'.format(slopeCRadius, interceptCRadius))
radiusThresholds, CKCandidates, numOfCCategory = resultADANDict['radiusThresholds'], resultADANDict['CKCandidates'], resultADANDict['numOfCCategory']
minRadius, maxRadius = np.min(radiusThresholds), np.max(radiusThresholds) # meter
slopePressureRadius, interceptPressureRadius = resultADANDict['slopePressureRadius'], resultADANDict['interceptPressureRadius']
# if extraInfo is not None:
# edgeIndexListToUse = [edgeIndex for edgeIndex in edgeIndexList if edgeIndex not in excludedEdgeIndex]
# else:
# edgeIndexListToUse = edgeIndexList
edgeIndexListToUse = edgeIndexList
for edgeIndex in edgeIndexListToUse:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
if radius > minRadius and radius < maxRadius:
binIndex = np.digitize([radius], radiusThresholds)[0] - 1
c, k = CKCandidates[binIndex], CKCandidates[-1] # assuming c is different for each branch and k is the same
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
else:
c = np.poly1d([slopeCRadius, interceptCRadius])(radius) # extrapolate
k = CKCandidates[-1] # assuming c is different for each branch and k is the same
# c = c if c > 0 else 0.1
if radius * 1000 >= 1.5 and radius * 1000 <= 2.5:
c = 1
else:
if c < 0:
c = 0.1
edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k'] = c, k
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def showFlowInfo(self):
"""
Print out flow rates for selected edges and pressure for selected nodes.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for edgeIndex in range(16):
flow = edgeInfoDict[edgeIndex]['simulationData']['flow']
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(edgeInfoDict[edgeIndex])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
flow = -1 #
radius *= (spacing * 100) # convert to cm
length *= (spacing * 100) # convert to cm
print('Edge {}: flow={:.3f} cm^3/s, radius={:.4f} cm, length={:.4f} cm, c={:.4f}, k={:.4f}'.format(edgeIndex, flow, radius, length, c, k))
print('\n')
for node in range(16):
flow, pressure = itemgetter('flow', 'pressure')(nodeInfoDict[node]['simulationData'])
if flow is not None:
flow *= 10**6 # convert to cm^3/s
else:
flow = -1
if pressure is not None:
pressure /= (13560*9.8/1000) # convert to mmHg
else:
pressure = -1
print('Node {}: flow={:.3f} cm^3/s, pressure={:.3f} mmHg'.format(node, flow, pressure))
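# Unit-conversion note: the factor 13560*9.8/1000 ~ 132.9 Pa per mmHg used here
# (and throughout this class) is rho * g * h with rho = 13560 kg/m^3,
# g = 9.8 m/s^2 and h = 1 mm of mercury.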
def getFlowInfoFromDeltaPressure(self, edgeIndex, deltaPressure):
"""
Calculate the required flow/velocity in order to achieve the given pressure drop for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
deltaPressure : float
The desired pressure drop with a unit of Pascal.
Returns
-------
flow : float
The required flow rate to achieve the desired pressure drop, in m^3/s (the calculation below uses SI units throughout).
velocity : float
The velocity in that edge corresponding to the required flow rate, in m/s.
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
flow = np.power(deltaPressure * c**k * (2*radius)**4.8704 / 10.67 / length, 1/k) # m^3/s
velocity = flow / (np.pi * radius**2) # m/s
return flow, velocity
def getDeltaPressureFromFlow(self, edgeIndex, flow):
"""
Calculate the required pressure drop in order to achieve the given flow for the specific edge.
Parameters
----------
edgeIndex : int
The index of the edge.
flow : float
The desired flow rate through the edge, in m^3/s (SI units, consistent with the formula below).
Returns
-------
deltaPressure : float
The required pressure drop in the edge to achieve the desired flow rate with a unit of Pascal.
"""
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704
return deltaPressure
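# Worked example of the Hazen-Williams-type relation above (all numbers are
# assumed, for illustration only): with c = 1, k = 1.85, radius = 1.5 mm
# (diameter 3 mm), length = 10 mm and flow = 2 cm^3/s = 2e-6 m^3/s,
#     deltaPressure = 10.67 * (2e-6)**1.85 * 0.01 / 1**1.85 / 0.003**4.8704
#                   ~ 6 Pa (~ 0.045 mmHg).
# getFlowInfoFromDeltaPressure() inverts this relation, so feeding ~6 Pa back
# with the same edge parameters should recover a flow of ~2e-6 m^3/s.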
def createGroundTruth(self, seed=None, option=1):
"""
Manually set the velocity and pressure for all edges/nodes in order to check whether the solver is correct.
Option 1: each child branch randomly takes ~1/N (with some random fluctuation) of the parent flow.
Option 2: flow is split in proportion to the cross-sectional area of the child branches (see the sketch below).
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
success = False
# Set argsIndex (index of pressure/velocity unknowns in the fluid simulation)
argsIndex = 0
for edgeIndex in edgeIndexList:
edgeInfoDict[edgeIndex]['argsIndex'] = argsIndex
argsIndex += 1
for node in G.nodes():
nodeInfoDict[node]['isBifurcatingNode'] = False
nodeList = [node for node in G.nodes() if node != 0 and G.degree(node) != 1]
for node in nodeList:
nodeInfoDict[node]['argsIndex'] = argsIndex
nodeInfoDict[node]['isBifurcatingNode'] = True
argsIndex += 1
minSetVelocity, maxSetVelocity = 0.01, 3 # m/s
inletPressure = 13560 * 9.8 * 0.12 # Pascal
inletVelocity = 1.5 # m/s
inletFlow = 754/60/10**6 # m^3/s
minSplitAmount, maxSplitAmount = 0.4, 0.6
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth):
## first deal with the nodes whose child edge merges
nodesAtNextDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth + 1]
for nodeAtNextDepth in nodesAtNextDepth:
parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] == currentDepth]
# parentNodes = [node for node in G[nodeAtNextDepth].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[nodeAtNextDepth]['depth']]
if len(parentNodes) > 1:
# print('Node {} merge into {}'.format(parentNodes, nodeAtNextDepth))
flowAtParentNodes = [nodeInfoDict[node]['simulationData']['flow'] for node in parentNodes] # m^3/s
degreeAtParentNodes = [G.degree(node) for node in parentNodes]
pressureAtParentNodes = [nodeInfoDict[node]['simulationData']['pressure'] for node in parentNodes]
parentEdgeIndexList = [G[nodeAtNextDepth][node]['edgeIndex'] for node in parentNodes]
parentEdgeDeltaPressureList = [self.getDeltaPressureFromFlow(edgeIndex, flow) for edgeIndex, flow in zip(parentEdgeIndexList, flowAtParentNodes)]
nodeMinPressureList = [headPressure - deltaPressure for headPressure, deltaPressure in zip(pressureAtParentNodes, parentEdgeDeltaPressureList)]
if degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] > 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] > 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = True, False
elif degreeAtParentNodes[0] == 2 and degreeAtParentNodes[1] == 2:
loc1, loc2 = 0, 1
isEdge1StraightPipe, isEdge2StraightPipe = True, True
if nodeMinPressureList[0] != nodeMinPressureList[1]:
success = False
print('Error! Two straight edges cannot achieve the same end pressure')
return success
print('Warning! Two straight edges merge into one node')
else:
if nodeMinPressureList[0] > nodeMinPressureList[1]:
loc1, loc2 = 0, 1
else:
loc1, loc2 = 1, 0
isEdge1StraightPipe, isEdge2StraightPipe = False, False
edgeIndex1, edgeIndex2 = parentEdgeIndexList[loc1], parentEdgeIndexList[loc2]
parentNode1, parentNode2 = parentNodes[loc1], parentNodes[loc2]
parentPressure1, parentPressure2 = pressureAtParentNodes[loc1], pressureAtParentNodes[loc2]
parentFlow1, parentFlow2 = flowAtParentNodes[loc1], flowAtParentNodes[loc2]
radius1, radius2 = edgeInfoDict[edgeIndex1]['meanRadius'] * spacing, edgeInfoDict[edgeIndex2]['meanRadius'] * spacing
length1, length2 = edgeInfoDict[edgeIndex1]['length'] * spacing, edgeInfoDict[edgeIndex2]['length'] * spacing
c1, c2 = edgeInfoDict[edgeIndex1]['c'], edgeInfoDict[edgeIndex2]['c']
k1, k2 = edgeInfoDict[edgeIndex1]['k'], edgeInfoDict[edgeIndex2]['k']
flowCounter = 0
# for the first edge
maxPossibleFlow = parentFlow1
minDeltaPressure = np.max([0, pressureAtParentNodes[loc1] - pressureAtParentNodes[loc2]])
minPossibleFlow, _ = self.getFlowInfoFromDeltaPressure(parentEdgeIndexList[loc1], minDeltaPressure)
if minPossibleFlow > maxPossibleFlow:
success = False
print('Error while merging node {} to node {}, minPossibleFlow ({}) is larger than maxPossibleFlow ({})'.format(parentNodes, nodeAtNextDepth, minPossibleFlow, maxPossibleFlow))
return success
if isEdge1StraightPipe:
flow1 = parentFlow1
if flow1 >= minPossibleFlow and flow1 <= maxPossibleFlow:
pass
else:
print('Edge {} wants to use all flow={} from node {}, but possible range is [{}, {}]'.format(edgeIndex1, flow1, parentNode1, minPossibleFlow, maxPossibleFlow))
else:
# flow1 = np.random.rand() * (maxPossibleFlow - minPossibleFlow) + minPossibleFlow
flow1 = (maxPossibleFlow + minPossibleFlow) / 2
## Manual manipulation !!! ##
if nodeAtNextDepth == 10:
if edgeIndex1 == 9:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
elif edgeIndex1 == 11:
flow1 = maxPossibleFlow * 0.15 # used to be 0.3
print('Edge {} gets flow={} cm^3/s'.format(edgeIndex1, flow1*10**6))
# radius8, radius9 = edgeInfoDict[8]['meanRadius'], edgeInfoDict[9]['meanRadius']
# flow9 = maxPossibleFlow * radius9**2 / (radius8**2 + radius9**2)
# print('Edge {} get flow={}'.format(edgeIndex1, flow1))
velocity1 = flow1 / (np.pi * radius1**2) # m/s
edgeInfoDict[edgeIndex1]['simulationData']['velocity'] = velocity1
edgeInfoDict[edgeIndex1]['simulationData']['flow'] = flow1
deltaPressure1 = 10.67 * flow1**k1 * length1 / c1**k1 / (2*radius1)**4.8704
tailPressure = parentPressure1 - deltaPressure1 # pressure at the merging node
nodeInfoDict[nodeAtNextDepth]['simulationData']['pressure'] = tailPressure
flowCounter += flow1
# the other edge
deltaPressure2 = parentPressure2 - tailPressure
flow2 = np.power(deltaPressure2 / 10.67 / length2 * c2**k2 * (2*radius2)**4.8704, 1/k2)
velocity2 = flow2 / (np.pi * radius2**2) # m/s
edgeInfoDict[edgeIndex2]['simulationData']['velocity'] = velocity2
edgeInfoDict[edgeIndex2]['simulationData']['flow'] = flow2
flowCounter += flow2
nodeInfoDict[nodeAtNextDepth]['simulationData']['flow'] = flowCounter
if flow2 > parentFlow2:
print('Node {}: the flow ({}) in other edge is larger than provided ({})'.format(nodeAtNextDepth, flow2, parentFlow2))
print('edgeIndex1={}, edgeIndex2={}, flow1={}, flow2={}'.format(edgeIndex1, edgeIndex2, flow1, flow2))
print(nodeInfoDict[1]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[3]['simulationData']['pressure']/13560/9.8*1000, nodeInfoDict[2]['simulationData']['pressure']/13560/9.8*1000)
## Now deal with remaining nodes
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth]
for currentNode in nodesAtCurrentDepth:
if currentDepth == 0:
nodeInfoDict[currentNode]['simulationData']['pressure'] = inletPressure
nodeInfoDict[currentNode]['simulationData']['flow'] = inletFlow
flowIn = inletFlow
pressureIn = inletPressure
# print('inletPressure={} mmHg, inletFlow={} cm^3/s, currentDepth={}'.format(inletPressure/13560/9.8*1000, inletFlow*10**6, currentDepth))
else:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
if flowIn is None:
print('Node {} has flow=None, nodesAtCurrentDepth={}'.format(currentNode, nodesAtCurrentDepth))
pressureIn = nodeInfoDict[currentNode]['simulationData']['pressure']
edgeIndexAtNextDepth = [G[currentNode][neighborNode]['edgeIndex'] for neighborNode in G[currentNode].keys() if nodeInfoDict[neighborNode]['depth'] > currentDepth]
edgeIndexToProcess = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is None]
edgeIndexCompleted = [edgeIndex for edgeIndex in edgeIndexAtNextDepth if edgeInfoDict[edgeIndex]['simulationData']['flow'] is not None]
edgeCounter = len(edgeIndexToProcess)
flowAvailable = nodeInfoDict[currentNode]['simulationData']['flow']
for edgeIndex in edgeIndexCompleted:
flowAvailable -= edgeInfoDict[edgeIndex]['simulationData']['flow']
if flowAvailable < 0 - np.finfo(float).eps:
flowIn = nodeInfoDict[currentNode]['simulationData']['flow']
flowUsed = ['Edge {}: {}'.format(edgeIndex, edgeInfoDict[edgeIndex]['simulationData']['flow']) for edgeIndex in edgeIndexCompleted]
print('Error! Node {}: flowIn={}, flowUsed={}, flowAvailable={}'.format(currentNode, flowIn, flowUsed, flowAvailable))
flowAmount = []
# Randomly split the flow (within a range)
if option == 1:
while edgeCounter >= 1:
if edgeCounter > 1:
basePercentage = 100 / edgeCounter
fluctuationPercentage = basePercentage / 3.0
actualPercentage = basePercentage - fluctuationPercentage/2 + np.random.rand() * fluctuationPercentage
# actualPercentage = (np.random.rand() * 0.8 + 0.1) * 100
flow = flowAvailable * actualPercentage / 100
if flow < 0:
print('Node {}: flow < 0, actualPercentage={}, flowAvailable={}'.format(currentNode, actualPercentage, flowAvailable))
flowAmount.append(flow)
flowAvailable -= flow
if flowAvailable < 0:
print('Node {}: flowAvailable < 0, actualPercentage={}'.format(currentNode, actualPercentage))
else:
flowAmount.append(flowAvailable)
edgeCounter -= 1
elif option == 2:
radiusList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexToProcess]
radiusSqList = [radius**2 for radius in radiusList]
sumOfRadiusSq = np.sum(radiusSqList)
flowAmount = [flowAvailable * radiusSq / sumOfRadiusSq for radiusSq in radiusSqList]
## Manual manipulation !!! ###
if currentNode == 0 and G.degree(currentNode) == 3:
edgeIndexToProcess = [0, 2, 1] # LICA/RICA/VA
inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
flowAmount = [inletFlow*0.4, inletFlow*0.4, inletFlow*0.2]
# elif currentNode == 8:
# edgeIndexToProcess = [16, 17] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
# elif currentNode == 9:
# edgeIndexToProcess = [18, 19] #
# inletFlow = nodeInfoDict[currentNode]['simulationData']['flow']
# flowAmount = [inletFlow*0.7, inletFlow*0.3]
for edgeIndex, flow in zip(edgeIndexToProcess, flowAmount):
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = flow / (np.pi * radius**2) # m/s
edgeHead, edgeTail = edge[0], edge[1]
if nodeInfoDict[edgeHead]['depth'] > nodeInfoDict[edgeTail]['depth']:
edgeHead, edgeTail = edgeTail, edgeHead
pressureHead = nodeInfoDict[edgeHead]['simulationData']['pressure']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
deltaPressure = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704 # Pascal
if np.isnan(deltaPressure):
print('velocity={}, flow={}'.format(velocity, flow))
pressureTail = pressureHead - deltaPressure # Pascal
nodeInfoDict[edgeTail]['simulationData']['pressure'] = pressureTail
nodeInfoDict[edgeTail]['simulationData']['flow'] = flow
# print('Node {} (head={}, edgeIndex={}), flow={}'.format(edgeTail, edgeHead, edgeIndex, flow))
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# print('Pressure at {} = {} mmHg, currentDepth={}'.format(edgeTail, pressureTail/13560/9.8*1000, currentDepth))
# if edgeIndex ==5 or edgeIndex == 6:
# print('Node {}, edgeIndex={}, flow={} cm^3/s, deltaPressure={} mmHg'.format(currentNode, edgeIndex, flow*10**6, deltaPressure/13560/9.8*1000))
velocityPressure = [0] * argsIndex
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
pressure = nodeInfoDict[node]['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
velocity = edgeInfoDict[edgeIndex]['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
self.velocityPressure = velocityPressure # Ground truth solution
self.velocityPressureGroundTruth = velocityPressure # Ground truth solution
success = True
return success
def getVelocityPressure(self):
"""
Extract velocity and pressure from edgeInfoDict and nodeInfoDict.
Returns
-------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
"""
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
velocityPressure = np.hstack((np.full((numOfEdges,), 0.0), np.full((numOfNodes,), 0.0))) # make sure dtype is float
for node, info in nodeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressure[argsIndex] = pressure
for edgeIndex, info in edgeInfoDict.items():
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressure[argsIndex] = velocity
return velocityPressure
def getVolumePerPartition(self):
"""
Calculate the total volume of each compartment.
Returns
-------
volumePerPartition : dict
A dictionary with compartment names as keys and volumes (in mm^3) as the corresponding values.
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
volumePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
totalVolume = 0
for edgeIndex in visitedEdges:
radius, length= itemgetter('meanRadius', 'length')(edgeInfoDict[edgeIndex])
radius = radius * spacing * 1000 # mm
length = length * spacing * 1000 # mm
edgeVolume = np.pi * radius**2 * length # mm^3
totalVolume += edgeVolume
volumePerPartition[partitionName] = totalVolume
return volumePerPartition
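# Worked example (dimensions assumed): a single edge with meanRadius = 1 mm and
# length = 10 mm contributes np.pi * 1**2 * 10 ~ 31.4 mm^3; volumePerPartition
# sums this quantity over all edges visited by BFS() within each compartment.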
def showTerminatingPressureAndPathLength(self):
"""
Check terminating pressure vs path length relationship.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
terminatingNodes = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == maxDepth]
terminatingPressure = [nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000 for node in terminatingNodes] # mmHg
terminatingPathLength = []
for node in terminatingNodes:
path = nx.shortest_path(G, self.rootNode, node)
pathEdgeIndex = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in pathEdgeIndex]) # meter
terminatingPathLength.append(pathLength)
fig = plt.figure(1, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
plt.plot(terminatingPathLength, terminatingPressure, 'bo')
plt.xlabel('Path length (m)')
plt.ylabel('Terminating pressure (mmHg)')
plt.show()
def setupFluidEquations(self, boundaryCondition=None):
"""
Programmatically stores the info needed to generate the equations used for the fluid simulation (each dict represents one equation).
There are two kinds of equations: an H-W (Hazen-Williams) pressure equation for each edge and a flow conservation equation for each
bifurcating node, plus optional boundary-condition equations. For the H-W equation, the
information is stored in a dictionary as:
{'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
For the flow conservation equation, the information is stored as:
{'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
For the boundary conditions (inlet or outlet velocity), the information is stored as:
{'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
All of the units are SI units. The dictionaries that hold these equations are then stored in `eqnInfoDictList`.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing for neighborIndexIn in neighborsIndexIn]
radiusOutList = [edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [edgeInfoDict[neighborIndexIn]['argsIndex'] for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [edgeInfoDict[neighborIndexOut]['argsIndex'] for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': nodeInfoDict[node]['nodeIndex'], 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': edgeIndex}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = nodeInfoDict[headNode]['simulationData']['pressure']
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = nodeInfoDict[headNode]['argsIndex']
headNodeIndex = nodeInfoDict[headNode]['nodeIndex']
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = nodeInfoDict[tailNode]['simulationData']['pressure']
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
# print('Tail node {} has pressure={} mmHg'.format(tailNode, tailPressure/13560/9.8*1000))
else:
tailPressureIndex = nodeInfoDict[tailNode]['argsIndex']
tailNodeIndex = nodeInfoDict[tailNode]['nodeIndex']
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = info['edgeIndex']
velocityIn = info['velocityIn']
edge = edgeList[edgeIndex]
velocityIndex = edgeInfoDict[edgeIndex]['argsIndex']
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupFluidEquationsMatLab(self, boundaryCondition=None):
"""
Programmatically stores the info to generate the conservation equations used for fluid simulation (each dict represents an equation).
Note that the Python-MATLAB bridge only accepts generic Python types, and thus all numpy types need to be converted.
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = []
numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns = 0, 0, 0
for node in G.nodes():
if nodeInfoDict[node]['isBifurcatingNode']:
neighborsIndexIn = [G[node][neighborIn]['edgeIndex'] for neighborIn in G.neighbors(node) if 'depth' in G.node[neighborIn] and G.node[neighborIn]['depth'] < G.node[node]['depth']]
neighborsIndexOut = [G[node][neighborOut]['edgeIndex'] for neighborOut in G.neighbors(node) if 'depth' in G.node[neighborOut] and G.node[neighborOut]['depth'] > G.node[node]['depth']]
radiusInList = [float(edgeInfoDict[neighborIndexIn]['meanRadius'] * spacing) for neighborIndexIn in neighborsIndexIn]
radiusOutList = [float(edgeInfoDict[neighborIndexOut]['meanRadius'] * spacing) for neighborIndexOut in neighborsIndexOut]
velocityInIndexList = [int(edgeInfoDict[neighborIndexIn]['argsIndex']) for neighborIndexIn in neighborsIndexIn]
velocityOutIndexList = [int(edgeInfoDict[neighborIndexOut]['argsIndex']) for neighborIndexOut in neighborsIndexOut]
if len(radiusInList) != 0 and len(radiusOutList) != 0: # Exclude the nodes at inlet and outlet
eqnInfoDict = {'type': 'flow', 'velocityInIndexList': velocityInIndexList, 'radiusInList': radiusInList,
'velocityOutIndexList': velocityOutIndexList, 'radiusOutList': radiusOutList, 'coord': nodeInfoDict[node]['coord'],
'nodeIndex': int(nodeInfoDict[node]['nodeIndex']), 'neighborsInEdgeIndex': neighborsIndexIn, 'neighborsOutEdgeIndex': neighborsIndexOut}
eqnInfoDictList.append(eqnInfoDict)
numOfFlowEqns += 1
else:
print('node={}, len(radiusInList)={}, len(radiusOutList)={}'.format(node, len(radiusInList), len(radiusOutList)))
for edgeIndex in edgeIndexList:
edge = edgeList[edgeIndex]
radius = float(edgeInfoDict[edgeIndex]['meanRadius'] * spacing)
length = float(edgeInfoDict[edgeIndex]['length'] * spacing)
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
c, k = float(edgeInfoDict[edgeIndex]['c']), float(edgeInfoDict[edgeIndex]['k'])
eqnInfoDict = {'type': 'pressure', 'radius': radius, 'length': length, 'velocityIndex': velocityIndex, 'c': c, 'k': k, 'edgeIndex': int(edgeIndex)}
if nodeInfoDict[edge[0]]['depth'] < nodeInfoDict[edge[-1]]['depth']:
headNode, tailNode = edge[0], edge[-1]
else:
headNode, tailNode = edge[-1], edge[0]
# head pressure
if nodeInfoDict[headNode]['isEntryNode'] is True or G.degree(headNode) == 1:
headPressure = float(nodeInfoDict[headNode]['simulationData']['pressure'])
eqnInfoDict['headPressureInfo'] = {'pressure': headPressure}
else:
headPressureIndex = int(nodeInfoDict[headNode]['argsIndex'])
headNodeIndex = int(nodeInfoDict[headNode]['nodeIndex'])
eqnInfoDict['headPressureInfo'] = {'pressureIndex': headPressureIndex, 'nodeIndex': headNodeIndex}
# tail pressure
if nodeInfoDict[tailNode]['isEntryNode'] is True or G.degree(tailNode) == 1:
tailPressure = float(nodeInfoDict[tailNode]['simulationData']['pressure'])
eqnInfoDict['tailPressureInfo'] = {'pressure': tailPressure}
else:
tailPressureIndex = int(nodeInfoDict[tailNode]['argsIndex'])
tailNodeIndex = int(nodeInfoDict[tailNode]['nodeIndex'])
eqnInfoDict['tailPressureInfo'] = {'pressureIndex': tailPressureIndex, 'nodeIndex': tailNodeIndex}
eqnInfoDictList.append(eqnInfoDict)
numOfPressureEqns += 1
if boundaryCondition is not None and len(boundaryCondition) != 0 and 'pressureIn' not in boundaryCondition:
for boundaryNode, info in boundaryCondition.items():
edgeIndex = int(info['edgeIndex'])
velocityIn = float(info['velocityIn'])
edge = edgeList[edgeIndex]
velocityIndex = int(edgeInfoDict[edgeIndex]['argsIndex'])
eqnInfoDict = {'type': 'boundary', 'velocityIndex': velocityIndex, 'velocityIn': velocityIn}
eqnInfoDictList.append(eqnInfoDict)
numOfBoundaryConditionEqns += 1
print('There are {} flow eqns, {} pressure eqns and {} boundary condition eqns'.format(numOfFlowEqns, numOfPressureEqns, numOfBoundaryConditionEqns))
self.eqnInfoDictList = eqnInfoDictList
def setupEquationsForDistributeFlow(self):
"""
Set up equations for distributeFlowTest(). This function is unfinished. TODO
The result is stored in `distributeFlowEqnDict`, which contains three fields:
-- 'connectInfoDictList' --
It is a list of dicts and each dict represents an edge and it contains:
-- 'connection' -- In the form of [headNode, edgeIndex, tailNode]
-- 'edgeInfo' -- Contains subfields 'c'/'k'/'radius'/'length'
-- 'mergeInfoDict' --
Each merging node is a key and the corresponding value is empty (for now)
-- 'desiredTerminatingPressures' --
Each terminating node is a key and the corresponding value is the desired terminating pressure for that node
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
edgeList = self.edgeList
spacing = self.spacing
distributeFlowEqnDict = {'connectInfoDictList': [], 'mergeInfoDict': {}, 'desiredTerminatingPressures': {}}
edgeDepthArray = np.array([edgeInfoDict[edgeIndex]['depth'] for edgeIndex in edgeIndexList])
edgeIndexListSorted = np.array(edgeIndexList)[edgeDepthArray.argsort()].tolist()
for edgeIndex in edgeIndexListSorted:
edge = edgeList[edgeIndex]
headNode, tailNode = edge
if nodeInfoDict[headNode]['depth'] > nodeInfoDict[tailNode]['depth']:
headNode, tailNode = tailNode, headNode
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing
length = edgeInfoDict[edgeIndex]['length'] * spacing
c, k = edgeInfoDict[edgeIndex]['c'], edgeInfoDict[edgeIndex]['k']
distributeFlowEqnDict['connectInfoDictList'].append({'connection': [headNode, edgeIndex, tailNode], 'edgeInfo': {'radius': radius, 'length': length, 'c': c, 'k': k}})
for currentNode in G.nodes():
parentNodes = [node for node in G[currentNode].keys() if nodeInfoDict[node]['depth'] < nodeInfoDict[currentNode]['depth']]
if len(parentNodes) > 1:
distributeFlowEqnDict['mergeInfoDict'][currentNode] = {}
for node in G.nodes():
if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0:
distributeFlowEqnDict['desiredTerminatingPressures'][node] = 13560*9.8*0.12 # Pascal
print(edgeIndexListSorted)
print(distributeFlowEqnDict['mergeInfoDict'])
# Save #
self.distributeFlowEqnDict = distributeFlowEqnDict
def validateFluidEquations(self, velocityPressure=None, boundaryCondition=None):
"""
Validate whether all of the equations generated by `setupFluidEquations` are satisfied. This function prints the error for
each equation together with the corresponding details. Note that the error for each equation is amplified in the same way as
in the function `computeNetworkDetail`.
Parameters
----------
velocityPressure : list
A list of velocities and pressures in the form of [v0, v1,..., vN, p0, p1,..., pN].
"""
directory = self.directory
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
eqnInfoDictList = self.eqnInfoDictList
if velocityPressure is None:
velocityPressure = self.velocityPressure
counter = 0
pressureErrorList, flowErrorList = [], []
pressureErrorTrueList, flowErrorTrueList = [], []
for eqnInfoDict in eqnInfoDictList:
eqnType = eqnInfoDict['type']
if eqnType == 'pressure':
radius, length, velocityIndex, edgeIndex = itemgetter('radius', 'length', 'velocityIndex', 'edgeIndex')(eqnInfoDict)
velocity = np.abs(velocityPressure[velocityIndex])
c, k = eqnInfoDict['c'], eqnInfoDict['k']
if 'pressure' in eqnInfoDict['headPressureInfo']:
headPressure = eqnInfoDict['headPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['headPressureInfo']:
pressureIndex = eqnInfoDict['headPressureInfo']['pressureIndex']
headPressure = velocityPressure[pressureIndex]
headPressureInmmHg = headPressure / 13560 / 9.8 * 1000
if 'pressure' in eqnInfoDict['tailPressureInfo']:
tailPressure = eqnInfoDict['tailPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['tailPressureInfo']:
pressureIndex = eqnInfoDict['tailPressureInfo']['pressureIndex']
tailPressure = velocityPressure[pressureIndex]
tailPressureInmmHg = tailPressure / 13560 / 9.8 * 1000
deltaPressureByNode = np.abs(headPressure - tailPressure)
deltaPressureByHW = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704
error = np.abs(deltaPressureByNode - deltaPressureByHW)
deltaPressureByHWInmmHg = deltaPressureByHW / 13560 / 9.8 * 1000
errorInmmHg = error / 13560 / 9.8 * 1000
pressureErrorList.append(errorInmmHg * 500)
pressureErrorTrueList.append(errorInmmHg)
print('error={:.4f} mmHg, headP={:.2f} mmHg, tailP={:.2f} mmHg, headP>tailP={}, deltaPByHW={:.2f} mmHg, velocity={:.3f} cm/s, radius={:.4f} cm, length={:.4f} cm, edgeIndex={}'.format(errorInmmHg,
headPressureInmmHg, tailPressureInmmHg, headPressure>tailPressure, deltaPressureByHWInmmHg, velocity*100, radius*100, length*100, edgeIndex))
if headPressure <= tailPressure:
counter += 1
elif eqnType == 'flow':
velocityInIndexList, radiusInList = eqnInfoDict['velocityInIndexList'], eqnInfoDict['radiusInList']
velocityOutIndexList, radiusOutList = eqnInfoDict['velocityOutIndexList'], eqnInfoDict['radiusOutList']
neighborsInEdgeIndex, neighborsOutEdgeIndex = itemgetter('neighborsInEdgeIndex', 'neighborsOutEdgeIndex')(eqnInfoDict)
velocityInList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityInIndexList]
velocityOutList = [np.abs(velocityPressure[velocityIndex]) for velocityIndex in velocityOutIndexList]
flowIn = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityInList, radiusInList)])
flowOut = np.sum([velocity * np.pi * radius**2 for velocity, radius in zip(velocityOutList, radiusOutList)])
error = np.abs(flowIn -flowOut)
inVel = [np.round(100*vel, 4) for vel in velocityInList]
inR = [np.round(100*r, 4) for r in radiusInList]
inFlow = np.round(flowIn*10**6, 4)
outVel = [np.round(100*vel, 4) for vel in velocityOutList]
outR = [np.round(100*r, 4) for r in radiusOutList]
outFlow = np.round(flowOut*10**6, 4)
errorT = np.round(error*10**6, 4)
coord = eqnInfoDict['coord']
flowErrorList.append(error * 10**6 * 20000)
flowErrorTrueList.append(error * 10**6)
print('error={} cm^3/s, inVel={} cm/s, inR={} cm, inFlow={} cm^3/s, outVel={} cm/s, outR={} cm, outFlow={} cm^3/s, coord={}'.format(errorT, inVel, inR, inFlow, outVel, outR, outFlow, coord))
elif eqnType == 'boundary':
velocityIndex, velocityIn = eqnInfoDict['velocityIndex'], eqnInfoDict['velocityIn']
velocityActual = np.abs(velocityPressure[velocityIndex])
error = np.abs(velocityActual - velocityIn)
print('error={}, desired inlet velocity={} cm/s, actual velocity={} cm/s'.format(error, velocityIn*100, velocityActual*100))
totalErrorList = pressureErrorList + flowErrorList
totalError = norm(totalErrorList)
print('There are {} flow eqns where headPressure<=tailPressure'.format(counter))
print('Pressure error: mean+-std={}+-{} mmHg, min={} mmHg, max={} mmHg'.format(np.mean(pressureErrorTrueList), np.std(pressureErrorTrueList), np.amin(pressureErrorTrueList), np.max(pressureErrorTrueList)))
print('Flow error: mean+-std={}+-{} cm^3/s, min={} cm^3/s, max={} cm^3/s'.format(np.mean(flowErrorTrueList), np.std(flowErrorTrueList), np.amin(flowErrorTrueList), np.max(flowErrorTrueList)))
print('Combined error (magnified): {}'.format(totalError))
def BFS(self, startNodes, boundaryNodes):
"""
Starting from the given node(s), visit nodes at larger depth in a BFS fashion.
Parameters
----------
startNodes : list
A list of nodes to start with.
boundaryNodes : list
A list of nodes used as the boundary.
Returns
-------
resultDict : dict
A dictionary containing the indices of visited edges and nodes.
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
visitedNodes, visitedEdges = [], []
for startNode in startNodes:
nodesAtCurrentDepth = [startNode]
while len(nodesAtCurrentDepth) != 0:
nodesAtNextDepth = []
for currentNode in nodesAtCurrentDepth:
visitedNodes.append(currentNode)
newNodes = [node for node in G[currentNode].keys() if nodeInfoDict[currentNode]['depth'] < nodeInfoDict[node]['depth'] and node not in boundaryNodes and node not in visitedNodes]
newEdges = [G[currentNode][newNode]['edgeIndex'] for newNode in newNodes]
nodesAtNextDepth += newNodes
visitedEdges += newEdges
nodesAtCurrentDepth = nodesAtNextDepth
resultDict = {'visitedNodes': visitedNodes, 'visitedEdges': visitedEdges}
return resultDict
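# Usage sketch (start/boundary nodes follow the partitionInfo dicts used
# elsewhere in this class, e.g. LMCA starts at node 4 and is bounded by node 10):
#     resultDict = self.BFS([4], [10])
#     visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)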
def calculateVariableBounds(self):
"""
Calculate the pressure bound for each node and velocity bound for each branch (because pressure at child nodes
cannot be higher than that of the parent node).
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxDepth = np.max([info['depth'] for node, info in nodeInfoDict.items()])
for currentDepth in range(maxDepth-1, 0, -1):
nodesAtCurrentDepth = [node for node in G.nodes() if nodeInfoDict[node]['depth'] == currentDepth and G.degree(node) != 1]
for nodeAtCurrentDepth in nodesAtCurrentDepth:
childNodes = [node for node in G[nodeAtCurrentDepth].keys() if nodeInfoDict[node]['depth'] > currentDepth]
minPressureAtChildNodes = [nodeInfoDict[node]['simulationData']['minPressure'] if 'argsIndex' in nodeInfoDict[node] else nodeInfoDict[node]['simulationData']['pressure'] for node in childNodes]
nodeInfoDict[nodeAtCurrentDepth]['simulationData']['minPressure'] = np.amax(minPressureAtChildNodes)
# print('minPressure for node {} is set'.format(nodeAtCurrentDepth))
# Save #
self.nodeInfoDict = nodeInfoDict
def perturbNetwork(self, option=1, extraInfo=None):
"""
Perturb the network in various ways
Option=1: randomly choose {numOfEdgesToPerturb} branches and decrease the radius by {reducePercentage}
Option=2: use the radius from year={perturbedYear}
Option=3: radius of the edges in {partitionToPerturb} are decreased by {reducePercentage}
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if option == 1:
if extraInfo is None:
numOfEdgesToPerturb = 5
reducePercentage = 30
else:
numOfEdgesToPerturb, reducePercentage = itemgetter('numOfEdgesToPerturb', 'reducePercentage')(extraInfo)
edgeIndexToPerturb = np.random.choice(edgeIndexList, numOfEdgesToPerturb)
for edgeIndex in edgeIndexToPerturb:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
elif option == 2:
perturbedYear, excludedEdgeIndex = itemgetter('perturbedYear', 'excludedEdgeIndex')(extraInfo)
self.loadNetwork(version=4, year=perturbedYear)
resultDict = self.loadedNetwork
GOld, segmentList, partitionInfo, chosenVoxels, segmentInfoDictOld, nodeInfoDictOld, resultADANDict = itemgetter('G', 'segmentList', 'partitionInfo', 'chosenVoxels', 'segmentInfoDict', 'nodeInfoDict', 'resultADANDict')(resultDict)
for edgeIndex in edgeIndexList:
if edgeIndex not in excludedEdgeIndex:
segmentIndex = edgeInfoDict[edgeIndex]['segmentIndex'] # segmentIndex is the index of the edges in the old files
perturbedRadius = segmentInfoDictOld[segmentIndex]['meanRadius']
edgeInfoDict[edgeIndex]['meanRadius'] = perturbedRadius
elif option == 3:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []},
'ACA': {'startNodes': [10], 'boundaryNodes': []}}
partitionToPerturb, reducePercentage = itemgetter('partitionToPerturb', 'reducePercentage')(extraInfo)
for partitionName, info in partitionInfo.items():
if partitionName in partitionToPerturb:
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
for edgeIndex in visitedEdges:
edgeInfoDict[edgeIndex]['meanRadius'] *= (1 - reducePercentage / 100)
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def perturbTerminatingPressure(self, option=1, extraInfo=None):
"""
Perturb the terminating pressure in various ways
Option=1: pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
Option=2: No change
Option=3: All left compartments -30%, no change to all other compartments
Option=4: pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': -0.44}
Option=5: pressureDropChangePerPartition obtained from extraInfo
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
if option == 1:
pressureDecreasePerPartition = {'LMCA': 0.3, 'RMCA': -0.01, 'ACA': 0.05, 'LPCA': -0.02, 'RPCA': 0.02}
elif option == 2:
pressureDecreasePerPartition = {'LMCA': 0, 'RMCA': 0, 'ACA': 0, 'LPCA': 0, 'RPCA': 0}
elif option == 3:
pressureDecreasePerPartition = {'LMCA': -0.3, 'RMCA': 0, 'ACA': 0, 'LPCA': -0.3, 'RPCA': 0}
elif option == 4:
pressureDropChangePerPartition = {'LMCA': 0.14, 'RMCA': -0.45, 'ACA': -0.26, 'LPCA': 0.095, 'RPCA': 0.44}
elif option == 5:
pressureDropChangePerPartition = extraInfo['pressureDropChangePerPartition']
rootPressure = 13560*9.8*0.12 # Pa
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
if option in [1,2,3]:
decreaseAmount = pressureDecreasePerPartition[partitionName]
nodeInfoDict[terminatingNode]['simulationData']['pressure'] *= (1-decreaseAmount)
elif option in [4, 5]:
changeAmount = pressureDropChangePerPartition[partitionName]
oldPressure = nodeInfoDict[terminatingNode]['simulationData']['pressure']
newPressure = rootPressure - (rootPressure - oldPressure) * (1+changeAmount)
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = newPressure
# terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
# terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
# print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
def printTerminatingPressurePerPartition(self, partitionInfo=None):
"""
Print out terminating pressures in each compartment.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
if partitionInfo is None:
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
terminatingPressuresInThisPartition.append(np.round(nodeInfoDict[terminatingNode]['simulationData']['pressure']/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
def setTerminatingPressure(self, option=1, extraInfo=None):
"""
Set the terminating pressure based on the terminating pressure vs path length relationship found in ADAN.
Note: make sure to use the right slope!!!
Option=1: all partitions use the slope from ADAN dataset
Option=2: use custom slope for each partition
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
ADANFolder = os.path.abspath(join(directory, '../../../../'))
with open(join(ADANFolder, 'ADAN-Web/resultADANDict.pkl'), 'rb') as f:
resultADANDict = pickle.load(f)
print('resultADANDict.pkl loaded from {}'.format(ADANFolder))
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'pressureIn': 13560*9.8*0.115},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'pressureIn': 13560*9.8*0.115}}
# Use the slope and intercept from the ADAN dataset
if option == 1:
slopePressurePathLength, interceptPressurePathLength = itemgetter('slopePressurePathLength', 'interceptPressurePathLength')(resultADANDict)
print('slope={}, intercept={}'.format(slopePressurePathLength, interceptPressurePathLength))
fitResultPerPartition = {'LMCA': [slopePressurePathLength, interceptPressurePathLength], 'RMCA': [slopePressurePathLength, interceptPressurePathLength],
'LPCA': [slopePressurePathLength, interceptPressurePathLength], 'RPCA': [slopePressurePathLength, interceptPressurePathLength],
'ACA': [slopePressurePathLength, interceptPressurePathLength]}
# Use the slope and intercept fitted from a ground truth solution
elif option == 2:
fitResultPerPartition = extraInfo['fitResultPerPartition']
elif option == 3:
pass
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes, pressureIn = itemgetter('startNodes', 'boundaryNodes', 'pressureIn')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = []
slopePressurePathLength, interceptPressurePathLength = fitResultPerPartition[partitionName]
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
pressure = pressureIn + pathLength * slopePressurePathLength * 0.8
nodeInfoDict[terminatingNode]['simulationData']['pressure'] = pressure
terminatingPressuresInThisPartition.append(np.round(pressure/13560/9.8*1000, 2)) # mmHg
terminatingPressuresInThisPartition = list(sorted(terminatingPressuresInThisPartition))
print('Terminating pressures in {} are {} mmHg'.format(partitionName, terminatingPressuresInThisPartition))
self.nodeInfoDict = nodeInfoDict
def fitTerminatingPressureToPathLength(self, showFittingResult=False, figIndex=1, isLastFigure=False):
"""
Extract the terminating pressures from the existing fluid solution and fit them to path length per compartment.
Check the manual correction for LMCA!
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'color': 'r'}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'color': 'g'},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'color': 'b'}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'color': 'y'},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'color': 'c'}}
fitResultPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
terminatingPressurePerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
pathLengthPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = [nodeInfoDict[node]['simulationData']['pressure'] for node in terminatingNodesInThisPartition] # Pascal
pathLengthInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing for edgeIndex in uniquePathEdgeIndexList]) # meter
pathLengthInThisPartition.append(pathLength)
# Check this manual correction!
# if partitionName == 'LMCA':
# terminatingPressuresInThisPartition = [val for val in terminatingPressuresInThisPartition if val <= 13560*9.8*0.1]
# pathLengthInThisPartition = [val1 for val1, val2 in zip(pathLengthInThisPartition, terminatingPressuresInThisPartition) if val2 <= 13560*9.8*0.1]
terminatingPressurePerPartition[partitionName] = terminatingPressuresInThisPartition
pathLengthPerPartition[partitionName] = pathLengthInThisPartition
# slopeTerminatingPressureVSPathLength, interceptTerminatingPressureVSPathLength = np.polyfit(pathLengthInThisPartition, terminatingPressuresInThisPartition, 1)
slopePressurePathLength, interceptPressurePathLength, rSqPressurePathLength, pPressurePathLength, stdErrorPressurePathLength = stats.linregress(pathLengthInThisPartition, terminatingPressuresInThisPartition)
print('{}: slopePressurePathLength={} Pa/m, interceptPressurePathLength={} Pa, rSquared={}, pValue={}'.format(partitionName, slopePressurePathLength, interceptPressurePathLength, rSqPressurePathLength, pPressurePathLength))
fitResultPerPartition[partitionName] = [slopePressurePathLength, interceptPressurePathLength]
if showFittingResult:
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
ax = fig.add_subplot(1,5,1)
for partitionName, info in partitionInfo.items():
terminatingPressuresInThisPartition = terminatingPressurePerPartition[partitionName]
pathLengthInThisPartition = pathLengthPerPartition[partitionName]
xValues = [val * 1000 for val in pathLengthInThisPartition] # mm
yValues = [val / 13560 / 9.8 * 1000 for val in terminatingPressuresInThisPartition] # mmHg
color = info['color']
ax.scatter(xValues, yValues, c=color, label=partitionName)
ax.set_xlabel('Path length (mm)')
ax.set_ylabel('Terminating pressure (mmHg)')
ax.legend(prop={'size': 6})
if isLastFigure:
plt.show()
return fitResultPerPartition
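# Illustrative usage sketch (assumption: `self` is an instance of the surrounding network class and a fluid
# solution has already been written into nodeInfoDict):
#     fitResult = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
#     slope, intercept = fitResult['LMCA']                  # Pa/m and Pa, from scipy.stats.linregress
#     predictedPressure = slope * pathLength + intercept    # pathLength in meters
# The fit itself is an ordinary least-squares line through the (pathLength, terminatingPressure) pairs of each partition.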
def updateNetworkWithSimulationResult(self, velocityPressure):
"""
Update the flow rate and pressure in `edgeInfoDict` and `nodeInfoDict` with the given `velocityPressure`.
"""
G = self.G
edgeIndexList = self.edgeIndexList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
nodeInfoDict[node]['simulationData']['pressure'] = velocityPressure[argsIndex]
for edgeIndex in edgeIndexList:
if 'argsIndex' in edgeInfoDict[edgeIndex]:
argsIndex = edgeInfoDict[edgeIndex]['argsIndex']
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = velocityPressure[argsIndex] # m/s
flow = velocity * np.pi * radius**2
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
# Save
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
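# Illustrative note: the flow recovered above assumes a circular cross-section,
#     flow [m^3/s] = velocity [m/s] * pi * radius^2 [m^2]
# e.g. (hypothetical numbers) velocity = 0.4 m/s through a vessel of radius 1 mm gives
#     0.4 * np.pi * (0.001)**2 ~ 1.26e-6 m^3/s ~ 1.26 cm^3/s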
def loadFluidResult(self, loadFileName, return_ResultDict=False):
"""
Load the saved fluid simulation result.
For use with GBMTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResult')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
if return_ResultDict is False:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
else:
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed, resultDict
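# Illustrative usage sketch (hypothetical file name, assuming it was produced earlier by GBMTest with saveResult=True):
#     nodeInfoDictP, edgeInfoDictP, velocityPressureP = self.loadFluidResult('fluidSimulationResult(referenceYear=BraVa, perturbedYear=2013).pkl')
#     self.nodeInfoDict = nodeInfoDictP
#     self.edgeInfoDict = edgeInfoDictP
# The returned velocityPressure vector is filled by `argsIndex` (edge velocities and node pressures share one
# index space), so it can be reused directly as an initial guess for the solver.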
def loadFluidResult2(self, loadFileName):
"""
Load the saved fluid simulation result.
For use with computeNetworkTest()
"""
directory = self.directory
loadFolderPath = join(directory, 'fluidSimulationResultRandomNetwork')
# loadFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(loadFolderPath, loadFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}'.format(loadFileName, loadFolderPath))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
nodeInfoDictPerturbed, edgeInfoDictPerturbed = itemgetter('nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
numOfNodes = len([node for node in nodeInfoDictPerturbed if 'argsIndex' in nodeInfoDictPerturbed[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDictPerturbed[edgeIndex]])
velocityPressurePerturbed = [0] * (numOfNodes + numOfEdges)
for node in G.nodes():
info = nodeInfoDictPerturbed[node]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
pressure = info['simulationData']['pressure']
velocityPressurePerturbed[argsIndex] = pressure
for edgeIndex in edgeIndexList:
info = edgeInfoDictPerturbed[edgeIndex]
if 'argsIndex' in info:
argsIndex = info['argsIndex']
velocity = info['simulationData']['velocity']
velocityPressurePerturbed[argsIndex] = velocity
return nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed
def GBMTest(self, saveResult=False):
"""
Create a GBM network with radii following the BraVa distribution, generate a ground truth solution, perturb the network
in a particular way while keeping the terminating pressures unchanged, and then try to solve the perturbed network.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
success = self.createGroundTruth()
self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
edgeNameDict = {0: 'LICA', 3: 'LICA', 2: 'RICA', 7: 'RICA', 1: 'VA', 4: 'RPCA\nComm', 8: 'LMCA', 9: 'LM', 11: 'RM', 10: 'RMCA', 5: 'LPCA', 6: 'RPCA', 20: 'ACA'}
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeDepth
nodeLabelDict = {} # None
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeDepth
nodeValueList = [0 for node in G.nodes()] # None
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeDepth
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()} # edge radius
edgeLabelDict = {edge: edgeNameDict[G[edge[0]][edge[1]]['edgeIndex']] if G[edge[0]][edge[1]]['edgeIndex'] in edgeNameDict else '' for edge in G.edges()} # edge name
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeDepth
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['meanRadius']*spacing*1000, 2) for edge in G.edges()] # edgeIndex
edgeValueList = [0 for edge in G.edges()] # None
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': [],
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': [],
'figTitle': 'Major branch name'}
self.plotNetwork(infoDict, figIndex=2, isLastFigure=True)
return # NOTE: early return after plotting the branch-name figure; the remainder of this function is currently unreachable
# print(G.edges(data=True))
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# Load previous optimization result #
loadFileName = 'fluidSimulationResult3(referenceYear=BraVa, perturbedYear=2013).pkl'
nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed = self.loadFluidResult(loadFileName)
velocityPressureInit = velocityPressurePerturbed
self.nodeInfoDict = nodeInfoDictPerturbed
self.edgeInfoDict = edgeInfoDictPerturbed
computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
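# Note on the initial guess above (a sketch of the layout, assuming the usual argsIndex ordering of edges then nodes):
#     velocityPressureInit = [v_0, ..., v_{numOfEdges-1}, p_0, ..., p_{numOfNodes-1}]
# with every edge velocity initialised to 0.4 m/s and the node pressures spread linearly from
# 0.8 * pressureIn down to 0.5 * pressureIn (Pascal).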
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = self.nodeInfoDict[node]['argsIndex']
minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
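# Solver setup sketch: basinhopping repeatedly perturbs the current solution (random steps of size `stepsize`,
# accepted with a Metropolis criterion at temperature `T`) and runs a local BFGS minimisation of
# computeNetworkDetail after each hop; `niter_success` stops the run early once the best cost has not improved
# for that many consecutive hops. The commented-out L-BFGS-B variant below would additionally enforce
# boundsVelocityPressure.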
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=100, stepsize=50, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}'.format(extraInfo['perturbedYear'])}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict, 'G': G}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult(referenceYear={}, perturbedYear={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'])
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest2(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
Perturb the terminating pressures in a specific way and check whether the new system can still be solved.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]} # perturbTerminatingPressureOption=2
# perturbTerminatingPressureOption = 1
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
self.printTerminatingPressurePerPartition()
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# GBM_BraVa_Reference flow_perturbTerminatingPressureOption=4_GBMTest2
# GBM_2013_Solved flow_perturbTerminatingPressureOption=4_GBMTest2
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM {}, TPOption={}'.format(extraInfo['perturbedYear'], perturbTerminatingPressureOption)} # TP->terminating pressure
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbedYear'] = {'year': 2013, 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest2(referenceYear={}, perturbedYear={}, perturbTerminatingPressureOption={}).pkl'.format(resultDict['referenceYear']['year'], resultDict['perturbedYear']['year'], perturbTerminatingPressureOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest3(self, perturbTerminatingPressureOption=1, saveResult=False):
"""
Test the solver by re-solving the unperturbed BraVa network and comparing against its ground truth solution.
flowResult_referenceYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=1_GBMTest3
flowResult_referenceYear(BraVa)_groundTruthOption=2_GBMTest3
flowResult_solvedYear(BraVa)_groundTruthOption=2_GBMTest3
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}, 'solvedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
success = self.createGroundTruth(option=2)
# self.showFlowInfo()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G)}
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
# perturbTerminatingPressureOption = 1
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# self.perturbTerminatingPressure(option=perturbTerminatingPressureOption)
# self.showFlowInfo()
# computeNetworkDetailExtraInfo = None
# computeNetworkDetailExtraInfo = {'excludedEdgeIndex': [0,1,2,3,4,5,6,7,10,11,12,13]}
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# velocityPressureInit = self.getVelocityPressure() # Get velocityPressure from ground truth solution
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# computeNetworkDetail(velocityPressureInit, eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo)
# self.validateFluidEquations(velocityPressure=velocityPressureInit)
# print(list(zip(self.velocityPressureGroundTruth, velocityPressureInit)))
# return
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=10, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM Solved'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest3(referenceYear={}, solvedYear={}, groundTruthOption=2).pkl'.format(resultDict['referenceYear']['year'], resultDict['solvedYear']['year'])
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest4(self, perturbNetworkOption=1, saveResult=False):
"""
Perturb the radii in a specific way, set the terminating pressures (TP) using the path-length relationship, and solve the network.
perturbNetworkOption=1: all LMCA edge radii decrease by 10%
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
extraInfo = {'partitionToPerturb': ['LMCA'], 'reducePercentage': 10}
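# Radius perturbation sketch (assuming perturbNetwork(option=1) honours `partitionToPerturb` and
# `reducePercentage` as named here): every edge radius r in the LMCA partition becomes
#     r_new = r * (1 - reducePercentage / 100) = 0.9 * r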
self.perturbNetwork(option=perturbNetworkOption, extraInfo=extraInfo)
self.setNetwork(option=2)
self.createGroundTruth(option=2)
self.setTerminatingPressure(option=1, extraInfo=None)
computeNetworkDetailExtraInfo = None
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
self.updateNetworkWithSimulationResult(velocityPressure)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM BraVa, perturbNetworkOption={}'.format(perturbNetworkOption)}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['solvedYear'] = {'year': 'BraVa', 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'G': copy.deepcopy(G), 'velocityPressure': copy.deepcopy(velocityPressure)}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultGBMTest4(solvedYear=BraVa, perturbNetworkOption={}).pkl'.format(perturbNetworkOption)
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def GBMTest5(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radii (in different ways) for
the time points in between, and change the terminating pressures based on the volume change of each compartment.
numOfTimeSteps has to be >= 2 (including the two end time steps).
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a tanh curve (bent
upwards); interpolationOption=3 (a logistic curve bent downwards) is described but not implemented below.
Saved Result:
fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
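# Interpolation sketch for a single edge (hypothetical numbers): with radiusHead = 2.0, radiusTail = 1.0 and
# numOfTimeSteps = 4, the intermediate steps ii = 1, 2 are filled as
#   option 1 (linear): r(ii) = radiusHead + (radiusTail - radiusHead) * ii / (numOfTimeSteps - 1)
#                      -> r(1) ~ 1.67, r(2) ~ 1.33
#   option 2 (tanh):   r(ii) = radiusHead + (radiusTail - radiusHead) * np.tanh(2 * ii / (numOfTimeSteps - 1))
#                      -> r(1) ~ 2.0 - tanh(0.667) ~ 1.42, r(2) ~ 2.0 - tanh(1.333) ~ 1.13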
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# T0 corresponds to the reference (BraVa) case; note that the loop below is currently hard-coded to range(4, 5), i.e. only the last time step, which assumes numOfTimeSteps >= 5
for currentTimeStep in range(4, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
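# Worked example of the mapping above (hypothetical numbers, and assuming perturbTerminatingPressure(option=5)
# interprets a negative value as a smaller pressure drop): if a compartment's volume grows by 20% relative to
# ground truth, volumeChange = +0.20 and pressureDropChangePerPartition = -0.20, i.e. its terminating-pressure
# drop is reduced by 20%; a shrinking compartment increases the drop instead.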
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
def GBMTest5b(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radii (in different ways) for
the time points in between, TODO !!!
numOfTimeSteps has to be >= 2 (including the two end time steps).
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a tanh curve (bent
upwards); interpolationOption=3 (a logistic curve bent downwards) is described but not implemented below.
Saved Result:
fluidSimulationResultTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# T0 corresponds to the reference (BraVa) case but is solved as well to make a record; the hard-coded range(0, 5) below assumes numOfTimeSteps >= 5
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
print(pressureDropChangePerPartition)
def GBMTest6(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Exactly the same as GBMTest5, but with slightly tweaked solver settings, to see whether the results can be improved.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# T0 corresponds to the reference (BraVa) case but is solved as well to make a record; the hard-coded range(0, 5) below assumes numOfTimeSteps >= 5
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
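# Compared with GBMTest5, the basinhopping hyperparameters below are loosened: a higher acceptance temperature
# T, a larger random stepsize, and niter_success=16, so the search accepts worse intermediate solutions more
# readily and explores further before stopping early.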
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=2000, stepsize=1000, interval=5, niter_success=16, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def computeNetworkTest(self, saveResult=False):
"""
Check whether the solver can correctly solve a system by first creating a ground truth model and then comparing the simulation result with it
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
directory = self.directory
resultDict = {'reference': {}, 'perturbed': {}}
self.generateNetwork(maxDepth=5, allowMerge=False)
self.setNetwork(option=1)
success = self.createGroundTruth()
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# self.showTerminatingPressureAndPathLength()
resultDict['reference'] = {'G': G, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check if this number is consistent with that in generateNetwork()!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
self.calculateVariableBounds()
for node in G.nodes():
if 'argsIndex' in nodeInfoDict[node]:
argsIndex = nodeInfoDict[node]['argsIndex']
minPressure = nodeInfoDict[node]['simulationData']['minPressure']
boundsVelocityPressure[argsIndex][0] = minPressure
boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
## intentionally perturb the inlet/terminating pressure away from the ground truth to see how the solver reacts
# self.nodeInfoDict[0]['simulationData']['pressure'] = 13560*9.8*0.12*(1-np.random.rand()*0.1) # perturb inlet pressure
## perturb terminating pressure
perturbPercent = 0.1
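# Each terminating (degree-1) node's pressure is scaled by a uniform random factor in
# [1 - perturbPercent/2, 1 + perturbPercent/2], i.e. +/-5% for perturbPercent = 0.1.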
for node in G.nodes():
if G.degree(node) == 1:
self.nodeInfoDict[node]['simulationData']['pressure'] *= (np.random.rand() * perturbPercent + 1 - perturbPercent / 2)
## Perturb radius
# extraInfo = {'numOfEdgesToPerturb': 10, 'reducePercentage': 30}
# self.perturbNetwork(option=1, extraInfo=extraInfo)
# least square optimization #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# optResult = least_squares(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod), bounds=boundsVelocityPressure, ftol=1e-9, xtol=1e-9)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.cost
# message = optResult.message
# minimize (L-BFGS-B), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'maxiter': 25000, 'maxfun': 25000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, method='L-BFGS-B', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# minimize (BFGS), bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# options = {'norm': 2, 'maxiter': 30000}
# optResult = minimize(computeNetworkDetail, velocityPressureInit, args=(eqnInfoDictList, fluidMethod, errorNorm), method='BFGS', options=options)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# basinhopping #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 0
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'options': {'norm': np.inf, 'maxiter': 30000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 30000, 'maxfun': 30000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=300, T=5, stepsize=5, interval=5, niter_success=20, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
# differential evolution, bounds in (min, max) pair form #
# self.setupFluidEquations()
# eqnInfoDictList = self.eqnInfoDictList
# errorNorm = 2
# optResult = differential_evolution(computeNetworkDetail, args=(eqnInfoDictList, fluidMethod, errorNorm), bounds=boundsVelocityPressure, maxiter=2000, polish=True, disp=True)
# velocityPressure = np.abs(optResult.x)
# cost = optResult.fun
# message = optResult.message
# Matlab fsolve #
# self.setupFluidEquationsMatLab()
# eqnInfoDictList = self.eqnInfoDictList
# import matlab.engine, io
# # eng = matlab.engine.start_matlab()
# eng = matlab.engine.connect_matlab()
# eng.addpath('/Users/zhuj10/Dropbox/NIH/Data/Ron Data/1358-Subject18016/fluidSimulationWithCoW')
# print(matlab.engine.find_matlab())
# out = io.StringIO()
# err = io.StringIO()
# solver = 'fsolve'
# solver = 'lsqnonlin'
# # solver = 'Validate'
# # velocityPressureGroundTruth = self.velocityPressureGroundTruth
# # velocityPressureInit = [float(p) for p in velocityPressureTrue]
# velocityPressureInit = [float(p) for p in velocityPressureInit]
# optResult = eng.performFluidSimulation4ForMatLab(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # optResult = eng.testMatLab1(eqnInfoDictList, solver, velocityPressureInit, stdout=out, stderr=err)
# # print(optResult)
# print(out.getvalue())
# print(err.getvalue())
# cost = optResult['error']
# message = optResult['message']
# velocityPressure = optResult['optParam'][0]
##
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
velocityPressureGroundTruth = self.velocityPressureGroundTruth
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
print(list(zip(velocityPressureGroundTruth, velocityPressure)))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
self.updateNetworkWithSimulationResult(velocityPressure)
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # simulated pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # simulated pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # simulated flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # simulated flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Simulated'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
resultDict['perturbed'] = {'G': G, 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResultRandomNetwork')
saveFileName = 'fluidSimulationResult.pkl'
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
def argsBoundTest(self):
"""
Test the function `calculateVariableBounds`
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
directory = self.directory
# Artificial network
# self.generateNetwork(maxDepth=5, allowMerge=False)
# self.setNetwork(option=1)
# GBM network
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth()
self.calculateVariableBounds()
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
for node in G.nodes():
if 'argsIndex' not in nodeInfoDict[node]:
pass
else:
if 'minPressure' not in nodeInfoDict[node]['simulationData']:
print('Node {} does not have minPressure'.format(node))
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) if 'argsIndex' not in nodeInfoDict[node] else np.round(nodeInfoDict[node]['simulationData']['minPressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) if 'argsIndex' not in nodeInfoDict[node] else np.round(nodeInfoDict[node]['simulationData']['minPressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
def distributeFlowTest(self):
"""
Find a way (by optimization) to distribute the flow in the entire network such that the resulting terminating
pressures match the desired values (they do not need to match exactly; the goal is simply to minimize the
difference between them). Unfinished!
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.showFlowInfo()
success = self.createGroundTruth()
if not success:
return
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: G.node[node]['depth'] for node in G.nodes()} # nodeLevel
# nodeLabelDict = {node: G.node[node]['nodeIndex'] for node in G.nodes()} # nodeIndex
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [G.node[node]['depth'] for node in G.nodes()] # nodeLevel
# nodeValueList = [G.node[node]['nodeIndex'] for node in G.nodes()] # nodeIndex
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['depth'] for edge in G.edges()} # edgeLevel
# edgeLabelDict = {edge: G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()} # edgeIndex
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [G[edge[0]][edge[1]]['depth'] for edge in G.edges()] # edgeLevel
# edgeValueList = [G[edge[0]][edge[1]]['edgeIndex'] for edge in G.edges()] # edgeIndex
# edgeValueList = [edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in edgeIndexList] # meanRadius
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node depth',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge depth',
'figTitle': 'GBM Reference'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# resultDict['referenceYear'] = {'year': 'BraVa', 'nodeInfoDict': nodeInfoDict, 'edgeInfoDict': edgeInfoDict}
##
self.setupEquationsForDistributeFlow()
def computeNetwork(self):
pass
def validateNetwork(self):
pass
def plotNetwork(self, infoDict: dict, figIndex: int=1, isLastFigure: bool=True, hideColorbar: bool=False):
"""
Plot the graph G in a tree structure. The colors of the nodes and edges reflect their corresponding values.
Parameters
----------
G : NetworkX graph
The graph to be plotted (taken from ``self.G``; not passed in as an argument).
infoDict : dict
A dictionary containing necessary information for plotting.
figIndex : int, optional
The figure index.
isLastFigure : bool, optional
If True, `plt.show()` will be executed.
hideColorbar : bool, optional
If True, the colorbars will be hidden.
"""
G = self.G
## Unpack infoDict ##
nodeLabelDict, nodeValueList = itemgetter('nodeLabelDict', 'nodeValueList')(infoDict)
edgeLabelDict, edgeValueList = itemgetter('edgeLabelDict', 'edgeValueList')(infoDict)
figTitle, nodeColorbarLabel, edgeColorbarLabel = itemgetter('figTitle', 'nodeColorbarLabel', 'edgeColorbarLabel')(infoDict)
## Calculate extra info ##
if 'vmin' not in infoDict or 'vmax' not in infoDict:
vmin, vmax = np.amin(nodeValueList), np.amax(nodeValueList)
else:
vmin, vmax = itemgetter('vmin', 'vmax')(infoDict)
if 'edge_vmin' not in infoDict or 'edge_vmax' not in infoDict:
edge_vmin, edge_vmax = np.amin(edgeValueList), np.amax(edgeValueList)
else:
edge_vmin, edge_vmax = itemgetter('edge_vmin', 'edge_vmax')(infoDict)
## Plot ##
fig = plt.figure(figIndex, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
pos = graphviz_layout(G, prog='dot')
ax = fig.add_axes([0.05, 0.05, 0.7, 0.9])
ax.set_title(figTitle)
ax.set_axis_off()
nodes = nx.draw_networkx_nodes(G, pos, node_size=250, node_color=nodeValueList, cmap=plt.cm.get_cmap('jet'), vmin=vmin, vmax=vmax)
edges = nx.draw_networkx_edges(G, pos, arrowstyle='-', arrowsize=10, edge_color=edgeValueList, edge_cmap=plt.cm.jet, edge_vmin=edge_vmin, edge_vmax=edge_vmax, width=2)
if len(nodeLabelDict) != 0:
nx.draw_networkx_labels(G, pos, labels=nodeLabelDict, font_size=8)
if len(edgeLabelDict) != 0:
nx.draw_networkx_edge_labels(G, pos, edge_labels=edgeLabelDict, font_size=8)
# node colorbar
if len(nodeColorbarLabel) != 0 and not hideColorbar:
# plt.colorbar(nodes, cmap=plt.cm.jet, label=nodeColorbarLabel)
ax1 = fig.add_axes([0.8, 0.05, 0.03, 0.9])
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=mpl.cm.jet, norm=norm, orientation='vertical')
cb1.set_label(nodeColorbarLabel, size=10)
cb1.ax.tick_params(labelsize=10)
# edge colorbar
if len(edgeColorbarLabel) != 0 and not hideColorbar:
ax2 = fig.add_axes([0.9, 0.05, 0.03, 0.9])
norm = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=mpl.cm.jet, norm=norm, orientation='vertical')
cb2.set_label(edgeColorbarLabel, size=10)
cb2.ax.tick_params(labelsize=10)
if isLastFigure:
plt.show()
def getNetwork(self):
return self.G
def compareNetworkPropertyTest(self):
"""
Compare the edge properties before and after perturbing the network.
GBM_Radius ratio vs Graph level_Compartment(5)_Single row
GBM_Radius ratio vs Graph level_Graph plot
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.nodeInfoDictBefore = copy.deepcopy(self.nodeInfoDict)
self.edgeInfoDictBefore = copy.deepcopy(self.edgeInfoDict)
## Solve the system with perturbed network properties
edgeIndexList = self.edgeIndexList
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.nodeInfoDictAfter = copy.deepcopy(self.nodeInfoDict)
self.edgeInfoDictAfter = copy.deepcopy(self.edgeInfoDict)
edgeIndexList = sorted(list(self.edgeInfoDict.keys()))
spacing = self.spacing
print('Edge difference before and after:')
for edgeIndex in edgeIndexList:
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(self.edgeInfoDictBefore[edgeIndex])
radiusBefore = np.round(radius * spacing * 1000, 3) # mm
lengthBefore = np.round(length * spacing * 100, 3) # cm
cBefore, kBefore = np.round(c, 3), np.round(k, 3)
radius, length, c, k = itemgetter('meanRadius', 'length', 'c', 'k')(self.edgeInfoDictAfter[edgeIndex])
radiusAfter = np.round(radius * spacing * 1000, 3) # mm
lengthAfter = np.round(length * spacing * 100, 3) # cm
cAfter, kAfter = np.round(c, 3), np.round(k, 3)
print('edgeIndex={}, radius={}/{} mm, length={}/{} cm, c={}/{}, k={}/{}'.format(edgeIndex, radiusBefore, radiusAfter, lengthBefore, lengthAfter, cBefore, cAfter, kBefore, kAfter))
G = self.G
for edge in G.edges():
edgeIndex = G[edge[0]][edge[1]]['edgeIndex']
radiusRatio = np.round(self.edgeInfoDictAfter[edgeIndex]['meanRadius'] / self.edgeInfoDictBefore[edgeIndex]['meanRadius'], 2)
self.edgeInfoDictAfter[edgeIndex]['radiusRatio'] = radiusRatio
self.edgeInfoDictBefore[edgeIndex]['radiusRatio'] = radiusRatio
self.edgeInfoDict[edgeIndex]['radiusRatio'] = radiusRatio
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: nodeInfoDict[node]['nodeIndex'] for node in G.nodes()}
nodeValueList = [nodeInfoDict[node]['nodeIndex'] for node in G.nodes()]
edgeLabelDict = {edge: self.edgeInfoDictAfter[G[edge[0]][edge[1]]['edgeIndex']]['radiusRatio'] for edge in G.edges()} # radius ratio (after / before)
edgeValueList = [self.edgeInfoDictAfter[G[edge[0]][edge[1]]['edgeIndex']]['radiusRatio'] for edge in G.edges()] # radius ratio (after / before)
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'Ground truth'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# partitionInfo = {'LMCA': {'startNodes': [5], 'boundaryNodes': [13]}, 'RMCA': {'startNodes': [6], 'boundaryNodes': [13]},
#     'LPCA': {'startNodes': [4], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [13], 'boundaryNodes': []}} # older node numbering; superseded by the assignment below
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
# fig = plt.figure(2, figsize=(15, 8))
# plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
fig = plt.figure(11, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
print('{}:\nvisitedNodes={}\nvisitedEdges={}'.format(partitionName, visitedNodes, visitedEdges))
ax = fig.add_subplot(1,5,subplotIndex)
dictUsed = edgeInfoDict
attribute1, attribute2, attribute3 = 'segmentLevel', 'meanRadius', 'partitionName'
attribute1List = [edgeInfoDict[edgeIndex]['depth'] for edgeIndex in visitedEdges]
# attribute2List = [edgeInfoDict[edgeIndex]['meanRadius']*spacing*1000 for edgeIndex in visitedEdges]
attribute2List = [edgeInfoDict[edgeIndex]['radiusRatio'] for edgeIndex in visitedEdges]
# attribute1List = [info[attribute1] for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames]
# attribute2List = [info[attribute2]*spacing*1000 for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames] # mm
# ax.plot(attribute1List, attribute2List, 'bo')
positions = np.sort(np.unique(attribute1List))
values = []
attribute1Array, attribute2Array = np.array(attribute1List), np.array(attribute2List)
for segmentLevel in positions:
locs = np.nonzero(attribute1Array == segmentLevel)[0]
values.append((attribute2Array[locs]).tolist())
mf.boxPlotWithWhiskers(values, ax, positions=positions, whis='range', xlabel='Graph level', ylabel='Radius ratio')
ax.set_xlabel('Graph level')
ax.set_ylabel('Radius ratio')
ax.set_title(partitionName)
subplotIndex += 1
plt.show()
def updateEdgeRadius(self, edgeRadiusList):
"""
Update the edge radius with the supplied list.
The i-th element in edgeRadiusList is the radius (in voxels) of the i-th edge.
Parameters
----------
edgeRadiusList : list
A list of new edge radii (in voxels).
"""
edgeInfoDict = self.edgeInfoDict
for edgeIndex, radius in enumerate(edgeRadiusList):
edgeInfoDict[edgeIndex]['meanRadius'] = radius
self.edgeInfoDict = edgeInfoDict
self.setNetwork(option=2)
def applyFlowToNetwork(self, edgeFlowList):
"""
Apply the flow from edgeFlowList to the corresponding edges and recalculate all the pressures.
The i-th element in edgeFlowList is the flow (in m^3/s) of the i-th edge.
Parameters
----------
edgeFlowList : list
A list of flow rates (in m^3/s) to be applied to each edge.
"""
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
edgeList = self.edgeList
spacing = self.spacing
for edgeIndex, flow in enumerate(edgeFlowList):
edgeInfoDict[edgeIndex]['simulationData']['flow'] = flow
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
velocity = flow / (np.pi * radius**2) # m/s
edgeInfoDict[edgeIndex]['simulationData']['velocity'] = velocity
edgeDepthArray = np.array([edgeInfoDict[edgeIndex]['depth'] for edgeIndex in edgeIndexList])
edgeIndexListSorted = np.array(edgeIndexList)[edgeDepthArray.argsort()].tolist()
for edgeIndex in edgeIndexListSorted:
edge = edgeList[edgeIndex]
edgeHead, edgeTail = edge
if nodeInfoDict[edgeHead]['depth'] > nodeInfoDict[edgeTail]['depth']:
edgeHead, edgeTail = edgeTail, edgeHead
pressureHead = nodeInfoDict[edgeHead]['simulationData']['pressure']
radius = edgeInfoDict[edgeIndex]['meanRadius'] * spacing # meter
length = edgeInfoDict[edgeIndex]['length'] * spacing # meter
c, k = itemgetter('c', 'k')(edgeInfoDict[edgeIndex])
flow = edgeFlowList[edgeIndex]
deltaPressure = 10.67 * flow**k * length / c**k / (2*radius)**4.8704
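# Presumably the SI Hazen-Williams head-loss form (fluidMethod 'HW' elsewhere in this file):
# loss ~ 10.67 * L * Q^k / (C^k * D^4.8704), with the per-edge coefficient c and exponent k taken
# from edgeInfoDict; the result is used directly as the pressure drop along the edge.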
if pressureHead is None:
print('Error! EdgeIndex={} has pressure = None'.format(edgeIndex))
pressureTail = pressureHead - deltaPressure
nodeInfoDict[edgeTail]['simulationData']['pressure'] = pressureTail
self.nodeInfoDict = nodeInfoDict
self.edgeInfoDict = edgeInfoDict
def showVolumePerPartition(self, numOfTimeSteps=4, interpolationOption=1, figIndex=1, isLastFigure=True):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between, and check how the volume of each partition changes across time steps.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
volumeTimeStepListPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': [], 'Left': [], 'Right': []}
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
for currentTimeStep in range(0, numOfTimeSteps):
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
for partitionName, volume in volumePerPartition.items():
volumeTimeStepListPerPartition[partitionName].append(volume)
volumeTimeStepListPerPartition['Left'] = (np.array(volumeTimeStepListPerPartition['LMCA']) + np.array(volumeTimeStepListPerPartition['LPCA'])).tolist()
volumeTimeStepListPerPartition['Right'] = (np.array(volumeTimeStepListPerPartition['RMCA']) + np.array(volumeTimeStepListPerPartition['RPCA'])).tolist()
print('volumeTimeStepListPerPartition={}'.format(volumeTimeStepListPerPartition))
fig = plt.figure(figIndex, figsize=(7, 3))
plt.subplots_adjust(left=0.1, right=0.9, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
ax = fig.add_subplot(1,1,1)
for partitionName, volumeList in volumeTimeStepListPerPartition.items():
xValues = list(range(numOfTimeSteps))
yValues = volumeList
ax.plot(xValues, yValues, 'o-', label=partitionName)
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel(r'Volume ($\mathrm{mm}^3$)')
# ax.legend()
ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, ncol=7, prop={'size': 8})
if isLastFigure:
plt.show()
def test1(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between, split the flow according to the cross-sectional area (option 2 in
createGroundTruth()), and see how the terminating pressures change.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
aa = [self.edgeInfoDict[edgeIndex]['meanRadius'] for edgeIndex in self.edgeIndexList]
# print(aa)
# success = self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
cTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
allNodes = list(range(np.max(list(self.nodeInfoDict.keys())) + 1))
terminatingNodes = [node for node in G.nodes() if G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
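# Terminating nodes are the degree-1 nodes other than the inlet (depth 0); their pressures are
# collected per time step into terminatingPressuresTimeStepArray below.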
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
nodePressuresTimeStepArray = np.zeros((len(allNodes), numOfTimeSteps))
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
for currentTimeStep in range(0, numOfTimeSteps):
# print(currentTimeStep)
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
# print(radiusList)
self.updateEdgeRadius(radiusList)
success = self.createGroundTruth(option=2)
if not success:
print('Time step={} failed'.format(currentTimeStep))
terminatingPressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in terminatingNodes]
terminatingPressuresTimeStepArray[:, currentTimeStep] = terminatingPressures
nodePressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in allNodes]
nodePressuresTimeStepArray[:, currentTimeStep] = nodePressures
cValues = [self.edgeInfoDict[edgeIndex]['c'] for edgeIndex in edgeIndexList]
cTimeStepArray[edgeIndexList, currentTimeStep] = cValues
# G = self.G
# nodeInfoDict = self.nodeInfoDict
# edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
# infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
# 'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
# 'figTitle': 'Ground truth'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=True)
# Clear the simulation result #
for node in G.nodes():
self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
for edgeIndex in edgeIndexList:
self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
print(terminatingPressuresTimeStepArray)
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
fig = plt.figure(1, figsize=(15, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
subplotIndex = 1
colorList = ['r','g','b']
# terminatingNodes = {'LMCA': [], 'RMCA': [], 'ACA': [], 'LPCA': [], 'RPCA': []}
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# terminatingNodes[partitionName] = terminatingNodesInThisPartition
ax = fig.add_subplot(2,4,subplotIndex, projection='3d')
for ii, node in enumerate(terminatingNodesInThisPartition):
rowNum = terminatingNodes.index(node)
pressures = terminatingPressuresTimeStepArray[rowNum, :]
xValues = [node] * numOfTimeSteps
yValues = list(range(numOfTimeSteps))
zValues = list(pressures)
ax.plot(xValues, yValues, zValues, 'bo-')
ax.set_xlabel('Node index')
ax.set_ylabel('Time step')
ax.set_zlabel('Terminating pressure (mmHg)')
ax.set_title(partitionName)
subplotIndex += 1
edgeRadiusTimeStepArray = np.array(edgeRadiusTimeStepList)
spacing = self.spacing
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
if partitionName != 'LPCA' and partitionName != 'LMCA' and partitionName != 'ACA':
continue
print('{}:'.format(partitionName))
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
edgeIndexAlongPath = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
for currentTimeStep in range(numOfTimeSteps):
pressuresAlongPath = np.round(nodePressuresTimeStepArray[path, currentTimeStep], 2) # mmHg
edgeRadiusAlongPath = np.round(edgeRadiusTimeStepArray[edgeIndexAlongPath, currentTimeStep]*spacing*1000, 2) # mm
cAlongPath = np.round(cTimeStepArray[edgeIndexAlongPath, currentTimeStep], 3)
print('Terminating node {} (time step={}): pressures along path are {} mmHg, radius along path are {} mm, c={}'.format(terminatingNode, currentTimeStep, pressuresAlongPath, edgeRadiusAlongPath, cAlongPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
plt.show()
def test2(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, apply the same (or a different) flow pattern to both and check the differences in terminating pressures
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
edgeFlowList = [0] * len(edgeIndexList)
for edgeIndex in edgeIndexList:
edgeFlowList[edgeIndex] = self.edgeInfoDict[edgeIndex]['simulationData']['flow']
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'BraVa'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
self.plotTerminatingPressures(figIndex=2, isLastFigure=False)
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
self.applyFlowToNetwork(edgeFlowList)
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM 2013'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=False)
self.plotTerminatingPressures(figIndex=4, isLastFigure=True)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def test3(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between, use the same flow pattern as BraVa for the other time points, and see how the
terminating pressures change.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
edgeFlowList = [0] * len(edgeIndexList)
for edgeIndex in edgeIndexList:
edgeFlowList[edgeIndex] = self.edgeInfoDict[edgeIndex]['simulationData']['flow']
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
cTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
flowTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
allNodes = list(range(np.max(list(self.nodeInfoDict.keys())) + 1))
terminatingNodes = [node for node in G.nodes() if G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
nodePressuresTimeStepArray = np.zeros((len(allNodes), numOfTimeSteps))
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
for currentTimeStep in range(0, numOfTimeSteps):
# print(currentTimeStep)
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
# print(radiusList)
self.updateEdgeRadius(radiusList)
self.applyFlowToNetwork(edgeFlowList)
terminatingPressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in terminatingNodes]
terminatingPressuresTimeStepArray[:, currentTimeStep] = terminatingPressures
nodePressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in allNodes]
nodePressuresTimeStepArray[:, currentTimeStep] = nodePressures
cValues = [self.edgeInfoDict[edgeIndex]['c'] for edgeIndex in edgeIndexList]
cTimeStepArray[edgeIndexList, currentTimeStep] = cValues
flowValues = [self.edgeInfoDict[edgeIndex]['simulationData']['flow'] for edgeIndex in edgeIndexList] # m^3/s
flowTimeStepArray[edgeIndexList, currentTimeStep] = flowValues
# G = self.G
# nodeInfoDict = self.nodeInfoDict
# edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
# infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
# 'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
# 'figTitle': 'Ground truth'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=True)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# print(terminatingPressuresTimeStepArray)
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=11, isLastFigure=False)
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=2, figIndex=21, isLastFigure=False)
# self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=3, figIndex=31, isLastFigure=False)
self.plotTerminatingPressureVSPathLength(terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=31, isLastFigure=False)
self.plotFlow(flowTimeStepArray, option=1, figIndex=41, isLastFigure=False)
self.plotRootPressuresCompartment(nodePressuresTimeStepArray, option=1, figIndex=51, isLastFigure=False)
self.plotFlowProportion(flowTimeStepArray, figIndex=61, isLastFigure=True)
# Flow proportion_Same flow_All CoW branches fixed_test3
# Flow proportion_Same flow_LICA RICA VA fixed_test3
# partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
# 'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
# G = self.G
# nodeInfoDict = self.nodeInfoDict
# edgeInfoDict = self.edgeInfoDict
# edgeRadiusTimeStepArray = np.array(edgeRadiusTimeStepList)
# spacing = self.spacing
# for partitionName, info in partitionInfo.items():
# startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
# resultDict = self.BFS(startNodes, boundaryNodes)
# visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
# terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# if partitionName != 'RMCA' and partitionName != 'LMCA' and partitionName != 'LPCA':
# continue
# print('{}:'.format(partitionName))
# for terminatingNode in terminatingNodesInThisPartition:
# path = nx.shortest_path(G, startNodes[0], terminatingNode)
# edgeIndexAlongPath = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
# for currentTimeStep in range(numOfTimeSteps):
# pressuresAlongPath = np.round(nodePressuresTimeStepArray[path, currentTimeStep], 2) # mmHg
# edgeRadiusAlongPath = np.round(edgeRadiusTimeStepArray[edgeIndexAlongPath, currentTimeStep]*spacing*1000, 2) # mm
# cAlongPath = np.round(cTimeStepArray[edgeIndexAlongPath, currentTimeStep], 3)
# print('Terminating node {} (time step={}): pressures along path are {} mmHg, radius along path are {} mm, c={}'.format(terminatingNode, currentTimeStep, pressuresAlongPath, edgeRadiusAlongPath, cAlongPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# plt.show()
def test4(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, check the differences in terminating pressures
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
edgeFlowList = [0] * len(edgeIndexList)
for edgeIndex in edgeIndexList:
edgeFlowList[edgeIndex] = self.edgeInfoDict[edgeIndex]['simulationData']['flow']
edgeRadiusTimeStepArray = np.zeros((len(edgeIndexList), 2))
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for edgeIndex in edgeIndexList:
edgeRadiusTimeStepArray[edgeIndex, 0] = self.edgeInfoDict[edgeIndex]['meanRadius'] * spacing * 1000
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'BraVa'}
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# self.plotTerminatingPressures(figIndex=2, isLastFigure=False)
# Clear the simulation result #
for node in G.nodes():
self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
for edgeIndex in edgeIndexList:
self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
edgeRadiusTimeStepArray[edgeIndex, 1] = self.edgeInfoDict[edgeIndex]['meanRadius'] * spacing * 1000
for ii, row in enumerate(edgeRadiusTimeStepArray):
radiusBefore, radiusAfter = np.round(row, 3).tolist()
print('Edge {}: radius before/after = {}/{} mm'.format(ii, radiusBefore, radiusAfter))
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
'figTitle': 'GBM 2013'}
self.plotNetwork(infoDict, figIndex=3, isLastFigure=True)
# self.plotTerminatingPressures(figIndex=4, isLastFigure=True)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def test5(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time points in between, and see how the terminating pressures change.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
edgeIndexList = self.edgeIndexList
edgeFlowList = [0] * len(edgeIndexList)
for edgeIndex in edgeIndexList:
edgeFlowList[edgeIndex] = self.edgeInfoDict[edgeIndex]['simulationData']['flow']
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
cTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
allNodes = list(range(np.max(list(self.nodeInfoDict.keys())) + 1))
terminatingNodes = [node for node in G.nodes() if G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
nodePressuresTimeStepArray = np.zeros((len(allNodes), numOfTimeSteps))
flowTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
for node in G.nodes():
self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
for edgeIndex in edgeIndexList:
self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
for currentTimeStep in range(0, numOfTimeSteps):
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
self.createGroundTruth(option=2)
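            # Pressures below are converted from Pa to mmHg: divide by rho_Hg*g (13560 kg/m^3 * 9.8 m/s^2) and multiply by 1000 (m -> mm)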
terminatingPressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in terminatingNodes]
terminatingPressuresTimeStepArray[:, currentTimeStep] = terminatingPressures
nodePressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in allNodes]
nodePressuresTimeStepArray[:, currentTimeStep] = nodePressures
cValues = [self.edgeInfoDict[edgeIndex]['c'] for edgeIndex in edgeIndexList]
cTimeStepArray[edgeIndexList, currentTimeStep] = cValues
flowValues = [self.edgeInfoDict[edgeIndex]['simulationData']['flow'] for edgeIndex in edgeIndexList] # m^3/s
flowTimeStepArray[edgeIndexList, currentTimeStep] = flowValues
# Clear the simulation result #
for node in G.nodes():
self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
for edgeIndex in edgeIndexList:
self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
# print(terminatingPressuresTimeStepArray)
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=11, isLastFigure=False)
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=2, figIndex=21, isLastFigure=False)
# self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=3, figIndex=31, isLastFigure=False)
self.plotFlow(flowTimeStepArray, option=1, figIndex=41, isLastFigure=False)
self.plotRootPressuresCompartment(nodePressuresTimeStepArray, option=1, figIndex=51, isLastFigure=False)
self.plotFlowProportion(flowTimeStepArray, figIndex=61, isLastFigure=True)
# Flow proportion_Split flow with radius_All CoW branches fixed_test3
# Flow proportion_Split flow with radius_LICA RICA VA fixed_test3
print(edgeRadiusTimeStepList[8:12])
print(flowTimeStepArray[[4,8,9,10,11],:])
# edgeRadiusTimeStepArray = np.array(edgeRadiusTimeStepList)
# spacing = self.spacing
# for partitionName, info in partitionInfo.items():
# startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
# resultDict = self.BFS(startNodes, boundaryNodes)
# visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
# terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
# if partitionName != 'LPCA' and partitionName != 'LMCA' and partitionName != 'ACA':
# continue
# print('{}:'.format(partitionName))
# for terminatingNode in terminatingNodesInThisPartition:
# path = nx.shortest_path(G, startNodes[0], terminatingNode)
# edgeIndexAlongPath = [G[path[ii]][path[ii+1]]['edgeIndex'] for ii in range(len(path) - 1)]
# for currentTimeStep in range(numOfTimeSteps):
# pressuresAlongPath = np.round(nodePressuresTimeStepArray[path, currentTimeStep], 2) # mmHg
# edgeRadiusAlongPath = np.round(edgeRadiusTimeStepArray[edgeIndexAlongPath, currentTimeStep]*spacing*1000, 2) # mm
# cAlongPath = np.round(cTimeStepArray[edgeIndexAlongPath, currentTimeStep], 3)
# print('Terminating node {} (time step={}): pressures along path are {} mmHg, radius along path are {} mm, c={}'.format(terminatingNode, currentTimeStep, pressuresAlongPath, edgeRadiusAlongPath, cAlongPath))
# elapsed = timeit.default_timer() - start_time
# print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
plt.show()
def test6(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
        the time points in between, set the terminating pressures using the terminating pressure vs path length
        relationship found with the ADAN dataset, and solve the network. The purpose is to see when the network fails to
produce a solution (i.e., optimization error becomes too large to be acceptable), and for those time steps that
do not have a solution, perturb the terminating pressures with minimum effort (another optimization) so that
there exists a solution.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Note: check what slope is being used in setTerminatingPressures()!
Saved Result:
fluidSimulationResultTest6_Timestep={}_v1.pkl: everything normal
fluidSimulationResultTest6_Timestep={}_v2.pkl: slope of terminating pressure vs path length reduced by 30%
fluidSimulationResultTest6_Timestep={}_v3.pkl: slope of terminating pressure vs path length reduced by 40%
fluidSimulationResultTest6_Timestep={}_v4.pkl: slope of terminating pressure vs path length reduced by 20%
fluidSimulationResultTest6_Timestep={}_v5.pkl: slope of terminating pressure vs path length comes from fitting the ground truth solution (option=2)
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
# G = self.G
# nodeInfoDict = self.nodeInfoDict
# edgeInfoDict = self.edgeInfoDict
# nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
# nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
# edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
# edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
# infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Node',
# 'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': 'Edge',
# 'figTitle': 'Ground truth'}
# self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
# cTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
# allNodes = list(range(np.max(list(self.nodeInfoDict.keys())) + 1))
# terminatingNodes = [node for node in G.nodes() if G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
# terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
# nodePressuresTimeStepArray = np.zeros((len(allNodes), numOfTimeSteps))
# flowTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = 1 # numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
for currentTimeStep in range(0, maxTimeStep):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
# self.createGroundTruth(option=2)
extraInfo = None
extraInfo = {'fitResultPerPartition': fitResultPerPartition}
self.setTerminatingPressure(option=2, extraInfo=extraInfo)
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
            pressureIn = 13560 * 9.8 * 0.12 # Pascal # check that this number is consistent with the one used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
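            # Initial guess: 0.4 m/s for every free edge velocity, and node pressures decreasing linearly
            # from 80% to 50% of the inlet pressure (pressureIn = 120 mmHg expressed in Pa)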
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
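            # Per-unknown bounds: velocities in [0, 5] m/s, pressures in [0, 120 mmHg] expressed in Pa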
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
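            # basinhopping perturbs the current solution by `stepsize`, accepts/rejects hops using temperature T,
            # and runs an unconstrained BFGS minimisation of the equation residuals at each hop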
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResultTest6_Timestep={}_v5.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
def showResult_GBMTest5(self, numOfTimeSteps=5):
"""
Plot the result obtained from `GBMTest5`.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
terminatingNodes = [node for node in self.G.nodes() if self.G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
directory = self.directory
edgeIndexList = self.edgeIndexList
resultFolderPath = join(directory, 'fluidSimulationResult')
numOfTimeSteps = 5
incomingEdgesFlowTimeStepArray = np.zeros((3, numOfTimeSteps))
flowTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
terminatingNodes = [node for node in self.G.nodes() if self.G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
for currentTimeStep in range(numOfTimeSteps):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
resultFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
with open(join(resultFolderPath, resultFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}.'.format(resultFileName, resultFolderPath))
if 'velocityPressure' not in resultDict:
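                # Older result files store everything under 'perturbedYear'; convert them once to the flat
                # {'G', 'nodeInfoDict', 'edgeInfoDict', 'velocityPressure'} layout and re-save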
if 'perturbedYear' in resultDict:
G, nodeInfoDict, edgeInfoDict = itemgetter('G', 'nodeInfoDict', 'edgeInfoDict')(resultDict['perturbedYear'])
velocityPressure = resultDict['perturbedYear']['velocityPressure'] #self.getVelocityPressure()
with open(join(resultFolderPath, resultFileName), 'wb') as f:
resultDictNew = {'G': copy.deepcopy(G), 'nodeInfoDict': copy.deepcopy(nodeInfoDict), 'edgeInfoDict': copy.deepcopy(edgeInfoDict), 'velocityPressure': copy.deepcopy(velocityPressure)}
pickle.dump(resultDictNew, f, 2)
print('{} saved to {}.'.format(resultFileName, resultFolderPath))
else:
G, nodeInfoDict, edgeInfoDict = itemgetter('G', 'nodeInfoDict', 'edgeInfoDict')(resultDict)
velocityPressure = resultDict['velocityPressure']
self.G = copy.deepcopy(G)
self.nodeInfoDict = copy.deepcopy(nodeInfoDict)
self.edgeInfoDict = copy.deepcopy(edgeInfoDict)
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
self.validateFluidEquations(velocityPressure=velocityPressure)
self.updateNetworkWithSimulationResult(velocityPressure=velocityPressure)
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Pressure (mmHg)',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': r'Flow ($\mathrm{cm}^3/s$)',
'figTitle': 'GBM_Time step={}'.format(currentTimeStep)}
            if currentTimeStep == numOfTimeSteps - 1:
                isLastFigure = True
            else:
                isLastFigure = False
            # Note: isLastFigure computed above is currently unused; False is passed because more figures follow after this loop
            self.plotNetwork(infoDict, figIndex=currentTimeStep+1, isLastFigure=False)
# Collect some results
for edgeIndex in [0,1,2]:
incomingEdgesFlowTimeStepArray[edgeIndex, currentTimeStep] = edgeInfoDict[edgeIndex]['simulationData']['flow']
for edgeIndex in edgeIndexList:
flowTimeStepArray[edgeIndex, currentTimeStep] = edgeInfoDict[edgeIndex]['simulationData']['flow']
terminatingPressures = [self.nodeInfoDict[node]['simulationData']['pressure'] /13560/9.8*1000 for node in terminatingNodes]
terminatingPressuresTimeStepArray[:, currentTimeStep] = terminatingPressures
# Flow proportions_GBMTest5
# self.plotFlowProportion(flowTimeStepArray, figIndex=21, isLastFigure=False)
# Mean terminating pressure vs Time step_GBMTest5 and Terminating pressure vs Time step_Compartment_GBMTest5
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=31, isLastFigure=False)
self.plotTerminatingPressures2(terminatingNodes, terminatingPressuresTimeStepArray, option=2, figIndex=41, isLastFigure=True)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# plt.show()
def showResult2_GBMTest5(self, numOfTimeSteps=5):
"""
Show graph plot of pressures and flows from `GBMTest5` between two time steps and share one legend.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
terminatingNodes = [node for node in self.G.nodes() if self.G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
directory = self.directory
edgeIndexList = self.edgeIndexList
resultFolderPath = join(directory, 'fluidSimulationResult')
numOfTimeSteps = 5
incomingEdgesFlowTimeStepArray = np.zeros((3, numOfTimeSteps))
flowTimeStepArray = np.zeros((len(edgeIndexList), numOfTimeSteps))
terminatingNodes = [node for node in self.G.nodes() if self.G.degree(node) == 1 and self.nodeInfoDict[node]['depth'] != 0]
terminatingPressuresTimeStepArray = np.zeros((len(terminatingNodes), numOfTimeSteps))
infoDictList = []
nodeValueListTotal, edgeValueListTotal = [], []
timeStepsToUse = [0, 4]
for currentTimeStep in timeStepsToUse:
print('##### currentTimeStep={} #####'.format(currentTimeStep))
resultFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
with open(join(resultFolderPath, resultFileName), 'rb') as f:
resultDict = pickle.load(f)
print('{} loaded from {}.'.format(resultFileName, resultFolderPath))
G, nodeInfoDict, edgeInfoDict = itemgetter('G', 'nodeInfoDict', 'edgeInfoDict')(resultDict)
velocityPressure = resultDict['velocityPressure']
self.G = copy.deepcopy(G)
self.nodeInfoDict = copy.deepcopy(nodeInfoDict)
self.edgeInfoDict = copy.deepcopy(edgeInfoDict)
self.updateNetworkWithSimulationResult(velocityPressure=velocityPressure)
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Pressure (mmHg)',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': r'Flow ($\mathrm{cm}^3/s$)',
'figTitle': 'GBM_Time step={}'.format(currentTimeStep)}
# self.plotNetwork(infoDict, figIndex=currentTimeStep+1, isLastFigure=False)
infoDictList.append(infoDict)
nodeValueListTotal += nodeValueList
edgeValueListTotal += edgeValueList
vmin, vmax = np.amin(nodeValueListTotal), np.amax(nodeValueListTotal)
edge_vmin, edge_vmax = np.amin(edgeValueListTotal), np.amax(edgeValueListTotal)
figIndex = 1
# fluidSimulationResult_Time step=0_Compare between two time steps_Same Legend_GBMTest5
# fluidSimulationResult_Time step=4_Compare between two time steps_Same Legend_GBMTest5
# fluidSimulationResult_Legend_Time step=0,4_GBMTest5
for infoDict in infoDictList:
infoDict['vmin'] = vmin
infoDict['vmax'] = vmax
infoDict['edge_vmin'] = edge_vmin
infoDict['edge_vmax'] = edge_vmax
self.plotNetwork(infoDict, figIndex=figIndex, isLastFigure=False, hideColorbar=True)
figIndex += 1
extraInfo = {'nodeLabel': 'Pressure (mmHg)', 'nodeLabelSize': 18, 'nodeTickSize': 18,
'edgeLabel': r'Flow rate ($\mathrm{cm}^3/s$)', 'edgeLabelSize': 18, 'edgeTickSize': 18,
'vmin': vmin, 'vmax': vmax, 'edge_vmin': edge_vmin, 'edge_vmax': edge_vmax}
self.graphPlotStandaloneLegend(figIndex=10, isLastFigure=True, extraInfo=extraInfo)
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def graphPlotStandaloneLegend(self, figIndex=1, isLastFigure=True, orientation='horizontal', extraInfo=None):
"""
Standalone legend for the graph plot.
"""
fig = plt.figure(figIndex, figsize=(12, 8))
plt.subplots_adjust(left=0.15, right=0.85, top=0.94, bottom=0.06, wspace=0.3, hspace=0.9)
if orientation == 'horizontal':
ax1 = fig.add_axes([0.15, 0.9, 0.7, 0.04])
ax2 = fig.add_axes([0.15, 0.75, 0.7, 0.04])
elif orientation == 'vertical':
ax1 = fig.add_axes([0.05, 0.05, 0.04, 0.9])
ax2 = fig.add_axes([0.2, 0.05, 0.04, 0.9])
vmin, vmax, edge_vmin, edge_vmax = itemgetter('vmin', 'vmax', 'edge_vmin', 'edge_vmax')(extraInfo)
nodeColorNorm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=mpl.cm.jet, norm=nodeColorNorm, orientation=orientation)
nodeLabel, nodeLabelSize, nodeTickSize = itemgetter('nodeLabel', 'nodeLabelSize', 'nodeTickSize')(extraInfo)
cb1.set_label(nodeLabel, size=nodeLabelSize)
cb1.ax.tick_params(labelsize=nodeTickSize)
edgeColorNorm = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=mpl.cm.jet, norm=edgeColorNorm, orientation=orientation)
edgeLabel, edgeLabelSize, edgeTickSize = itemgetter('edgeLabel', 'edgeLabelSize', 'edgeTickSize')(extraInfo)
cb2.set_label(edgeLabel, size=edgeLabelSize)
cb2.ax.tick_params(labelsize=edgeTickSize)
if isLastFigure:
plt.show()
def plotTerminatingPressures(self, figIndex=1, isLastFigure=True):
"""
Plot distribution of terminating pressures per compartment.
"""
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
terminatingNodes = {'LMCA': [], 'RMCA': [], 'ACA': [], 'LPCA': [], 'RPCA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingNodes[partitionName] = terminatingNodesInThisPartition
# fig = plt.figure(figIndex, figsize=(15, 8))
# plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
fig = plt.figure(figIndex, figsize=(15, 3)) # 1*5 figure
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
for partitionName, info in partitionInfo.items():
terminatingPressuresInThisPartition = [nodeInfoDict[node]['simulationData']['pressure']/13560/9.8*1000 for node in terminatingNodes[partitionName]]
ax = fig.add_subplot(1,5,subplotIndex)
ax.hist(terminatingPressuresInThisPartition, bins=10)
ax.set_xlabel('Terminating pressure (mmHg)')
ax.set_ylabel('Count')
ax.set_title(partitionName)
subplotIndex += 1
if isLastFigure:
plt.show()
def plotTerminatingPressures2(self, terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=1, isLastFigure=True):
"""
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
numOfTimeSteps = terminatingPressuresTimeStepArray.shape[1]
# Line plot of terminating pressures (one line=one terminating node) per compartment
if option == 1:
# fig = plt.figure(1, figsize=(15, 8))
# plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
fig2 = plt.figure(figIndex+1, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
meanTerminatingPressuresPerPartitionArray = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
meanTerminatingPressuresList = []
ax = fig.add_subplot(1,5,subplotIndex)
ax2 = fig2.add_subplot(1,5,1)
# Terminating pressures vs Time steps(5)_Same flow_All CoW branches fixed_Compartment(5)_Single row
# Terminating pressures vs Time steps(5)_Same flow_LICA RICA VA fixed_Compartment(5)_Single row
# Terminating pressures vs Time steps(5)_Split flow with radius_All CoW branches fixed_Compartment(5)_Single row
# Terminating pressures vs Time steps(5)_Split flow with radius_LICA RICA VA fixed_Compartment(5)_Single row
for ii, node in enumerate(terminatingNodesInThisPartition):
rowNum = terminatingNodes.index(node)
pressures = terminatingPressuresTimeStepArray[rowNum, :]
xValues = list(range(numOfTimeSteps))
yValues = list(pressures)
ax.plot(xValues, yValues, 'o-')
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
if subplotIndex == 1:
ax.set_ylabel('Terminating pressure (mmHg)')
ax.set_title(partitionName)
meanTerminatingPressuresList.append(pressures)
# Mean terminating pressures vs Time steps(5)_Same flow_All CoW branches fixed_Compartment(5)_Single row
# Mean terminating pressures vs Time steps(5)_Same flow_LICA RICA VA fixed_Compartment(5)_Single row
# Mean terminating pressures vs Time steps(5)_Split flow with radius_All CoW branches fixed_Compartment(5)_Single row
# Mean terminating pressures vs Time steps(5)_Split flow with radius_LICA RICA VA fixed_Compartment(5)_Single row
# ax2 = fig2.add_subplot(1,5,subplotIndex)
meanTerminatingPressuresArray = np.array(meanTerminatingPressuresList)
xValues = list(range(numOfTimeSteps))
yValues = np.mean(meanTerminatingPressuresArray, axis=0)
meanTerminatingPressuresPerPartitionArray[partitionName] = yValues
ax2.plot(xValues, yValues, 'o-', label=partitionName)
ax2.set_xlabel('Time step')
ax2.set_xticks(xValues)
ax2.set_xticklabels(['T{}'.format(ii) for ii in xValues])
# if subplotIndex == 1:
                ax2.set_ylabel('Mean terminating pressure (mmHg)')
# ax2.set_title(partitionName)
subplotIndex += 1
ax2.legend(prop={'size': 6})
ax3 = fig2.add_subplot(1,5,2)
xValues = list(range(numOfTimeSteps))
yValuesLeft = (meanTerminatingPressuresPerPartitionArray['LMCA'] + meanTerminatingPressuresPerPartitionArray['LPCA']) / 2
yValuesRight = (meanTerminatingPressuresPerPartitionArray['RMCA'] + meanTerminatingPressuresPerPartitionArray['RPCA']) / 2
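            # Left/right hemisphere curves are the averages of the corresponding MCA and PCA compartment means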
ax3.plot(xValues, yValuesLeft, 'o-', label='Left')
ax3.plot(xValues, yValuesRight, 'o-', label='Right')
ax3.set_xlabel('Time step')
ax3.set_xticks(xValues)
ax3.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax3.legend()
# Each plot represents a time step and shows TP distribution of different compartments (one color for each)
# Terminating pressures distribution per time step_Same flow_All CoW branches fixed_Compartment(5)
# Terminating pressures distribution per time step_Same flow_LICA RICA VA fixed_Compartment(5)
# Terminating pressures distribution per time step_Split flow with radius_All CoW branches fixed_Compartment(5)
# Terminating pressures distribution per time step_Split flow with radius_LICA RICA VA fixed_Compartment(5)
elif option == 2:
# Terminating pressure vs Time step vs Compartment(5)_3D Histogram_Same flow_All CoW branches fixed
# Terminating pressure vs Time step vs Compartment(5)_3D Histogram_Same flow_LICA RICA VA fixed
# Terminating pressure vs Time step vs Compartment(5)_3D Histogram_Split flow with radius_All CoW branches fixed
# Terminating pressure vs Time step vs Compartment(5)_3D Histogram_Split flow with radius_LICA RICA VA fixed
fig = plt.figure(figIndex, figsize=(8, 5))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.5)
subplotIndex = 1
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nbins = 10
colorList = ['r', 'g', 'b', 'y', 'c', 'm']
colorDict = {'LMCA': 'r', 'RMCA': 'g', 'LPCA': 'b', 'RPCA': 'y', 'ACA': 'c'}
ax = fig.add_subplot(1,1,subplotIndex, projection='3d')
for currentTimeStep in range(numOfTimeSteps):
data = []
counter = 0
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingPressuresInThisPartition = [terminatingPressuresTimeStepArray[terminatingNodes.index(node), currentTimeStep] for node in terminatingNodesInThisPartition]
data.append(terminatingPressuresInThisPartition)
                    # Bar plot (effectively a histogram)
hist, bins = np.histogram(terminatingPressuresInThisPartition, bins=nbins)
xs = (bins[:-1] + bins[1:])/2
color = colorDict[partitionName]
# if partitionName != 'LMCA':
# continue
ax.bar(xs, hist, zs=currentTimeStep*10, zdir='y', color=color, ec=color, alpha=0.8)
# ax.bar3d(xs, counter*10, 0, 1, 0.1, hist, color=color, alpha=0.8)
ax.set_xlabel('Terminating pressure (mmHg)')
ax.set_ylabel('Time step')
ax.set_yticks([ii*10 for ii in range(numOfTimeSteps)])
ax.set_yticklabels(['T{}'.format(ii) for ii in range(numOfTimeSteps)])
ax.set_zlabel('Count')
counter += 1
subplotIndex += 1
f = lambda x,y,z: proj3d.proj_transform(x,y,z, ax.get_proj())[:2]
# ax.legend(list(partitionInfo.keys()), loc="upper right", bbox_to_anchor=f(30,45,10), bbox_transform=ax.transData) # for test3
# ax.legend(list(partitionInfo.keys()), loc="upper right", bbox_to_anchor=f(50,45,6), bbox_transform=ax.transData) # for test5
ax.legend(list(partitionInfo.keys()), loc="upper right", bbox_to_anchor=f(65,45,4), bbox_transform=ax.transData) # for showResult_GBMTest5
# ax.legend(list(partitionInfo.keys()))
# Each plot represents a compartment and shows distribution of different time steps (one color for each)
# Terminating pressures distribution per compartment(5)_Same flow_All CoW branches fixed
# Terminating pressures distribution per compartment(5)_Same flow_LICA RICA VA fixed
# Terminating pressures distribution per compartment(5)_Split flow with radius_All CoW branches fixed
# Terminating pressures distribution per compartment(5)_Split flow with radius_LICA RICA VA fixed
elif option == 3:
fig = plt.figure(figIndex, figsize=(9, 8))
plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.5)
subplotIndex = 1
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nbins = 10
colorList = ['r', 'g', 'b', 'y', 'c', 'm']
colorDict = {'LMCA': 'r', 'RMCA': 'g', 'LPCA': 'b', 'RPCA': 'y', 'ACA': 'c'}
ax = fig.add_subplot(1,1,subplotIndex, projection='3d')
partitionCounter = 0
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
data = []
for currentTimeStep in range(numOfTimeSteps):
terminatingPressuresAtThisTimeStep = [terminatingPressuresTimeStepArray[terminatingNodes.index(node), currentTimeStep] for node in terminatingNodesInThisPartition]
data.append(terminatingPressuresAtThisTimeStep)
                    # Bar plot (effectively a histogram)
hist, bins = np.histogram(terminatingPressuresAtThisTimeStep, bins=nbins)
xs = (bins[:-1] + bins[1:])/2
color = colorList[currentTimeStep]
# if partitionName != 'LMCA':
# continue
ax.bar(xs, hist, zs=partitionCounter*10, zdir='y', color=color, ec=color, alpha=0.8)
ax.set_xlabel('Terminating Pressure (mmHg)')
ax.set_ylabel('Compartment')
ax.set_zlabel('Count')
partitionCounter += 1
subplotIndex += 1
if isLastFigure:
plt.show()
def plotFlow(self, flowTimeStepArray, option=1, figIndex=1, isLastFigure=True):
"""
Plot the flow to each of the compartments.
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]}, 'ACA': {'startNodes': [10], 'boundaryNodes': []},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}}
timestepList = []
numOfTimeSteps = flowTimeStepArray.shape[1]
fig = plt.figure(figIndex, figsize=(8, 3))
plt.subplots_adjust(left=0.10, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
# Total flow vs Time steps(5)_Split flow with radius_All CoW branches fixed_Compartment(5)_Single row
# Total flow vs Time steps(5)_Split flow with radius_LICA RICA VA fixed_Compartment(5)_Single row
subplotIndex = 1
ax = fig.add_subplot(1,1,subplotIndex)
ax.set_xlabel('Time step')
# ax.set_ylabel(r'Flow ($\mathrm{cm}^3 /s$)')
ax.set_ylabel('Percentage of flow change (%)')
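        # Edge indices used below are the compartment inlet edges (8=LMCA, 10=RMCA, 5=LPCA, 6=RPCA, 20=ACA;
        # cf. edgeNameDict in plotFlowProportion); each curve shows percentage change relative to time step 0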
for partitionName in ['LMCA', 'RMCA', 'LPCA', 'RPCA', 'ACA', 'Left', 'Right']:
if partitionName == 'LMCA':
flowValues = flowTimeStepArray[8, :] / flowTimeStepArray[8, 0] * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
print('LMCA flow: {}'.format(flowValues))
elif partitionName == 'RMCA':
flowValues = flowTimeStepArray[10, :] / flowTimeStepArray[10, 0] * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
elif partitionName == 'LPCA':
flowValues = flowTimeStepArray[5, :] / flowTimeStepArray[5, 0] * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
print('LPCA flow: {}'.format(flowValues))
elif partitionName == 'RPCA':
flowValues = flowTimeStepArray[6, :] / flowTimeStepArray[6, 0] * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
print('RPCA flow: {}'.format(flowValues))
elif partitionName == 'ACA':
flowValues = flowTimeStepArray[20, :] / flowTimeStepArray[20, 0] * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
elif partitionName == 'Left':
flowValues = (flowTimeStepArray[8, :] + flowTimeStepArray[5, :]) / (flowTimeStepArray[8, 0] + flowTimeStepArray[5, 0]) * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
elif partitionName == 'Right':
flowValues = (flowTimeStepArray[10, :] + flowTimeStepArray[6, :]) / (flowTimeStepArray[10, 0] + flowTimeStepArray[6, 0]) * 100 - 100
timeStepValues = list(range(numOfTimeSteps))
# ax = fig.add_subplot(1,1,subplotIndex)
ax.plot(timeStepValues, flowValues, 'o-', label=partitionName)
print('{}: {}% change in flow at 2013'.format(partitionName, np.round(flowValues[-1], 2)))
ax.set_xticks(list(range(numOfTimeSteps)))
ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, ncol=7)
if isLastFigure:
plt.show()
def plotRootPressuresCompartment(self, nodePressuresTimeStepArray, option=1, figIndex=1, isLastFigure=True):
"""
Plot the root pressures of each compartment over time.
Root pressure per compartment_Split flow with radius_All CoW branches fixed
Root pressure per compartment_Split flow with radius_LICA RICA VA fixed
Root pressure per compartment_Same flow_LICA RICA VA fixed
"""
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
numOfTimeSteps = nodePressuresTimeStepArray.shape[1]
fig = plt.figure(figIndex, figsize=(6, 3))
plt.subplots_adjust(left=0.10, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
ax = fig.add_subplot(1,1,subplotIndex)
ax.set_xlabel('Time step')
ax.set_ylabel('Root pressure (mmHg)')
for partitionName in ['LMCA', 'RMCA', 'LPCA', 'RPCA', 'ACA']:
rootNode = partitionInfo[partitionName]['startNodes'][0]
rootPressures = nodePressuresTimeStepArray[rootNode, :]
timeStepValues = list(range(numOfTimeSteps))
print(rootPressures)
ax.plot(timeStepValues, rootPressures, 'o-', label=partitionName)
ax.set_xticks(list(range(numOfTimeSteps)))
ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, ncol=5)
if isLastFigure:
plt.show()
def plotTerminatingPressureVSPathLength(self, terminatingNodes, terminatingPressuresTimeStepArray, option=1, figIndex=1, isLastFigure=True):
"""
Scatter plot of terminating pressure vs path length.
"""
G = self.G
edgeList = self.edgeList
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
edgeIndexList = self.edgeIndexList
spacing = self.spacing
directory = self.directory
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10], 'color': 'r'}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10], 'color': 'g'},
'LPCA': {'startNodes': [6], 'boundaryNodes': [], 'color': 'b'}, 'RPCA': {'startNodes': [7], 'boundaryNodes': [], 'color': 'y'},
'ACA': {'startNodes': [10], 'boundaryNodes': [], 'color': 'c'}}
if option == 1:
pass
elif option == 2:
pass
elif option == 3:
pass
terminatingPressureVSPathLength = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
terminatingNodesPerPartition = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingNodesPerPartition[partitionName] = terminatingNodesInThisPartition
terminatingPressuresInThisPartition = []
for terminatingNode in terminatingNodesInThisPartition:
path = nx.shortest_path(G, startNodes[0], terminatingNode)
pathEdgeIndexList = [G[path[ii]][path[ii + 1]]['edgeIndex'] for ii in range(len(path) - 1)]
uniquePathEdgeIndexList = np.unique(pathEdgeIndexList)
assert len(uniquePathEdgeIndexList) != 0
pathLength = np.sum([edgeInfoDict[edgeIndex]['length'] * spacing * 1000 for edgeIndex in uniquePathEdgeIndexList]) # millimeter
pressure = nodeInfoDict[terminatingNode]['simulationData']['pressure'] / 13560 / 9.8 * 1000 # mmHg
terminatingPressureVSPathLength[partitionName].append([pathLength, pressure])
nodeInfoDict[terminatingNode]['pathLength'] = pathLength
nodeInfoDict[terminatingNode]['partitionName'] = partitionName
terminatingNodesPathLengthList = [nodeInfoDict[node]['pathLength'] for node in terminatingNodes]
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
numOfTimeSteps = np.shape(terminatingPressuresTimeStepArray)[1]
for currentTimeStep in range(0, numOfTimeSteps):
ax = fig.add_subplot(1,5,currentTimeStep+1)
for partitionName, info in partitionInfo.items():
terminatingNodesInThisPartition = terminatingNodesPerPartition[partitionName]
pathLengthList = [nodeInfoDict[node]['pathLength'] for node in terminatingNodesInThisPartition]
pressureList = [terminatingPressuresTimeStepArray[terminatingNodes.index(node), currentTimeStep] for node in terminatingNodesInThisPartition]
color = info['color']
ax.scatter(pathLengthList, pressureList, c=color, label=partitionName)
ax.legend(prop={'size': 6})
ax.set_xlabel('Path length (mm)')
ax.set_ylabel('Terminating pressure (mmHg)')
ax.set_title('Timestep={}'.format(currentTimeStep))
if isLastFigure:
plt.show()
def plotFlowProportion(self, flowTimeStepArray, figIndex=1, isLastFigure=True):
"""
"""
numOfTimeSteps = flowTimeStepArray.shape[1]
# Flow proportions_GBMTest5
fig = plt.figure(figIndex, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
# incomingFlowInfo_GBMTest5
ax = fig.add_subplot(1,5,1)
edgeNameList = ['LICA', 'VA', 'RICA'] # corresponds to edgeIndex=0/1/2
for edgeIndex in [0,1,2]:
xValues = list(range(numOfTimeSteps))
yValues = flowTimeStepArray[edgeIndex, :] / np.sum(flowTimeStepArray[:3, :], axis=0)
ax.plot(xValues, yValues, 'o-', label=edgeNameList[edgeIndex])
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel('Flow proportion')
ax.legend()
ax = fig.add_subplot(1,5,2)
partitionProportionTimeStepDict = {'LMCA': [], 'RMCA': [], 'LPCA': [], 'RPCA': [], 'ACA': []}
partitionProportionTimeStepDict['LMCA'] = flowTimeStepArray[8, :] / flowTimeStepArray[3, :]
partitionProportionTimeStepDict['RMCA'] = flowTimeStepArray[10, :] / (flowTimeStepArray[4, :] + flowTimeStepArray[7, :])
partitionProportionTimeStepDict['LPCA'] = flowTimeStepArray[5, :] / flowTimeStepArray[1, :]
partitionProportionTimeStepDict['RPCA'] = flowTimeStepArray[6, :] / flowTimeStepArray[1, :]
partitionProportionTimeStepDict['ACA'] = flowTimeStepArray[20, :] / (flowTimeStepArray[9, :] + flowTimeStepArray[11, :])
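        # Each compartment proportion is its inlet-edge flow divided by the flow of its upstream (feeding) edge(s)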
for partitionName, proportionList in partitionProportionTimeStepDict.items():
xValues = list(range(numOfTimeSteps))
yValues = proportionList
ax.plot(xValues, yValues, 'o-', label=partitionName)
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel(r'Compartment flow proportion')
ax.legend(prop={'size': 6})
# ax.legend(bbox_to_anchor=(0,1.02,1,0.2), loc="lower left", mode="expand", borderaxespad=0, ncol=5, prop={'size': 6})
ax = fig.add_subplot(1,5,3)
xValues = list(range(numOfTimeSteps))
yValues = np.sum(flowTimeStepArray[:3, :], axis=0) * 10**6
ax.plot(xValues, yValues, 'o-')
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel(r'Total flow rate ($\mathrm{cm}^3/s$)')
ax = fig.add_subplot(1,5,4)
xValues = list(range(numOfTimeSteps))
edgeNameDict = {0: 'LICA', 1: 'VA', 2: 'RICA', 4: 'RPCA Comm', 9: 'LM', 11: 'RM', 8: 'LMCA', 20: 'ACA', 10: 'RMCA', 5: 'LPCA', 6: 'RPCA'}
from matplotlib.cm import get_cmap
name = "tab20"
cmap = get_cmap(name) # type: matplotlib.colors.ListedColormap
colors = cmap.colors # type: list
ax.set_prop_cycle(color=colors)
for edgeIndex, edgeName in edgeNameDict.items():
yValues = flowTimeStepArray[edgeIndex, :] * 10**6
ax.plot(xValues, yValues, 'o-', label=edgeName)
ax.set_xlabel('Time step')
ax.set_xticks(xValues)
ax.set_xticklabels(['T{}'.format(ii) for ii in xValues])
ax.set_ylabel(r'Flow rate ($\mathrm{cm}^3/s$)')
# ax.legend(prop={'size': 6})
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# ax.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
if isLastFigure:
plt.show()
def BFSTest(self):
"""
Test BFS function, and plot radius vs graph level for each compartment
GBM_BraVa distribution_Radius vs Graph level_Compartment(5)_Full range_Single row
"""
partitionInfo = {'LMCA': {'startNodes': [5], 'boundaryNodes': [13]}, 'RMCA': {'startNodes': [6], 'boundaryNodes': [13]},
'LPCA': {'startNodes': [4], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [13], 'boundaryNodes': []}}
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
# edgeIndexList = self.edgeIndexList
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,12]}
# self.perturbNetwork(option=2, extraInfo=extraInfo)
# self.setNetwork(option=2)
# fig = plt.figure(1, figsize=(15, 8))
# plt.subplots_adjust(left=0.06, right=0.94, top=0.94, bottom=0.06, wspace=0.3, hspace=0.3)
fig = plt.figure(11, figsize=(15, 3))
plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
subplotIndex = 1
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
spacing = self.spacing
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
print('{}:\nvisitedNodes={}\nvisitedEdges={}'.format(partitionName, visitedNodes, visitedEdges))
ax = fig.add_subplot(1,5,subplotIndex)
dictUsed = edgeInfoDict
attribute1, attribute2, attribute3 = 'segmentLevel', 'meanRadius', 'partitionName'
attribute1List = [edgeInfoDict[edgeIndex]['depth'] for edgeIndex in visitedEdges]
attribute2List = [edgeInfoDict[edgeIndex]['meanRadius']*spacing*1000 for edgeIndex in visitedEdges]
# attribute1List = [info[attribute1] for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames]
# attribute2List = [info[attribute2]*spacing*1000 for _, info in dictUsed.items() if attribute1 in info and attribute2 in info and attribute3 in info and info[attribute3] in partitionNames] # mm
# ax.plot(attribute1List, attribute2List, 'bo')
positions = np.sort(np.unique(attribute1List))
values = []
attribute1Array, attribute2Array = np.array(attribute1List), np.array(attribute2List)
for segmentLevel in positions:
locs = np.nonzero(attribute1Array == segmentLevel)[0]
values.append((attribute2Array[locs]).tolist())
mf.boxPlotWithWhiskers(values, ax, positions=positions, whis='range', xlabel='Graph level', ylabel='Radius (mm)')
ax.set_xlabel('Graph level')
ax.set_ylabel('Radius (mm)')
ax.set_title(partitionName)
subplotIndex += 1
# if partitionName == 'LPCA':
# print(sorted(attribute2List))
plt.show()
def examineFluidResult(self):
"""
Examine the result obtained by solving the network.
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
resultDict = {'referenceYear': {}, 'perturbedYear': {}}
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2)
loadFileName = 'fluidSimulationResultGBMTest2(referenceYear=BraVa, perturbedYear=2013, perturbTerminatingPressureOption=1).pkl'
_, _, _, resultDict = self.loadFluidResult(loadFileName, return_ResultDict=True)
self.nodeInfoDict = resultDict['referenceYear']['nodeInfoDict']
self.edgeInfoDict = resultDict['referenceYear']['edgeInfoDict']
partitionInfo = {'LMCA': {'startNodes': [4], 'boundaryNodes': [10]}, 'RMCA': {'startNodes': [5], 'boundaryNodes': [10]},
'LPCA': {'startNodes': [6], 'boundaryNodes': []}, 'RPCA': {'startNodes': [7], 'boundaryNodes': []}, 'ACA': {'startNodes': [10], 'boundaryNodes': []}}
terminatingNodes = {'LMCA': [], 'RMCA': [], 'ACA': [], 'LPCA': [], 'RPCA': []}
terminatingPressures = {'LMCA': [], 'RMCA': [], 'ACA': [], 'LPCA': [], 'RPCA': []}
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
for partitionName, info in partitionInfo.items():
startNodes, boundaryNodes = itemgetter('startNodes', 'boundaryNodes')(info)
resultDict = self.BFS(startNodes, boundaryNodes)
visitedNodes, visitedEdges = itemgetter('visitedNodes', 'visitedEdges')(resultDict)
terminatingNodesInThisPartition = [node for node in visitedNodes if G.degree(node) == 1 and nodeInfoDict[node]['depth'] != 0]
terminatingNodes[partitionName] = terminatingNodesInThisPartition
terminatingPressuresInThisPartition = [nodeInfoDict[node]['simulationData']['pressure']/13560/9.8*1000 for node in terminatingNodesInThisPartition]
terminatingPressures[partitionName].append(terminatingPressuresInThisPartition)
# GBM reference flow_BraVa time step_Ground truth option=2
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Pressure (mmHg)',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': r'Flow rate ($\mathrm{cm}^3/s$)',
'figTitle': 'GBM Reference (BraVa)'} # TP->terminating pressure
self.plotNetwork(infoDict, figIndex=1, isLastFigure=False)
# Manually perturb the network #
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# Load result
loadFileName = 'fluidSimulationResultGBMTest2(referenceYear=BraVa, perturbedYear=2013, perturbTerminatingPressureOption=1).pkl'
nodeInfoDictPerturbed, edgeInfoDictPerturbed, velocityPressurePerturbed = self.loadFluidResult(loadFileName)
self.nodeInfoDict = nodeInfoDictPerturbed
self.edgeInfoDict = edgeInfoDictPerturbed
self.setupFluidEquations()
self.validateFluidEquations(velocityPressure=velocityPressurePerturbed)
for partitionName, info in partitionInfo.items():
terminatingNodesInThisPartition = terminatingNodes[partitionName]
terminatingPressuresInThisPartition = [self.nodeInfoDict[node]['simulationData']['pressure']/13560/9.8*1000 for node in terminatingNodesInThisPartition]
terminatingPressures[partitionName].append(terminatingPressuresInThisPartition)
# GBM fluid solution_GBMTest2(referenceYear=BraVa, perturbedYear=2013, perturbTerminatingPressureOption=1)
G = self.G
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
nodeLabelDict = {node: np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()} # ground truth pressure in mmHg
nodeValueList = [np.round(nodeInfoDict[node]['simulationData']['pressure'] / 13560 / 9.8 * 1000, 1) for node in G.nodes()] # ground truth pressure in mmHg
edgeLabelDict = {edge: np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()} # ground truth flow in cm^3/s
edgeValueList = [np.round(edgeInfoDict[G[edge[0]][edge[1]]['edgeIndex']]['simulationData']['flow']*10**6, 2) for edge in G.edges()] # ground truth flow in cm^3/s
infoDict = {'nodeLabelDict': nodeLabelDict, 'nodeValueList': nodeValueList, 'nodeColorbarLabel': 'Pressure (mmHg)',
'edgeLabelDict': edgeLabelDict, 'edgeValueList': edgeValueList, 'edgeColorbarLabel': r'Flow rate ($\mathrm{cm}^3/s$)',
'figTitle': 'GBM {}'.format(extraInfo['perturbedYear'])} # TP->terminating pressure
self.plotNetwork(infoDict, figIndex=3, isLastFigure=False)
# GBM terminating pressure distribution per compartment(referenceYear=BraVa, perturbedYear=2013, perturbTerminatingPressureOption=1)
self.plotTerminatingPressures(figIndex=11, isLastFigure=True)
# fig = plt.figure(20, figsize=(15, 3)) # 1*5 figure
# plt.subplots_adjust(left=0.05, right=0.96, top=0.90, bottom=0.15, wspace=0.3, hspace=0.4)
# subplotIndex = 1
# for partitionName, info in partitionInfo.items():
# pressures = terminatingPressures[partitionName]
# ax = fig.add_subplot(1,5,subplotIndex)
# ax.hist(pressures, bins=10)
# ax.set_xlabel('Terminating pressure (mmHg)')
# ax.set_ylabel('Count')
# ax.set_title(partitionName)
# ax.legend(['BraVa', '2013'])
# subplotIndex += 1
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# plt.show()
def computeNetworkDetail(args, eqnInfoDictList, method='HW', errorNorm=0, extraInfo=None):
"""
    Given a network, the inlet pressure and all the terminating pressures, find the velocity/pressure of the remaining branches/nodes.
"""
rouBlood = 1050 # kg/m^3
rouMercury = 13560 # kg/m^3
g = 9.8 # m/s^2
rougBlood = rouBlood * g
# f in D-W equation = 64/Re = 64/(vD/nu) = 64*nu/(v*D) where nu is kinematic viscosity, D is diameter
# nu for blood is 2.6e-6 m^2/s
eqnList, eqnFlowList, eqnPressureList, eqnBoundaryList = [], [], [], []
for eqnInfoDict in eqnInfoDictList:
eqnType = eqnInfoDict['type']
if eqnType == 'flow':
velocityInIndexList, radiusInList = eqnInfoDict['velocityInIndexList'], eqnInfoDict['radiusInList']
velocityOutIndexList, radiusOutList = eqnInfoDict['velocityOutIndexList'], eqnInfoDict['radiusOutList']
velocityInList = [args[velocityIndex] for velocityIndex in velocityInIndexList]
velocityOutList = [args[velocityIndex] for velocityIndex in velocityOutIndexList]
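            # Mass-conservation residual at this node: |sum(Q_in) - sum(Q_out)| with Q = |v| * pi * r^2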
QIn = np.sum([np.abs(velocity) * np.pi * radius**2 for velocity, radius in zip(velocityInList, radiusInList)])
QOut = np.sum([np.abs(velocity) * np.pi * radius**2 for velocity, radius in zip(velocityOutList, radiusOutList)])
eqn = np.abs(QIn - QOut)
eqnFlowList.append(eqn)
elif eqnType == 'pressure':
radius, length, velocityIndex, c, k, edgeIndex = itemgetter('radius', 'length', 'velocityIndex', 'c', 'k', 'edgeIndex')(eqnInfoDict)
velocity = np.abs(args[velocityIndex])
if 'pressure' in eqnInfoDict['headPressureInfo']:
headPressure = eqnInfoDict['headPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['headPressureInfo']:
pressureIndex = eqnInfoDict['headPressureInfo']['pressureIndex']
headPressure = args[pressureIndex]
if 'pressure' in eqnInfoDict['tailPressureInfo']:
tailPressure = eqnInfoDict['tailPressureInfo']['pressure']
elif 'pressureIndex' in eqnInfoDict['tailPressureInfo']:
pressureIndex = eqnInfoDict['tailPressureInfo']['pressureIndex']
tailPressure = args[pressureIndex]
if method == 'HW':
deltaPressureByNode = headPressure - tailPressure
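                    # Hazen-Williams relation (SI form): deltaP ~ 10.67 * L * Q^k / (C^k * D^4.8704),
                    # with Q = v * pi * r^2, D = 2 * r and k the Hazen-Williams flow exponent (nominally 1.852)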
deltaPressureByHW = 10.67 * (velocity * np.pi * radius**2)**k * length / c**k / (2 * radius)**4.8704
                if np.isnan(deltaPressureByHW):
# RAiSERHD module
# <NAME>, 23 Feb 2022
# import packages
import h5py
import numpy as np
import pandas as pd
import time as ti
import os, warnings
from astropy import constants as const
from astropy import units as u
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.cosmology import FlatLambdaCDM
from astropy.io import fits
from astropy import wcs
from copy import copy
from matplotlib import pyplot as plt
from matplotlib import cm, rc
from matplotlib.colors import LogNorm
from matplotlib.ticker import FormatStrFormatter, NullFormatter, LogLocator
from numba import jit
from scipy.optimize import least_squares
from scipy.special import gamma, zeta
## Define global variables that can be adjusted to customise model output
# basic constants
year = 365.2422*24*3600 # average year in seconds
maverage = (0.6*const.m_p.value) # kg average particle mass
hubble = 0.7 # dimensionless Hubble parameter
OmegaM = 0.27 # fraction of matter in the flat universe
OmegaD = 0.73 # fraction of dark energy in the flat universe
freq_cmb = 5.879e10 # frequency of cosmic microwave background at z = 0
temp_cmb = 2.725 # temperature of cosmic microwave background at z = 0
c_speed = const.c.value # speed of light
e_charge = const.e.value # electron charge
k_B = const.k_B.value # Boltzmann constant
m_e = const.m_e.value # electron mass
mu0 = const.mu0.value # vacuum permeability
sigma_T = const.sigma_T.value # electron scattering cross-section
# model parameters that can be optimised for efficiency
nangles = 16 # number of angles to calculate expansion rate along (must be greater than 1)
betaRegions = 64 # set maximum number of beta regions
limTime = (year) # the FR-II limit must be used before this time
stepRatio = 1.01 # ratio to increase time/radius
crit_age = 0.95 # fraction of source age for lower end of power law approximations
lambda_min = 1e-256 # minimum value of Lambda for computational efficiency
# shocked gas and lobe parameters
chi = 2*np.pi/3.0 # lobe geometry parameter
shockAxisRatio = 0.5875 # exponent relating the cocoon axis ratio to the shocked gas axis ratio
shockRadius = 1.072 # fraction of the radius the shocked gas is greater than the lobe
gammaX = (5./3) # lorentz factor of external gas
gammaJ = (4./3) # lorentz factor of jet plasma
# set electron energy distribution constants
Lorentzmin = 780. # minimum Lorentz factor of injected electrons AT HOTSPOT for Cygnus A
Lorentzmax = 1e6 # effectively infinity
# density and temperature profiles
rCutoff = 0.01 # minimum radius to match profiles as a fraction of r200
betaMax = 2 # set critical value above which the cocoon expands ballistically
# average and standard deviation of Vikhlinin model parameters
alphaAvg = 1.64 # corrected for removal of second core term
alphaStdev = 0.30
betaPrimeAvg = 0.56
betaPrimeStdev = 0.10
gammaPrimeAvg = 3
gammaPrimeStdev = 0
epsilonAvg = 3.23
epsilonStdev = 0 # 1.93; this parameter has little effect on profile
rCoreAvg = 0.087 # this is ratio of rc to r200
rCoreStdev = 0.028
rSlopeAvg = 0.73 # this is ratio of rs to r200
rSlopeStdev = 0 # 0.39; this parameter has little effect on profile
# temperature parameters
TmgConst = (-2.099)
TmgSlope = 0.6678
TmgError = 0.0727
# new temperature parameters assuming heating from AGN during expansion
TmgAvg = 7.00
TmgStdev = 0.28
# approximate halo to gas fraction conversion
# for halo masses between 10^12 and 10^15 and redshifts 0 < z < 5
halogasfracCONST1z0 = (-0.881768418)
halogasfracCONST1z1 = (-0.02832004)
halogasfracCONST2z0 = (-0.921393448)
halogasfracCONST2z1 = 0.00064515
halogasfracSLOPE = 0.053302276
# uncertainties, in dex
dhalogasfracz0 = 0.05172769
dhalogasfracz1 = (-0.00177947)
# correction to SAGE densities
SAGEdensitycorr = (-0.1)
## Define functions for run-time user output
def __join(*values):
return ";".join(str(v) for v in values)
def __color_text(s, c, base=30):
template = '\x1b[{0}m{1}\x1b[0m'
t = __join(base+8, 2, __join(*c))
return template.format(t, s)
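# (usage note) e.g. print(__color_text('Done.', Colors.Green)) wraps the string in a 24-bit ANSI
# escape sequence ('\x1b[38;2;0;200;0mDone.\x1b[0m') to colour terminal output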
class Colors:
DogderBlue = (30, 144, 255)
Green = (0,200,0)
Orange = (255, 165, 0)
## Define main function to run RAiSE HD
def RAiSE_run(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5, equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, brightness=True, angle=0., resolution='standard', seed=None, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# record start time of code
start_time = ti.time()
# function to test type of inputs and convert type where appropriate
if nangles <= 1:
raise Exception('Private variable nangles must be greater than 1.')
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# download and pre-process particles from hydrodynamical simulation
if not resolution == None:
print(__color_text('Reading particle data from file.', Colors.Green))
time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio = __PLUTO_particles('RAiSE_particles.hdf5')
# set seed for quasi-random profiles
if not seed == None:
__set_seed(seed)
# create folder for output files if not present
if not os.path.exists('LDtracks'):
os.mkdir('LDtracks')
if not resolution == None:
print(__color_text('Running RAiSE dynamics and emissivity.', Colors.Green))
else:
print(__color_text('Running RAiSE dynamics.', Colors.Green))
for i in range(0, len(redshift)):
for j in range(0, len(axis_ratio)):
for k in range(0, len(jet_power)):
for l in range(0, nenvirons):
for m in range(0, len(active_age)):
for n in range(0, len(equipartition)):
for o in range(0, len(jet_lorentz)):
# set correct data types for halo mass and core density
if isinstance(halo_mass, (list, np.ndarray)):
new_halo_mass = halo_mass[l]
else:
new_halo_mass = halo_mass
if isinstance(rho0Value, (list, np.ndarray)):
new_rho0Value = rho0Value[l]
new_temperature = temperature[l]
new_betas = betas[l]
new_regions = regions[l]
else:
new_rho0Value = rho0Value
new_temperature = temperature
new_betas = betas
new_regions = regions
# calculate dynamical evolution of lobe and shocked shell using RAiSE dynamics
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda = __RAiSE_environment(redshift[i], axis_ratio[j], jet_power[k], source_age, halo_mass=new_halo_mass, rand_profile=rand_profile, rho0Value=new_rho0Value, regions=new_regions, betas=new_betas, temperature=new_temperature, active_age=active_age[m], jet_lorentz=jet_lorentz[o], gammaCValue=gammaCValue, aj_star=aj_star, jet_angle=jet_angle, axis_exponent=axis_exponent, fill_factor=fill_factor)
# calculate synchrotron emission from lobe using particles and RAiSE model
if not resolution == None:
location, luminosity, magnetic_field = __RAiSE_emissivity(frequency, redshift[i], time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age[m], equipartition[n], spectral_index, gammaCValue=gammaCValue, lorentz_min=lorentz_min, resolution=resolution)
# create pandas dataframe for integrated emission
df = pd.DataFrame()
df['Time (yrs)'] = 10**np.asarray(source_age).astype(np.float_)
df['Size (kpc)'] = 2*lobe_lengths[0,:]/const.kpc.value
df['Pressure (Pa)'] = shock_pressures[0,:]
df['Axis Ratio'] = lobe_lengths[0,:]/lobe_lengths[-1,:]
if not resolution == None:
for q in range(0, len(frequency)):
if frequency[q] > 0:
df['B{:.2f} (T)'.format(frequency[q])] = magnetic_field[:,q]
df['L{:.2f} (W/Hz)'.format(frequency[q])] = np.nansum(luminosity[:,:,q], axis=1)
# write data to file
if isinstance(rho0Value, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
elif isinstance(halo_mass, (list, np.ndarray)):
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i]), index=False)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate brightness per pixel across the source
if brightness == True and not resolution == None:
x_values, y_values, brightness_list = __RAiSE_brightness_map(frequency, redshift[i], source_age, lobe_lengths, location, luminosity, angle, resolution=resolution)
for p in range(0, len(source_age)):
for q in range(0, len(frequency)):
# create pandas dataframe for spatially resolved emission
if isinstance(x_values[p][q], (list, np.ndarray)):
df = pd.DataFrame(index=x_values[p][q]/const.kpc.value, columns=y_values[p][q]/const.kpc.value, data=brightness_list[p][q])
# write surface brightness map to file
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[q] > 0:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), header=True, index=True)
else:
df.to_csv('LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], source_age[p], resolution), header=True, index=True)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
else:
if isinstance(rho0Value, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), np.abs(np.log10(rho0Value[l])), jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
elif isinstance(halo_mass, (list, np.ndarray)):
warnings.warn('The following file was not created as no emission is present: LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}_{:s}.csv'.format(axis_ratio[j], np.abs(equipartition[n]), halo_mass[l], jet_power[k], 2*np.abs(spectral_index) + 1, active_age[m], jet_lorentz[o], redshift[i], frequency[q], source_age[p], resolution), category=UserWarning)
else:
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# print total run time to screen
print(__color_text('RAiSE completed running after {:.2f} seconds.'.format(ti.time() - start_time), Colors.Green))
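# Example call (illustrative sketch only; the parameter values below are assumptions, not
# defaults taken from this module):
# RAiSE_run(frequency=9.18, redshift=0.05, axis_ratio=4., jet_power=38.5,
#           source_age=[7.0, 7.5, 8.0], halo_mass=13.5, resolution='standard')
# This writes the integrated tracks (and, when brightness=True and a resolution keyword is
# supplied, surface brightness maps) as CSV files in the LDtracks/ directory.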
# Define function to test type of inputs and convert type where appropriate
def __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz):
# convert redshift, axis ratio and jet power to correct data types
if not isinstance(frequency, (list, np.ndarray)):
frequency = [frequency]
for i in range(0, len(frequency)):
if not isinstance(frequency[i], (int, float)):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
else:
if frequency[i] <= 0:
frequency[i] = -1.
warnings.warn('Pressure map will be produced instead of surface brightness image.', category=UserWarning)
elif not (5 < frequency[i] and frequency[i] < 20):
raise Exception('Frequency must be provided as a float or list/array of floats in units of log10 Hertz.')
if not isinstance(redshift, (list, np.ndarray)):
redshift = [redshift]
for i in range(0, len(redshift)):
if not isinstance(redshift[i], (int, float)) or not (0 < redshift[i] and redshift[i] < 20):
raise Exception('Redshift must be provided as a float or list/array of floats.')
if not isinstance(axis_ratio, (list, np.ndarray)):
axis_ratio = [axis_ratio]
for i in range(0, len(axis_ratio)):
if not isinstance(axis_ratio[i], (int, float)) or not (1 <= axis_ratio[i] and axis_ratio[i] < 20):
raise Exception('Axis ratio must be provided as a float or list/array of floats and be greater than 1.')
if not isinstance(jet_power, (list, np.ndarray)):
jet_power = [jet_power]
for i in range(0, len(jet_power)):
if not isinstance(jet_power[i], (int, float)) or not (33 < jet_power[i] and jet_power[i] < 46):
raise Exception('Jet power must be provided as a float or list/array of floats in units of log10 Watts.')
if not isinstance(source_age, (list, np.ndarray)):
source_age = [source_age]
for i in range(0, len(source_age)):
if not isinstance(source_age[i], (int, float)) or not (0 <= source_age[i] and source_age[i] <= 10.14):
raise Exception('Source age must be provided as a float or list/array of floats in units of log10 years.')
else:
source_age[i] = float(source_age[i])
if not isinstance(active_age, (list, np.ndarray)):
active_age = [active_age]
for i in range(0, len(active_age)):
if not isinstance(active_age[i], (int, float)) or not (0 <= active_age[i] and active_age[i] <= 10.14):
raise Exception('Active age must be provided as a float or list/array of floats in units of log10 years.')
if not isinstance(equipartition, (list, np.ndarray)):
equipartition = [equipartition]
for i in range(0, len(equipartition)):
if not isinstance(equipartition[i], (int, float)) or not (-6 < equipartition[i] and equipartition[i] < 6):
raise Exception('Equipartition factor must be provided as a float or list/array of floats in units of log10.')
if not isinstance(jet_lorentz, (list, np.ndarray)):
jet_lorentz = [jet_lorentz]
for i in range(0, len(jet_lorentz)):
if not isinstance(jet_lorentz[i], (int, float)) or not (-100 <= jet_lorentz[i] and jet_lorentz[i] < 20):
raise Exception('Jet bulk lorentz factor factor must be provided as a float or list/array of floats.')
elif (-100 <= jet_lorentz[i] and jet_lorentz[i] <= 1):
jet_lorentz[i] = 0
warnings.warn('Jet phase will not be included in this simulation.', category=UserWarning)
# convert environment to correct data types
if not isinstance(halo_mass, (list, np.ndarray)) and not halo_mass == None:
halo_mass = [halo_mass]
nenvirons_halo = len(halo_mass)
elif not halo_mass == None:
nenvirons_halo = len(halo_mass)
if isinstance(halo_mass, (list, np.ndarray)):
for i in range(0, len(halo_mass)):
if not isinstance(halo_mass[i], (int, float)) or not (9 < halo_mass[i] and halo_mass[i] < 17):
raise Exception('Dark matter halo mass must be provided as a float or list/array of floats in units of log10 stellar mass.')
if not isinstance(rho0Value, (list, np.ndarray)) and not rho0Value == None:
rho0Value = [rho0Value]
nenvirons_rho = len(rho0Value)
elif not rho0Value == None:
nenvirons_rho = len(rho0Value)
if isinstance(rho0Value, (list, np.ndarray)):
if not isinstance(temperature, (list, np.ndarray)) and not temperature == None:
temperature = [temperature]*nenvirons_rho
elif temperature == None or not len(temperature) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(betas, (list, np.ndarray)) and not isinstance(betas[0], (list, np.ndarray)):
betas = [betas]*nenvirons_rho
elif not isinstance(betas, (list, np.ndarray)) and not betas == None:
betas = [[betas]]*nenvirons_rho
elif betas == None or not len(betas) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(regions, (list, np.ndarray)) and not isinstance(regions[0], (list, np.ndarray)):
regions = [regions]*nenvirons_rho
elif not isinstance(regions, (list, np.ndarray)) and not regions == None:
regions = [[regions]]*nenvirons_rho
elif regions == None or not len(regions) == nenvirons_rho:
rho0Value = None # full density profile not provided
if isinstance(rho0Value, (list, np.ndarray)):
nenvirons = nenvirons_rho
for i in range(0, len(rho0Value)):
if not isinstance(rho0Value[i], (int, float)) or not (1e-30 < rho0Value[i] and rho0Value[i] < 1e-15):
raise Exception('Core gas density must be provided as a float or list/array of floats in units of kg/m^3.')
for i in range(0, len(temperature)):
if not isinstance(temperature[i], (int, float)) or not (0 < temperature[i] and temperature[i] < 1e12):
raise Exception('Gas temperature must be provided as a float or list/array of floats in units of Kelvin.')
else:
nenvirons = nenvirons_halo
return frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons
# Define random seed function
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __set_seed(value):
np.random.seed(value)
## Define functions for analytic modelling of the environment
# function to calculate properties of the environment and call RAiSE_evolution
def __RAiSE_environment(redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., gammaCValue=5./3, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# check minimal inputs
if halo_mass == None and (not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray))):
raise Exception('Either the halo mass or full density profile must be provided as model inputs.')
# calculate gas mass and virial radius of halo unless density and temperature profile fully specified
gasfraction = 0
if not halo_mass == None:
rVir = (10**halo_mass*const.M_sun.value/(100./const.G.value*(100.*hubble*np.sqrt(OmegaM*(1 + redshift)**3 + OmegaD)/const.kpc.value)**2))**(1./3)
if rand_profile == False:
gasfraction = __HalogasfracFunction(halo_mass, redshift)
else:
gasfraction = __rand_norm(__HalogasfracFunction(halo_mass, redshift), __dHalogasfracFunction(halo_mass, redshift))
gasMass = 10**(halo_mass + gasfraction)*const.M_sun.value
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
if not isinstance(betas, (list, np.ndarray)) or not isinstance(regions, (list, np.ndarray)):
# set maximum number of regions
nregions = betaRegions
nregions, new_betas, new_regions = __DensityProfiler(rVir, nregions, rand_profile)
elif len(betas) == len(regions):
# set maximum number of regions
nregions = len(betas)
new_betas = np.asarray(betas.copy())
new_regions = np.asarray(regions.copy())
else:
raise Exception('Variables betas and regions must be arrays of the same length.')
# calculate the average temperature of the external medium
if temperature == None:
if not halo_mass == None:
if rand_profile == False:
tempFlat = 10**TmgAvg
tempCluster = 10**(TmgConst + TmgSlope*halo_mass)
else:
tempFlat = 10**(__rand_norm(TmgAvg, TmgStdev))
tempCluster = 10**(__rand_norm(TmgConst + TmgSlope*halo_mass, TmgError))
temperature = max(tempFlat, tempCluster) # take the highest temperature out of the flat profile and cluster model
else:
raise Exception('Either the halo mass or temperature must be provided as model inputs.')
# determine initial value of density parameter given gas mass and density profile
if not rho0Value == None:
# determine density parameter in the core
k0Value = rho0Value*new_regions[0]**new_betas[0]
# extend first beta region to a radius of zero
new_regions[0] = 0
elif not halo_mass == None:
# extend first beta region to a radius of zero
new_regions[0] = 0
# find relative values (i.e. to 1) of density parameter in each beta region
kValues = __DensityParameter(nregions, 1.0, new_betas, new_regions)
# determine density parameter in the core
k0Value = __k0ValueFinder(rVir, gasMass, nregions, new_betas, new_regions, kValues)
else:
raise Exception('Either the halo mass or core density must be provided as model inputs.')
# find values of density parameter in each beta region
kValues = __DensityParameter(nregions, k0Value, new_betas, new_regions)
# call RadioSourceEvolution function to calculate Dt tracks
return __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, new_betas, new_regions, kValues, temperature, jet_lorentz, aj_star, jet_angle, axis_exponent, fill_factor)
# approximate the gas density profile of Vikhlinin 2006 by multiple density profiles with a simple beta dependence
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityProfiler(rVir, nregions, rand_profile):
# instantiate variables
betas, regions = np.zeros(nregions), np.zeros(nregions)
# set values of Vikhlinin model parameters
if rand_profile == False:
alpha = alphaAvg
betaPrime = betaPrimeAvg
gammaPrime = gammaPrimeAvg # this value has no uncertainty
epsilon = epsilonAvg
rCore = rCoreAvg
rSlope = rSlopeAvg
else:
alpha = __rand_norm(alphaAvg, alphaStdev)
betaPrime = __rand_norm(betaPrimeAvg, betaPrimeStdev)
gammaPrime = __rand_norm(gammaPrimeAvg, gammaPrimeStdev) # this value has no uncertainty
epsilon = __rand_norm(epsilonAvg, epsilonStdev)
rCore = __rand_norm(rCoreAvg, rCoreStdev)
rSlope = __rand_norm(rSlopeAvg, rSlopeStdev)
# set minimum and maximum radius for density profile to be matched
rmin = rCutoff*rVir
rmax = rVir
# use logarithmic radius scale
r = rmin
ratio = (rmax/rmin)**(1./(nregions)) - 1
for count in range(0, nregions):
# set radius at low end of region
rlow = r
# calculate relative density at rlow, i.e. ignoring rho_0 factor
rhoLow = np.sqrt((rlow/(rCore*rVir))**(-alpha)/((1 + rlow**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rlow**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# increment radius
dr = r*ratio
r = r + dr
# set radius at high end of region
rhigh = r
# calculate relative density at rhigh, i.e. ignoring rho_0 factor
rhoHigh = np.sqrt((rhigh/(rCore*rVir))**(-alpha)/((1 + rhigh**2/(rCore*rVir)**2)**(3*betaPrime - alpha/2.)*(1 + rhigh**gammaPrime/(rSlope*rVir)**gammaPrime)**(epsilon/gammaPrime)))
# set value of innermost radius of each beta region
if count == 0:
# extend first beta region to a radius of zero
regions[count] = 0
else:
regions[count] = rlow
# calculate exponent beta for each region to match density profile, ensuring beta is less than 2
if (-np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh) < betaMax):
betas[count] = -np.log(rhoLow/rhoHigh)/np.log(rlow/rhigh)
else:
# ensure beta is less than (or equal to) 2
betas[count] = betaMax
# set this count to be the number of distinct regions
nregions = count + 1
break
return nregions, betas, regions
# find values of density parameter in each beta region
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __DensityParameter(nregions, k0Value, betas, regions):
# instantiate variables
kValues = np.zeros(nregions)
# calculate density parameters in each region
for count in range(0, nregions):
# match tracks between regions `a' and `b'
if count > 0:
# find replicating core density in region `b' required to match pressures and times
kValues[count] = kValues[count - 1]*regions[count]**(betas[count] - betas[count - 1])
# if first region, set initial value of replicating core density as actual core density
else:
kValues[count] = k0Value
return kValues
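# (illustrative note) the kValues describe a piecewise power-law density rho(r) ~ kValues[i]*r**(-betas[i])
# in region i; the recursion kValues[i] = kValues[i-1]*regions[i]**(betas[i] - betas[i-1]) simply enforces
# continuity of rho at the boundary r = regions[i], e.g. for betas = [0.5, 1.0] it gives
# kValues[1] = kValues[0]*regions[1]**0.5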
# determine value of the density parameter at the core given gas mass and density profile
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __k0ValueFinder(rVir, gasMass, nregions, betas, regions, kValues):
# set volume to zero initially
volume = 0
# calculate the weighted volume integral by analytically integrating the volume in each beta region
for count in range(0, nregions):
# set lower bound of analytic integral
rlow = regions[count]
# set upper bound of analytic integral
if (count + 1 == nregions):
rhigh = rVir
else:
rhigh = regions[count + 1]
# increment total weighted volume by the weighted volume of this region
volume = volume + 4*np.pi*(kValues[count]/kValues[0])/(3 - betas[count])*(rhigh**(3 - betas[count]) - rlow**(3 - betas[count]))
# calculate density parameter at the core from the gas mass and weighted volume
k0Value = gasMass/volume
return k0Value
# random normal with values truncated to avoid sign changes
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rand_norm(mean, stdev):
rand_number = np.random.normal(mean, stdev)
while (mean*rand_number < 0 or np.abs(rand_number - mean) > 2*stdev):
rand_number = np.random.normal(mean, stdev)
return rand_number
# gas fraction-halo mass relationship
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __HalogasfracFunction(halo_mass, redshift):
return max(halogasfracCONST1z0 + halogasfracCONST1z1*redshift, halogasfracCONST2z0 + halogasfracCONST2z1*redshift) + halogasfracSLOPE*(halo_mass - 14) + SAGEdensitycorr # in log space
# gas fraction-halo mass relationship error
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __dHalogasfracFunction(halo_mass, redshift):
return dhalogasfracz0 + dhalogasfracz1*redshift # in log space
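# (illustrative check) __HalogasfracFunction(14., 0.) = max(-0.88, -0.92) + 0 - 0.1 ~ -0.98 in log space,
# i.e. a 10^14 solar mass halo at z = 0 is assigned a gas fraction of roughly 10 per cent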
## Define functions required for RAiSE dynamical evolution
# function to calculate dynamical evolution of lobe and shocked shell
def __RAiSE_evolution(redshift, axis_ratio, jet_power, source_age, active_age, gammaCValue, nregions, betas, regions, kValues, temperature, jet_lorentz, aj_star=0.231, jet_angle=0.686, axis_exponent=0.343, fill_factor=0.549):
# convert jet power and source age to correct units
QavgValue = 10**jet_power/2. # set the power of *each* jet; convert from log space
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = np.array([10**source_age*year])
tActive = 10**active_age*year
# calculate angle of current radial line
angles = np.arange(0, nangles, 1).astype(np.int_)
dtheta = (np.pi/2)/nangles
theta = dtheta*(angles + 0.5)
# calculate opening angle of jet
open_angle = (jet_angle*np.pi/180)/(axis_ratio/2.83)
# evaluate the translation coefficients eta_c and eta_s
eta_c = 1./np.sqrt(axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_s = 1./np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
# evaluate the translation coefficient zeta_s/eta_s at t -> infinity
zetaeta = np.sqrt(axis_ratio**(2*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)/np.sqrt(axis_ratio**(4*shockAxisRatio)*(np.sin(theta))**2 + (np.cos(theta))**2)
eta_c[0], eta_s[0], zetaeta[0] = 1., 1., 1.
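# (illustrative check) eta_c is the polar equation of an ellipse normalised to its semi-major axis:
# eta_c -> 1 on the jet axis (theta -> 0) and eta_c = 1/axis_ratio at theta = pi/2, so lengths scaled
# by eta_c trace out an ellipsoidal lobe with the requested axis ratio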
# calculate the differential volume element coefficient chi
dchi = 4*np.pi/3.*np.sin(theta)*np.sin(dtheta/2.)
# solve RAiSE dynamics iteratively to find thermal component of lobe pressure
if jet_lorentz > 1:
# run code in strong-shock limit to calibrate initial velocity
x_time = 10**10.14*year
_, _, _, _, _, _, _, critical_point_1 = __RAiSE_runge_kutta(QavgValue, np.array([x_time]), x_time, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed, strong_shock=True)
# run code for full RAiSE HD dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point_3 = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=c_speed*critical_point_1[2]/critical_point_1[3])
else:
# run code for RAiSE X dynamical model
lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, _ = __RAiSE_runge_kutta(QavgValue, tFinal, tActive, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue)
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda
# function to apply Runge-Kutta method and extract values at requested time steps
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_runge_kutta(QavgValue, source_age, active_age, axis_ratio, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, eta_c, eta_s, zetaeta, dchi, nregions, betas, regions, kValues, temperature, gammaCValue, critical_velocity=0., strong_shock=False):
# instantiate variables
X, P = np.zeros((nangles, 5)), np.zeros((nangles, 4))
critical_point = np.zeros(4)
regionPointer = np.zeros(nangles).astype(np.int_)
lobe_minor, lambda_crit, alphaP_denv, alpha_lambda = np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age)), np.zeros(len(source_age))
lobe_lengths, shock_lengths, shock_pressures = np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age))), np.zeros((nangles, len(source_age)))
# calculate injection ages to derive time-average power-law indices for external pressure and filling factor
inject_age = np.zeros(2*len(source_age))
inject_axis_ratios, inject_pressures, inject_lambdas = np.zeros(2*len(source_age)), np.zeros(2*len(source_age)), np.zeros(2*len(source_age))
for timePointer in range(0, len(source_age)):
inject_age[2*timePointer:2*(timePointer + 1)] = np.asarray([crit_age*source_age[timePointer], source_age[timePointer]])
inject_index = np.argsort(inject_age) # sort ages in ascending order
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
i = 0
for timePointer in range(0, len(source_age)):
# set initial conditions for each volume element
if timePointer == 0:
# calculate initial time and radius for ODE
FR2time = limTime
if jet_lorentz > 1:
FR2radius = bulk_velocity*limTime
FR2velocity = bulk_velocity # eta_R is very large
else:
FR2radius = np.sqrt(1 - 1./100**2)*c_speed*limTime
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# test if this radius is above start of second region boundary
if (regions[1] < FR2radius):
FR2radius = regions[1]
if jet_lorentz > 1:
FR2time = regions[1]/bulk_velocity
FR2velocity = bulk_velocity
else:
FR2time = regions[1]/(np.sqrt(1 - 1./100**2)*c_speed)
FR2velocity = np.sqrt(1 - 1./100**2)*c_speed
# calculate the initial jet/shock shell radius and velocity for each angle theta
X[angles,0] = FR2time
X[angles,1] = FR2radius*eta_s
X[angles,2] = FR2velocity*eta_s
if jet_lorentz > 1:
X[0,3], X[angles[1:],3] = bulk_lorentz, 1./np.sqrt(1 - (FR2velocity*eta_s[angles[1:]]/c_speed)**2)
else:
X[0,3], X[angles[1:],3] = 100, 100*eta_s[angles[1:]]
X[angles,4] = -1 # null value
# set region pointer to first (non-zero) region if smaller than FR2 radius
index = regions[1] < X[angles,1]
regionPointer[index] = 1
regionPointer[np.logical_not(index)] = 0
# calculate fraction of jet power injected into each volume element
injectFrac = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac = injectFrac/np.sum(injectFrac) # sum should be equal to unity
# solve ODE to find radius and pressure at each time step
while (X[0,0] < source_age[timePointer]):
while (X[0,0] < inject_age[inject_index[i]]):
# calculate the appropriate density profile for each angle theta
for anglePointer in range(0, nangles):
while (regionPointer[anglePointer] + 1 < nregions and X[anglePointer,1] > regions[regionPointer[anglePointer] + 1]):
regionPointer[anglePointer] = regionPointer[anglePointer] + 1
# check if next step passes time point of interest
if (X[0,0]*stepRatio > inject_age[inject_index[i]]):
step = inject_age[inject_index[i]] - X[0,0]
else:
step = X[0,0]*(stepRatio - 1)
# update estimates of time, radius and velocity
__rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,3] = np.maximum(1, X[:,3])
# find location of jet--lobe transition
critical_point[0], critical_point[1], critical_point[2], critical_point[3] = X[0,0], X[0,1], X[0,2]*X[0,3], X[0,4]
# record axis ratio, external pressure and filling factor and injection times
if P[-1,0] > 0:
inject_axis_ratios[inject_index[i]] = 1./(P[0,0]/P[-1,0])**2 # inverted to match alpha_lambda definition
else:
inject_axis_ratios[inject_index[i]] = 1
inject_pressures[inject_index[i]] = P[0,2]
inject_lambdas[inject_index[i]] = P[0,3]
# update injection age if not a requested source age
if inject_age[inject_index[i]] < source_age[timePointer]:
i = i + 1
# calculate the lobe and shocked shell length, shock pressure and total pressure as a function of angle
lobe_lengths[angles,timePointer] = P[angles,0]
shock_lengths[angles,timePointer] = X[angles,1]
shock_pressures[angles,timePointer] = P[angles,1]
lambda_crit[timePointer] = P[0,3]
# calculate lobe minor axis (associated with dimensions of shocked shell) at this time step
lobe_minor[timePointer] = X[-1,1]*eta_c[-1]/(shockRadius*eta_s[-1])
# calculate the slope of external pressure profile at this time step
if inject_pressures[inject_index[2*timePointer]] <= 0:
alphaP_denv[timePointer] = 0
else:
alphaP_denv[timePointer] = np.log(inject_pressures[2*timePointer + 1]/inject_pressures[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer])
if inject_lambdas[2*timePointer] <= 0:
alpha_lambda[timePointer] = 1e9 # no emission from this injection time
else:
alpha_lambda[timePointer] = np.log(inject_lambdas[2*timePointer + 1]/inject_lambdas[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) + np.log(inject_axis_ratios[2*timePointer + 1]/inject_axis_ratios[2*timePointer])/np.log(inject_age[2*timePointer + 1]/inject_age[2*timePointer]) # filling factor and changing volume/axis ratio
return lobe_lengths, lobe_minor, shock_lengths, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, critical_point
# Runge-Kutta method to solve ODE in dynamical model
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __rk4sys(step, X, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# instantiate variables
Y, K1, K2, K3, K4 = np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5)), np.zeros((len(angles), 5))
# fourth order Runge-Kutta method
__xpsys(X, K1, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K1[:,:]
__xpsys(Y, K2, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K2[:,:]
__xpsys(Y, K3, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
Y[:,:] = X[:,:] + 0.5*step*K3[:,:]
__xpsys(Y, K4, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock)
X[:,:] = X[:,:] + (step/6.)*(K1[:,:] + 2*K2[:,:] + 2*K3[:,:] + K4[:,:])
# coupled second order differential equations for lobe evolution
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __xpsys(X, f, P, QavgValue, active_age, aj_star, axis_exponent, fill_factor, jet_lorentz, open_angle, angles, injectFrac, eta_c, eta_s, zetaeta, dchi, regionPointer, betas, kValues, temperature, gammaCValue, critical_velocity, strong_shock):
# Differential equations for X[0,1,2,3,4] = (time, radius, velocity, lorentz_factor, thermal_velocity)
# Additional variable for P[0,1,2,3] = (lobe_length, lobe_pressure, external_pressure, lambda_crit)
f[angles,0] = 1.
f[angles,1] = X[angles,2]
# test if the AGN is active at this time-step
if (X[0,0] <= active_age):
active_jet = 1
else:
active_jet = 0
# calculate the spatially-averaged jet velocity and Lorentz factor
if jet_lorentz > 1:
bulk_lorentz = np.sqrt(jet_lorentz**2*aj_star**4 - aj_star**4 + 1)
bulk_velocity = np.sqrt((jet_lorentz**2*aj_star**4 - aj_star**4)/(jet_lorentz**2*aj_star**4 - aj_star**4 + 1))*c_speed
else:
bulk_lorentz, bulk_velocity = -1, -1
# TWO-PHASE FLUID
if jet_lorentz > 1:
# calculate the lobe formation scale
eta_R = QavgValue*bulk_lorentz**2/(2*np.pi*kValues[regionPointer[0]]*(bulk_lorentz*bulk_velocity)*(bulk_lorentz - 1)*c_speed**2*(1 - np.cos(open_angle))*X[0,1]**(2 - betas[regionPointer[0]]))
# calculate lambda_crit
#if (eta_R/bulk_lorentz**2) > 1:
# lambda_crit = 0
#else:
# lambda_crit = 1
lambda_crit = np.exp(-(eta_R/bulk_lorentz**2)/(2*np.log(2)))
P[0,3] = lambda_crit
else:
P[0,3] = 1
# ACCELERATION
# update fraction of jet power injected into each volume element
injectFrac_new = dchi*eta_s**(3 - betas[regionPointer[0]])*zetaeta**2
injectFrac_new = injectFrac_new/np.sum(injectFrac_new) # sum should be equal to unity
if jet_lorentz > 1:
injectFrac[angles] = (1 - lambda_crit)*injectFrac_new + lambda_crit*injectFrac # keep static at late times
else:
injectFrac[angles] = injectFrac_new[angles]
# acceleration of jet-head
if jet_lorentz > 1:
jet_acceleration = (betas[regionPointer[0]] - 2)*bulk_velocity*X[0,2]/(2*X[0,1]*(1 + eta_R**(-1./2))**2*eta_R**(1./2))
# acceleration of lobe (supersonic/subsonic)
if jet_lorentz > 1 and strong_shock == True:
f[angles,2] = np.minimum((gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)), (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,2]*X[angles,3]/(X[0,0] + year)) # ensure model doesn't run slower than limit due to numerics
elif jet_lorentz > 1:
f[angles,2] = (gammaCValue - 1)*injectFrac[angles]*(QavgValue*active_jet)*X[angles,1]**(betas[regionPointer[angles]] - 3)/(X[angles,2]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*dchi[angles]*(X[angles,3]*zetaeta[angles])**2*kValues[regionPointer[angles]]) + (betas[regionPointer[angles]] - 3*gammaCValue)*(X[angles,2])**2/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)) - (3*gammaCValue - betas[regionPointer[angles]])*(k_B*temperature/maverage)/(2*X[angles,1]*(1 + (X[angles,3]*X[angles,2]/c_speed)**2)*(X[angles,3]*zetaeta[angles])**2)
else:
sub_angles = (X[angles,2]*X[angles,3]*zetaeta)**2/(gammaX*(k_B*temperature/maverage)) <= 1
super_angles = np.logical_not(sub_angles)
f[super_angles,2] = (gammaX + 1)*(gammaCValue - 1)*injectFrac[super_angles]*(QavgValue*active_jet)*X[super_angles,1]**(betas[regionPointer[super_angles]] - 3)/(2*X[super_angles,2]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*dchi[super_angles]*(X[super_angles,3]*zetaeta[super_angles])**2*kValues[regionPointer[super_angles]]) + (betas[regionPointer[super_angles]] - 3*gammaCValue)*(X[super_angles,2])**2/(2*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)) + (gammaX - 1)*(3*gammaCValue - betas[regionPointer[super_angles]])*(k_B*temperature/maverage)/(4*X[super_angles,1]*(1 + (X[super_angles,3]*X[super_angles,2]/c_speed)**2)*(X[super_angles,3]*zetaeta[super_angles])**2)
f[sub_angles,2] = (betas[regionPointer[sub_angles]] - 2)*(X[sub_angles,2])**2/X[sub_angles,1]
# combine acceleration from jet-head and lobe as two-phase fluid
if jet_lorentz > 1:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,2], f[angles[1:],2] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],2] = X[0,2]*eta_s[angles[1:]]
else:
f[0,2], f[angles[1:],2] = (1 - lambda_crit)*jet_acceleration + lambda_crit*f[0,2], (1 - lambda_crit)*jet_acceleration*eta_s[angles[1:]] + lambda_crit*f[angles[1:],2]
# calculate Lorentz factor of two-phase fluid
f[angles,3] = X[angles,3]**3*X[angles,2]*f[angles,2]/c_speed**2
# PRESSURES
# external pressure at each volume element
P[angles,2] = kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# set velocity associated with thermal component of lobe pressure
if jet_lorentz > 1 and critical_velocity > 0:
if (lambda_crit < lambda_min or X[0,0] < 10*limTime): # improve stability
f[0,4], f[angles[1:],4] = jet_acceleration, jet_acceleration*eta_s[angles[1:]]
X[angles[1:],4] = X[0,4]*eta_s[angles[1:]]
else:
f[angles,4] = (betas[regionPointer[angles]] - 2)/(5 - betas[regionPointer[angles]]) * X[angles,4]/(X[0,0] + year)
else:
X[angles,4], f[angles,4] = X[angles,2]*X[angles,3], f[angles,2]
# jet/lobe pressure at each volume element
volume = X[angles,1]**3*dchi[angles]
if jet_lorentz > 1:
# calculate lobe pressure
P[angles,1] = zetaeta[angles]**2*kValues[regionPointer[angles]]*X[angles,1]**(-betas[regionPointer[angles]])*(np.minimum(X[angles,2], X[angles,4]))**2 + kValues[regionPointer[angles]]*(k_B*temperature/maverage)*X[angles,1]**(-betas[regionPointer[angles]])
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
else:
# calculate lobe pressure
P[super_angles,1] = 2./(gammaX + 1)*zetaeta[super_angles]**2*kValues[regionPointer[super_angles]]*X[super_angles,1]**(-betas[regionPointer[super_angles]])*(X[super_angles,2]*X[super_angles,3])**2 - (gammaX - 1)/(gammaX + 1)*kValues[regionPointer[super_angles]]*(k_B*temperature/maverage)*X[super_angles,1]**(-betas[regionPointer[super_angles]])
P[sub_angles,1] = P[sub_angles,2]
# calculate average pressure across jet/lobe
pressure = np.sum(P[angles,1]*volume)/np.sum(volume)
# set average pressure in all of lobe other than hotspot
P[angles[1:],1] = pressure
# AXIS RATIO
if jet_lorentz > 1:
# calculate total mass of particles from the jet
particle_mass = QavgValue*np.minimum(active_age, X[0,0])/((bulk_lorentz - 1)*c_speed**2)
# calculate volume occupied by particles expanding at sound speed and maximum fillable volume within shocked shell
jet_sound = c_speed*np.sqrt(gammaJ - 1)
particle_volume = particle_mass/(gammaJ*pressure/jet_sound**2) # mass / density
shell_volume = np.sum(volume*eta_c/(shockRadius*eta_s))
# calculate (optimal) lobe volume as weighted sum of particle volume and maximum fillable volume (i.e. enable sound speed to reduce as lobe approaches size of shocked shell)
lobe_volume = 1./(1./(particle_volume/fill_factor)**axis_exponent + 1./(shell_volume)**axis_exponent)**(1./axis_exponent)
# find axis ratio for an ellipsoidal lobe
if lobe_volume > 0 and lambda_crit >= lambda_min:
lobe_axis_ratio = np.minimum(np.sqrt(2*np.pi*(X[0,1]/shockRadius)**3/(3*lobe_volume)), 1/np.tan(open_angle))
else:
lobe_axis_ratio = 1/np.tan(open_angle)
# update lobe length along jet axis and axis ratio of shocked shell
P[0,0] = X[0,1]/shockRadius
# calculate geometry of each angular volume element
dtheta = (np.pi/2)/len(angles)
theta = dtheta*(angles + 0.5)
lobe_eta_c = 1./np.sqrt(lobe_axis_ratio**2*(np.sin(theta))**2 + (np.cos(theta))**2)
# set length of lobe along each angular volume element
P[angles[1:],0] = np.minimum(lobe_eta_c[angles[1:]]*P[0,0], X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])) # second condition should rarely be met
else:
# set length of lobe along each angular volume element
P[0,0], P[angles[1:],0] = X[0,1]/shockRadius, X[angles[1:],1]*eta_c[angles[1:]]/(shockRadius*eta_s[angles[1:]])
## Define functions to download and preprocess particles from hydrodynamical simulations
def __PLUTO_particles(particle_data_path):
# unpack particle data from hydrodynamical simulations
particle_dict = h5py.File(os.path.join(os.path.dirname(os.path.realpath(__file__)), particle_data_path), 'r')
# store variables at desired resolution
time = particle_dict['time'][:].astype(np.float32)
shock_time = particle_dict['tinject'][:,:].astype(np.float32)
major = particle_dict['major'][:].astype(np.float32)
minor = particle_dict['minor'][:].astype(np.float32)
x1 = particle_dict['x1'][:,:].astype(np.float32)
x2 = particle_dict['x2'][:,:].astype(np.float32)
x3 = particle_dict['x3'][:,:].astype(np.float32)
tracer = particle_dict['tracer'][:,:].astype(np.float32)
vx3 = particle_dict['vx3'][:,:].astype(np.float32)
volume = particle_dict['volume'][:,:].astype(np.float32)
pressure = particle_dict['pressure'][:,:].astype(np.float32)
press_minor = particle_dict['pressminor'][:].astype(np.float32)
alphaP_hyd = particle_dict['alphaP'][:,:].astype(np.float32)
alphaP_henv = particle_dict['alphaPenv'][:,:].astype(np.float32)
hotspot_ratio = particle_dict['hotspotratio'][:].astype(np.float32)
return time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio
## Define functions to add emissivity from particles in hydrodynamical simulations on top of dynamics
# function to manage orientation and distribution of particles from simulation output
def __RAiSE_emissivity(frequency, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, source_age, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, active_age, equipartition, spectral_index, gammaCValue=5./3, lorentz_min=Lorentzmin, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
nsamples = 2048
elif resolution == 'high':
nsamples = 512
elif resolution == 'standard':
nsamples = 128
elif resolution == 'poor':
nsamples = 32
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# select viewing times by cycling through the simulated snapshot times
timePointer = np.arange(0, nsamples).astype(np.int_)%len(time)
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
inverse_compton = np.zeros_like(frequency).astype(np.int_)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
if rest_frequency[freqPointer] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton[freqPointer] = 1
else:
rest_frequency = [10**frequency*(1 + redshift)]
inverse_compton = [0]
if rest_frequency[0] > 1e12: # assume frequencies greater than 1000 GHz are inverse-Compton
inverse_compton = [1]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
tActive = 10**active_age*year
equi_factor = 10**float(-np.abs(equipartition)) # ensure sign is correct
s_index = 2*float(np.abs(spectral_index)) + 1 # ensure sign is correct
# derive redshift dependent ancillary variables used by every analytic model
Ks = __RAiSE_Ks(s_index, gammaCValue, lorentz_min)
blackbody = __RAiSE_blackbody(s_index)
return __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody)
# function to calculate emissivity from each particle using RAiSE model
@jit(nopython=True, parallel=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_particles(timePointer, rest_frequency, inverse_compton, redshift, time, shock_time, major, minor, x1, x2, x3, tracer, vx3, volume, pressure, press_minor, alphaP_hyd, alphaP_henv, hotspot_ratio, tFinal, lobe_lengths, lobe_minor, shock_pressures, lambda_crit, alphaP_denv, alpha_lambda, tActive, equi_factor, s_index, gammaCValue, lorentz_min, Ks, blackbody):
# instantiate variables
luminosity = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), len(rest_frequency)))
magnetic_field = np.zeros((len(tFinal), len(rest_frequency)))
magnetic_particle, magnetic_weighting = np.zeros((len(tFinal), len(timePointer), len(rest_frequency))), np.zeros((len(tFinal), len(timePointer), len(rest_frequency)))
location = np.zeros((len(tFinal), len(timePointer)*len(pressure[:,0]), 3))
# derive emissivity at each time step
for i in range(0, len(tFinal)):
# derive emissivity for random variations in particle distribution
for j in range(0, len(timePointer)):
# SHOCK ACCELERATION TIMES
new_shock_time = shock_time[:,timePointer[j]]*(tFinal[i]/time[timePointer[j]])*np.minimum(1., (tActive/tFinal[i])) # scale the last acceleration time to active age if source is a remnant
# PRESSURES
new_pressure = pressure[:,timePointer[j]]*(shock_pressures[-1,i]/press_minor[timePointer[j]]) # correction factor to match Model A
# correct the hotspot/lobe pressure ratio based on the dynamical model
new_pressure = new_pressure*((shock_pressures[0,i]/shock_pressures[-1,i])/hotspot_ratio[timePointer[j]] - 1)*(np.abs(x3[:,timePointer[j]])/major[timePointer[j]]) + new_pressure # increase log-space pressure linearly along lobe
# correct the evolutionary histories of the particles based on the dynamical model
alphaP_dyn = np.maximum(-2, np.minimum(0, alphaP_denv[i] + alphaP_hyd[:,timePointer[j]] - alphaP_henv[:,timePointer[j]]))
# VOLUMES
volume_fraction = volume[:,timePointer[j]]/(4*np.pi/3.*major[timePointer[j]]*minor[timePointer[j]]**2)
#volume_sum = np.nansum(volume_fraction[~np.isinf(volume_fraction)])
# cap the largest volumes at the 95th percentile to suppress outliers in the surface brightness map; minimal effect on total luminosity
volume_fraction[volume_fraction > np.nanpercentile(volume_fraction, 95)] = np.nanpercentile(volume_fraction, 95)
new_volume = volume_fraction*(4*np.pi/3.*lobe_lengths[0,i]*lobe_minor[i]**2)*tracer[:,timePointer[j]] #/volume_sum
# RELATIVISTIC BEAMING
doppler_factor = np.sqrt(np.maximum(1e-6, 1 - vx3[:,timePointer[j]]**2))**(3 - (s_index - 1)/2.) # Doppler boosting of particles in jet; 1e-6 ensures some very low level emission
doppler_factor[np.logical_and(np.abs(x3[:,timePointer[j]])/major[timePointer[j]] < 0.1, np.logical_and(np.abs(x1[:,timePointer[j]])/major[timePointer[j]] < 0.01, np.abs(x2[:,timePointer[j]])/major[timePointer[j]] < 0.01))] = 0 # completely remove very bright particles clumped at start of jet
# LOBE PARTICLES
# find angle and radius of each particle from core
new_angles = np.arctan((np.sqrt(x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*lobe_minor[i]/minor[timePointer[j]])/(x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])) # rescale axes to correct axis ratio
new_radii = np.sqrt((x1[:,timePointer[j]]**2 + x2[:,timePointer[j]]**2)*(lobe_minor[i]/minor[timePointer[j]])**2 + (x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]])**2)/lobe_lengths[0,i]
# find particles within lobe region; particles outside this region will not emit. Particle map is set to axis ratio based on shocked shell to maintain geometry of jet
new_eta_c = 1./np.sqrt((lobe_lengths[0,i]/lobe_lengths[-1,i])**2*(np.sin(new_angles))**2 + (np.cos(new_angles))**2)
lobe_particles = np.zeros_like(x1[:,timePointer[j]])
lobe_particles[np.abs(vx3[:,timePointer[j]]) > 1./np.sqrt(3)] = 1 # assume sound speed is critical value for relativistic particles
lobe_particles[new_radii < new_eta_c] = 1.
# TWO PHASE FLUID
# fraction of jet particles that have reached location in lobe
two_phase_weighting = np.maximum(0, np.minimum(1, lambda_crit[i]*(new_shock_time/np.minimum(tActive, tFinal[i]))**np.maximum(0, alpha_lambda[i])))
if tActive/tFinal[i] >= 1:
# keep jet particles visible at all times
two_phase_weighting = np.maximum(two_phase_weighting, np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3)))) # assume sound speed is critical value for relativistic particles
else:
# suppress emission from jet particle
two_phase_weighting = np.minimum(two_phase_weighting, 1 - np.minimum(1, np.abs(vx3[:,timePointer[j]]*np.sqrt(3))))
# PARTICLE EMISSIVITY
for k in range(0, len(rest_frequency)):
if rest_frequency[k] > 100:
# calculate losses due to adiabatic expansion, and synchrotron/iC radiation
lorentz_ratio, pressure_ratio = __RAiSE_loss_mechanisms(rest_frequency[k], inverse_compton[k], redshift, tFinal[i], new_shock_time, new_pressure, alphaP_dyn, equi_factor, gammaCValue)
# calculate luminosity associated with each particle
temp_luminosity = None
if inverse_compton[k] == 1:
# inverse-Compton
sync_frequency = (3*e_charge*rest_frequency[k]*np.sqrt(2*mu0*( equi_factor*new_pressure/((gammaCValue - 1)*(equi_factor + 1)) ))/(2*np.pi*m_e*(freq_cmb*temp_cmb*(1 + redshift)))) # assuming emission at CMB frequency only
temp_luminosity = Ks/blackbody*sync_frequency**((1 - s_index)/2.)*(sync_frequency/rest_frequency[k])*(gammaCValue - 1)*__RAiSE_uC(redshift) * (equi_factor**((s_index + 1)/4. - 1 )/(equi_factor + 1)**((s_index + 5)/4. - 1 ))*new_volume*new_pressure**((s_index + 1 )/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
else:
# synchrotron
temp_luminosity = Ks*rest_frequency[k]**((1 - s_index)/2.)*(equi_factor**((s_index + 1)/4.)/(equi_factor + 1)**((s_index + 5)/4.))*new_volume*new_pressure**((s_index + 5)/4.)*pressure_ratio**(1 - 4./(3*gammaCValue))*lorentz_ratio**(2 - s_index)/len(timePointer) * doppler_factor*lobe_particles*two_phase_weighting
# remove any infs
index = np.isinf(temp_luminosity)
temp_luminosity[index] = np.nan
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = temp_luminosity
# calculate luminosity weighted magnetic field strength
magnetic_particle[i,j,k] = np.nansum(np.sqrt(2*mu0*new_pressure*equi_factor/(gammaCValue - 1)*(equi_factor + 1))*luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
magnetic_weighting[i,j,k] = np.nansum(luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k])
# PARTICLE PRESSURE
else:
luminosity[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),k] = new_pressure*lobe_particles
# CARTESIAN LOCATIONS
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),0] = x1[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%8 - 3.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),1] = x2[:,timePointer[j]]*lobe_minor[i]/minor[timePointer[j]] *np.sign(timePointer[j]%4 - 1.5)
location[i,j*len(pressure[:,0]):(j+1)*len(pressure[:,0]),2] = x3[:,timePointer[j]]*lobe_lengths[0,i]/major[timePointer[j]] *np.sign(timePointer[j]%2 - 0.5)
# calculate luminosity weighted magnetic field strength for time step
for k in range(0, len(rest_frequency)):
if np.nansum(magnetic_weighting[i,:,k]) == 0:
magnetic_field[i,k] = 0
else:
magnetic_field[i,k] = np.nansum(magnetic_particle[i,:,k])/np.nansum(magnetic_weighting[i,:,k])
return location, luminosity, magnetic_field
# find ratio of the lorentz factor and the pressure at the time of acceleration to that at the time of emission
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_loss_mechanisms(rest_frequency, inverse_compton, redshift, time, shock_time, pressure, alphaP, equipartition, gammaCValue=5./3):
# calculate lorentz factor at time of emission
if inverse_compton == 1:
# inverse-Compton
lorentz_factor = np.sqrt(rest_frequency/(freq_cmb*temp_cmb*(1 + redshift)))*np.ones(len(pressure)) # assuming emission at CMB frequency only
else:
# synchrotron
lorentz_factor = np.sqrt(2*np.pi*m_e*rest_frequency/(3*e_charge*np.sqrt(2*mu0*pressure/(gammaCValue - 1)*(equipartition/(equipartition + 1))))) # assuming emission at Larmor frequency only
# calculate pressure and volume at time of acceleration
pressure_inject = pressure*(shock_time/time)**alphaP
# calculate RAiSE constant a2
a2 = __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue)
# calculate lorentz factor at time of acceleration, and remove invalid points
    lorentz_inject = lorentz_factor*shock_time**(alphaP/(3*gammaCValue))/(time**(alphaP/(3*gammaCValue)) - a2*lorentz_factor) # the second time factor is the emission time (time_high)
lorentz_inject[lorentz_inject < 1] = np.nan
return lorentz_inject/lorentz_factor, pressure_inject/pressure
# find RAiSE constant a2 for synchrotron and iC radiative losses
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_a2(redshift, time, shock_time, pressure, pressure_inject, equipartition, alphaP, gammaCValue=5./3):
return 4*sigma_T/(3*m_e*c_speed)*(pressure_inject/(gammaCValue - 1)*(equipartition/(equipartition + 1))/(1 + alphaP*(1 + 1./(3*gammaCValue)))*shock_time**(-alphaP)*(time**(1 + alphaP*(1 + 1./(3*gammaCValue))) - shock_time**(1 + alphaP*(1 + 1./(3*gammaCValue)))) + __RAiSE_uC(redshift)/(1 + alphaP/(3*gammaCValue))*(time**(1 + alphaP/(3*gammaCValue)) - shock_time**(1 + alphaP/(3*gammaCValue)))) # array is shorter by one element
# find CMB radiation energy density
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_uC(redshift):
uC0 = 0.25*1e6*e_charge # J m-3 CMB energy density at z = 0 (Longair, 1981)
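    # For reference: 0.25 MeV m^-3 is ~0.25 eV cm^-3, close to the measured local CMB
    # energy density of ~0.26 eV cm^-3; the (1 + z)^4 factor below then gives, e.g.,
    # 16 x uC0 ~ 6.4e-13 J m^-3 at z = 1.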
return uC0*(redshift + 1)**4 # assuming uC prop to (z + 1)^4 as in KDA97
# find RAiSE constant K(s) for the absolute scaling of the emissivity
def __RAiSE_Ks(s_index, gammaCValue=5./3, lorentz_min=Lorentzmin):
kappa = (gamma(s_index/4. + 19./12)*gamma(s_index/4. - 1./12)*gamma(s_index/4. + 5./4)/gamma(s_index/4. + 7./4))
return kappa/(m_e**((s_index + 3)/2.)*c_speed*(s_index + 1))*(e_charge**2*mu0/(2*(gammaCValue - 1)))**((s_index + 5)/4.)*(3./np.pi)**(s_index/2.)/((lorentz_min**(2 - s_index) - Lorentzmax**(2 - s_index))/(s_index - 2) - (lorentz_min**(1 - s_index) - Lorentzmax**(1 - s_index))/(s_index - 1))
# find RAiSE blackbody constant to convert cosmic microwave background emission from single frequency to blackbody spectrum
def __RAiSE_blackbody(s_index):
return np.pi**4/(15*gamma((s_index + 5)/2.)*zeta((s_index + 5)/2.))
## Define functions to produce surface brightness maps of radio lobes
# define function to manage the discretisation of particles down to pixels
def __RAiSE_brightness_map(frequency, redshift, source_age, lobe_lengths, location, luminosity, angle, resolution='standard'):
# determine spatial resolution of particles; i.e. overdensity of particles to include in calculations
if resolution == 'best':
npixels = 2048/4
elif resolution == 'high':
npixels = 512/2
elif resolution == 'standard':
npixels = 128/1
elif resolution == 'poor':
npixels = 32*2
else:
raise Exception('Unrecognised keyword for particle resolution. The accepted keywords are: best, high, standard and poor.')
# convert frequency, equipartition factor and spectral index to correct units
if isinstance(frequency, (list, np.ndarray)):
rest_frequency = np.zeros_like(frequency)
for freqPointer in range(0, len(frequency)):
rest_frequency[freqPointer] = 10**frequency[freqPointer]*(1 + redshift)
else:
rest_frequency = [10**frequency*(1 + redshift)]
if isinstance(source_age, (list, np.ndarray)):
tFinal = np.zeros_like(source_age)
for i in range(0, len(source_age)):
tFinal[i] = 10**source_age[i]*year # convert from log space years to seconds
else:
tFinal = [10**source_age*year]
return __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels)
# define function to discretise particles down to pixels
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def __RAiSE_pixels(rest_frequency, redshift, tFinal, lobe_lengths, location, luminosity, angle, npixels):
# instantiate variables to store brightness map variables
x_list = []
y_list = []
brightness_list = []
for i in range(0, len(tFinal)):
x_col = []
y_col = []
brightness_col = []
sim_x, sim_y, sim_z = location[i,:,0], location[i,:,1], location[i,:,2] # x, y, z (i.e. 0, 1, 2) in simulations
for j in range(0, len(rest_frequency)):
# separate location array into components
index = np.logical_and(np.logical_and(np.logical_not(np.isnan(luminosity[i,:,j])), np.logical_not(np.isinf(luminosity[i,:,j]))), np.logical_not(np.isnan(sim_x)))
location_x = np.sin(angle*np.pi/180.)*sim_y[index] + np.cos(angle*np.pi/180.)*sim_z[index]
location_y = sim_x[index]
new_luminosity = luminosity[i,:,j]
new_luminosity = new_luminosity[index]
if len(location_x) > 0:
# discretise particles
location_x = np.floor(location_x/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
location_y = np.floor(location_y/lobe_lengths[0,i]*(npixels//2)).astype(np.int_)
min_x, min_y = np.min(location_x), np.min(location_y)
location_x = location_x - min_x
location_y = location_y - min_y
# instantiate variables to store discrete particles
x_values = np.arange(np.min(location_x), np.max(location_x) + 0.1, 1).astype(np.int_)
y_values = np.arange(np.min(location_y), np.max(location_y) + 0.1, 1).astype(np.int_)
brightness = np.zeros((len(x_values), len(y_values)))
# add luminosity from each particle to correct pixel
for k in range(0, len(new_luminosity)):
if rest_frequency[j] > 100:
brightness[location_x[k],location_y[k]] = brightness[location_x[k],location_y[k]] + new_luminosity[k]
else:
brightness[location_x[k],location_y[k]] = max(brightness[location_x[k],location_y[k]], new_luminosity[k])
# add x and y pixel values, and brightnesses to arrays
x_col.append((x_values + min_x + 0.5)*lobe_lengths[0,i]/(npixels//2)) # add 0.5 to get pixel centres and scale back to physical dimensions
y_col.append((y_values + min_y + 0.5)*lobe_lengths[0,i]/(npixels//2))
brightness_col.append(brightness)
else:
x_col.append(None)
y_col.append(None)
brightness_col.append(None)
x_list.append(x_col)
y_list.append(y_col)
brightness_list.append(brightness_col)
return x_list, y_list, brightness_list
# Define functions to plot emissivity maps throughout source evolutionary history
def RAiSE_evolution_maps(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, seed=None, rerun=False, cmap='RdPu'):
# function to test type of inputs and convert type where appropriate
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
# set up plot
fig, axs = plt.subplots(len(source_age), 1, figsize=(12, 1 + (10/axis_ratio[0] + 0.8)*len(source_age)))
if len(source_age) <= 1: # handle case of single image
axs = [axs]
fig.subplots_adjust(hspace=0)
#cmap = cm.get_cmap('binary')
colour_scheme = cm.get_cmap(cmap)
rc('text', usetex=True)
rc('font', size=14)
rc('legend', fontsize=14)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
for i in range(0, len(source_age)):
if isinstance(rho0Value, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), np.abs(np.log10(rho0Value[0])), jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
elif isinstance(halo_mass, (list, np.ndarray)):
if frequency[0] > 0:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}_nu={:.2f}_t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], frequency[0], source_age[i])
else:
filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_H={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}__t={:.2f}'.format(axis_ratio[0], np.abs(equipartition[0]), halo_mass[0], jet_power[0], 2*np.abs(spectral_index) + 1, active_age[0], jet_lorentz[0], redshift[0], source_age[i])
# read-in data from file (must be RAiSE output of correct format)
if rerun == False:
try:
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
except:
# run RAiSE HD for set of parameters at requested resolution
RAiSE_run(frequency[0], redshift[0], axis_ratio[0], jet_power[0], source_age[i], halo_mass=halo_mass, rand_profile=rand_profile, betas=betas, regions=regions, rho0Value=rho0Value, temperature=temperature, active_age=active_age[0], jet_lorentz=jet_lorentz[0], equipartition=equipartition[0], spectral_index=spectral_index, gammaCValue=gammaCValue, lorentz_min=Lorentzmin, brightness=True, resolution='best', seed=seed)
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
else:
# run RAiSE HD for set of parameters at requested resolution
RAiSE_run(frequency[0], redshift[0], axis_ratio[0], jet_power[0], source_age[i], halo_mass=halo_mass, rand_profile=rand_profile, betas=betas, regions=regions, rho0Value=rho0Value, temperature=temperature, active_age=active_age[0], jet_lorentz=jet_lorentz[0], equipartition=equipartition[0], spectral_index=spectral_index, gammaCValue=gammaCValue, lorentz_min=Lorentzmin, brightness=True, resolution='best', seed=seed)
dataframe = pd.read_csv(filename+'_best.csv', index_col=0)
# assign dataframe contents to variables
x, y = (dataframe.index).astype(np.float_), (dataframe.columns).astype(np.float_)
#x, y = x/np.max(x), y/np.max(x)
Y, X = np.meshgrid(y, x)
Z = dataframe.values
if frequency[0] > 0:
Z = Z/np.nanmax(Z)
else:
Z = Z*1e12
Z[Z <= 0] = np.nan
if frequency[0] > 0:
h = axs[i].pcolormesh(X, Y, Z, shading='nearest', cmap=colour_scheme, vmin=0, vmax=1)
else:
h = axs[i].pcolormesh(X, Y, Z, shading='nearest', cmap=colour_scheme, vmin=np.nanmin(Z[0:len(x)//3,:]), vmax=np.nanmax(Z[0:len(x)//3,:]))
axs[i].set_aspect('equal')
axs[i].set_xlim([-1.05*np.max(x), 1.05*np.max(x)])
        axs[i].set_ylim([-1.05*np.max(x)/axis_ratio[0], 1.05*np.max(x)/axis_ratio[0]])
axs[i].xaxis.set_major_formatter(FormatStrFormatter('%g'))
axs[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))
axs[i].plot(np.NaN, np.NaN, '-', color='none', label=str('{:g}'.format(float('{:.2g}'.format(10**source_age[i]/1e6))))+' Myr')
axs[i].legend(frameon=False)
# add a big axes for labels, hide frame
fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axes
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
axs[-1].set_xlabel(r'Jet axis (kpc)', fontsize=14.5, labelpad=10)
plt.ylabel(r'Transverse axis (kpc)', fontsize=14.5, labelpad=15)
if frequency[0] <= 0:
if len(axs) == 1:
cax = fig.add_axes([axs[0].get_position().x1+0.01,axs[0].get_position().y0,0.02,axs[0].get_position().height])
c = plt.colorbar(h, cax=cax, pad=0.025)
else:
            cax = fig.add_axes([axs[-1].get_position().x1+0.01, axs[-1].get_position().y0, 0.02, axs[0].get_position().y1 - axs[-1].get_position().y0]) # colourbar spanning all panels
c = plt.colorbar(h, cax=cax, pad=0.015)
c.set_label(r'Pressure (pPa)', labelpad=12.5)
# show plot and return handle to plot
plt.show()
return fig
# Define function to plot Dt and LD tracks
def RAiSE_evolution_tracks(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass=None, rand_profile=False, betas=None, regions=None, rho0Value=None, temperature=None, active_age=10.14, jet_lorentz=5., equipartition=-1.5, spectral_index=0.7, gammaCValue=5./3, lorentz_min=Lorentzmin, resolution='standard', seed=None, rerun=False, labels=None, colors=None, linestyles=None):
# function to test type of inputs and convert type where appropriate
frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz, nenvirons = __test_inputs(frequency, redshift, axis_ratio, jet_power, source_age, halo_mass, betas, regions, rho0Value, temperature, active_age, equipartition, jet_lorentz)
if len(source_age) <= 1:
raise Exception('Evolutionary tracks require more than one source age; provide a list/array of ages.')
if len(frequency) > 1:
warnings.warn('First frequency in list/array will be plotted for every set of parameters.', category=UserWarning)
    if not isinstance(colors, (list, np.ndarray)) and colors is not None:
        colors = [colors]
    elif colors is None:
        colors = ['crimson', 'darkorange', 'darkorchid', 'mediumblue']
    if not isinstance(linestyles, (list, np.ndarray)) and linestyles is not None:
        linestyles = [linestyles]
    elif linestyles is None:
        linestyles = ['-']
# set up plot
    if resolution is None:
fig, axs = plt.subplots(2, 1, figsize=(6, 10), sharex=True)
else:
fig, axs = plt.subplots(3, 1, figsize=(6, 14), sharex=True)
fig2, axs2 = plt.subplots(1, 1, figsize=(6, 6))
fig.subplots_adjust(hspace=0)
rc('text', usetex=True)
rc('font', size=14)
rc('legend', fontsize=14)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
    if resolution is None:
axs[1].set_xlabel(r'Source age (Myr)')
else:
axs[2].set_xlabel(r'Source age (Myr)')
axs[2].set_ylabel(r'Lobe luminosity (W/Hz)')
axs2.set_xlabel(r'Lobe length (kpc)')
axs2.set_ylabel(r'Lobe luminosity (W/Hz)')
axs[0].set_ylabel(r'Lobe length (kpc)')
axs[1].set_ylabel(r'Pressure (Pa)')
# calculate number of plots
nplots = np.max(np.array([len(redshift), len(axis_ratio), len(jet_power), nenvirons, len(active_age), len(equipartition), len(jet_lorentz)]))
time, size, pressure, luminosity, y_min, y_max = [], [], [], [], [], []
for i in range(0, nplots):
if isinstance(rho0Value, (list, np.ndarray)):
            filename = 'LDtracks/LD_A={:.2f}_eq={:.2f}_p={:.2f}_Q={:.2f}_s={:.2f}_T={:.2f}_y={:.2f}_z={:.2f}'.format(axis_ratio[min(len(axis_ratio) - 1, i)], np.abs(equipartition[min(len(equipartition) - 1, i)]), np.abs(np.log10(rho0Value[min(len(rho0Value) - 1, i)])), jet_power[min(len(jet_power) - 1, i)], 2*np.abs(spectral_index) + 1, active_age[min(len(active_age) - 1, i)], jet_lorentz[min(len(jet_lorentz) - 1, i)], redshift[min(len(redshift) - 1, i)])
import os
from glob import glob
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
fig = plt.figure(figsize=(7.2,6))
grid = plt.GridSpec(18, 10, wspace=4, hspace=15)
ax = fig.add_subplot(grid[:9, :5])
ax.text(0.025, 0.966, 'a', transform=ax.transAxes,
fontsize=8, fontweight='bold', va='top', ha='left')
var_dir = '/home/atom/ongoing/work_worldwide/variance'
region_list = os.listdir(var_dir)
region_nmad = []
region_nsamp = []
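# For each region, combine the per-tile error curves into a single sample-weighted
# mean NMAD per (slope, correlation) bin; bins that are empty (or zero) in every tile
# are masked out below.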
for region in region_list:
list_fn_csv = [os.path.join(var_dir,region,f) for f in os.listdir(os.path.join(var_dir,region))]
list_nmad = []
list_nsamp = []
for fn_csv in list_fn_csv:
df = pd.read_csv(fn_csv)
list_nmad.append(df.nmad.values)
list_nsamp.append(df.nsamp.values)
nmad_all = np.stack(list_nmad,axis=1)
nsamp_all = np.stack(list_nsamp,axis=1)
nan_mask = np.all(np.logical_or(np.isnan(nmad_all),nmad_all==0),axis=1)
nmad_final = np.nansum(nmad_all * nsamp_all,axis=1) / np.nansum(nsamp_all,axis=1)
nsamp_final = np.nansum(nsamp_all,axis=1)
nmad_final[nan_mask] = np.nan
nsamp_final[nan_mask] = 0
region_nmad.append(nmad_final)
region_nsamp.append(nsamp_final)
# ax.figure(figsize=(16,9))
slope = df.bin_slope.values
corr = df.bin_corr.values
bin_slope = sorted(list(set(list(slope))))
bin_corr = sorted(list(set(list(corr))))
nb_slope = len(bin_slope)
nb_corr = len(bin_corr)
color_list = ['tab:orange','tab:blue','tab:olive','tab:cyan','tab:red','tab:purple','tab:brown','tab:pink','tab:gray','tab:olive']
ls_list = ['solid','dashed','dotted']
# model_var = np.sqrt(3**2 + (20 * np.tan(np.array(5) * np.pi / 180))**2) + (((100-np.array(bin_corr))/100)*20)**1.25
#
# for i in range(len(region_nmad)):
# i = 0
# for j in range(nb_slope-2):
#
# nmad = region_nmad[i]
#
# ax.plot(corr[1:nb_corr],nmad[j*nb_corr+1:j*nb_corr+nb_corr],label='Slope category: '+str(bin_slope[j]-5)+'-'+str(bin_slope[j]+5)+' degrees',color=color_list[j],linestyle=ls_list[i])
#
#
# # ax.plot(bin_corr,model_var,label='model',linewidth=2)
#
# ax.xlabel('Correlation (percent)')
# ax.ylabel('Stable terrain NMAD (m)')
# ax.ylim([0,50])
# ax.legend()
#
x_slope = np.arange(5,45,0.1)
model_var = np.sqrt(3**2 + (40 * np.tan(np.array(x_slope) * np.pi / 180))**2.5 + (((100-np.array(50))/100)*20)**2)
i=0
# for i in range(len(region_nmad)-1):
u=0
for j in np.arange(1,nb_corr,2):
nmad = region_nmad[i]
# ax.plot(bin_slope,nmad[np.arange(j,len(slope),nb_corr)],label='region: '+region_list[i]+', corr: '+str(bin_corr[j]),color=color_list[j],linestyle=ls_list[i])
ax.plot(bin_slope[:-2],nmad[np.arange(j,len(slope)-2*nb_corr,nb_corr)]**2,label='Empirical variance: $q$='+str(int(bin_corr[j]-5))+'-'+str(int(bin_corr[j]+5))+' %',color=color_list[u],linestyle=ls_list[i],marker='o',lw=0.5)
u+=1
model_var = np.sqrt(3**2 + ((20+(((100-np.array(100))/100)*20)) * np.tan(np.array(x_slope) * np.pi / 180))**2 + (((100-np.array(95))/100)*15)**2.5)
ax.plot(x_slope,model_var**2,label='Modelled: center of above\ncategories',linestyle='dashed',color='black',lw=0.5)
model_var = np.sqrt(3**2 + ((20+(((100-np.array(80))/100)*20)) * np.tan(np.array(x_slope)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import time
"""
This code was built just to generate ICE plots for comparison in the paper.
We just hacked it together.
"""
from stratx.partdep import getcats
def predict_catice(model, X:pd.DataFrame, colname:str, targetname, cats=None, ncats=None):
if cats is None:
cats = np.unique(X[colname]) # get unique codes
return predict_ice(model=model, X=X, colname=colname, targetname=targetname,
cats=cats, nlines=ncats)
def predict_ice(model, X:pd.DataFrame, colname:str, targetname="target", cats=None, numx=50, nlines=None):
"""
Return dataframe with one row per observation in X and one column
per unique value of column identified by colname.
Row 0 is actually the sorted unique X[colname] values used to get predictions.
It's handy to have so we don't have to pass X around to other methods.
Points in a single ICE line are the unique values of colname zipped
with one row of returned dataframe. E.g.,
predicted weight predicted weight ...
height=62.3638789416112 height=62.78667197542318 ...
0 62.786672 70.595222 ... unique X[colname] values
1 109.270644 161.270843 ...
"""
start = time.time()
save = X[colname].copy()
if nlines is not None and nlines > len(X):
nlines = len(X)
if cats is not None:
linex = np.unique(cats)
numx = None
elif numx is not None:
linex = np.linspace(np.min(X[colname]), np.max(X[colname]), numx, endpoint=True)
else:
linex = sorted(X[colname].unique())
lines = np.zeros(shape=(len(X) + 1, len(linex)))
lines[0, :] = linex
i = 0
for v in linex:
X[colname] = v
y_pred = model.predict(X)
lines[1:, i] = y_pred
i += 1
X[colname] = save
columns = [f"predicted {targetname}\n{colname}={str(v)}"
for v in linex]
df = pd.DataFrame(lines, columns=columns)
if nlines is not None:
# sample lines (first row is special: linex)
df_ = pd.DataFrame(lines)
df_ = df_.sample(n=nlines, axis=0, replace=False)
lines = df_.values
lines = np.concatenate([linex.reshape(1,-1),lines], axis=0)
df = pd.DataFrame(lines, columns=columns)
stop = time.time()
print(f"ICE_predict {stop - start:.3f}s")
return df
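# Illustrative usage (the model and column names here are placeholders, not the paper's
# setup): fit any sklearn-style regressor, build the ICE matrix for one feature, then
# hand it to plot_ice() defined further down.
#
#   from sklearn.ensemble import RandomForestRegressor
#   rf = RandomForestRegressor(n_estimators=100).fit(X_df, y)   # X_df: pandas DataFrame
#   ice = predict_ice(rf, X_df, colname="height", targetname="weight", nlines=300)
#   plot_ice(ice, colname="height", targetname="weight")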
def ice2lines(ice:np.ndarray) -> np.ndarray:
"""
Return a 3D array of 2D matrices holding X coordinates in col 0 and
Y coordinates in col 1. result[0] is first 2D matrix of [X,Y] points
in a single ICE line for single observations. Shape of result is:
(nobservations,nuniquevalues,2)
"""
start = time.time()
linex = ice.iloc[0,:] # get unique x values from first row
# If needed, apply_along_axis() is faster than the loop
# def getline(liney): return np.array(list(zip(linex, liney)))
# lines = np.apply_along_axis(getline, axis=1, arr=ice.iloc[1:])
lines = []
for i in range(1,len(ice)): # ignore first row
liney = ice.iloc[i].values
line = np.array(list(zip(linex, liney)))
lines.append(line)
stop = time.time()
# print(f"ICE_lines {stop - start:.3f}s")
return np.array(lines)
def plot_ice(ice, colname, targetname="target", ax=None, linewidth=.5, linecolor='#9CD1E3',
alpha=.1, title=None, xrange=None, yrange=None, pdp=True, pdp_linewidth=.5, pdp_alpha=1,
pdp_color='black', show_xlabel=True, show_ylabel=True):
start = time.time()
if ax is None:
fig, ax = plt.subplots(1,1)
avg_y = np.mean(ice[1:], axis=0)
min_pdp_y = avg_y[0]
# if 0 is in x feature and not on left/right edge, get y at 0
# and shift so that is x,y 0 point.
linex = ice.iloc[0,:] # get unique x values from first row
nx = len(linex)
if linex[int(nx*0.05)]<0 or linex[-int(nx*0.05)]>0:
closest_x_to_0 = np.abs(linex - 0.0).argmin()
min_pdp_y = avg_y[closest_x_to_0]
lines = ice2lines(ice)
lines[:,:,1] = lines[:,:,1] - min_pdp_y
# lines[:,:,0] scans all lines, all points in a line, and gets x column
    minx, maxx = np.min(lines[:,:,0]), np.max(lines[:,:,0])
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import numpy as np
import matplotlib.pyplot as plt
import random as rn
import os
import time
from torchvision import datasets, transforms
import argparse
from torchsummary import summary
from pyntcloud import PyntCloud
from glob import glob
from utils.training_tools import save_ckp,load_ckp,compute_metric, Rotation, Random_sampling
import datetime
from torchvision.transforms import Compose
from torch.utils.tensorboard import SummaryWriter
'''RESNET BLOCKS'''
def conv_bn(in_channels, out_channels, *args, **kwargs):
return nn.Sequential(nn.Conv3d(in_channels, out_channels, *args, **kwargs), nn.BatchNorm3d(out_channels))
class ResnetBlock(nn.Module):
def __init__(self, in_filters, out_filters, *args, **kwargs):
super().__init__()
self.in_filters=in_filters
self.out_filters=out_filters
self.block=nn.Sequential(
conv_bn(self.in_filters,self.out_filters,kernel_size=1,stride=1, padding=0,bias=False),
nn.ReLU(),
conv_bn(self.out_filters,self.out_filters,kernel_size=3,stride=1, padding=1,bias=False),
nn.ReLU(),
conv_bn(self.out_filters,self.in_filters,kernel_size=1,stride=1, padding=0,bias=False),
)
def forward(self,x):
identity=x
out=self.block(x)
out=out+identity
out=F.relu(out)
return out
class ResNet_kkk(nn.Module):
def __init__(self,in_filters,out_filters,nblocks,block):
super().__init__()
self.in_filters=in_filters
self.out_filters=out_filters
self.nblocks=nblocks
self.block=block
self.blocks=nn.Sequential(
nn.Conv3d(self.in_filters,self.out_filters,kernel_size=7,stride=1,padding=3,bias=False),
*[block(self.out_filters, self.out_filters) for _ in range(self.nblocks)]
)
def forward(self,x):
out=self.blocks(x)
return out
class ResNet_kk2k(nn.Module):
def __init__(self,in_filters,out_filters,nblocks,block):
super().__init__()
self.in_filters=in_filters
self.out_filters=out_filters
self.nblocks=nblocks
self.block=block
self.blocks=nn.Sequential(
nn.Conv3d(self.in_filters,self.out_filters,kernel_size=7,stride=1,padding=(3,3,3),bias=False),
nn.Conv3d(self.out_filters,self.out_filters,kernel_size=3,stride=(1,1,2),padding=(1,1,1),bias=False),
*[block(self.out_filters, self.out_filters) for _ in range(self.nblocks)]
)
def forward(self,x):
out=self.blocks(x)
return out
class ResNet_k2k2k(nn.Module):
def __init__(self,in_filters,out_filters,nblocks,block):
super().__init__()
self.in_filters=in_filters
self.out_filters=out_filters
self.nblocks=nblocks
self.block=block
self.blocks=nn.Sequential(
nn.Conv3d(self.in_filters,self.out_filters,kernel_size=7,stride=1,padding=(3,3,3),bias=False),
nn.Conv3d(self.out_filters,self.out_filters,kernel_size=3,stride=(1,2,2),padding=(1,1,1),bias=False),
*[block(self.out_filters, self.out_filters) for _ in range(self.nblocks)]
)
def forward(self,x):
out=self.blocks(x)
return out
class ResNet_2k2k2k(nn.Module):
def __init__(self,in_filters,out_filters,nblocks,block):
super().__init__()
self.in_filters=in_filters
self.out_filters=out_filters
self.nblocks=nblocks
self.block=block
self.blocks=nn.Sequential(
nn.Conv3d(self.in_filters,self.out_filters,kernel_size=7,stride=1,padding=(3,3,3),bias=False),
nn.Conv3d(self.out_filters,self.out_filters,kernel_size=3,stride=(2,2,2),padding=(1,1,1),bias=False),
*[block(self.out_filters, self.out_filters) for _ in range(self.nblocks)]
)
def forward(self,x):
out=self.blocks(x)
return out
'''Voxel CNN BLOCKS'''
class maskedConv3D(nn.Conv3d):
def __init__(self, masktype, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_buffer('mask', self.weight.data.clone())
_, _, kD, kH, kW = self.weight.size()
self.mask.fill_(1)
self.mask[:, :, kD // 2, kH // 2, kW // 2 + (masktype == 'B'):] = 0
self.mask[:, :, kD // 2, kH // 2 + 1:, :] = 0
self.mask[:, :, kD // 2 + 1:, :, :] = 0
def forward(self, x):
self.weight.data *= self.mask
return super(maskedConv3D, self).forward(x)
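# Note on maskedConv3D: the registered `mask` buffer enforces an autoregressive
# (causal) ordering over the voxel grid in raster (D, H, W) order -- weights that
# would look at voxels at or after the current one are zeroed. Mask type 'A'
# (first layer) also hides the centre voxel itself, while type 'B' (later layers)
# keeps it, mirroring the PixelCNN masking scheme in 3-D.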
class maskedResnet(nn.Module):
def __init__(self, no_filters):
super().__init__()
self.no_filters = no_filters
self.conv2a = nn.Conv3d(in_channels=2 * self.no_filters, out_channels=self.no_filters, kernel_size=1, stride=1,
padding=0)
self.conv2b = maskedConv3D(masktype='B', in_channels=self.no_filters, out_channels=self.no_filters,
kernel_size=3, stride=1, padding=1)
self.conv2c = nn.Conv3d(in_channels=self.no_filters, out_channels=2 * self.no_filters, kernel_size=1, stride=1,
padding=0)
def forward(self, x):
identity = x
out = self.conv2a(x)
out = F.relu(out)
out = self.conv2b(out)
out = F.relu(out)
out = self.conv2c(out)
out += identity
return out
class VoxelCNN(nn.Module):
def __init__(self, input_channel):
super().__init__()
self.pixelcnn = nn.Sequential(
maskedConv3D(masktype='A', in_channels=input_channel, out_channels=64, kernel_size=7, stride=1, padding=3),
maskedResnet(32),
maskedConv3D(masktype='B', in_channels=64, out_channels=32, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
maskedConv3D(masktype='B', in_channels=32, out_channels=2, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
)
def forward(self, x):
# print(x.size())
batch, cin, d, h, w = x.size()
# print(batch, cin, h,w)
assert torch.sum(torch.isnan(x)) == 0
out = self.pixelcnn(x)
# print(out.size())
out = out.view(batch, 2, d, h, w)
# out = out.permute(0, 1, 3, 4, 2)
# print(out.shape)
return out
''' BUILDING PIPELINE, MERGE AND SPLIT'''
class MSVoxelCNN(nn.Module):
def __init__(self, Mpatch, input_channel,input_size,no_resnet,group):
super().__init__()
self.Mpatch=Mpatch
self.input_channel=input_channel
self.input_size=input_size
self.VoxelCNN=VoxelCNN(32)
self.patch_size=self.input_size//self.Mpatch
self.group = group
if (self.group <= 1):
self.Resnet = ResNet_kkk(1, 32, no_resnet,
ResnetBlock) # 1 is number of input channel ,32 is number of output, 12 is number of resnet block
elif (self.group == 2):
self.Resnet = ResNet_kk2k(1, 32, no_resnet, ResnetBlock)
elif (self.group == 3 or self.group == 4):
self.Resnet = ResNet_k2k2k(1, 32, no_resnet, ResnetBlock)
else:
self.Resnet = ResNet_2k2k2k(1, 32, no_resnet, ResnetBlock)
def forward(self,x):
#x=self.maxpooling(x)
ResnetFeature = self.Resnet(x)
patches = ResnetFeature.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size).unfold(4, self.patch_size, self.patch_size)
unfold_shape = patches.size()
patches_orig = torch.zeros(unfold_shape[0],2,unfold_shape[2],unfold_shape[3],unfold_shape[4],unfold_shape[5],unfold_shape[6],unfold_shape[7])
for i in range (unfold_shape[2]):
for j in range (unfold_shape[3]):
for k in range (unfold_shape[4]):
patches_orig[:,:,i,j,k,:,:,:]= self.VoxelCNN(patches[:,:,i,j,k,:,:,:])
output_d = unfold_shape[2] * unfold_shape[5]
output_h = unfold_shape[3] * unfold_shape[6]
output_w = unfold_shape[4] * unfold_shape[7]
patches_orig = patches_orig.permute(0,1,2,5,3,6,4,7).contiguous()
patches_orig = patches_orig.view(unfold_shape[0],2,output_d, output_h, output_w)
return patches_orig
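# Illustrative usage of MSVoxelCNN (the settings below are assumptions for the sketch,
# not values taken from the training script): a 64^3 occupancy block is split into
# Mpatch=4 patches per axis and the shared VoxelCNN predicts two logits per voxel.
#
#   model = MSVoxelCNN(Mpatch=4, input_channel=1, input_size=64, no_resnet=3, group=1)
#   x = torch.zeros(2, 1, 64, 64, 64)   # batch of binary occupancy grids
#   out = model(x)                      # -> (2, 2, 64, 64, 64) per-voxel logits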
class PCdataset(Dataset):
def __init__(self, files, transforms=None):
self.files=np.asarray(files)
self.transforms=transforms
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
pc=PyntCloud.from_file(self.files[idx])
points=pc.points.to_numpy()[:,:3]
if(self.transforms):
points=self.transforms(points)
try:
points = np.unique(points, axis=0)
except:
return None
points=torch.from_numpy(points).type(torch.LongTensor)
#print(points.shape)
v=torch.ones(points.shape[0])
dense_block=torch.sparse.FloatTensor(torch.transpose( points,0,1),v, torch.Size([64,64,64])).to_dense().view(1,64,64,64)
#print(dense_block.shape, torch.max(dense_block), torch.min(dense_block), torch.count_nonzero(dense_block))
return dense_block
def collate_fn(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def data_collector(training_dirs,params):
total_files = []
for training_dir in training_dirs:
training_dir = training_dir + '**/*.ply'
files = glob(training_dir, recursive=True)
print('Total files: ',len(files))
total_files_len = len(files)
total_files = np.concatenate((total_files, files), axis=0)
print('Selected ', len(files), ' from ', total_files_len, ' in ', training_dir)
assert len(total_files) > 0
rn.shuffle(total_files) # shuffle file
print('Total blocks for training: ', len(total_files))
files_cat = np.array([os.path.split(os.path.split(x)[0])[1] for x in total_files])
files_train = total_files[files_cat == 'train']
files_valid = total_files[files_cat == 'test']
rotation = Rotation(64)
sampling = Random_sampling()
#rotation, sampling,
transforms_ = Compose([rotation, sampling])
#,transforms.ToTensor()
training_set = PCdataset(files_train)
training_generator = torch.utils.data.DataLoader(training_set,collate_fn=collate_fn, **params)
# Validation data
valid_set = PCdataset(files_valid)
valid_generator = torch.utils.data.DataLoader(valid_set, collate_fn=collate_fn,**params)
return training_generator, valid_generator
def index_hr(group,d,h,w):#generate the index to select high resolution from block d,h,w for input
index=[[np.arange(0,d,2),np.arange(0,h,2),np.arange(0,w,2)],
[np.arange(0,d,2),np.arange(0,h,2),np.arange(0,w,1)],
[np.arange(0,d,2),np.arange(0,h,1),np.arange(0,w,1)],
[np.arange(0,d,2),np.arange(0,h,1),np.arange(0,w,1)],
[np.arange(0,d,1),np.arange(0,h,1),np.arange(0,w,1)],
           [np.arange(0,d,1), np.arange(0,h,1),
import warnings
import numpy as np
from nbeats_keras.model import NBeatsNet as NBeatsKeras
from nbeats_pytorch.model import NBeatsNet as NBeatsPytorch
warnings.filterwarnings(action='ignore', message='Setting attributes')
def main():
# https://keras.io/layers/recurrent/
num_samples, time_steps, input_dim, output_steps = 50_000, 10, 1, 1
# Definition of the model.
# NOTE: If you choose the Keras backend with input_dim>1, you have
# to set the value here too (in the constructor).
model_keras = NBeatsKeras(backcast_length=time_steps, forecast_length=output_steps,
stack_types=(NBeatsKeras.GENERIC_BLOCK, NBeatsKeras.GENERIC_BLOCK),
nb_blocks_per_stack=2, thetas_dim=(4, 4), share_weights_in_stack=True,
hidden_layer_units=64)
model_pytorch = NBeatsPytorch(backcast_length=time_steps, forecast_length=output_steps,
stack_types=(NBeatsPytorch.GENERIC_BLOCK, NBeatsPytorch.GENERIC_BLOCK),
nb_blocks_per_stack=2, thetas_dim=(4, 4), share_weights_in_stack=True,
hidden_layer_units=64)
# Definition of the objective function and the optimizer.
model_keras.compile(loss='mae', optimizer='adam')
model_pytorch.compile(loss='mae', optimizer='adam')
# Definition of the data. The problem to solve is to find f such as | f(x) - y | -> 0.
# where f = np.mean.
x = np.random.uniform(size=(num_samples, time_steps, input_dim))
y = np.mean(x, axis=1, keepdims=True)
# Split data into training and testing datasets.
c = num_samples // 10
x_train, y_train, x_test, y_test = x[c:], y[c:], x[:c], y[:c]
test_size = len(x_test)
# Train the model.
print('Keras training...')
model_keras.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20, batch_size=128)
print('Pytorch training...')
model_pytorch.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20, batch_size=128)
# Save the model for later.
model_keras.save('n_beats_model.h5')
model_pytorch.save('n_beats_pytorch.th')
# Predict on the testing set (forecast).
predictions_keras_forecast = model_keras.predict(x_test)
predictions_pytorch_forecast = model_pytorch.predict(x_test)
    np.testing.assert_equal(predictions_keras_forecast.shape, (test_size, model_keras.forecast_length, output_steps))
import numpy as np
from flask import Flask, jsonify, make_response, request
from bokeh.models import AjaxDataSource, CustomJS
from bokeh.plotting import figure, show
# Bokeh related code
adapter = CustomJS(code="""
const result = {x: [], y: []}
const pts = cb_data.response.points
for (let i=0; i<pts.length; i++) {
result.x.push(pts[i][0])
result.y.push(pts[i][1])
}
return result
""")
source = AjaxDataSource(data_url='http://localhost:5555/data',
polling_interval=100, adapter=adapter)
p = figure(plot_height=300, plot_width=800, background_fill_color="lightgrey",
title="Streaming Noisy sin(x) via Ajax")
p.circle('x', 'y', source=source)
p.x_range.follow = "end"
p.x_range.follow_interval = 10
# Flask related code
app = Flask(__name__)
def crossdomain(f):
def wrapped_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
h['Access-Control-Allow-Origin'] = '*'
h['Access-Control-Allow-Methods'] = "GET, OPTIONS, POST"
h['Access-Control-Max-Age'] = str(21600)
requested_headers = request.headers.get('Access-Control-Request-Headers')
if requested_headers:
h['Access-Control-Allow-Headers'] = requested_headers
return resp
return wrapped_function
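# The decorator above simply attaches permissive CORS headers to the /data responses,
# so the AjaxDataSource polling from the Bokeh document (a different origin) is not
# blocked by the browser.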
x = list(np.arange(0, 6, 0.1))
y = list(np.sin(x) + np.random.random(len(x)))
@app.route('/data', methods=['GET', 'OPTIONS', 'POST'])
@crossdomain
def data():
x.append(x[-1]+0.1)
    y.append(np.sin(x[-1])+np.random.random())
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: <NAME>
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mpath
from matplotlib.collections import LineCollection
from matplotlib.collections import PathCollection
from matplotlib.collections import CircleCollection
from matplotlib.collections import PolyCollection
from matplotlib.collections import EllipseCollection
from matplotlib.collections import RegularPolyCollection
from matplotlib.collections import StarPolygonCollection
from matplotlib.collections import AsteriskPolygonCollection
fig = plt.figure(figsize=(4.25, 8 * 0.4))
ax = fig.add_axes(
[0, 0, 1, 1],
xlim=[0, 11],
ylim=[0.5, 8.5],
frameon=False,
xticks=[],
yticks=[],
aspect=1,
)
y = 8
# Line collection
# ----------------------------------------------------------------------------
n = 50
segments = np.zeros((n, 2, 2))
segments[:, 0, 0] = np.linspace(1, 10.25, n) - 0.2
segments[:, 0, 1] = y - 0.2
segments[:, 1, 0] = segments[:, 0, 0] + 0.2
segments[:, 1, 1] = y + 0.15
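# segments has shape (n, 2, 2): n short line segments, each defined by two (x, y)
# endpoints; LineCollection draws all of them as a single artist with per-segment
# linewidths.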
linewidths = np.linspace(0.5, 2.5, n)
collection = LineCollection(segments, linewidths=linewidths, edgecolor="black")
ax.add_collection(collection)
ax.text(1 - 0.25, y + 0.25, "Line collection", size="small", ha="left", va="baseline")
ax.text(
10 + 0.25,
y + 0.25,
"LineCollection",
color="blue",
size="small",
ha="right",
va="baseline",
family="monospace",
)
y -= 1
# Circle collection
# ----------------------------------------------------------------------------
n = 10
offsets = np.ones((n, 2))
offsets[:, 0], offsets[:, 1] = np.linspace(1, 10, n), y
X, Y = offsets[:, 0], offsets[:, 1]
sizes = np.linspace(25, 100, n)
linewidths = np.linspace(1, 2, n)
facecolors = ["%.1f" % c for c in np.linspace(0.25, 0.75, n)]
collection = CircleCollection(
sizes,
# linewidths = linewidths,
facecolors=facecolors,
edgecolor="black",
offsets=offsets,
transOffset=ax.transData,
)
ax.add_collection(collection)
ax.text(
X[0] - 0.25, y + 0.35, "Circle collection", size="small", ha="left", va="baseline"
)
ax.text(
X[-1] + 0.25,
y + 0.35,
"CircleCollection",
color="blue",
size="small",
ha="right",
va="baseline",
family="monospace",
)
y -= 1
# Ellipse collection
# ----------------------------------------------------------------------------
n = 10
offsets = np.ones((n, 2))
offsets[:, 0], offsets[:, 1] = np.linspace(1, 10, n), y
X, Y = offsets[:, 0], offsets[:, 1]
widths, heights = 15 * np.ones(n), 10 * np.ones(n)
angles = np.linspace(0, 45, n)
linewidths = np.linspace(1, 2, n)
facecolors = ["%.1f" % c for c in np.linspace(0.25, 0.75, n)]
collection = EllipseCollection(
widths,
heights,
angles,
# linewidths = linewidths,
facecolors=facecolors,
edgecolor="black",
offsets=offsets,
transOffset=ax.transData,
)
ax.add_collection(collection)
ax.text(
X[0] - 0.25, y + 0.35, "Ellipse collection", size="small", ha="left", va="baseline"
)
ax.text(
X[-1] + 0.25,
y + 0.35,
"EllipseCollection",
color="blue",
size="small",
ha="right",
va="baseline",
family="monospace",
)
y -= 1
# Polygon collection
# ----------------------------------------------------------------------------
n = 10
offsets = np.ones((n, 2))
offsets[:, 0], offsets[:, 1] = np.linspace(1, 10, n) - 0.2, y + 0.1
X, Y = offsets[:, 0], offsets[:, 1]
verts = np.zeros((n, 4, 2))
verts[:] = [0, 0], [1, 0], [1, 1], [0, 1]
sizes = np.linspace(0.25, 0.50, n)
verts *= sizes.reshape(n, 1, 1)
widths, heights = 15 * np.ones(n), 10 * np.ones(n)
numsides = 5
rotation = np.pi / 4
offsets[:, 1] -= sizes / 2 - 0.25
linewidths = np.linspace(1, 2, n)
from numpy import arctan, zeros, pi, real as re, imag as im, linspace,eye, prod, newaxis
from numpy import array as arr, exp, log, arange, diag, kron, savetxt, cumsum, argmax
from numpy.linalg import det, norm, solve
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from copy import deepcopy
from time import perf_counter as clock
from numpy.random import uniform, shuffle
from scipy import integrate
import matplotlib.patches as pat
import pylab as pl
def ncr(n,r):
if r < 0:
return 0
p,q = 1,1
for j in range(r):
p *= n-j
q *= j+1
return p//q
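# e.g. ncr(5, 2) == 10, ncr(5, 0) == 1, and ncr(5, -1) == 0 (a negative draw is empty).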
def ind_draw(o,m,n): #generator giving all ways to draw n elements from range(o,m) without replacement
if n>=0:
l = m-o
if n == 0:
yield []
elif n == l:
yield list(range(o,m))
else:
for k in range(o,m-n+1):
for wha in ind_draw(k+1,m,n-1):
yield [k]+wha
def disect(We): # We is an l-dimensional tensor; for each axis, return its marginal (the sum over all other axes), normalised by the total sum.
w = []
As = We.shape
l = len(As)
for j in range(l):
if As[j] >1:
w.append(We.sum(tuple(range(1,l-j))))
else:
w.append(arr([1]))
We = We.sum(0)
for j in range(l):
if As[j] >1: #don't calculate the full sum rule over and over again on trivial boxes. (empty or full)
w[j] /= We
return w
def boxicles(n,deps): #puts n particles in boxes with maximal capacity deps. oo=None was last input
M = len(deps)
if n == 0:
yield [0 for _ in range(M)]
else:
for preput in boxicles(n-1,deps):
for k in range(M):
#postput = [a for a in preput]
#postput[k] += 1
#yield postput
if preput[k]<deps[k]:
yield [preput[a]+int(a==k) for a in range(M)]
if preput[k]:
break
def TBA(T,c,chempot,givedens = True):
interpts = 101 #odd is best
bdys = 20
la = linspace(-bdys,bdys,interpts) #lambda
dla = la[1]-la[0]
ootp = 1/(2*pi)
lala = (la*la-chempot)/T
convo = dla*c/(pi*(c*c+(la[:,newaxis]-la[newaxis,:])**2))
tba = lambda eps: lala- eps +(convo*log(1+exp(-eps))[newaxis,:]).sum(axis=1)
exep = exp(fsolve(tba,zeros(interpts)))
ooexep = 1/(1+exep)
#plt.plot(la,ooexep)
tba2 = lambda rhop: rhop/(ootp+(convo*rhop[newaxis,:]).sum(axis=1))-ooexep
rhopsol = fsolve(tba2,0.15*ooexep)
#plt.plot(la,ooexep/rhopsol)
rhopsol -= min(rhopsol) #ensure non-negativity, despite numerical error
D = sum(rhopsol)*dla
if givedens:
return D
else:
rhot = ootp+(convo*rhopsol[newaxis,:]).sum(axis=1)
xi = [0]
for rj in rhot:
xi.append(xi[-1]+dla*rj)
xi = (arr(xi[1:])+arr(xi[:-1]))/2
xi -= xi[interpts//2]
return rhopsol/rhot, xi
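# Illustrative calls (values are arbitrary): by default TBA returns the particle
# density for a given chemical potential; with givedens=False it returns the filling
# n(lambda) = rho_p/rho_t and the positions xi that LL_thermal_disc samples from.
#
#   D = TBA(T=1.0, c=1.0, chempot=0.5)
#   filling, xi = TBA(T=1.0, c=1.0, chempot=0.5, givedens=False)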
def LL_gibbs(N,L,T,c,ss): #more closely recreates a gibbs ensemble from a finite set of states.
qngen = LL_thermal_disc(N,L,T,c,200*ss)
ensemble = []
pref = 2*pi/L
for qn in qngen:
aqn = arr(qn)
lam,_ = newtrap(aqn,L,c,aqn*pref)
ensemble.append([sum(lam*lam),qn])
ensemble.sort()
h=1
while h<len(ensemble):
if ensemble[h-1][0]==ensemble[h][0]:
if ensemble[h-1][1]==ensemble[h][1]:
ensemble.pop(h)
h += 1
energies = arr([e[0] for e in ensemble])
prolly = cumsum(exp(-energies/T))
prolly /= prolly[-1]
#plt.hist(energies,bins=linspace(0,150,100))
#plt.plot(prolly)
for _ in range(ss):
yield ensemble[argmax(prolly>uniform())][1]
def LL_thermal_disc(N,L,T,c,samplesize):
if N==0:
for _ in range(samplesize):
yield []
else:
dens = N/L
chempot = fsolve(lambda chemp: TBA(T,c,chemp)-dens,1)
#plt.plot([TBA(T,c,ch) for ch in linspace(-10,10,100)])
rhox,xi = TBA(T,c,chempot,False)
pref = 1/L
#dom = max(1000,T/L)
#print(xi[0],xi[-1])
nf = lambda k : smirt(xi,rhox,k)
#KX = linspace(-10,10,1000)
#plt.plot(KX,[nf(kx) for kx in KX])
#boundbox = int(fsolve(lambda bd: integrate.quad(nf,-bd*pref,bd*pref)[0]-0.99*dens,L/2)[0]+2) #find the Qn inside which 99.5% of the particles should be
boundbox = int(xi[-1]*L)
for _ in range(samplesize):
if N%2:
I = [0]
index = 1
else:
I = []
index = 0.5
sign = 1
newreject = []
while len(I) < N and index<boundbox:
ki = index*pref
if uniform()<nf(ki):
I.append(sign*index)
else:
newreject.append(sign*index)
if sign == 1:
sign = -1
else:
sign = 1
index += 1
while len(I) < N:
shuffle(newreject)
reject = newreject
shuffle(reject)
rejlen,rejind = len(reject),0
newreject = []
while len(I) < N and rejind<rejlen:
if uniform()<nf(pref*reject[rejind]):
I.append(reject[rejind])
else:
newreject.append(reject[rejind])
rejind +=1
if uniform()<0.5:
I = [-ii for ii in I]
yield sorted(I)
def smirt(x,y,a): # linearly interpolate y(x), sampled on an irregular but increasing grid x, at the point a (returns 0 outside the grid)
n = len(y)-1
h = 0
if a<x[0] or a>x[-1]:
return 0
while x[h+1]<a and h<n:
h += 1
return y[h]+(y[h+1]-y[h])*(a-x[h])/(x[h+1]-x[h])
def fd_better_disc(N,L,T,samplesize):
pref = 2*pi/L
if L==0:
for _ in range(samplesize):
yield []
else:
beta = 0.5/T
#dom = max(1000,T/L)
dom = 100
dens = 2*pi*N/L
mu = fsolve(lambda moo: integrate.quad(lambda k: 1/(1+exp((k*k-moo)*beta)),-dom,dom)[0]-dens,0)[0]
nf = lambda k : 1/(1+exp((k*k-mu)*beta))
boundbox = int(fsolve(lambda bd: integrate.quad(nf,-bd*pref,bd*pref)[0]-0.99*dens,L/2)[0]+2) #find the Qn inside which 99.5% of the particles should be
for _ in range(samplesize):
if N%2:
I = [0]
index = 1
else:
I = []
index = 0.5
sign = 1
newreject = []
while len(I) < N and index<boundbox:
ki = index*pref
                if uniform()<nf(ki):
"""
A collection of (mostly) stand alone helper functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
import sys
if sys.version_info >= (3,0):
xrange = range
"""
Miscellaneous functions
~~~~~~~~~~~~~~~~~~~~~~~
"""
def setDefaultArgs(func, **kwargs):
"""
Changes the default args in `func` to match `kwargs`.
This can be useful when dealing with deeply nested functions for which the
default parameters cannot be set directly in the top-level function.
Raises
------
ValueError
if `func` does not have default arguments that match `kwargs`.
Example
-------
>>> def foo(bar="Hello world!"):
    ...     print(bar)
>>> setDefaultArgs(foo, bar="The world has changed!")
>>> foo()
The world has changed!
"""
import inspect
args, varargs, varkwargs, defaults = inspect.getargspec(func)
have_defaults = args[-len(defaults):]
defaults = list(defaults) # so that we can write to it
for kwd in kwargs:
try:
i = have_defaults.index(kwd)
except ValueError:
raise ValueError("Function '%s()' does not have default argument "
"named '%s'." % (func.__name__, kwd))
defaults[i] = kwargs[kwd]
func.__defaults__ = tuple(defaults)
def monotonicIndices(x):
"""
Returns the indices of `x` such that `x[i]` is purely increasing.
"""
x = np.array(x)
if x[0] > x[-1]:
x = x[::-1]
is_reversed = True
else:
is_reversed = False
I = [0]
for i in xrange(1, len(x)-1):
if x[i] > x[I[-1]] and x[i] < x[-1]:
I.append(i)
I.append(len(x)-1)
if is_reversed:
return len(x)-1-np.array(I)
else:
return np.array(I)
def clampVal(x, a, b):
"""
Clamp the value `x` to be between `a` and `b`.
Parameters
----------
x, a, b : array_like
Must have the same shape or be broadcastable.
Returns
-------
array_like
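    Example
    -------
    >>> clampVal(5., 0., 3.)
    3.0
    >>> clampVal(np.array([-1., 2., 5.]), 0., 3.)
    array([0., 2., 3.])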
"""
s = np.sign(b-a) # +1 for b > a, -1 for b < a
sa = 1+s*np.sign(x-a)
    x = (x*sa + a*(2-sa)) / 2
    sb = 1+s*np.sign(b-x)
    x = (x*sb + b*(2-sb)) / 2
return x
"""
Numerical integration
~~~~~~~~~~~~~~~~~~~~~
"""
class IntegrationError(Exception):
"""
Used to indicate an integration error, primarily in :func:`rkqs`.
"""
pass
_rkqs_rval = namedtuple("rkqs_rval", "Delta_y Delta_t dtnxt")
def rkqs(y,dydt,t,f, dt_try, epsfrac, epsabs, args=()):
"""
Take a single 5th order Runge-Kutta step with error monitoring.
This function is adapted from Numerical Recipes in C.
The step size dynamically changes such that the error in `y` is smaller
than the larger of `epsfrac` and `epsabs`. That way, if one wants to
disregard the fractional error, set `epsfrac` to zero but keep `epsabs`
non-zero.
Parameters
----------
y, dydt : array_like
The initial value and its derivative at the start of the step.
They should satisfy ``dydt = f(y,t)``. `dydt` is included here for
efficiency (in case the calling function already calculated it).
t : float
The integration variable.
f : callable
The derivative function.
dt_try : float
An initial guess for the step size.
epsfrac, epsabs : array_like
The maximual fractional and absolute errors. Should be either length 1
or the same size as `y`.
args : tuple
Optional arguments for `f`.
Returns
-------
Delta_y : array_like
Change in `y` during this step.
Delta_t : float
Change in `t` during this step.
dtnext : float
Best guess for next step size.
Raises
------
IntegrationError
If the step size gets smaller than the floating point error.
References
----------
Based on algorithms described in [1]_.
.. [1] <NAME>, et. al. "Numerical Recipes in C: The Art of Scientific
Computing. Second Edition." Cambridge, 1992.
"""
dt = dt_try
while True:
dy,yerr = _rkck(y,dydt,t,f,dt,args)
errmax = np.nan_to_num(np.max(np.min([
abs(yerr/epsabs), abs(yerr)/((abs(y)+1e-300)*epsfrac)
], axis=0)))
if(errmax < 1.0):
break # Step succeeded
dttemp = 0.9*dt*errmax**-.25
dt = max(dttemp,dt*.1) if dt > 0 else min(dttemp,dt*.1)
if t + dt == t:
raise IntegrationError("Stepsize rounds down to zero.")
if errmax > 1.89e-4:
dtnext = 0.9 * dt * errmax**-.2
else:
dtnext = 5*dt
return _rkqs_rval(dy, dt, dtnext)
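# Illustrative driver loop (not part of the original API): integrate dy/dt = -y from
# t = 0 to t = 5, letting rkqs choose the step sizes.
#
#   def decay(y, t):
#       return -y
#   y, t, dt = np.array([1.0]), 0.0, 0.1
#   while t < 5.0:
#       dy, dt_did, dt = rkqs(y, decay(y, t), t, decay, dt, epsfrac=1e-8, epsabs=1e-8)
#       y, t = y + dy, t + dt_did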
def rkqs2(y,dydt,t,f, dt_try, inv_epsabs, args=()):
"""
Same as :func:`rkqs`, but ``inv_epsabs = 1/epsabs`` and ``epsfrac`` is
not used.
"""
dt = dt_try
while True:
dy,yerr = _rkck(y,dydt,t,f,dt,args)
errmax = np.max(yerr * inv_epsabs)
if(errmax < 1.0):
break # Step succeeded
dttemp = 0.9*dt*errmax**-.25
dt = max(dttemp,dt*.1) if dt > 0 else min(dttemp,dt*.1)
if t+dt == t:
raise IntegrationError("Stepsize rounds down to zero.")
if errmax > 1.89e-4:
dtnext = 0.9 * dt * errmax**-.2
else:
dtnext = 5*dt
return _rkqs_rval(dy, dt, dtnext)
def _rkck(y,dydt,t,f,dt,args=()):
"""
Take one 5th-order Cash-Karp Runge-Kutta step.
Returns
-------
array_like
The change in `y` during this step.
array_like
An error estimate for `y`.
"""
a2=0.2;a3=0.3;a4=0.6;a5=1.0;a6=0.875;b21=0.2 # noqa
b31=3.0/40.0;b32=9.0/40.0;b41=0.3;b42 = -0.9;b43=1.2; # noqa
b51 = -11.0/54.0; b52=2.5;b53 = -70.0/27.0;b54=35.0/27.0; # noqa
b61=1631.0/55296.0;b62=175.0/512.0;b63=575.0/13824.0; # noqa
b64=44275.0/110592.0;b65=253.0/4096.0;c1=37.0/378.0; # noqa
c3=250.0/621.0;c4=125.0/594.0;c6=512.0/1771.0; # noqa
dc5 = -277.00/14336.0; # noqa
dc1=c1-2825.0/27648.0;dc3=c3-18575.0/48384.0; # noqa
dc4=c4-13525.0/55296.0;dc6=c6-0.25 # noqa
ytemp = y+b21*dt*dydt
ak2 = f(ytemp, t+a2*dt, *args)
ytemp = y+dt*(b31*dydt+b32*ak2)
ak3 = f(ytemp, t+a3*dt, *args)
ytemp = y+dt*(b41*dydt+b42*ak2+b43*ak3)
ak4 = f(ytemp, t+a4*dt, *args)
ytemp = y + dt*(b51*dydt+b52*ak2+b53*ak3+b54*ak4)
ak5 = f(ytemp, t+a5*dt, *args)
ytemp = y + dt*(b61*dydt+b62*ak2+b63*ak3+b64*ak4+b65*ak5)
ak6 = f(ytemp, t+a6*dt, *args)
dyout = dt*(c1*dydt+c3*ak3+c4*ak4+c6*ak6)
yerr = dt*(dc1*dydt+dc3*ak3+dc4*ak4+dc5*ak5+dc6*ak6)
return dyout, yerr
"""
Numerical derivatives
~~~~~~~~~~~~~~~~~~~~~
The *derivij()* functions accept arrays as input and return arrays as output.
In contrast, :class:`gradientFunction` and :class:hessianFunction` accept
functions as input and return callable class instances (essentially functions)
as output. The returned functions can then be used to find derivatives.
"""
def deriv14(y,x):
R"""
Calculates :math:`dy/dx` to fourth-order in :math:`\Delta x` using
finite differences. The derivative is taken along the last dimension of `y`.
Both `y` and `x` should be numpy arrays. The derivatives are centered
in the interior of the array, but not at the edges. The spacing in `x`
does not need to be uniform.
"""
n = len(x)
j = np.arange(5)
j[j > 4//2] -= 5
i = np.arange(n) - j[:,np.newaxis]
i[i < 0] += 5
i[i >= n] -= 5
d1 = x[i[1]]-x[i[0]]
d2 = x[i[2]]-x[i[0]]
d3 = x[i[3]]-x[i[0]]
d4 = x[i[4]]-x[i[0]]
w4 = (d1*d2*d3) / (
-d4 * (-d1*d2*d3 + d4 * (d1*d2+d2*d3+d3*d1 + d4 * (+d4-d1-d2-d3))))
w3 = (d1*d2*d4) / (
-d3 * (-d1*d2*d4 + d3 * (d1*d2+d2*d4+d4*d1 + d3 * (-d4-d1-d2+d3))))
w2 = (d1*d4*d3) / (
-d2 * (-d1*d4*d3 + d2 * (d1*d4+d4*d3+d3*d1 + d2 * (-d4-d1+d2-d3))))
w1 = (d4*d2*d3) / (
-d1 * (-d4*d2*d3 + d1 * (d4*d2+d2*d3+d3*d4 + d1 * (-d4+d1-d2-d3))))
w0 = -(w1+w2+w3+w4)
dy = (w0*y[...,i[0]] + w1*y[...,i[1]] +
w2*y[...,i[2]] + w3*y[...,i[3]] + w4*y[...,i[4]])
return dy
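# Quick illustrative check: on an irregularly spaced grid the 5-point stencil recovers
# d/dx sin(x) closely (one-sided stencils are used at the two ends), e.g.
#
#   x = np.sort(np.random.uniform(0., np.pi, 201))
#   err = np.abs(deriv14(np.sin(x), x) - np.cos(x))   # small everywhere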
def deriv14_const_dx(y, dx=1.0):
R"""
Calculates :math:`dy/dx` to fourth-order in :math:`\Delta x` using
finite differences. The derivative is taken along the last dimension of `y`.
The output of this function should be identical to :func:`deriv14` when the
spacing in `x` is constant, but this will be faster.
Parameters
----------
y : array_like
dx : float, optional
"""
y = y.T # now the derivative is along the first dimension
dy = np.empty_like(y)
dy[2:-2] = y[:-4] - 8*y[1:-3] + 8*y[3:-1] - y[4:]
dy[+0] = -25*y[+0] + 48*y[+1] - 36*y[+2] + 16*y[+3] - 3*y[+4]
dy[+1] = -3*y[+0] - 10*y[+1] + 18*y[+2] - 6*y[+3] + y[+4]
dy[-2] = +3*y[-1] + 10*y[-2] - 18*y[-3] + 6*y[-4] - y[-5]
dy[-1] = +25*y[-1] - 48*y[-2] + 36*y[-3] - 16*y[-4] + 3*y[-5]
return dy.T / (12.0 * dx)
def deriv1n(y,x,n):
"""
Calculates :math:`dy/dx` to nth-order in :math:`\Delta x` using
finite differences. The derivative is taken along the last dimension of `y`.
Both `y` and `x` should be numpy arrays. The derivatives are centered in the
interior of the array, but not at the edges. The spacing in `x` does not
need to be uniform.
"""
nx = len(x)
j = np.arange(n+1)
j[j > n//2] -= n+1
i = np.arange(nx) - j[:,np.newaxis]
i[i < 0] += n+1
i[i >= nx] -= n+1
d = np.empty((n,n,nx), dtype=x.dtype)*1.0
d[0] = x[i[1:]] - x[i[0]]
for j in xrange(1,n):
d[j] = np.roll(d[j-1], -1, axis=0)
d[:,0] *= -1
w = np.zeros((n+1,nx), dtype=y.dtype)*1.
# For example, when calculating w[1], we need only use
# w[1]: d1 = d[0,0], d2 = d[0,1], d3 = d[0,2], ..., dn = d[0,n-1]
# and for the other weights we just increment the first index:
# w[2]: d2 = d[1,0], d3 = d[1,1], d4 = d[1,2], ..., dn = d[1,n-2],
# d1 = d[1,n-1]
# So we should be able to calculate all of them at once like this.
s = ((2**np.arange(n-1)) & np.arange(2**(n-1))[:,np.newaxis])
s[s > 0] = (np.arange(1,n) * np.ones(2**(n-1))[:,np.newaxis])[s > 0]
w[1:] = (np.sum(np.product(d[:,s],axis=2), axis=1)*d[:,0]
/ np.product(d[:,1:], axis=1))
w[1:] = -w[1:]**-1
w[0] = -np.sum(w[1:],axis=0)
dy = np.sum(w*y[...,i], axis=-2)
return dy
def deriv23(y,x):
"""
Calculates :math:`d^2y/dx^2` to third-order in :math:`\Delta x` using
finite differences. The derivative is taken along the last dimension of `y`.
Both `y` and `x` should be numpy arrays. The derivatives are centered in the
interior of the array, but not at the edges. The spacing in `x` does not
need to be uniform. The accuracy increases to fourth-order if the spacing
is uniform.
"""
n = len(x)
j = np.arange(5)
j[j > 4//2] -= 5
i = np.arange(n) - j[:,np.newaxis]
i[i < 0] += 5
i[i >= n] -= 5
d1 = x[i[1]]-x[i[0]]
d2 = x[i[2]]-x[i[0]]
d3 = x[i[3]]-x[i[0]]
d4 = x[i[4]]-x[i[0]]
w4 = 2*(d1*d2+d2*d3+d3*d1) / (
d4 * (-d1*d2*d3 + d4 * (d1*d2+d2*d3+d3*d1 + d4 * (+d4-d1-d2-d3) ) ) )
w3 = 2*(d1*d2+d2*d4+d4*d1) / (
d3 * (-d1*d2*d4 + d3 * (d1*d2+d2*d4+d4*d1 + d3 * (-d4-d1-d2+d3) ) ) )
w2 = 2*(d1*d4+d4*d3+d3*d1) / (
d2 * (-d1*d4*d3 + d2 * (d1*d4+d4*d3+d3*d1 + d2 * (-d4-d1+d2-d3) ) ) )
w1 = 2*(d4*d2+d2*d3+d3*d4) / (
d1 * (-d4*d2*d3 + d1 * (d4*d2+d2*d3+d3*d4 + d1 * (-d4+d1-d2-d3) ) ) )
w0 = -(w1+w2+w3+w4)
d2y = (w0*y[...,i[0]] + w1*y[...,i[1]] +
w2*y[...,i[2]] + w3*y[...,i[3]] + w4*y[...,i[4]])
return d2y
def deriv23_const_dx(y, dx=1.0):
"""
Calculates :math:`d^2y/dx^2` to third-order in :math:`\Delta x` using
finite differences. The derivative is taken along the last dimension of `y`.
The output of this function should be identical to :func:`deriv23` when the
spacing in `x` is constant, but this will be faster.
Parameters
----------
y : array_like
dx : float, optional
"""
y = y.T # now the derivative is along the first dimension
dy = np.empty_like(y)
dy[2:-2] = -y[:-4] + 16*y[1:-3] - 30*y[2:-2] + 16*y[3:-1] - y[4:]
dy[+0] = 35*y[+0] - 104*y[+1] + 114*y[+2] - 56*y[+3] + 11*y[+4]
dy[+1] = 11*y[+0] - 20*y[+1] + 6*y[+2] + 4*y[+3] - y[+4]
dy[-2] = 11*y[-1] - 20*y[-2] + 6*y[-3] + 4*y[-4] - y[-5]
dy[-1] = 35*y[-1] - 104*y[-2] + 114*y[-3] - 56*y[-4] + 11*y[-5]
return dy.T / (12.0 * dx)
class gradientFunction:
"""
Make a function which returns the gradient of some scalar function.
Parameters
----------
f : callable
The first argument `x` should either be a single point with length
`Ndim` or an array (or matrix, etc.) of points with shape
``(..., Ndim)``, where ``...`` is some arbitrary shape. The return
shape should be the same as the input shape, but with the last axis
stripped off (i.e., it should be a scalar function). Additional
required or optional arguments are allowed.
eps : float or array_like
The small change in `x` used to calculate the finite differences.
Can either be a scalar or have length `Ndim`.
Ndim : int
Number of dimensions for each point.
order : 2 or 4
Calculate the derivatives to either 2nd or 4th order in `eps`.
Example
-------
>>> def f(X):
... x,y = np.asarray(X).T
... return (x*x + x*y +3.*y*y*y).T
>>> df = gradientFunction(f, eps=.01, Ndim=2, order=4)
>>> x = np.array([[0,0],[0,1],[1,0],[1,1]])
    >>> df(x)
array([[ 0., 0.], [ 1., 9.], [ 2., 1.], [ 3., 10.]])
"""
def __init__(self, f, eps, Ndim, order=4):
assert order == 2 or order == 4
eps = np.asanyarray(eps)
dx = np.empty((order, Ndim, Ndim))
dx[:] = np.diag(np.ones(Ndim)*eps)
dxT = dx.T
coef = np.empty((order, Ndim))
coef[:] = 1.0/eps
coefT = coef.T
if order == 2:
dxT *= [-1, 1]
coefT *= [-.5, .5]
if order == 4:
dxT *= [-2, -1, 1, 2]
coefT *= [1, -8, 8, -1]
coefT /= 12.0
self.f = f
self.dx = dx
self.coef = coef
def __call__(self, x, *args, **kwargs):
"""
Calculate the gradient. Output shape is the same as the input shape.
"""
x = np.asanyarray(x)[...,np.newaxis,np.newaxis,:]
return np.sum(self.f(x+self.dx, *args, **kwargs)*self.coef, axis=-2)
class hessianFunction:
"""
Make a function which returns the Hessian (second derivative) matrix of
some scalar function.
Parameters
----------
f : callable
The first argument `x` should either be a single point with length
`Ndim` or an array (or matrix, etc.) of points with shape
``(..., Ndim)``, where ``...`` is some arbitrary shape. The return
shape should be the same as the input shape, but with the last axis
stripped off (i.e., it should be a scalar function). Additional
required or optional arguments are allowed.
eps : float or array_like
The small change in `x` used to calculate the finite differences.
Can either be a scalar or have length `Ndim`.
Ndim : int
Number of dimensions for each point.
order : 2 or 4
Calculate the derivatives to either 2nd or 4th order in `eps`.
"""
def __init__(self, f, eps, Ndim, order=4):
assert order == 2 or order == 4
eps = np.ones(Ndim) * eps
dx = []
coef = []
        for i in range(Ndim):
dx.append([])
coef.append([])
            for j in range(i):
dx_ = np.zeros((order, order, Ndim))
if (order == 2):
dx_[:,:,i] = np.array([-1,1]) * eps[i]
dx_ = np.rollaxis(dx_, 1)
dx_[:,:,j] = np.array([-1,1]) * eps[j]
coef_ = np.array([-.5, .5])
coef_ = coef_[:,np.newaxis] * coef_[np.newaxis,:]
coef_ /= eps[i]*eps[j]
if (order == 4):
dx_[:,:,i] = np.array([-2,-1,1,2]) * eps[i]
dx_ = np.rollaxis(dx_, 1)
dx_[:,:,j] = np.array([-2,-1,1,2]) * eps[j]
                    coef_ = np.array([1, -8, 8, -1.])
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 13:30:06 2018
@author: gregz
"""
import time
import numpy as np
import os.path as op
import glob
import warnings
from astropy.io import fits
from utils import biweight_location
from scipy.signal import savgol_filter, medfilt2d
from scipy.interpolate import interp1d, interp2d
from input_utils import setup_logging
from astrometry import Astrometry
dither_pattern = np.array([[0., 0.], [1.27, -0.73], [1.27, 0.73]])
virus_amps = ['LL', 'LU', 'RU', 'RL']
lrs2_amps = [['LL', 'LU'], ['RL', 'RU']]
fplane_file = '/work/03730/gregz/maverick/fplane.txt'
flt_obs = '%07d' % 15
twi_obs = '%07d' % 1
sci_obs = '%07d' % 13
twi_date = '20170205'
sci_date = twi_date
flt_date = twi_date
# FOR LRS2
instrument = 'lrs2'
AMPS = lrs2_amps[0]
dither_pattern = np.zeros((10, 2))
log = setup_logging('panacea_quicklook')
basered = '/work/03730/gregz/maverick'
baseraw = '/work/03946/hetdex/maverick'
twi_path = op.join(basered, 'reductions', twi_date, '%s', '%s%s', 'exp01',
'%s', 'multi_*_%s_*_LL.fits')
sci_path = op.join(baseraw, sci_date, '%s', '%s%s', 'exp%s',
'%s', '2*_%sLL*.fits')
flt_path = op.join(baseraw, flt_date, '%s', '%s%s', 'exp*',
'%s', '2*_%sLL*.fits')
sciflt_path = op.join(baseraw, twi_date, '%s', '%s%s', 'exp*',
'%s', '2*_%sLL_twi.fits')
bias_path = op.join(baseraw, twi_date, '%s', '%s%s', 'exp*',
'%s', '2*_%sLL_zro.fits')
def get_cal_info(twi_path, amp):
F = fits.open(glob.glob(twi_path.replace('LL', amp))[0])
return (np.array(F['ifupos'].data, dtype=float),
np.array(F['trace'].data, dtype=float),
np.array(F['wavelength'].data, dtype=float))
def orient_image(image, amp, ampname):
'''
Orient the images from blue to red (left to right)
Fibers are oriented to match configuration files
'''
if amp == "LU":
image[:] = image[::-1, ::-1]
if amp == "RL":
image[:] = image[::-1, ::-1]
if ampname is not None:
if ampname == 'LR' or ampname == 'UL':
image[:] = image[:, ::-1]
return image
def make_avg_spec(wave, spec, binsize=35, per=50):
ind = np.argsort(wave.ravel())
T = 1
for p in wave.shape:
T *= p
wchunks = np.array_split(wave.ravel()[ind],
T / binsize)
schunks = np.array_split(spec.ravel()[ind],
T / binsize)
nwave = np.array([np.mean(chunk) for chunk in wchunks])
nspec = np.array([np.percentile(chunk, per) for chunk in schunks])
nwave, nind = np.unique(nwave, return_index=True)
return nwave, nspec[nind]
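# Hedged usage sketch (added): collapse a many-fiber wavelength/spectrum pair into
# a single binned percentile spectrum; the shapes and values here are illustrative.
def _example_make_avg_spec():
    rng = np.random.RandomState(0)
    wave = np.tile(np.linspace(3500., 5500., 1000), (20, 1))
    spec = np.ones_like(wave) + 0.05 * rng.randn(*wave.shape)
    nwave, nspec = make_avg_spec(wave, spec, binsize=35, per=50)
    # nwave is sorted and unique; nspec holds the 50th percentile in each bin.
    return nwave, nspec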
def base_reduction(filename):
a = fits.open(filename)
image = np.array(a[0].data, dtype=float)
# overscan sub
    overscan_length = 32 * (image.shape[1] // 1064)
O = biweight_location(image[:, -(overscan_length-2):])
image[:] = image - O
# trim image
image = image[:, :-overscan_length]
try:
ampname = a[0].header['AMPNAME']
    except KeyError:
ampname = None
a = orient_image(image, amp, ampname)
return a
def get_sciflat_field(flt_path, amp, array_wave, array_trace, common_wave,
masterbias, log):
files = glob.glob(flt_path.replace('LL', amp))
listflat = []
array_flt = base_reduction(files[0])
bigW = np.zeros(array_flt.shape)
Y, X = np.indices(array_wave.shape)
YY, XX = np.indices(array_flt.shape)
for x, at, aw, xx, yy in zip(np.array_split(X, 2, axis=0),
np.array_split(array_trace, 2, axis=0),
np.array_split(array_wave, 2, axis=0),
np.array_split(XX, 2, axis=0),
np.array_split(YY, 2, axis=0)):
for j in np.arange(at.shape[1]):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p0 = np.polyfit(at[:, j], aw[:, j], 7)
bigW[yy[:, j], j] = np.polyval(p0, yy[:, j])
listspec = []
for filename in files:
log.info('Working on sciflat %s' % filename)
array_flt = base_reduction(filename) - masterbias
        x = np.arange(array_wave.shape[1])
# Module survey figures
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import glob, copy, os, sys
import json
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import aplpy
from astropy import units as u
from astropy.units import Unit
from astropy.io import ascii, fits
from astropy.table import QTable, Table, Column
from astropy.coordinates import SkyCoord
from astropy import constants as const
from astropy import wcs
from linetools.spectra.xspectrum1d import XSpectrum1D
from xastropy.obs import x_getsdssimg as xosdss
from xastropy.obs import radec as xra
from xastropy.plotting import utils as xputils
from xastropy.xutils import xdebug as xdb
from xastropy.casbah import utils as xcasu
from xastropy.casbah import load_casbah as xcasl
# Local
#sys.path.append(os.path.abspath("./py"))
#import qpq_spec as qpqs
#### ########################## #########################
def hectospec_targets(field, outfil=None):
# Init
fcoord = SkyCoord(ra=field[1],dec=field[2])
if outfil is None:
outfil = xcasu.get_filename(field,'HECTO_TARG_FIG')
# Load field
lfield = xcasl.load_field(field)
targ_coord = SkyCoord(ra=lfield.targets['TARG_RA']*u.deg,
dec=lfield.targets['TARG_DEC']*u.deg)
all_pa = fcoord.position_angle(targ_coord)
all_sep = fcoord.separation(targ_coord).to('arcmin')
# Start the plot
if outfil is not None:
pp = PdfPages(outfil)
# Targets only
plt.figure(figsize=(8, 4.5))
plt.clf()
gs = gridspec.GridSpec(1,2)
plt.suptitle('{:s}: Hectospec Targets (from SDSS imaging)'.format(field[0])
,fontsize=19.)
##
# Hectospec first
for tt in range(2):
ax_hecto = plt.subplot(gs[tt])
# Read SDSS Image
if tt == 0:
imsize=60. # arcmin
else:
imsize=10. # arcmin
#Configs
if tt == 0:
hecto_obs = lfield.observing[np.where(
lfield.observing['INSTR']=='HECTOSPEC')[0]]
            unimsk = np.unique(np.array(hecto_obs['MASK_NAME']))
import numpy as np
from scipy.special import erfinv, gammainc
from scipy.stats import gamma
from scipy.linalg import lstsq
from copy import deepcopy
from obspy.geodetics.base import calc_vincenty_inverse
""" Contains the auxilliary functions called by fast_lts_array.py
Many of these codes are Python3 translations of those found in
the MATLAB Continuous Sound and Vibration Toolbox.
"""
def hcalc(ALPHA, n, p):
r""" Generate the h-value, the number of points to fit.
Args:
ALPHA (float): The decimal percentage of points
to keep. Default is 0.75.
n (int): The total number of points.
p (int): The number of parameters.
Returns:
(int):
``h``: The number of points to fit.
"""
h = np.floor(2*np.floor((n + p + 1)/2)
- n + 2*(n - np.floor((n + p + 1)/2)) * ALPHA)
return int(h)
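# Hedged worked example (added, illustrative): with ALPHA = 0.75, n = 100
# observations, and p = 3 parameters, floor((n+p+1)/2) = 52, so
# h = floor(2*52 - 100 + 2*(100 - 52)*0.75) = 76 points are kept in the fit.
def _example_hcalc():
    return hcalc(0.75, 100, 3)  # -> 76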
def uniran(seed):
r""" Generate a random number and a new seed.
Args:
seed (int): A seed value.
Returns:
(tuple):
``random`` (float): A pseudorandom number.
``seed`` (float): A (new) seed value.
"""
seed = np.floor(seed * 5761) + 999
quot = np.floor(seed / 65536)
seed = np.floor(seed) - np.floor(quot * 65536)
random = float(seed / 65536)
return random, seed
def randomset(tot, npar, seed):
r""" Generate an array of indices and a new seed.
This function is called if not all (p+1) subsets out of
n will be considered. It randomly draws a subsample of
    npar cases out of tot.
Args:
tot (int): The total number of data points.
npar (int): The number of parameters to estimate.
seed (float): A random seed.
Returns:
(tuple):
``randset`` (array): A random set of indices for choosing subsets.
``seed`` (float): A new random seed.
"""
randlist = []
for jj in range(0, npar):
random, seed = uniran(seed)
num = np.floor(random * tot) + 1
if jj > 0:
while num in randlist:
random, seed = uniran(seed)
num = np.floor(random * tot) + 1
randlist.append(num)
randset = np.array(randlist, dtype=int)
return randset, seed
def qgamma(p, a):
r""" The gamma inverse distribution function. """
x = np.max((a - 1, 0.1))
dx = 1
    eps = 7/3 - 4/3 - 1  # double-precision machine epsilon via floating-point trick
while np.abs(dx) > 256 * eps * np.max(np.append(x, 1)):
dx = (pgamma(x, a) - p) / dgamma(x, a)
x = x - dx
x = x + (dx - x) / 2 * float(x < 0)
    if hasattr(x, "__len__"):
        x[p == 0] = 0
        x[p == 1] = np.inf
return x
def pgamma(x, a):
""" Regularized lower incomplete gamma function. """
g1 = gammainc(a, x)
return g1
def dgamma(x, a):
""" Probability of a gamma continuous random variable. """
g2 = gamma.pdf(x, a)
return g2
def qchisq(p, a):
""" The Chi-squared inverse distribution function. """
x = 2*qgamma(p, 0.5*a)
return x
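# Hedged worked example (added): the 95th percentile of a chi-squared distribution
# with one degree of freedom is about 3.84, so qchisq(0.95, 1) should return
# approximately that value via the Newton iteration in qgamma.
def _example_qchisq():
    return qchisq(0.95, 1)  # -> approximately 3.84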
def insertion(bestmean, bobj, z, obj):
r""" Keep track of the value of the objective function and the
associated parameter vector z.
    This code could likely be re-written for more simplicity.
Args:
bestmean (array): Array of best least squares fit values.
bobj (array): Array of lowest 10 objective function values.
z (array): Trial coefficient vector.
obj (float): Trial objective function value;
the sum of squared residuals.
Returns:
(tuple):
``bestmean`` (array): New array of best least squares fit values.
``bobj`` (array): New array of lowest objective function values.
"""
insert = 1
equ = [x for x in range(len(bobj)) if bobj[x] == obj]
z = np.reshape(z, (len(z), ))
for jj in equ:
if (bestmean[:, jj] == z).all():
insert = 0
if insert:
ins = np.min([x for x in range(0, 10) if obj < bobj[x]])
if ins == 9:
bestmean[:, ins] = z
bobj[ins] = obj
else:
ins2 = np.array(list(range(ins, 9)))
best2 = deepcopy(bestmean[:, ins2])
bestmean[:, ins] = z
best1 = deepcopy(bestmean[:, range(0, ins+1)])
if ins == 0:
m = np.shape(bestmean)[0]
best1 = np.reshape(best1, (m, 1))
bestmean = np.concatenate((best1, best2), axis=1)
bobj2 = deepcopy(bobj[ins2])
bobj[ins] = obj
bobj1 = deepcopy(bobj[range(0, ins+1)])
if ins == 0:
bobj = np.append(bobj1, bobj2)
else:
bobj = np.concatenate((bobj1, bobj2), axis=0)
return bestmean, bobj
def rawcorfactorlts(p, intercept, n, ALPHA):
r""" Calculate small sample correction factor.
Calculates the correction factor (from Pison et al. 2002)
to make the LTS solution unbiased for small n.
Args:
p (int): The rank of X, the number of parameters to fit.
intercept (int): Logical. Are you fitting an intercept?
Set to false for array processing.
n (int): The number of data points used in processing.
ALPHA (float): The percentage of data points to keep in
the LTS, e.g. h = floor(ALPHA*n).
Returns:
(float):
``finitefactor``: A correction factor to make the LTS
solution approximately unbiased for small (i.e. finite n).
"""
if intercept == 1:
p = p - 1
if p == 0:
fp_500_n = 1 - np.exp(0.262024211897096)*1/(n**0.604756680630497)
fp_875_n = 1 - np.exp(-0.351584646688712)*1/(n**1.01646567502486)
if (ALPHA >= 0.500) and (ALPHA <= 0.875):
fp_alpha_n = fp_500_n + (fp_875_n - fp_500_n)/0.375*(ALPHA - 0.500)
fp_alpha_n = np.sqrt(fp_alpha_n)
if (ALPHA > 0.875) and (ALPHA < 1):
fp_alpha_n = fp_875_n + (1 - fp_875_n)/0.125*(ALPHA - 0.875)
fp_alpha_n = np.sqrt(fp_alpha_n)
else:
if p == 1:
if intercept == 1:
                fp_500_n = 1 - np.exp(0.630869217886906)*1/(n**0.650789250442946)
                fp_875_n = 1 - np.exp(0.565065391014791)*1/(n**1.03044199012509)
            else:
                fp_500_n = 1 - np.exp(-0.0181777452315321)*1/(n**0.697629772271099)
                fp_875_n = 1 - np.exp(-0.310122738776431)*1/(n**1.06241615923172)
if p > 1:
if intercept == 1:
# ALPHA = 0.875.
coeffalpha875 = np.array([
[-0.251778730491252, -0.146660023184295],
[0.883966931611758, 0.86292940340761],
[3, 5]])
# ALPHA = 0.500.
coeffalpha500 = np.array([
[-0.487338281979106, -0.340762058011],
[0.405511279418594, 0.37972360544988], [3, 5]])
else:
# ALPHA = 0.875.
coeffalpha875 = np.array([
[-0.251778730491252, -0.146660023184295],
[0.883966931611758, 0.86292940340761], [3, 5]])
# ALPHA = 0.500.
coeffalpha500 = np.array([
[-0.487338281979106, -0.340762058011],
[0.405511279418594, 0.37972360544988], [3, 5]])
# Apply eqns (6) and (7) from Pison et al. (2002)
y1_500 = 1 + coeffalpha500[0, 0]/np.power(p, coeffalpha500[1, 0])
y2_500 = 1 + coeffalpha500[0, 1]/np.power(p, coeffalpha500[1, 1])
            y1_875 = 1 + coeffalpha875[0, 0]/np.power(p, coeffalpha875[1, 0])
"""
A federated learning training session using FedProx.
Reference:
    Li, T., Sahu, A. K., Zaheer, M., Sanjabi, M., Talwalkar, A., & Smith, V. (2020).
"Federated optimization in heterogeneous networks." Proceedings of Machine
Learning and Systems, 2, 429-450.
https://proceedings.mlsys.org/paper/2020/file/38af86134b65d0f10fe33d30dd76442e-Paper.pdf
"""
import torch
import numpy as np
from plato.config import Config
from plato.trainers import basic
def flatten_weights_from_model(model):
""" Return the weights of the given model as a 1-D tensor """
weights = torch.tensor([], requires_grad=False)
for param in model.parameters():
weights = torch.cat((weights, torch.flatten(param)))
return weights
class FedProxLocalObjective:
""" Representing the local objective of FedProx clients. """
def __init__(self, model):
self.model = model
self.init_global_weights = flatten_weights_from_model(model)
def compute_objective(self, outputs, labels):
""" Compute the objective the FedProx client wishes to minimize. """
cur_weights = flatten_weights_from_model(self.model)
mu = Config().clients.proximal_term_penalty_constant
prox_term = mu / 2 * torch.linalg.norm(
cur_weights - self.init_global_weights, ord=2)
local_function = torch.nn.CrossEntropyLoss()
h = local_function(outputs, labels) + prox_term
return h
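# Illustrative note (added, not part of the original trainer): compute_objective
# above evaluates h_k(w) = F_k(w) + (mu/2) * ||w - w^t||_2, where F_k is the local
# cross-entropy loss, w are the current local weights, w^t are the global weights
# captured at construction time, and mu comes from the Plato config. (The FedProx
# paper writes the proximal term with the squared L2 norm; the line above applies
# torch.linalg.norm with ord=2 directly.) A minimal, hypothetical call, assuming a
# Plato run has already initialized Config():
def _example_fedprox_objective(model, outputs, labels):
    # `model`, `outputs`, and `labels` are placeholders supplied by the caller.
    local_objective = FedProxLocalObjective(model)
    return local_objective.compute_objective(outputs, labels)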
class Trainer(basic.Trainer):
""" The federated learning trainer for the FedProx client. """
def train_process(self, config, trainset, sampler, cut_layer=None):
"""The main training loop in FedProx framework. """
# For FedProx, the server will accept partial solutions from straggling clients
# after waiting them for a certain amount of time. To re-create this scenario in
# an experiment, a proportion of the selected clients will train for a smaller
# number of epochs to simulate the stragglers that return with partial solutions,
# as mentioned in Section 5.2
if hasattr(Config().clients, 'straggler_simulation') and Config(
).clients.straggler_simulation:
            np.random.seed(self.client_id)
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import networkx as nx
from mossspider import NetworkTMLE
@pytest.fixture
def sm_network():
"""Loads a small network for short test runs and checks of data set creations"""
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1, 'C': 1}),
(2, {'W': 0, 'A': 0, 'Y': 0, 'C': -1}),
(3, {'W': 0, 'A': 1, 'Y': 0, 'C': 5}),
(4, {'W': 0, 'A': 0, 'Y': 1, 'C': 0}),
(5, {'W': 1, 'A': 0, 'Y': 0, 'C': 0}),
(6, {'W': 1, 'A': 0, 'Y': 1, 'C': 0}),
(7, {'W': 0, 'A': 1, 'Y': 0, 'C': 10}),
(8, {'W': 0, 'A': 0, 'Y': 0, 'C': -5}),
(9, {'W': 1, 'A': 1, 'Y': 0, 'C': -5})])
G.add_edges_from([(1, 2), (1, 3), (1, 9),
(2, 3), (2, 6),
(3, 4),
(4, 7),
(5, 7), (5, 9)
])
return G
@pytest.fixture
def r_network():
"""Loads network from the R library tmlenet for comparison"""
df = pd.read_csv("tests/tmlenet_r_data.csv")
df['IDs'] = df['IDs'].str[1:].astype(int)
df['NETID_split'] = df['Net_str'].str.split()
G = nx.DiGraph()
G.add_nodes_from(df['IDs'])
for i, c in zip(df['IDs'], df['NETID_split']):
if type(c) is list:
for j in c:
G.add_edge(i, int(j[1:]))
# Adding attributes
for node in G.nodes():
        G.nodes[node]['W'] = int(df.loc[df['IDs'] == node, 'W1'])
        G.nodes[node]['A'] = int(df.loc[df['IDs'] == node, 'A'])
        G.nodes[node]['Y'] = int(df.loc[df['IDs'] == node, 'Y'])
return G
class TestNetworkTMLE:
def test_error_node_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), ("N", {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_self_loops(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 1, 'Y': 1}), (2, {'A': 0, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
G.add_edges_from([(1, 1), (1, 2), (3, 4)])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_nonbinary_a(self):
G = nx.Graph()
G.add_nodes_from([(1, {'A': 2, 'Y': 1}), (2, {'A': 5, 'Y': 1}), (3, {'A': 1, 'Y': 0}), (4, {'A': 0, 'Y': 0})])
with pytest.raises(ValueError):
NetworkTMLE(network=G, exposure='A', outcome='Y')
def test_error_degree_restrictions(self, r_network):
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=2)
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[0, 1, 2])
with pytest.raises(ValueError):
NetworkTMLE(network=r_network, exposure='A', outcome='Y', degree_restrict=[2, 0])
def test_error_fit_gimodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
# tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_fit_gsmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
# tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_gs_distributions(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution=None)
with pytest.raises(ValueError):
tmle.exposure_map_model('W', measure='mean', distribution='multinomial')
def test_error_fit_qmodel(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
# tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=0.0, samples=10)
def test_error_p_bound(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
# For single 'p'
with pytest.raises(ValueError):
tmle.fit(p=1.5, samples=10)
# For multiple 'p'
with pytest.raises(ValueError):
tmle.fit(p=[0.1, 1.5, 0.1,
0.1, 0.1, 0.1,
0.1, 0.1, 0.1], samples=100)
def test_error_p_type(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.fit(p=5, samples=10)
def test_error_summary(self, r_network):
tmle = NetworkTMLE(network=r_network, exposure='A', outcome='Y')
tmle.exposure_model('W')
tmle.exposure_map_model('W', distribution=None)
tmle.outcome_model('A + W')
with pytest.raises(ValueError):
tmle.summary()
def test_df_creation(self, sm_network):
columns = ["_original_id_", "W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2/3, 1, 1/3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1/3, 1, 1/3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1/2, 2],
[6, 1, 0, 1, 0, 0, 0, 0, 1],
[7, 0, 1, 0, 0, 0, 1, 1/2, 2],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[9, 1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_df_creation_restricted(self, sm_network):
expected = pd.DataFrame([[1, 1, 1, 2, 2/3, 1, 1/3, 3],
[0, 0, 0, 2, 2/3, 2, 2/3, 3],
[0, 1, 0, 1, 1/3, 1, 1/3, 3],
[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected_r = pd.DataFrame([[0, 0, 1, 2, 1, 0, 0, 2],
[1, 0, 0, 2, 1, 1, 1/2, 2],
[1, 0, 1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 1, 1/2, 2],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1/2, 2, 1, 2]],
columns=["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"],
index=[3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
created = tmle.df
created_r = tmle.df_restricted
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected,
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
pdt.assert_frame_equal(expected_r,
created_r[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_restricted_number(self, sm_network):
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[0, 2])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 6 == n_created_r
assert 3 == n_created - n_created_r
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y', degree_restrict=[1, 3])
n_created = tmle.df.shape[0]
n_created_r = tmle.df_restricted.shape[0]
assert 8 == n_created_r
assert 1 == n_created - n_created_r
def test_continuous_processing(self):
G = nx.Graph()
y_list = [1, -1, 5, 0, 0, 0, 10, -5]
G.add_nodes_from([(1, {'A': 0, 'Y': y_list[0]}), (2, {'A': 1, 'Y': y_list[1]}),
(3, {'A': 1, 'Y': y_list[2]}), (4, {'A': 0, 'Y': y_list[3]}),
(5, {'A': 1, 'Y': y_list[4]}), (6, {'A': 1, 'Y': y_list[5]}),
(7, {'A': 0, 'Y': y_list[6]}), (8, {'A': 0, 'Y': y_list[7]})])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y', continuous_bound=0.0001)
# Checking all flagged parts are correct
assert tmle._continuous_outcome is True
assert tmle._continuous_min_ == -5.0001
assert tmle._continuous_max_ == 10.0001
assert tmle._cb_ == 0.0001
# Checking that TMLE bounding works as intended
maximum = 10.0001
minimum = -5.0001
y_bound = (np.array(y_list) - minimum) / (maximum - minimum)
pdt.assert_series_equal(pd.Series(y_bound, index=[0, 1, 2, 3, 4, 5, 6, 7]),
tmle.df['Y'],
check_dtype=False, check_names=False)
def test_df_creation_continuous(self, sm_network):
expected = pd.DataFrame([[1, 1, 2, 1, 3],
[0, 0, 2, 2, 3],
[0, 1, 1, 1, 3],
[0, 0, 2, 0, 2],
[1, 0, 2, 1, 2],
[1, 0, 0, 0, 1],
[0, 1, 0, 1, 2],
[0, 0, 0, 0, 0],
[1, 1, 1, 2, 2]],
columns=["W", "A", "A_sum", "W_sum", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
expected["C"] = [4.00001333e-01, 2.66669778e-01, 6.66664444e-01, 3.33335556e-01, 3.33335556e-01,
3.33335556e-01, 9.99993333e-01, 6.66657778e-06, 6.66657778e-06]
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='C', continuous_bound=0.0001)
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is True
pdt.assert_frame_equal(expected[["W", "A", "C", "A_sum", "W_sum", "degree"]],
created[["W", "A", "C", "A_sum", "W_sum", "degree"]],
check_dtype=False)
def test_no_consecutive_ids(self):
G = nx.Graph()
G.add_nodes_from([(1, {'W': 1, 'A': 1, 'Y': 1}), (2, {'W': 0, 'A': 0, 'Y': 0}),
(3, {'W': 0, 'A': 1, 'Y': 0}), (4, {'W': 0, 'A': 0, 'Y': 1}),
(5, {'W': 1, 'A': 0, 'Y': 0}), (7, {'W': 1, 'A': 0, 'Y': 1}),
(9, {'W': 0, 'A': 1, 'Y': 0}), (11, {'W': 0, 'A': 0, 'Y': 0}),
(12, {'W': 1, 'A': 1, 'Y': 0})])
G.add_edges_from([(1, 2), (1, 3), (1, 12), (2, 3), (2, 7),
(3, 4), (4, 9), (5, 9), (5, 12)])
expected = pd.DataFrame([[1, 1, 1, 1, 2, 2 / 3, 1, 1 / 3, 3],
[2, 0, 0, 0, 2, 2/3, 2, 2/3, 3],
[3, 0, 1, 0, 1, 1 / 3, 1, 1 / 3, 3],
[4, 0, 0, 1, 2, 1, 0, 0, 2],
[5, 1, 0, 0, 2, 1, 1, 1 / 2, 2],
[7, 1, 0, 1, 0, 0, 0, 0, 1],
[8, 0, 1, 0, 0, 0, 1, 1 / 2, 2],
[11, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 1, 1, 0, 1, 1 / 2, 2, 1, 2]
],
columns=["_original_id_", "W", "A", "Y", "A_sum",
"A_mean", "W_sum", "W_mean", "degree"],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=G, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
pdt.assert_frame_equal(expected[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
created[["W", "A", "Y", "A_sum", "A_mean", "W_sum", "W_mean", "degree"]],
check_dtype=False)
def test_df_creation_nonparametric(self, sm_network):
columns = ["_original_id_", "A", "A_map1", "A_map2", "A_map3"]
expected = pd.DataFrame([[1, 1, 0, 1, 1],
[2, 0, 1, 1, 0],
[3, 1, 1, 0, 0],
[4, 0, 1, 1, 0],
[5, 0, 1, 1, 0],
[6, 0, 0, 0, 0],
[7, 1, 0, 0, 0],
[8, 0, 0, 0, 0],
[9, 1, 1, 0, 0]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df.sort_values(by='_original_id_').reset_index()
# Checking that expected is the same as the created
pdt.assert_frame_equal(expected[columns], created[columns], check_dtype=False)
def test_summary_measures_creation(self, sm_network):
columns = ["_original_id_", "A_sum", "A_mean", "A_var", "W_sum", "W_mean", "W_var"]
neighbors_w = {1: np.array([0, 0, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
5: np.array([0, 1]), 6: np.array([0]), 7: np.array([0, 1]), 9: np.array([1, 1])}
neighbors_a = {1: np.array([0, 1, 1]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([1, 1]),
5: np.array([1, 1]), 6: np.array([0]), 7: np.array([0, 0]), 9: np.array([0, 1])}
expected = pd.DataFrame([[1, np.sum(neighbors_a[1]), np.mean(neighbors_a[1]), np.var(neighbors_a[1]),
np.sum(neighbors_w[1]), np.mean(neighbors_w[1]), np.var(neighbors_w[1])],
[2, np.sum(neighbors_a[2]), np.mean(neighbors_a[2]), np.var(neighbors_a[2]),
np.sum(neighbors_w[2]), np.mean(neighbors_w[2]), np.var(neighbors_w[2])],
[3, np.sum(neighbors_a[3]), np.mean(neighbors_a[3]), np.var(neighbors_a[3]),
np.sum(neighbors_w[3]), np.mean(neighbors_w[3]), np.var(neighbors_w[3])],
[4, np.sum(neighbors_a[4]), np.mean(neighbors_a[4]), np.var(neighbors_a[4]),
np.sum(neighbors_w[4]), np.mean(neighbors_w[4]), np.var(neighbors_w[4])],
[5, np.sum(neighbors_a[5]), np.mean(neighbors_a[5]), np.var(neighbors_a[5]),
np.sum(neighbors_w[5]), np.mean(neighbors_w[5]), np.var(neighbors_w[5])],
[6, np.sum(neighbors_a[6]), np.mean(neighbors_a[6]), np.var(neighbors_a[6]),
np.sum(neighbors_w[6]), np.mean(neighbors_w[6]), np.var(neighbors_w[6])],
[7, np.sum(neighbors_a[7]), np.mean(neighbors_a[7]), np.var(neighbors_a[7]),
np.sum(neighbors_w[7]), np.mean(neighbors_w[7]), np.var(neighbors_w[7])],
[8, 0, 0, 0, 0, 0, 0], # Isolates are = 0
[9, np.sum(neighbors_a[9]), np.mean(neighbors_a[9]), np.var(neighbors_a[9]),
np.sum(neighbors_w[9]), np.mean(neighbors_w[9]), np.var(neighbors_w[9])]],
columns=columns,
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
tmle = NetworkTMLE(network=sm_network, exposure='A', outcome='Y')
created = tmle.df
# Checking that expected is the same as the created
assert tmle._continuous_outcome is False
pdt.assert_frame_equal(expected,
created[columns],
check_dtype=False)
def test_distance_measures_creation(self, sm_network):
columns = ["_original_id_", "A_mean_dist", "A_var_dist", "W_mean_dist", "W_var_dist"]
neighbors_w = {1: np.array([-1, -1, 0]), 2: np.array([0, 1, 1]), 3: np.array([0, 0, 1]), 4: np.array([0, 0]),
                       5: np.array([-1, 0]), 6: np.array([-1])
from typing import Optional
from typing import Union
import numpy as np
from scipy.sparse import coo_matrix
from ._typing_utils import Float
from ._typing_utils import Matrix
from ._utils import coo_matrix_builder
def build_frame_cost_matrix(
dist_matrix: coo_matrix_builder,
*,
track_start_cost: Optional[Float],
track_end_cost: Optional[Float],
) -> coo_matrix:
"""Build sparce array for frame-linking cost matrix.
Parameters
----------
dist_matrix : Matrix or `_utils.coo_matrix_builder`
The distance matrix for points at time t and t+1.
track_start_cost : Float, optional
The cost for starting the track (b in Jaqaman et al 2008 NMeth)
track_end_cost : Float, optional
The cost for ending the track (d in Jaqaman et al 2008 NMeth)
Returns
-------
cost_matrix : FloatArray
the cost matrix for frame linking
"""
M = dist_matrix.shape[0]
N = dist_matrix.shape[1]
C = coo_matrix_builder((M + N, N + M), dtype=np.float32)
C.append_matrix(dist_matrix)
if track_start_cost is None:
if len(C.data) > 0:
track_start_cost = np.max(C.data) * 1.05
else:
track_start_cost = 1.05
if track_end_cost is None:
if len(C.data) > 0:
track_end_cost = np.max(C.data) * 1.05
else:
track_end_cost = 1.05
C[np.arange(M, M + N), np.arange(N)] = np.ones(N) * track_end_cost
C[np.arange(M), np.arange(N, N + M)] = np.ones(M) * track_start_cost
min_val = np.min(C.data) if len(C.data) > 0 else 0
C[dist_matrix.col + M, dist_matrix.row + N] = min_val
return C.to_coo_matrix()
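# Illustrative note (added): the (M+N) x (M+N) matrix assembled above follows the
# Jaqaman et al. (2008) LAP layout,
#
#     [ D (M x N distances)    | diag(track_start_cost)   ]
#     [ diag(track_end_cost)   | D^T pattern at min cost  ]
#
# i.e. the upper-left block holds the point-to-point distances, the upper-right
# M x M diagonal carries track_start_cost and the lower-left N x N diagonal carries
# track_end_cost (the alternative costs for leaving a point unlinked), while the
# lower-right block mirrors D's sparsity pattern at the minimum cost so the
# assignment problem stays square and feasible.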
def build_segment_cost_matrix(
gap_closing_dist_matrix: Union[coo_matrix_builder, Matrix],
splitting_dist_matrix: Union[coo_matrix_builder, Matrix],
merging_dist_matrix: Union[coo_matrix_builder, Matrix],
track_start_cost: Optional[Float],
track_end_cost: Optional[Float],
no_splitting_cost: Optional[Float],
no_merging_cost: Optional[Float],
alternative_cost_factor: Float = 1.05,
alternative_cost_percentile: Float = 90,
alternative_cost_percentile_interpolation: str = "lower",
) -> coo_matrix:
"""Build sparce array for segment-linking cost matrix.
Parameters
----------
gap_closing_dist_matrix : coo_matrix_builder or Matrix
The distance matrix for closing gaps between segment i and j.
splitting_dist_matrix : coo_matrix_builder or Matrix
The distance matrix for splitting between segment i and time/index j
merging_dist_matrix : coo_matrix_builder or Matrix
The distance matrix for merging between segment i and time/index j
track_start_cost : Float, optional
The cost for starting the track (b in Jaqaman et al 2008 NMeth)
track_end_cost : Float, optional
The cost for ending the track (d in Jaqaman et al 2008 NMeth)
no_splitting_cost : Float, optional
The cost to reject splitting (d' in Jaqaman et al 2008 NMeth)
no_merging_cost : Float, optional
The cost to reject merging (b' in Jaqaman et al 2008 NMeth)
alternative_cost_factor: Float
The factor to calculate the alternative costs, by default 1.05.
alternative_cost_percentile: Float
The percentile to calculate the alternative costs, by default 90.
alternative_cost_percentile_interpolation: str
The percentile interpolation to calculate the alternative costs,
by default "lower".
See `numpy.percentile` for allowed values.
Returns
-------
cost_matrix : Optional[coo_matrix]
the cost matrix for frame linking, None if not appropriate
"""
M = gap_closing_dist_matrix.shape[0]
assert gap_closing_dist_matrix.shape[1] == M
assert splitting_dist_matrix.shape[0] == M
assert merging_dist_matrix.shape[0] == M
N1 = splitting_dist_matrix.shape[1]
N2 = merging_dist_matrix.shape[1]
S = 2 * M + N1 + N2
C = coo_matrix_builder((S, S), dtype=np.float32)
C.append_matrix(gap_closing_dist_matrix)
C.append_matrix(splitting_dist_matrix.T, shift=(M, 0))
C.append_matrix(merging_dist_matrix, shift=(0, M))
upper_left_size = C.size()
if upper_left_size == 0:
return None
# Note:
# Though the way of assigning track_start_cost, track_end_cost, no_splitting_cost, no_merging_cost # noqa :
# and min_val is similar to that of TrackMate (link1, link2), GPL3 of TrackMate does not apply. (See link3 for license discussion.) # noqa :
# link1 https://github.com/fiji/TrackMate/blob/5a97426586b3c592c986c57aa1a09bab9d21419c/src/main/java/fiji/plugin/trackmate/tracking/sparselap/costmatrix/DefaultCostMatrixCreator.java#L186 # noqa :
# https://github.com/fiji/TrackMate/blob/5a97426586b3c592c986c57aa1a09bab9d21419c/src/main/java/fiji/plugin/trackmate/tracking/sparselap/costmatrix/JaqamanSegmentCostMatrixCreator.java # noqa:
# https://github.com/fiji/TrackMate/blob/5a97426586b3c592c986c57aa1a09bab9d21419c/src/main/java/fiji/plugin/trackmate/tracking/sparselap/SparseLAPSegmentTracker.java#L148 # noqa:
# link2 (default parameters for alternative_cost_percentile, alternative_cost_factor) # noqa :
# https://github.com/fiji/TrackMate/blob/5a97426586b3c592c986c57aa1a09bab9d21419c/src/main/java/fiji/plugin/trackmate/tracking/TrackerKeys.java # noqa :
# link3 https://forum.image.sc/t/linear-assignment-problem-based-tracking-package-in-python/57793 # noqa :
# https://web.archive.org/web/20210921134401/https://forum.image.sc/t/linear-assignment-problem-based-tracking-package-in-python/57793 # noqa :
if (
track_start_cost is None
or track_end_cost is None
or no_splitting_cost is None
or no_merging_cost is None
):
alternative_cost = (
np.percentile(
C.data,
alternative_cost_percentile,
interpolation=alternative_cost_percentile_interpolation,
)
* alternative_cost_factor
)
if track_start_cost is None:
track_start_cost = alternative_cost
if track_end_cost is None:
track_end_cost = alternative_cost
if no_splitting_cost is None:
no_splitting_cost = alternative_cost
if no_merging_cost is None:
no_merging_cost = alternative_cost
    C[np.arange(M + N1, 2 * M + N1)
"""
This module contains all the logic required to execute
sbelt simulations/runs. All functions/classes are designed
for internal use and may change without note.
A primary purpose of this module is to manipulate n-7 NumPy
arrays which represent multiple stream particles. In these
n-7 arrays, a _single_ particle is represented by a NumPy array
with 7 attributes::
[x_location, diam, y_location, UID, active state, age, loop]
In this general example, x_location and y_location define the centre point of
spherical particle whose diameter is defined by diam. See the project docs for
more information on the other attributes.
"""
import math
import random
import numpy as np
import logging
logging.getLogger(__name__)
class Subregion():
""" A subregion in the stream.
A Subregion is defined by left (upstream)
and right (downstream) boundaries. Each
subregion maintains a NumPy list which is
used to record the number of model particles that
pass the downstream boundary in a given iteration.
For example::
flux_list = [0,3,2,1]
Means that 0 crossings happened in the first iteration,
3 happened in the 2nd, and so on. The list has
length equal to the number of iterations for a model run.
Attributes:
name: Name of the subregion.
left_boundary: Location of the left boundary (float).
right_boundary: Location of the right boundary (float).
iterations: The number of iterations for the model run.
"""
def __init__(self, name, left_boundary, right_boundary, iterations):
self.name = name
self.left_boundary = left_boundary
self.right_boundary = right_boundary
self.flux_list = np.zeros(iterations, dtype=np.int64)
def leftBoundary(self):
"""Returns subregion's left boundary"""
return self.left_boundary
def rightBoundary(self):
"""Returns subregion's right boundary"""
return self.right_boundary
def getName(self):
"""Returns subregion's name"""
return self.name
def incrementFlux(self, iteration):
"""Increments flux list by 1.
Args:
iteration: The iteration/index to increment by 1
"""
self.flux_list[iteration] += 1
def getFluxList(self):
"""Returns subregion's flux list"""
return self.flux_list
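# Hedged usage sketch (added, illustrative): a subregion covering x in [0, 25] for
# a 10-iteration run, recording a single downstream crossing during iteration 3.
def _example_subregion():
    sub = Subregion('subregion-0', 0.0, 25.0, 10)
    sub.incrementFlux(3)
    return sub.getFluxList()  # -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]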
def get_event_particles(e_events, subregions, model_particles, level_limit, height_dependant=False):
""" Find and return list of particles to be entrained
Will loop through each subregion and select n = e_events
model particles (within a subregion boundaries) at random
to be entrained. No particle will be selected twice.
Args:
e_events: The number of events requested per subregion (int).
subregions: Python array of initialized Subregion objects.
model_particles: An n-7 NumPy array representing the stream's n
model particles.
Returns:
event_particles: A NumPy array of k uids representing the model particles
that have been selected for entrainment. For example::
[2.0, 5.0, 25.0]
Will represent that model particles with uids 2.0, 5.0 and
25.0 have been selected for entrainment.
"""
if e_events == 0:
e_events = 1 #???
event_particles = []
for subregion in subregions:
# Take only particles in the boundaries of the current subregion
subregion_particles = model_particles[
(model_particles[:,0] >= subregion.leftBoundary())
& (model_particles[:,0] <= subregion.rightBoundary())]
# Take only particles that are in-stream (not ghost)
in_stream_particles = subregion_particles[
subregion_particles[:,0] != -1]
# Take only particles that are 'active'
active_particles = in_stream_particles[
in_stream_particles[:,4] != 0]
# Do not take any particles that have been selected for entrainment (i.e do not double select)
# This only happens when particles rest on the boundary.
active_event, active_idx, event_idx = np.intersect1d(active_particles[:,3], event_particles, return_indices=True)
active_particles = np.delete(active_particles, active_idx, axis=0)
subregion_event_ids = []
if height_dependant: # any particle at the level limit must be entrained
levels = elevation_list(subregion_particles[:,2], desc=False)
tip_particles = []
# find the tip particles -- these are the particles being entrained
if len(levels) == level_limit:
tip_particles = active_particles[active_particles[:,2] == levels[level_limit-1]]
for particle in tip_particles:
subregion_event_ids.append(particle[3])
active_particles = active_particles[active_particles[:,2] != particle[2]]
# If there are not enough particles in the subregion to sample from, alter the sample size
if e_events > len(active_particles):
random_sample = random.sample(range(len(active_particles)),
len(active_particles))
else:
random_sample = random.sample(range(len(active_particles)),
e_events)
# TODO: change so that we don't rely on loop index to grab particle
for index in random_sample:
subregion_event_ids.append(int(active_particles[index][3]) )
ghost_particles = np.where(model_particles[:,0] == -1)[0]
for index in ghost_particles:
model_particles[index][0] = 0
subregion_event_ids.append(index)
if e_events != len(subregion_event_ids):
msg = (
f'Requested {e_events} events in {subregion.getName()} '
f'but {len(subregion_event_ids)} are occuring'
)
logging.info(msg)
event_particles = event_particles + subregion_event_ids
event_particles = np.array(event_particles, dtype=np.intp)
return event_particles
def define_subregions(bed_length, num_subregions, iterations):
""" Define subregion list for model stream.
Args:
bed_length: The length of the stream (int).
num_subregions: The number of subregions (int).
iterations: The number of iterations for the model run (int).
Returns:
subregions_arr: Python array of initialized Subregion objects.
"""
subregion_length = bed_length/num_subregions
left_boundary = 0.0
subregions_arr = []
for region in range(num_subregions):
right_boundary = left_boundary + subregion_length
subregion = Subregion(f'subregion-{region}', left_boundary, right_boundary, iterations)
left_boundary = right_boundary
subregions_arr.append(subregion)
return subregions_arr
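# Hedged usage sketch (added): a 100-unit bed split into 4 subregions of length 25
# for a 1000-iteration run; boundaries fall at 0, 25, 50, 75 and 100.
def _example_define_subregions():
    subregions = define_subregions(bed_length=100, num_subregions=4, iterations=1000)
    return [(s.getName(), s.leftBoundary(), s.rightBoundary()) for s in subregions]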
def build_streambed(bed_length, particle_diam):
""" Builds the array of bed particles.
Args:
bed_length: The length of the stream (int).
particle_diam: The diameter of all particles (float).
Returns:
bed_particles: An m-7 NumPy array representing the stream's m
bed particles. For example::
[[x1, diam, y, uid1, active, age, loops], ...
,[xM, diam, y, uidM, active, age, loops]]
Where all bed particles share the same diam (diam1=...=diamM) and
y (y1=...=yM), all uids are negative, and to represent 'static-ness'
active = 0, age = 0, and loops = 0.
"""
max_particles = int(math.ceil( bed_length / particle_diam ))
bed_particles = np.zeros([max_particles, 7],dtype=float)
particle_id = -1
centre = (particle_diam/2)
state = 0
age = 0
loop_age = 0
elevation = 0
while not bed_complete(centre, bed_length):
# index with negative indices... bed particles are built from the final element to the first
bed_particles[particle_id] = [centre, particle_diam, elevation, particle_id, state, age, loop_age]
centre += particle_diam
particle_id += -1 # Bed particles get negative IDs
return bed_particles
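# Hedged usage sketch (added): a 10-unit bed of 0.5-diameter particles produces 20
# bed particles with centres at 0.25, 0.75, ..., 9.75 and uids -1 through -20.
def _example_build_streambed():
    bed = build_streambed(bed_length=10.0, particle_diam=0.5)
    return bed.shape  # -> (20, 7)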
def bed_complete(centre, bed_length):
if centre >= bed_length:
return 1
else: return 0
def determine_num_particles(pack_frac, num_vertices):
"""Return the number of model particles to be created
based on the packing fraction"""
num_particles = num_vertices * pack_frac
num_particles = int(math.ceil(num_particles))
return num_particles
# Trig from: https://math.stackexchange.com/questions/2293201/
def place_particle(particle, model_particles, bed_particles, h):
""" Calculate new y (elevation) of particle based on it's
x (horizontal) location in stream.
Provided a particle p's location x in the stream,
search for 2 supporting particles (s1, s2) that p
will rest on when placed at x.
Calculate the y position of p based on the (x,y) of both
s1 and s2. The computed x for p might be different up to some
decimal point, so both x and y are rounded to 2 decimal places.
Args:
particle: NumPy array representing the model particle being placed.
model_particles: An n-7 NumPy array representing the stream's n model particles.
bed_particles: An m-7 NumPy array representing the stream's m bed particles.
Returns:
rounded_x: Rounded float of particle's new x location.
rounded_y: Rounded float of particle's new y location.
left_support: UID of the left support for the placed particle.
right_support: UID of the right support for the placed particle.
"""
left_support, right_support = find_supports(particle, model_particles, bed_particles)
rounded_x = round(particle[0], 2)
rounded_y = round(np.add(h, left_support[2]), 2)
return rounded_x, rounded_y, left_support[3], right_support[3]
def update_particle_states(model_particles, model_supports):
""" Set/update each model particle's state.
If any model particle p has a particle
resting on it in the stream then p must
be set to inactive indicated by a boolean 0.
If p does not have any particles resting
on top of it then it is considered active
indicated by a boolean 1.
Args:
model_particles: An n-7 NumPy array representing the stream's
n model particles.
model_supports: An n-2 NumPy array with the uids of the two
particles supporting each model particle.
bed_particles: An m-7 NumPy array representing the stream's m bed
particles.
Return values:
model_particles: The provided model_particles array (Args)
but with updated active (attribute 4) values.
"""
# Start by setting all model particles to active then
# only set to inactive if there is a particle sitting on top
model_particles[:,4] = 1
in_stream_particles = model_particles[model_particles[:,0] != -1]
inactive_left = np.intersect1d(in_stream_particles[:,3], model_supports[:,0])
inactive_right = np.intersect1d(in_stream_particles[:,3], model_supports[:,1])
if inactive_left.size != 0:
model_particles[inactive_left.astype(int), 4] = 0
if inactive_right.size != 0:
model_particles[inactive_right.astype(int), 4] = 0
return model_particles
def find_supports(particle, model_particles, bed_particles):
""" Find the 2 supporting particles for a given particle.
Provided a particle p at location x, this function
will search the stream for particles that p would
rest on, if being dropped at x. More generally, supporting particles
are those particles which directly hold up a particle. Supporting
particles will always have a centre location that is
exactly a radius length away from p's centre.
Args:
particle: 1-7 NumPy array representing a model particle.
model_particles: An n-7 NumPy array representing the stream's
n model particles.
bed_particles: An m-7 NumPy array representing the stream's m
bed particles.
Returns:
left_support: NumPy array representing the left supporting particle
right_support: NumPy array representing the right supporting particle
"""
all_particles = np.concatenate((model_particles, bed_particles), axis=0)
# Define location where left and right supporting particles must sit
# in order to be considered a supporting particle.
# Note: This limits the model to using same-sized grains.
left_center = particle[0] - (particle[1] / 2)
right_center = particle[0] + (particle[1] / 2)
l_candidates = all_particles[all_particles[:,0] == left_center]
try:
left_support = l_candidates[l_candidates[:,2]
== np.max(l_candidates[:,2])]
except ValueError:
error_msg = (
f'No left supporting particle at {left_center}'
f'for particle {particle[3]}'
)
logging.error(error_msg)
raise
r_candidates = all_particles[all_particles[:,0] == right_center]
try:
right_support = r_candidates[r_candidates[:,2] == np.max(r_candidates[:,2])]
except ValueError:
error_msg = (
f'No right supporting particle at {right_center}'
f'for particle {particle[3]}'
)
logging.error(error_msg)
raise
return left_support[0], right_support[0]
def set_model_particles(bed_particles, available_vertices, particle_diam, pack_fraction, h):
""" Create array of n model particles and set each particle in-stream.
Model particles are randomly placed at available vertex
locations (x,y) across the bed. Location and initial attribute
values are stored in the returned NumPy array.
Args:
bed_particles: An m-7 NumPy array representing the stream's m bed particles.
available_vertices: A NumPy array with all available vertices in the stream.
particle_diam: The diameter of all particles (float).
pack_fraction: Packing density value (float). See THEORY.md in project
repo for more information.
h: Geometric value used in calculations of particle placement (float). See
in-line and project documentation for further explanation.
Returns:
model_particles: An n-7 NumPy array representing the stream's n model particles and their
initial placement in the stream. For example::
[[x1, diam, y1, uid1, active, age, loops], ... ,[xN, diam, yN, uidN, active, age, loops]]
Where (xi, yi) pairs will define the centre location of each particle and no two particles
will have the same (xi, yi) pair values. All uids are unique and are positive whole numbers.
All particles will start with active = 1, age = 0, and loops = 0.
model_supp: An n-2 NumPy array with the uids of the two particles supporting each
model particle. For example::
[[[-1,-2]], ... ,[-3,-4]]
The above example states that model particle with uid 0 (model_supp[0]) is supported
by bed particles with uids -1 and -2. Similarly, the model particle with uid n
(model_supp[n]) is supported by bed particles with uids -3 and -4.
"""
num_placement_loc = np.size(available_vertices)
# determine the number of model particles that should be introduced into the stream bed
num_particles = determine_num_particles(pack_fraction, num_placement_loc)
# create an empty n-6 array to store model particle information
model_particles = np.zeros([num_particles, 7], dtype='float')
model_supp = np.zeros([num_particles, 2], dtype='float')
for particle in range(num_particles):
# the following lines select a vertex to place the current particle at,
# and ensure that it is not already occupied by another particle
random_idx = random.randint(0, np.size(available_vertices)-1)
vertex = available_vertices[random_idx]
available_vertices = available_vertices[available_vertices != vertex]
# intialize the particle information
model_particles[particle][0] = vertex
model_particles[particle][1] = particle_diam
model_particles[particle][3] = particle # id number for each particle
model_particles[particle][4] = 1 # each particle begins as active
# place particle at the chosen vertex
p_x, p_y, left_supp, right_supp = place_particle(model_particles[particle],
model_particles,
bed_particles,
h)
model_particles[particle][0] = p_x
model_particles[particle][2] = p_y
model_particles[particle][5] = 0
model_particles[particle][6] = 0
model_supp[particle][0] = left_supp
model_supp[particle][1] = right_supp
return model_particles, model_supp
def compute_available_vertices(model_particles, bed_particles, particle_diam, level_limit,
lifted_particles=None):
""" Compute the avaliable vertices in the model stream.
An available vertex is an x location that a
model particle is able to be entrained to.
This function identifies the distinct elevations
present in the stream then looks at subsets of
particles in decesnding order of their elevation,
in order to compute available vertices.
For each elevation group, if a particle is sitting on a vertex
x, then x cannot be available (it is occupied) and it
is considered nulled. Then vertices v created by two
particles touching are considered. If v is not already
nulled by a particle occupying the location at a higher
level, then it is considered an available vertex. This ends once the bed
particles (the lowest elevation) have been considered.
Args:
model_particles: An n-7 NumPy array representing the stream's
n model particles.
bed_particles: An m-7 NumPy array representing the stream's m
bed particles.
lifted_particles: UID of particles that are 'lifted'. Lifted
particles will not be considered as present in the stream
when the available vertices are being calculated; their
(x,y) location will not be considered as occupied.
Returns:
available_vertices: A NumPy array with all available vertices in the stream.
"""
nulled_vertices = []
avail_vertices = []
# If we are lifting particles, we need to consider the subset of particles
# that includes every particles _except_ the particles being
if lifted_particles is not None:
model_particles_lifted = np.delete(model_particles,
lifted_particles, 0)
all_particles = np.concatenate((model_particles_lifted,
bed_particles), axis=0)
else:
all_particles = np.concatenate((model_particles,
bed_particles), axis=0)
# Get unique model particle elevations in stream (descending)
elevations = elevation_list(all_particles[:,2])
for idx, elevation in enumerate(elevations):
tmp_particles = all_particles[all_particles[:,2] == elevation]
for particle in tmp_particles:
nulled_vertices.append(particle[0])
right_vertices = tmp_particles[:,0] + (particle_diam / 2)
left_vertices = tmp_particles[:,0] - (particle_diam / 2)
        tmp_shared_vertices = np.intersect1d(left_vertices, right_vertices)
"""
This module contains our thermodynamic calculations. Calculations of pressure, fugacity coefficient, and max density are handled by an Eos object so that these functions can be used with any EOS. The thermodynamics module contains a series of wrappers to handle the inputs and outputs of these functions.
"""
import numpy as np
from scipy import interpolate
import scipy.optimize as spo
from scipy.ndimage.filters import gaussian_filter1d
import copy
import logging
import despasito.utils.general_toolbox as gtb
from despasito import fundamental_constants as constants
logger = logging.getLogger(__name__)
def pressure_vs_volume_arrays(
T,
xi,
Eos,
min_density_fraction=(1.0 / 500000.0),
density_increment=5.0,
max_volume_increment=1.0e-4,
pressure_min=100,
maxiter=25,
multfactor=2,
extended_npts=20,
max_density=None,
density_max_opts={},
**kwargs
):
r"""
Output arrays with specific volume and pressure arrays calculated from the given EOS.
This function is fundamental to every calculation, the options of which are passed through higher level calculation with the keyword variable ``density_opts``.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
min_density_fraction : float, Optional, default=(1.0/500000.0)
Fraction of the maximum density used to calculate, and is equal to, the minimum density of the density array. The minimum density is the reciprocal of the maximum specific volume used to calculate the roots.
density_increment : float, Optional, default=5.0
The increment between density values in the density array.
max_volume_increment : float, Optional, default=1.0E-4
Maximum increment between specific volume array values. After conversion from density to specific volume, the increment values are compared to this value.
pressure_min : float, Optional, default=100
Ensure pressure curve reaches down to this value
multfactor : int, Optional, default=2
Multiplication factor to extend range
extended_npts : int, Optional, default=20
Number of points in extended range
maxiter : int, Optional, default=25
Number of times to multiply range by to obtain full pressure vs. specific volume curve
max_density : float, Optional, default=None
        [mol/:math:`m^3`] Maximum molar density. If None (the default), the Eos object's density_max method is used.
density_max_opts : dict, Optional, default={}
Keyword arguments for density_max method for EOS object
Returns
-------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_arrays' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.any(np.isnan(xi)):
raise ValueError("Given mole fractions are NaN")
if isinstance(xi, list):
xi = np.array(xi)
# estimate the maximum density based on the hard sphere packing fraction, part of EOS
if not max_density:
max_density = Eos.density_max(xi, T, **density_max_opts)
elif gtb.isiterable(max_density):
logger.error(
" Maxrho should be type float. Given value: {}".format(max_density)
)
max_density = max_density[0]
if max_density > 1e5:
raise ValueError(
"Max density of {} mol/m^3 is not feasible, check parameters.".format(
max_density
)
)
# min rho is a fraction of max rho, such that minrho << rhogassat
minrho = max_density * min_density_fraction
# list of densities for P,rho and P,v
if (max_density - minrho) < density_increment:
raise ValueError(
"Density range, {}, is less than increment, {}. Check parameters used in Eos.density_max().".format(
(max_density - minrho), density_increment
)
)
rholist = np.arange(minrho, max_density, density_increment)
# check rholist to see when the spacing
vspace = (1.0 / rholist[:-1]) - (1.0 / rholist[1:])
if np.amax(vspace) > max_volume_increment:
vspaceswitch = np.where(vspace > max_volume_increment)[0][-1]
rholist_2 = (
1.0
/ np.arange(
1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment
)[::-1]
)
rholist = np.append(rholist_2, rholist[vspaceswitch + 2 :])
# compute Pressures (Plist) for rholist
Plist = Eos.pressure(rholist, T, xi)
# Make sure enough of the pressure curve is obtained
for i in range(maxiter):
if Plist[0] > pressure_min:
rhotmp = np.linspace(rholist[0] / 2, rholist[0], extended_npts)[:-1]
Ptmp = Eos.pressure(rhotmp, T, xi)
Plist = np.append(Ptmp, Plist)
rholist = np.append(rhotmp, rholist)
else:
break
# Flip Plist and rholist arrays
Plist = Plist[:][::-1]
rholist = rholist[:][::-1]
vlist = 1.0 / rholist
return vlist, Plist
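# Hedged usage sketch (added): given an already-initialized despasito EOS object
# `Eos` for a pure component (construction not shown here), a typical call is
#
#   vlist, Plist = pressure_vs_volume_arrays(320.0, np.array([1.0]), Eos)
#   Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
#
# Below the critical point, the roots of the (optionally pressure-shifted) curve
# give candidate liquid and vapor specific volumes, and `extrema` marks the
# spinodal turning points used in calc_saturation_properties below.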
def pressure_vs_volume_spline(vlist, Plist):
r"""
Fit arrays of specific volume and pressure values to a cubic Univariate Spline.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Returns
-------
Pvspline : obj
Function object of pressure vs. specific volume
roots : list
List of specific volume roots. Subtract a system pressure from the output of Pvsrho to find density of vapor and/or liquid densities.
extrema : list
List of specific volume values corresponding to local minima and maxima.
"""
# Larger sigma value
Psmoothed = gaussian_filter1d(Plist, sigma=1.0e-2)
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed)
roots = Pvspline.roots().tolist()
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed, k=4)
extrema = Pvspline.derivative().roots().tolist()
if extrema:
if len(extrema) > 2:
extrema = extrema[0:2]
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if np.any(np.isnan(Plist)):
roots = [np.nan]
return Pvspline, roots, extrema
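# Hedged sketch (added) with synthetic data: a cubic with three sign changes mimics
# a sub-critical isotherm, so the spline is expected to report three roots (zero
# crossings) and two extrema (turning points).
def _example_pressure_vs_volume_spline():
    vlist = np.linspace(1.0e-4, 1.0e-2, 500)
    Plist = (vlist - 2.0e-3) * (vlist - 5.0e-3) * (vlist - 8.0e-3) * 1.0e12
    return pressure_vs_volume_spline(vlist, Plist)  # -> (spline, 3 roots, 2 extrema)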
def pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=[], **kwargs):
r"""
Plot pressure vs. specific volume.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Pvspline : obj
Function object of pressure vs. specific volume
markers : list, Optional, default=[]
List of plot markers used in plot
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_plot' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(vlist, Plist, label="Orig.")
plt.plot(vlist, Pvspline(vlist), label="Smoothed")
plt.plot([vlist[0], vlist[-1]], [0, 0], "k")
for k in range(len(markers)):
plt.plot([markers[k], markers[k]], [min(Plist), max(Plist)], "k")
plt.xlabel("Specific Volume [$m^3$/mol]"), plt.ylabel("Pressure [Pa]")
# plt.ylim(min(Plist)/2,np.abs(min(Plist))/2)
plt.legend(loc="best")
plt.tight_layout()
plt.show()
except Exception:
logger.error("Matplotlib package is not installed, could not plot")
def calc_saturation_properties(
T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs
):
r"""
Computes the saturated pressure, gas and liquid densities for a single component system.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
tol : float, Optional, default=1e-6
Tolerance to accept pressure value
Pconverged : float, Optional, default=1.0
        [Pa] Lower bound used in the saturation pressure search; if the isotherm minimum is negative (fluid under tension), the search starts from this value just above vacuum.
Returns
-------
Psat : float
[Pa] Saturation pressure given system information
rhov : float
[mol/:math:`m^3`] Density of vapor at saturation pressure
rhol : float
[mol/:math:`m^3`] Density of liquid at saturation pressure
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_saturation_properties' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.count_nonzero(xi) != 1:
if np.count_nonzero(xi > 0.1) != 1:
raise ValueError(
"Multiple components have compositions greater than 10%, check code for source"
)
else:
            ind = np.where(xi > 0.1)[0]
raise ValueError(
"Multiple components have compositions greater than 0. Do you mean to obtain the saturation pressure of {} with a mole fraction of {}?".format(
Eos.beads[ind], xi[ind]
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
if not extrema or len(extrema) < 2 or np.any(np.isnan(roots)):
logger.warning(" The component is above its critical point")
Psat, rhol, rhov = np.nan, np.nan, np.nan
else:
ind_Pmin1 = np.argwhere(np.diff(Plist) > 0)[0][0]
ind_Pmax1 = np.argmax(Plist[ind_Pmin1:]) + ind_Pmin1
Pmaxsearch = Plist[ind_Pmax1]
Pminsearch = max(Pconverged, np.amin(Plist[ind_Pmin1:ind_Pmax1]))
# Using computed Psat find the roots in the maxwell construction to give liquid (first root) and vapor (last root) densities
Psat = spo.minimize_scalar(
objective_saturation_pressure,
args=(Plist, vlist),
bounds=(Pminsearch, Pmaxsearch),
method="bounded",
)
Psat = Psat.x
obj_value = objective_saturation_pressure(Psat, Plist, vlist)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - Psat)
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if obj_value < tol:
logger.debug(
" Psat found: {} Pa, obj value: {}, with {} roots and {} extrema".format(
Psat, obj_value, np.size(roots), np.size(extrema)
)
)
if len(roots) == 2:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:] - Psat, 1)
vroot = -yroot / slope
if vroot < 0.0:
vroot = np.finfo(float).eps
rho_tmp = spo.minimize(
pressure_spline_error,
1.0 / vroot,
args=(Psat, T, xi, Eos),
bounds=[(1.0 / (vroot * 1e2), 1.0 / (1.1 * roots[-1]))],
)
roots = np.append(roots, [1.0 / rho_tmp.x])
rhol = 1.0 / roots[0]
rhov = 1.0 / roots[2]
else:
logger.warning(
" Psat NOT found: {} Pa, obj value: {}, consider decreasing 'pressure_min' option in density_opts".format(
Psat, obj_value
)
)
Psat, rhol, rhov = np.nan, np.nan, np.nan
tmpv, _, _ = calc_vapor_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
tmpl, _, _ = calc_liquid_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
logger.debug(" phiv: {}, phil: {}".format(tmpv, tmpl))
return Psat, rhol, rhov
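# --- Illustrative usage (sketch) -------------------------------------------------
# Hedged example of obtaining pure-component saturation properties with the
# function above. The Eos instance is an assumption here; in despasito it would
# come from the package's EOS factory.
def _example_saturation_point(Eos, T=320.0):
    xi = np.array([1.0])  # single component
    Psat, rhol, rhov = calc_saturation_properties(T, xi, Eos)
    return Psat, rhol, rhov  # Pa, mol/m^3, mol/m^3 (NaN above the critical point)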
def objective_saturation_pressure(shift, Pv, vlist):
r"""
Objective function used to calculate the saturation pressure.
Parameters
----------
shift : float
[Pa] Guess in Psat value used to translate the pressure vs. specific volume curve
Pv : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
vlist : numpy.ndarray
[mol/:math:`m^3`] Specific volume array. Length depends on values in density_opts passed to :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
obj_value : float
        Output of objective function: the square of the sum of the (positive) area between the first two roots and the (negative) area between the second and third roots.
"""
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Pv - shift)
if len(roots) >= 3:
a = Pvspline.integral(roots[0], roots[1])
b = Pvspline.integral(roots[1], roots[2])
elif len(roots) == 2:
a = Pvspline.integral(roots[0], roots[1])
# If the curve hasn't decayed to 0 yet, estimate the remaining area as a triangle. This isn't super accurate but we are just using the saturation pressure to get started.
slope, yroot = np.polyfit(vlist[-4:], Pv[-4:] - shift, 1)
b = (
Pvspline.integral(roots[1], vlist[-1])
+ (Pv[-1] - shift) * (-yroot / slope - vlist[-1]) / 2
)
# raise ValueError("Pressure curve only has two roots. If the curve hasn't fully decayed, either increase maximum specific volume or decrease 'pressure_min' in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`.")
elif np.any(np.isnan(roots)):
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing pressure."
)
else:
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing min_density_fraction"
)
# pressure_vs_volume_plot(vlist, Pv-shift, Pvspline, markers=extrema)
return (a + b) ** 2
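# --- Illustrative usage (sketch) -------------------------------------------------
# Sketch of the Maxwell (equal-area) construction using the objective above: the
# saturation pressure is the shift that makes the two areas cancel. The bounds
# shown are placeholders; calc_saturation_properties derives them from the
# isotherm's local extrema.
def _example_equal_area_shift(vlist, Plist, Pmin=1.0, Pmax=1.0e6):
    result = spo.minimize_scalar(
        objective_saturation_pressure,
        args=(Plist, vlist),
        bounds=(Pmin, Pmax),
        method="bounded",
    )
    return result.x  # candidate saturation pressure in Pa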
def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes vapor density under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
rhov : float
[mol/:math:`m^3`] Density of vapor at system pressure
flag : int
        A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_vapor_density' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Plist = Plist - P
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
logger.debug(" Find rhov: P {} Pa, roots {} m^3/mol".format(P, roots))
flag_NoOpt = False
l_roots = len(roots)
if np.any(np.isnan(roots)):
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
elif l_roots == 0:
if Pvspline(1 / vlist[-1]) < 0:
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vlist[0],
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x
if not len(extrema):
flag = 2
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
flag = 1
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
except Exception:
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
T, xi, Eos.density_max(xi, T, maxpack=0.99)
)
)
flag_NoOpt = True
elif min(Plist) + P > 0:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else:
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
flag = 3
rho_tmp = np.nan
elif l_roots == 1:
if not len(extrema):
flag = 2
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
elif len(extrema) > 1:
flag = 0
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
elif l_roots == 2:
if (Pvspline(roots[0]) + P) < 0.0:
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: This T and yi, {} {}, combination produces a liquid under tension at this pressure".format(
T, xi
)
)
else:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else: # 3 roots
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure.".format(
T, xi
)
)
rho_tmp = 1.0 / roots[2]
flag = 0
if flag in [0, 2]: # vapor or critical fluid
tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
if rho_tmp * 1.01 > Eos.density_max(xi, T, maxpack=0.99):
tmp[1] = Eos.density_max(xi, T, maxpack=0.99)
if (
pressure_spline_error(tmp[0], P, T, xi, Eos)
* pressure_spline_error(tmp[1], P, T, xi, Eos)
) < 0:
rho_tmp = spo.brentq(
pressure_spline_error,
tmp[0],
tmp[1],
args=(P, T, xi, Eos),
rtol=0.0000001,
)
else:
if Plist[0] < 0:
logger.warning(
" Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
tmp
)
)
elif not flag_NoOpt:
rho_tmp = spo.least_squares(
pressure_spline_error,
rho_tmp,
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x
logger.debug(" Vapor Density: {} mol/m^3, flag {}".format(rho_tmp, flag))
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
    # Flag: 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
return rho_tmp, flag
def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes liquid density under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
rhol : float
[mol/:math:`m^3`] Density of liquid at system pressure
flag : int
        A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_liquid_density' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
# Get roots and local minima and maxima
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Plist = Plist - P
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
logger.debug(" Find rhol: P {} Pa, roots {} m^3/mol".format(P, str(roots)))
flag_NoOpt = False
if extrema:
if len(extrema) == 1:
logger.warning(
" One extrema at {}, assume weird minima behavior. Check your parameters.".format(
1 / extrema[0]
)
)
# Assess roots, what is the liquid density
l_roots = len(roots)
if np.any(np.isnan(roots)):
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
elif l_roots == 0:
if Pvspline(1 / vlist[-1]):
try:
bounds = (1 / vlist[0], Eos.density_max(xi, T, maxpack=0.99))
rho_tmp = spo.least_squares(
pressure_spline_error,
np.mean(bounds),
args=(P, T, xi, Eos),
bounds=bounds,
)
rho_tmp = rho_tmp.x
if not len(extrema):
flag = 2
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
flag = 1
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
except Exception:
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
T, xi, Eos.density_max(xi, T, maxpack=0.99)
)
)
flag_NoOpt = True
elif min(Plist) + P > 0:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1.0 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else:
flag = 3
logger.error(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
str(T), str(xi)
)
)
rho_tmp = np.nan
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
elif l_roots == 2: # 2 roots
if (Pvspline(roots[0]) + P) < 0.0:
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: This T and xi, {} {}, combination produces a liquid under tension at this pressure".format(
T, xi
)
)
else: # There should be three roots, but the values of specific volume don't go far enough to pick up the last one
flag = 1
rho_tmp = 1.0 / roots[0]
elif l_roots == 1: # 1 root
if not len(extrema):
flag = 2
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
elif len(extrema) > 1:
flag = 0
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else: # 3 roots
rho_tmp = 1.0 / roots[0]
flag = 1
logger.debug(
" Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
if flag in [1, 2]: # liquid or critical fluid
tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
P_tmp = [
pressure_spline_error(tmp[0], P, T, xi, Eos),
pressure_spline_error(tmp[1], P, T, xi, Eos),
]
if (P_tmp[0] * P_tmp[1]) < 0:
rho_tmp = spo.brentq(
pressure_spline_error, tmp[0], tmp[1], args=(P, T, xi, Eos), rtol=1e-7
)
else:
if P_tmp[0] < 0:
logger.warning(
" Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
tmp
)
)
elif not flag_NoOpt:
rho_tmp = spo.least_squares(
pressure_spline_error,
rho_tmp,
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x[0]
logger.debug(" Liquid Density: {} mol/m^3, flag {}".format(rho_tmp, flag))
    # Flag: 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true
return rho_tmp, flag
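# --- Illustrative usage (sketch) -------------------------------------------------
# Hedged example of retrieving both phase densities at one state point and
# collecting the flags (0 vapor, 1 liquid, 2 critical, 3 no fluid, 4 ideal-gas
# fallback). The Eos object is assumed to follow the interface used above.
def _example_phase_densities(Eos, P=1.0e5, T=300.0):
    xi = np.array([1.0])
    rhov, flagv = calc_vapor_density(P, T, xi, Eos)
    rhol, flagl = calc_liquid_density(P, T, xi, Eos)
    return {"rhov": rhov, "flagv": flagv, "rhol": rhol, "flagl": flagl}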
def pressure_spline_error(rho, Pset, T, xi, Eos):
"""
Calculate difference between set point pressure and computed pressure for a given density.
Used to ensure an accurate value from the EOS rather than an estimate from a spline.
Parameters
----------
rho : float
[mol/:math:`m^3`] Density of system
Pset : float
[Pa] Guess in pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
Returns
-------
pressure_spline_error : float
[Pa] Difference in set pressure and predicted pressure given system conditions.
"""
Pguess = Eos.pressure(rho, T, xi)
return Pguess - Pset
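# --- Illustrative usage (sketch) -------------------------------------------------
# Sketch of the density-refinement pattern used above: bracket the guess and, if
# the residual changes sign, polish the root with brentq so the EOS pressure
# matches the set point. The 1% bracket mirrors calc_vapor_density and
# calc_liquid_density and is an assumption here.
def _example_refine_density(Eos, rho_guess, Pset, T, xi):
    lo, hi = 0.99 * rho_guess, 1.01 * rho_guess
    if pressure_spline_error(lo, Pset, T, xi, Eos) * pressure_spline_error(hi, Pset, T, xi, Eos) < 0:
        return spo.brentq(pressure_spline_error, lo, hi, args=(Pset, T, xi, Eos), rtol=1e-7)
    return rho_guess  # fall back to the unrefined guess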
def calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts={}, **kwargs):
r"""
Computes vapor fugacity coefficient under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
yi : numpy.ndarray
        Mole fraction of each component, sum(yi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
phiv : float
Fugacity coefficient of vapor at system pressure
rhov : float
[mol/:math:`m^3`] Density of vapor at system pressure
flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_vapor_fugacity_coefficient' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
rhov, flagv = calc_vapor_density(P, T, yi, Eos, density_opts)
if flagv == 4:
phiv = np.ones_like(yi)
rhov = 0.0
logger.info(" rhov set to 0.")
elif flagv == 3:
phiv = np.array([np.nan, np.nan])
else:
phiv = Eos.fugacity_coefficient(P, rhov, yi, T)
return phiv, rhov, flagv
def calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes liquid fugacity coefficient under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
phil : float
Fugacity coefficient of liquid at system pressure
rhol : float
[mol/:math:`m^3`] Density of liquid at system pressure
flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 means a critical fluid, 3 means that neither is true.
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_liquid_fugacity_coefficient' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
rhol, flagl = calc_liquid_density(P, T, xi, Eos, density_opts)
if flagl == 3:
        phil = np.array([np.nan, np.nan])
    else:
        # Assumed completion of the truncated source, mirroring
        # calc_vapor_fugacity_coefficient above: evaluate the fugacity
        # coefficient at the liquid density.
        phil = Eos.fugacity_coefficient(P, rhol, xi, T)
    return phil, rhol, flagl
#!/usr/bin/env python
import cPickle as pk
import numpy as np
import pylab as pl
with open("share/data/skymap.ew.50.p", "rb") as f:
globals().update(pk.load(f))
power = 0.5*np.trapz(flux, frequencies, axis=0)*1E+06
RL = 50.
kB = 1.38E-23
T = 297.
pl.figure()
pl.plot(lst, power, "k-")
pl.plot(lst, kB*T*50E+06*np.ones(lst.shape))
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback """
import pytest
import numpy as np
from mindspore import ms_function, context, Tensor
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_linspace():
"""
Feature: JIT Fallback
Description: Test numpy with linspace in graph mode.
Expectation: No exception.
"""
@ms_function
def np_linspace():
a = Tensor(np.linspace(1, 10, 10))
b = Tensor(np.linspace(1, 1, 10))
c = Tensor(np.linspace(10, 20, 5, endpoint=False))
d = Tensor(np.linspace(10, 20, 5, endpoint=True))
e = Tensor(np.linspace(1, 10, 10).reshape([10, 1]))
return a, b, c, d, e
a, b, c, d, e = np_linspace()
print("a:", a)
print("b:", b)
print("c:", c)
print("d:", d)
print("e:", e)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_1():
"""
Feature: JIT Fallback
Description: Test numpy with arange slice in graph mode.
Expectation: No exception.
"""
@ms_function
def np_arange_slice_1():
x = np.arange(10)
index = slice(2, 7, 2)
a = Tensor(x[index])
b = Tensor(x[2:7:2])
c = Tensor(x[5])
d = Tensor(x[2:])
e = Tensor(x[2:5])
return a, b, c, d, e
a, b, c, d, e = np_arange_slice_1()
assert np.all(a.asnumpy() == np.array([2, 4, 6]))
assert np.all(b.asnumpy() == np.array([2, 4, 6]))
assert np.all(c.asnumpy() == np.array([5]))
assert np.all(d.asnumpy() == np.array([2, 3, 4, 5, 6, 7, 8, 9]))
assert np.all(e.asnumpy() == np.array([2, 3, 4]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_arange_slice_2():
"""
Feature: JIT Fallback
Description: Test numpy with arange slice in graph mode.
Expectation: No exception.
"""
@ms_function
def np_arange_slice_2():
x = np.array([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
a = Tensor(x[1:])
b = Tensor(x[..., 1])
c = Tensor(x[1, ...])
d = Tensor(x[..., 1:])
return a, b, c, d
a, b, c, d = np_arange_slice_2()
assert np.all(a.asnumpy() == np.array([[3, 4, 5], [4, 5, 6]]))
assert np.all(b.asnumpy() == np.array([2, 4, 5]))
assert np.all(c.asnumpy() == np.array([3, 4, 5]))
assert np.all(d.asnumpy() == np.array([[2, 3], [4, 5], [5, 6]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_1():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_1():
x = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
a = Tensor(x[[0, 1, 2], [0, 1, 0]])
rows = np.array([[0, 0], [3, 3]])
cols = np.array([[0, 2], [0, 2]])
b = Tensor(x[rows, cols])
c = Tensor(x[1:3, 1:3])
d = Tensor(x[1:3, [1, 2]])
e = Tensor(x[..., 1:])
return a, b, c, d, e
a, b, c, d, e = np_array_advanced_index_1()
assert np.all(a.asnumpy() == np.array([0, 4, 6]))
assert np.all(b.asnumpy() == np.array([[0, 2], [9, 11]]))
assert np.all(c.asnumpy() == np.array([[4, 5], [7, 8]]))
assert np.all(d.asnumpy() == np.array([[4, 5], [7, 8]]))
assert np.all(e.asnumpy() == np.array([[1, 2], [4, 5], [7, 8], [10, 11]]))
# <class 'complex'> is not supported yet.
@pytest.mark.skip(reason='Graph fallback feature not supported yet')
def test_np_array_advanced_index_2():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_2():
x = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
y = np.array([np.nan, 1, 2, np.nan, 3, 4, 5])
z = np.array([1, 2 + 6j, 5, 3.5 + 5j])
a = Tensor(x[x > 5])
b = Tensor(y[~np.isnan(y)])
c = Tensor(z[np.iscomplex(z)])
return a, b, c
a, b, c = np_array_advanced_index_2()
assert np.all(a.asnumpy() == np.array([6, 7, 8, 9, 10, 11]))
assert np.all(b.asnumpy() == np.array([1., 2., 3., 4., 5.]))
assert np.all(c.asnumpy() == np.array([2. + 6.j, 3.5 + 5.j]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_array_advanced_index_3():
"""
Feature: JIT Fallback
Description: Test numpy with array advanced index in graph mode.
Expectation: No exception.
"""
@ms_function
def np_array_advanced_index_3():
x = np.arange(32).reshape((8, 4))
a = Tensor(x[[4, 2, 1, 7]])
y = np.arange(32).reshape((8, 4))
b = Tensor(y[[-4, -2, -1, -7]])
z = np.arange(32).reshape((8, 4))
c = Tensor(z[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
return a, b, c
a, b, c = np_array_advanced_index_3()
print("a:", a)
print("b:", b)
print("c:", c)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_reshape():
"""
Feature: JIT Fallback
Description: Test numpy.reshape() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_reshape():
x = np.arange(8)
y = x.reshape(2, 4)
return Tensor(y)
assert np.all(np_reshape().asnumpy() == np.array([[0, 1, 2, 3], [4, 5, 6, 7]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ndarray_flatten():
"""
Feature: JIT Fallback
Description: Test numpy.flatten() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_ndarray_flatten():
x = np.arange(8).reshape(2, 4)
y = x.flatten()
return Tensor(y)
assert np.all(np_ndarray_flatten().asnumpy() == np.array([0, 1, 2, 3, 4, 5, 6, 7]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_ravel():
"""
Feature: JIT Fallback
Description: Test numpy.ravel() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_ravel():
x = np.arange(8).reshape(2, 4)
y = x.ravel(order='F')
return Tensor(y)
assert np.all(np_ravel().asnumpy() == np.array([0, 4, 1, 5, 2, 6, 3, 7]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_transpose():
"""
Feature: JIT Fallback
Description: Test numpy.transpose() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_transpose():
x = np.arange(4).reshape(4, 1)
y = np.transpose(x)
return Tensor(y)
assert np.all(np_transpose().asnumpy() == np.array([0, 1, 2, 3]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_rollaxis():
"""
Feature: JIT Fallback
Description: Test numpy.rollaxis() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_rollaxis():
x = np.arange(8).reshape(2, 2, 2)
tensor_x = Tensor(x)
y = np.rollaxis(x, 2, 0)
tensor_y = Tensor(y)
return tensor_x[1, 1, 0], tensor_y[1, 1, 0]
x, y = np_rollaxis()
assert x == 6 and y == 5
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_swapaxes():
"""
Feature: JIT Fallback
Description: Test numpy.swapaxes() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_swapaxes():
x = np.arange(8).reshape(2, 2, 2)
tensor_x = Tensor(x)
y = np.swapaxes(x, 2, 0)
tensor_y = Tensor(y)
return tensor_x[1, 1, 0], tensor_y[1, 1, 0]
x, y = np_swapaxes()
assert x == 6 and y == 3
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast():
"""
Feature: JIT Fallback
Description: Test numpy.broadcast() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_broadcast():
x = np.array([[1], [2], [3]])
y = np.array([4, 5, 6])
z = np.broadcast(x, y)
return Tensor(z.shape)
assert np.all(np_broadcast().asnumpy() == np.array([3, 3]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_broadcast_to():
"""
Feature: JIT Fallback
Description: Test numpy.broadcast_to() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_broadcast_to():
x = np.arange(4).reshape(1, 4)
y = np.broadcast_to(x, (2, 4))
return Tensor(y)
assert np.all(np_broadcast_to().asnumpy() == np.array([[0, 1, 2, 3], [0, 1, 2, 3]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_expand_dims():
"""
Feature: JIT Fallback
Description: Test numpy.expand_dims() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_expand_dims():
x = np.array(([1, 2], [3, 4]))
y = np.expand_dims(x, axis=0)
return Tensor(y)
assert np.all(np_expand_dims().asnumpy() == np.array([[[1, 2], [3, 4]]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_squeeze():
"""
Feature: JIT Fallback
Description: Test numpy.squeeze() method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_squeeze():
x = np.arange(4).reshape(1, 2, 2)
y = np.squeeze(x)
return Tensor(y)
assert np.all(np_squeeze().asnumpy() == np.array([[0, 1], [2, 3]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_concat():
"""
Feature: JIT Fallback
Description: Test numpy method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_concat():
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
concatenate = np.concatenate((x, y))
stack = np.stack((x, y), 0)
hstack = np.hstack((x, y))
vstack = np.vstack((x, y))
return Tensor(concatenate), Tensor(stack), Tensor(hstack), Tensor(vstack)
out_concatenate, out_stack, out_hstack, out_vstack = np_concat()
assert np.all(out_concatenate.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
assert np.all(out_stack.asnumpy() == np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
assert np.all(out_hstack.asnumpy() == np.array([[1, 2, 5, 6], [3, 4, 7, 8]]))
assert np.all(out_vstack.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_split():
"""
Feature: JIT Fallback
Description: Test numpy split method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_split():
x = np.arange(4).reshape(2, 2)
split = np.split(x, 2)
hsplit = np.hsplit(x, 2)
vsplit = np.vsplit(x, 2)
return Tensor(split), Tensor(hsplit), Tensor(vsplit)
out_split, out_hsplit, out_vsplit = np_split()
assert np.all(out_split.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
assert np.all(out_hsplit.asnumpy() == np.array([[[0], [2]], [[1], [3]]]))
assert np.all(out_vsplit.asnumpy() == np.array([[[0, 1]], [[2, 3]]]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_element():
"""
Feature: JIT Fallback
Description: Test numpy method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_element():
resize = np.resize(np.array([[1, 2, 3], [4, 5, 6]]), (3, 2))
append = np.append(np.array([[1, 2, 3], [4, 5, 6]]), [[7, 8, 9]], axis=0)
insert = np.insert(np.array([[1, 2], [3, 4], [5, 6]]), 3, [7, 8], axis=0)
delete = np.delete(np.arange(6).reshape(2, 3), 0, axis=0)
unique = np.unique(np.array([5, 2, 6, 2, 7, 5, 6, 8, 2, 9]))
return Tensor(resize), Tensor(append), Tensor(insert), Tensor(delete), Tensor(unique)
out_resize, out_append, out_insert, out_delete, out_unique = np_element()
assert np.all(out_resize.asnumpy() == np.array([[1, 2], [3, 4], [5, 6]]))
assert np.all(out_append.asnumpy() == np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
assert np.all(out_insert.asnumpy() == np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
assert np.all(out_delete.asnumpy() == np.array([3, 4, 5]))
assert np.all(out_unique.asnumpy() == np.array([2, 5, 6, 7, 8, 9]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_bitwise():
"""
Feature: JIT Fallback
Description: Test numpy bitwise method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_bitwise():
bitwise_and = np.bitwise_and(13, 17)
bitwise_or = np.bitwise_or(13, 17)
invert = np.invert(np.array([13], dtype=np.uint8))
left_shift = np.left_shift(10, 2)
right_shift = np.right_shift(40, 2)
return Tensor(bitwise_and), Tensor(bitwise_or), Tensor(invert), Tensor(left_shift), Tensor(right_shift)
bitwise_and, bitwise_or, invert, left_shift, right_shift = np_bitwise()
assert bitwise_and.asnumpy() == 1
assert bitwise_or.asnumpy() == 29
assert np.all(invert.asnumpy() == np.array([242]))
assert left_shift.asnumpy() == 40
assert right_shift.asnumpy() == 10
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_char_1():
"""
Feature: JIT Fallback
Description: Test numpy char method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_char():
char_add = np.char.add(['MindSpore'], [' fallback'])
char_multiply = np.char.multiply('fallback ', 3)
char_center = np.char.center('fallback', 10, fillchar='*')
char_capitalize = np.char.capitalize('fallback')
char_title = np.char.title('fallback')
char_lower = np.char.lower('FALLBACK')
char_upper = np.char.upper('fallback')
return Tensor(char_add), Tensor(char_multiply), Tensor(char_center), Tensor(char_capitalize), \
Tensor(char_title), Tensor(char_lower), Tensor(char_upper)
char_add, char_multiply, char_center, char_capitalize, char_title, char_lower, char_upper = np_char()
assert char_add.asnumpy() == 'MindSpore fallback'
assert char_multiply.asnumpy() == 'fallback fallback fallback '
assert char_center.asnumpy() == '*fallback*'
assert char_capitalize.asnumpy() == 'Fallback'
assert char_title.asnumpy() == 'Fallback'
assert char_lower.asnumpy() == 'fallback'
assert char_upper.asnumpy() == 'FALLBACK'
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_char_2():
"""
Feature: JIT Fallback
Description: Test numpy char method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_char():
char_split = np.char.split('MindSpore fallback')
out_split = np.char.join(' ', char_split)
char_splitlines = np.char.splitlines('MindSpore\nfallback')
out_splitlines = np.char.join(',', char_splitlines)
out_strip = np.char.strip('abc acd', 'a')
out_replace = np.char.replace('faooback', 'oo', 'll')
char_encode = np.char.encode('runoob', 'cp500')
out_decode = np.char.decode(char_encode, 'cp500')
return Tensor(out_split), Tensor(out_splitlines), Tensor(out_strip), Tensor(out_replace), Tensor(out_decode)
char_split, char_splitlines, char_strip, char_replace, char_decode = np_char()
assert char_split.asnumpy() == 'MindSpore fallback'
assert char_splitlines.asnumpy() == 'MindSpore,fallback'
assert char_strip.asnumpy() == 'bc acd'
assert char_replace.asnumpy() == 'fallback'
assert char_decode.asnumpy() == 'runoob'
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_np_degree():
"""
Feature: JIT Fallback
Description: Test numpy method in graph mode.
Expectation: No exception.
"""
@ms_function
def np_degree():
out_sin = np.sin(30 * np.pi / 180)
out_arcsin = np.degrees(np.arcsin(out_sin))
        out_cos = np.cos(60 * np.pi / 180)
import numpy as np
from numerical_eqs.utils import SimTime
def basicStep ( x, t, vf, dt ):
'''Euler step, but using derivative at middle point of dt
'''
dt2 = dt/2.0
tmid = t + dt2
w = x + dt2 * vf( t, x )
return x + dt * vf( tmid, w )
def step ( x, t, vf, dt ):
'''Roughly estimate the future position and error of calculation
given position, callable derivative function
'''
xcourse = basicStep ( x, t, vf, dt )
xmid = basicStep ( x, t, vf, dt/2.0 )
xfine = basicStep ( xmid, t+dt/2.0, vf, dt/2.0 )
xnew = xfine * 4/3.0 - xcourse * 1/3.0
# Return our guess, and the estimated error
return (xnew, xfine - xcourse)
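# --- Illustrative usage (sketch, not part of the solver) --------------------------
# Minimal check of the midpoint/step-doubling helpers above on dy/dt = -y, whose
# exact solution is exp(-t). The step size is an arbitrary demonstration value.
def _example_single_step():
    vf = lambda t, x: -x  # derivative of exponential decay
    x0, t0, dt = np.array([1.0]), 0.0, 0.1
    x_new, err_est = step(x0, t0, vf, dt)
    return x_new, err_est  # x_new ~ exp(-0.1); err_est is the local error estimate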
def SDOLESolve(
y0,
func,
t1,
t0=0,
time_controls = {
'tol': 1.0e-3,
'agrow': 1.25,
'ashrink': 0.8,
'dtmin': 1e-6,
'dtmax': 100
},
dont_wrap_func = False,
explicit_times = {
'time points' : [],
'callback': lambda yt, t: yt
},
progress=True,
pbar = None,
include_t0=True,
):
    '''Solve the ODE system using step doubling and local extrapolation (SDOLE)
If you want to run some function on the current state at specific points in time, use the explicit_times to control this operation. The callback result will be used to replace yt at each time specified
Args:
y0: Initial value
func: callable which should evaluate to the derivative of y(t, yt)
t1: time value upon which to terminate
t0: start time, corresponding to y0 [default 0]
time_controls:
dict, controls the time stepping and error tolerances. See utils.SimTime for more info on these.
Defaults to {
'tol': 1.0e-3,
'agrow': 1.25,
'ashrink': 0.8,
'dtmin': 1e-6,
'dtmax': 100
}
dont_wrap_func:
This parameter will modify how func is treated. If set to True,
            func will be called like func(x, t, dt), and func should return
            (y_new, error), where y_new is the full state at time t+dt (not an
            increment). The returned error is used in the tolerance estimates.
explicit_times:
dict, used to evaluate a separate callable which can optionally modify the ODE value at certain times. dict should look like
{
'time points' : [ (t0+t1)/2, ... ],
'callback': (lambda yt, t: yt)
}
with 'time points' a list of points to evaluate callable on, and 'callback', a function which should return the new value of yt
progress: boolean, whether to load with TQDM bar
include_t0: Whether to add y0 and t0 to the lists of results. Defaults to true
pbar: not for public use, leave this as None unless you know what you're doing
'''
# Handle progress and possibly load in TQDM
if progress:
try:
from tqdm import tqdm
except:
raise ImportError('TQDM module not found. Try installing tqdm or use progress=False to disable the progress bar')
# Get our time controls in order
# This makes sure our function can handle either the recursive case or the base level
time_controls = {
**{ 'start_dt': time_controls.get('dtmin', 1e-6) },
**{
'tol': 1.0e-3,
'agrow': 1.25,
'ashrink': 0.8,
'dtmin': 1e-6,
'dtmax': 100
},
**time_controls,
}
# time_controls['start_dt'] = time_controls.get('start_dt', time_controls['dtmin'])
should_close_pbar = False
# Generate our progress bar, possibly
if progress and (pbar is None):
pbar = tqdm(total=t1-t0)
should_close_pbar = True
# We might have to aggregate this
known_times = sorted(set([t0, t1] + np.asarray(explicit_times.get('time points',[])).tolist()))
# print(known_times)
sol = None
# print(explicit_times)
# See exactly what we should be calling
callback = explicit_times.get( 'callback', (lambda ys, t: ys) )
new_start_dt = None
if len(known_times) > 2:
# print("Using recursive mode")
res = None
for i in range(1, len(known_times)):
tt0 = known_times[i-1]
tt1 = known_times[i]
y0 = y0 if res is None else res['ys'][-1,:]
# Recursively get the subinterval we're looking for
new_res = SDOLESolve(
y0 = y0,
func = func,
t1 = tt1,
t0 = tt0,
time_controls = {**time_controls, 'start_dt': new_start_dt},
explicit_times = {},
progress=progress,
pbar = pbar,
dont_wrap_func = dont_wrap_func,
include_t0 = ( include_t0 if i == 1 else False )
)
new_start_dt = new_res['rolling dt']
# Combine
if res is None:
res = new_res
else:
for k in ('steps accepted', 'steps rejected'):
res[k] += new_res[k]
for k in ('ys', 'time'):
res[k] = np.concatenate(
[
res[k],
new_res[k],
],
axis = 0
)
# Explicitly replace the value requested
res['ys'][-1,:] = callback(
res['ys'][-1,:], # yt
res['time'][-1], # t
)
# Go to the end, and this will be returned
sol = res
# We should actually compute the thing
# This is the base-level recursive step
else:
# print("Using base mode")
# Get all our ducks in a row before this thingy
res = [y0]
ts = [t0]
stepsAccepted = 0
stepsRejected = 0
# Get our time simulator in order
simtime = SimTime(
tol = time_controls['tol'],
agrow = time_controls['agrow'],
ashrink = time_controls['ashrink'],
dtmin = time_controls['dtmin'],
dtmax = time_controls['dtmax'],
tstart = t0,
tend = t1,
start_dt = time_controls['start_dt']
)
# We need a function that can evaluate
# the derivative & error given the
# stepsize, time, and current position
# This could possibly be user-supplied
if dont_wrap_func:
def dyFunc(x, t, dt):
return func (
x = x,
t = t,
dt = dt
)
else:
def dyFunc(x, t, dt):
return step (
x = x,
t = t,
vf = func,
dt = dt
)
while ts[-1] < t1:
yt = res[-1]
t = ts[-1]
# Evaluate our error using the established step function
yt, error = dyFunc(
x = yt,
t = t,
dt = simtime.dt
)
# Should we increment step?
if simtime.advance(error):
# Store back our info
res.append(yt)
# Move to the next time
ts.append(simtime.nextStep())
if progress:
# Update our progress bar
pbar.update(ts[-1] - t)
stepsAccepted += 1
# This step was rejected
else:
stepsRejected += 1
sol = {
'ys': np.asarray(res),
            'time': np.asarray(ts),
            # The remaining keys mirror how the recursive branch above reads this
            # dict; using the final step size as 'rolling dt' is an assumption.
            'steps accepted': stepsAccepted,
            'steps rejected': stepsRejected,
            'rolling dt': simtime.dt,
        }
    return sol
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
def loadDataSet():
dataMat = []
labelMat = []
fr = open('./data/testSet.txt', 'r')
lines = fr.readlines()
for line in lines:
strArr = line.strip().split()
dataMat.append([float(strArr[0]), float(strArr[1])])
labelMat.append(int(strArr[2]))
#dataMat = np.mat(dataMat)
#labelMat = np.mat(labelMat).reshape(np.shape(dataMat)[0], 1)
return np.array(dataMat), np.array(labelMat)
def f_Z(X, w, b):
Z = np.dot(X, w.T) + b
return Z
def f_A(Z): # activation function (sigmoid function)
    A = 1.0 / (1.0 + np.exp(-Z))
    return A
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Reliability calibration plugins."""
import operator
import warnings
import iris
import numpy as np
import scipy
from improver import BasePlugin, PostProcessingPlugin
from improver.calibration.utilities import (
check_forecast_consistency,
create_unified_frt_coord,
filter_non_matching_cubes,
)
from improver.metadata.probabilistic import (
find_threshold_coordinate,
probability_is_above_or_below,
)
from improver.metadata.utilities import generate_mandatory_attributes
from improver.utilities.cube_manipulation import MergeCubes, collapsed
class ConstructReliabilityCalibrationTables(BasePlugin):
"""A plugin for creating and populating reliability calibration tables."""
def __init__(
self,
n_probability_bins=5,
single_value_lower_limit=False,
single_value_upper_limit=False,
):
"""
Initialise class for creating reliability calibration tables. These
tables include data columns entitled observation_count,
sum_of_forecast_probabilities, and forecast_count, defined below.
        Args:
            n_probability_bins (int):
The total number of probability bins required in the reliability
tables. If single value limits are turned on, these are included in
this total.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
"""
self.single_value_tolerance = 1.0e-6
self.probability_bins = self._define_probability_bins(
n_probability_bins, single_value_lower_limit, single_value_upper_limit
)
self.table_columns = np.array(
["observation_count", "sum_of_forecast_probabilities", "forecast_count"]
)
self.expected_table_shape = (len(self.table_columns), n_probability_bins)
def __repr__(self):
"""Represent the configured plugin instance as a string."""
bin_values = ", ".join(
["[{:1.2f} --> {:1.2f}]".format(*item) for item in self.probability_bins]
)
result = "<ConstructReliabilityCalibrationTables: " "probability_bins: {}>"
return result.format(bin_values)
def _define_probability_bins(
self, n_probability_bins, single_value_lower_limit, single_value_upper_limit
):
"""
Define equally sized probability bins for use in a reliability table.
The range 0 to 1 is divided into ranges to give n_probability bins.
If single_value_lower_limit and / or single_value_upper_limit are True,
additional bins corresponding to values of 0 and / or 1 will be created,
each with a width defined by self.single_value_tolerance.
Args:
n_probability_bins (int):
The total number of probability bins desired in the
reliability tables. This number includes the extrema bins
(equals 0 and equals 1) if single value limits are turned on,
in which case the minimum number of bins is 3.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
Returns:
numpy.ndarray:
An array of 2-element arrays that contain the bounds of the
probability bins. These bounds are non-overlapping, with
adjacent bin boundaries spaced at the smallest representable
interval.
Raises:
ValueError: If trying to use both single_value_lower_limit and
single_value_upper_limit with 2 or fewer probability bins.
"""
if single_value_lower_limit and single_value_upper_limit:
if n_probability_bins <= 2:
msg = (
"Cannot use both single_value_lower_limit and "
"single_value_upper_limit with 2 or fewer "
"probability bins."
)
raise ValueError(msg)
n_probability_bins = n_probability_bins - 2
elif single_value_lower_limit or single_value_upper_limit:
n_probability_bins = n_probability_bins - 1
bin_lower = np.linspace(0, 1, n_probability_bins + 1, dtype=np.float32)
        bin_upper = np.nextafter(bin_lower, 0, dtype=np.float32)
"""
Contextual bandit environments to evaluate performance.
"""
import numpy as np
import os
class StationaryContextualBandit:
def __init__(self, dataset, seed, err_sigma = 0.05):
# Can also be used for real-world non-stationary problems
# as it doesn't shuffle the data.
self.random_state = np.random.RandomState(seed)
if os.path.isdir("rnlps/datasets/" + dataset):
self.X = np.load("rnlps/datasets/" + dataset + "/X.npy")
self.targets = np.load("rnlps/datasets/" + dataset + "/y.npy")
else :
raise Exception("Dataset does not exist. Check the path.")
self.n_arms = len(np.unique(self.targets))
self.step = 0
self.context_dims = np.shape(self.X)[-1]
self.err_sigma = err_sigma
def reset(self):
self.step = 0
return self.X[self.step]
def pull(self, arm):
if (arm >= self.n_arms) or (arm < 0):
raise Exception('Invalid arm.')
reward = 0.0
regret = 1.0
if arm == self.targets[self.step]:
reward = 1.0
regret = 0.0
assert (reward + regret) == 1
self.step += 1
context = self.X[self.step]
reward = reward + self.random_state.normal(0, self.err_sigma)
return reward, context, regret
def best_arms(self):
return [self.targets[self.step]]
def expected_cumulative_rewards(self, trial_length):
return np.cumsum(np.ones(trial_length))
def __repr__(self):
r = 'StationaryContextualBandit(n_arms={0}, X_dims={1})'
return r.format(self.n_arms, np.shape(self.X))
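# --- Illustrative usage (sketch) ---------------------------------------------------
# Hedged example of one interaction loop with the stationary bandit above. The
# dataset name is a placeholder; it must exist under rnlps/datasets/ with X.npy and
# y.npy, as assumed by the constructor.
def _example_bandit_rollout(dataset="example_dataset", seed=0, steps=10):
    bandit = StationaryContextualBandit(dataset, seed)
    context = bandit.reset()
    total_reward = 0.0
    for _ in range(steps):
        arm = 0  # a real agent would pick an arm based on `context`
        reward, context, regret = bandit.pull(arm)
        total_reward += reward
    return total_reward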
class FlippingContextualBandit:
def __init__(self, dataset, half_period, seed, err_sigma = 0.05):
self.random_state = np.random.RandomState(seed)
if os.path.isdir("rnlps/datasets/" + dataset):
self.X = np.load("rnlps/datasets/" + dataset + "/X.npy")
            self.targets = np.load("rnlps/datasets/" + dataset + "/y.npy")
from scipy.integrate import solve_bvp,solve_ivp
import numpy as np
__all__=["FermatEquationsEuclid","UniformFermatEquationsEuclid",
"FermatEquationsCurve","UniformFermatEquationsCurve","FermatEquations"]
class FermatEquations(object):
def __init__(self):
pass
def solve_ivp(self,a,b,y0,dy0,**kwargs):
"""Solve initial value problem for light rays.
Notes
-----
The solver can solve the path of an arbitrary number of light rays in one function call
however the format of the solutions has the y(x) positions stacked on top of the derivatives.
Parameters
----------
a : scalar
initial position for the solution of the ray's path.
b : scalar
final position for the solution of the ray's path.
y0 : array_like of shape (n,)
initial position of the ray's.
dy0 : array_like of shape (n,)
initial derivative (with respect to the independent variable) of the ray's trajectory.
kwargs : optional
additional arguments to pass into solver,
see scipy.integrate.solve_ivp for more details.
Returns
-------
Bunch object with the following fields defined:
t : ndarray, shape (n_points,)
Time points.
y : ndarray, shape (n, n_points)
Values of the solution at `t`.
sol : `OdeSolution` or None
Found solution as `OdeSolution` instance; None if `dense_output` was
set to False.
t_events : list of ndarray or None
Contains for each event type a list of arrays at which an event of
that type event was detected. None if `events` was None.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
status : int
Reason for algorithm termination:
* -1: Integration step failed.
* 0: The solver successfully reached the end of `tspan`.
* 1: A termination event occurred.
message : string
Human-readable description of the termination reason.
success : bool
True if the solver reached the interval end or a termination event
occurred (``status >= 0``).
"""
y0 = np.vstack((y0,dy0))
        self._yout = np.zeros_like(y0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 13:57:35 2020
@author: calebhallinan
"""
# Making the randnet package in R #
# importing numpy
import numpy as np
# importing random
import random
# importing powerlaw, must downlaod from github
import powerlaw
# importing math
import math
# import skicit
from sklearn.cluster import KMeans
# downloaded this from github
# pip install -e git+https://github.com/bwlewis/irlbpy.git#egg=irlb
# had to make a slight change on line 138 deleting the x
from irlb import irlb
# for norm function
from numpy import linalg as LA
# for roc curve
import sklearn.metrics as metrics
# to check speed of functions
import timeit
# to copy matrices
from copy import deepcopy
def BlockModelGen(lamb, n, beta = 0, K = 3, rho = 0, simple = True,
power = True, alpha = 5, degree_seed = None):
"""
Description
----------
Generates networks from degree corrected stochastic block model, with various options for node degree distribution
Arguments
----------
lambda : average node degree
n : size of network
beta : out-in ratio: the ratio of between-block edges over within-block edges
K : number of communities
w : not effective
Pi : a vector of community proportion
rho : proportion of small degrees within each community if the degrees are from
        two point mass distribution. rho > 0 gives a degree corrected block model. If
        rho > 0 and simple=True, then generate the degrees from a two point mass
        distribution, with a rho proportion of 0.2 values and a 1-rho proportion of
        1 for the degree parameters. If rho=0, generate from the SBM.
    simple : Indicator of whether two point mass degrees are used, if rho > 0.
        If rho=0, this is not effective
    power : Whether or not to use a power-law distribution for degrees. If False,
        generate theta from U(0.2, 1); if True, generate theta from a power law.
        Only effective if rho > 0 and simple=False.
    alpha : Shape parameter for the power-law distribution.
    degree_seed : Can be a vector of prespecified values for theta. The
        function will then sample with replacement from the vector to generate theta.
        It can be used to control the noise level between different configuration settings.
Returns
-------
A dictionary with:
(variable name)["A"] : the generated network adjacency matrix
(variable name)["g"] : community membership
(variable name)["P"] : probability matrix of the network
(variable name)["theta"] : node degree parameter
Author(s)
----------
<NAME>, <NAME>, <NAME>
"""
w = [1] * K
Pi = 1/K
P0 = np.diag(w)
if (beta > 0):
P0 = np.ones((K, K), dtype=np.int32)
diag_P0 = [w_element / beta for w_element in w]
np.fill_diagonal(P0, diag_P0)
Pi_vec = [[Pi] for i in range(K)]
P1 = lamb * P0
P2 = np.matmul((np.transpose(Pi_vec)), P0)
P3 = (n-1) * np.matmul(P2, Pi_vec) * (rho * 0.2 + (1-rho))**2
P = P1/P3
if (rho > 0) and (simple != True) and (power != True):
P1 = lamb * P0
P2 = np.matmul(((n-1) * np.transpose(Pi_vec)), P0)
P3 = np.matmul(P2, Pi_vec) * (0.6)**2
P = P1/P3
if (rho > 0) and (simple != True) and (power == True):
P1 = lamb * P0
P2 = np.matmul(((n-1) * np.transpose(Pi_vec)), P0)
P3 = np.matmul(P2, Pi_vec) * (1.285)**2
P = P1/P3
M = np.zeros((n, K), dtype=np.int32)
membership = random.choices(range(0, K), k = n, weights = [Pi]*K)
i = 0
while i < n:
M[i][membership[i]] = 1
i +=1
MP = np.matmul(M, P)
A_bar = np.matmul(MP, np.transpose(M))
node_degree = [1] * n
if rho > 0:
randunif = np.random.uniform(size = n)
if simple == True:
j = 0
while j < n:
if randunif[j] < rho:
node_degree[j] = 0.2
j += 1
else:
j += 1
else:
if power == False:
node_degree = np.random.uniform(size = n) * 0.8 + 0.2
else:
MM = math.ceil(n/300)
                if degree_seed is None:
degree_seed = powerlaw.Power_Law(xmin=1, parameters=[alpha]).generate_random(n)
node_degree = random.choices(degree_seed, k = n)
DD = np.diag(node_degree)
A_bar = np.matmul(DD, A_bar)
A_bar = np.matmul(A_bar, DD)
A_bar = A_bar * lamb/np.mean(np.sum(A_bar, axis = 0))
upper_index = np.triu_indices(n, k = 1)
upper_p = A_bar[upper_index]
upper_u = np.random.uniform(size = len(upper_p))
upper_A = np.where(upper_u < upper_p, 1, 0)
A = np.zeros((n, n), dtype=np.int32)
A[upper_index] = upper_A
A = A + np.transpose(A)
np.fill_diagonal(A, 0)
# return statement in dictionary form
dic = dict()
# generated network adjacency matrix
dic["A"] = A
# community membership
dic["g"] = membership
# probability matrix of the network
dic["P"] = A_bar
# node degree parameter
dic["theta"] = node_degree
return(dic)
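# Hedged usage sketch (not part of the original API): generate a small SBM network and
# inspect its outputs. Parameter values are illustrative assumptions only; assumes numpy
# (np) and the other libraries used above are already imported earlier in this file.
def _example_block_model_gen():
    sim = BlockModelGen(lamb=10, n=300, beta=0.2, K=3, rho=0)
    # sim["A"] is the adjacency matrix, sim["g"] the community labels,
    # sim["P"] the probability matrix and sim["theta"] the degree parameters
    return sim["A"].shape, len(sim["g"])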
def reg_SP(A, K, tau = 1, lap = False):
"""
Description
----------
Community detection by regularized spectral clustering
Arguments
----------
A : Adjacency Matrix
K : Number of Communities
    tau : regularization parameter. Default value is one. Typically set
          between 0 and 1. If tau=0, no regularization is applied.
    lap : indicator. If True, the Laplacian matrix is used for clustering. If False,
          the adjacency matrix will be used.
Returns
-------
A dictionary with:
(variable name)["cluster"] giving the cluster levels
(variable name)["loss"] giving the loss of KMeans algorithm
Author(s)
----------
<NAME>, <NAME>, <NAME>
"""
avg_d = np.mean(A.sum(axis = 0))
    A_tau = A + (tau*avg_d)/len(A)  # regularize by adding tau*avg_d/n to every entry, matching reg_SSP
if lap != True:
SVD = irlb(A_tau, K, maxit=1000)
else:
d_tau = A_tau.sum(axis = 0)
pre_L_tau = np.matmul(np.diag(1/np.sqrt(d_tau)), A_tau)
L_tau = np.matmul(pre_L_tau, np.diag(1/np.sqrt(d_tau)))
SVD = irlb(L_tau, K, maxit=1000)
dic = dict()
dic["cluster"] = KMeans(n_clusters=K, max_iter = 30).fit_predict(SVD["V"][:,0:K])
dic["loss"] = KMeans(n_clusters=K, max_iter = 30).fit(SVD["V"][:,0:K]).inertia_
return(dic)
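# Hedged usage sketch: regularized spectral clustering on a simulated network, using the
# BlockModelGen and reg_SP functions defined above. Parameter values are illustrative only.
def _example_reg_SP():
    A = BlockModelGen(lamb=15, n=300, beta=0.2, K=3)["A"]
    fit = reg_SP(A, K=3, tau=1, lap=True)
    return fit["cluster"], fit["loss"]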
def reg_SSP(A, K, tau = 1, lap = False):
"""
Description
----------
Community detection by regularized spherical spectral clustering
Arguments
----------
A : Adjacency Matrix
K : Number of Communities
    tau : regularization parameter. Default value is one. Typically set between
          0 and 1. If tau=0, no regularization is applied.
    lap : indicator. If True, the Laplacian matrix is used for clustering. If False,
          the adjacency matrix will be used.
Returns
-------
A dictionary with:
(variable name)["cluster"] giving the cluster levels
(variable name)["loss"] giving the loss of KMeans algorithm
Author(s)
----------
<NAME>, <NAME>, <NAME>
"""
# finding the average distance by taking mean of columns summed
avg_d = np.mean(A.sum(axis = 0))
A_tau = A + (tau*avg_d)/len(A)
if lap != True:
SVD = irlb(A_tau, K, maxit=1000)
V = SVD["V"][:,0:K]
V_norm = LA.norm(V, axis=1)
V_normalized = np.matmul(np.diag(1/V_norm), V)
else:
d_tau = A_tau.sum(axis = 0)
pre_L_tau = np.matmul(np.diag(1/np.sqrt(d_tau)), A_tau)
L_tau = np.matmul(pre_L_tau, np.diag(1/np.sqrt(d_tau)))
SVD = irlb(L_tau, K, maxit=1000)
V = SVD["V"][:,0:K]
V_norm = LA.norm(V, axis=1)
V_normalized = np.matmul(np.diag(1/V_norm), V)
dic = dict()
dic["cluster"] = KMeans(n_clusters=K, max_iter = 30).fit_predict(V_normalized)
dic["loss"] = KMeans(n_clusters=K, max_iter = 30).fit(V_normalized).inertia_
return(dic)
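# Hedged usage sketch: spherical spectral clustering, which is typically preferred when
# node degrees are heterogeneous (rho > 0 in BlockModelGen). Values are illustrative only.
def _example_reg_SSP():
    A = BlockModelGen(lamb=15, n=300, beta=0.2, K=3, rho=0.5, simple=True)["A"]
    return reg_SSP(A, K=3, tau=1)["cluster"]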
def SBM_estimate(A, g):
"""
Description
----------
Estimates SBM parameters given community labels
Arguments
----------
A : adjacency matrix
g : a vector of community labels
Returns
-------
A dictionary with:
(variable name)["B"] : estimated block connection probability matrix
(variable name)["Phat"] : estimated probability matrix
(variable name)["g"] : community labels
Author(s)
----------
<NAME>, <NAME>, <NAME>
"""
n = A.shape[0]
K = len(np.unique(g))
B = np.zeros((K,K))
M = np.zeros((n,K))
for i in range(K):
for j in range(i, K):
if i != j:
# in order to get each row and column needed, a bit of crazy
# indexing was needed - could possibly be better some other way
pre_mean = A[np.where(g == i),:][0]
B[i,j] = np.mean(pre_mean[:,np.where(g == j)])
B[j,i] = np.mean(pre_mean[:,np.where(g == j)])
else:
n_i = len(np.where(g==i)[0])
pre_sum = A[np.where(g == i),:][0]
B[i,i] = np.sum(pre_sum[:,np.where(g == i)])/(n_i**2 - n_i)
# maybe a faster way ?
for i in range(n):
M[i,g[i]] = 1
pP = np.matmul(M,B)
P = np.matmul(pP,M.T)
dic = {}
dic["B"] = B
dic["Phat"] = P
dic["g"] = g
return dic
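# Hedged usage sketch: estimate SBM parameters from spectral-clustering labels. The label
# vector is wrapped in np.array so that np.where(g == i) indexes the nodes as intended.
def _example_SBM_estimate():
    sim = BlockModelGen(lamb=15, n=300, beta=0.2, K=3)
    g = np.array(reg_SP(sim["A"], K=3)["cluster"])
    est = SBM_estimate(sim["A"], g)
    return est["B"], est["Phat"]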
def DCSBM_estimate(A,g):
"""
Description
----------
    Estimates DCSBM model parameters given community labels
Arguments
----------
A : adjacency matrix
g : vector of community labels for the nodes
Returns
-------
A dictionary with:
(variable name)["Phat"] : estimated probability matrix
(variable name)["B"] : the B matrix with block connection probability, up to a scaling constant
(variable name)["Psi"] : vector of of degree parameter theta, up to a scaling constant
Author(s)
----------
    <NAME>, <NAME>, <NAME>
"""
n = A.shape[0]
K = len(np.unique(g))
B = np.zeros((K,K))
Theta = np.zeros((n,K))
for i in range(K):
for j in range(i,K):
N_i = np.where(g==i)
psum = A[np.where(g == i),:][0]
B[i,j] = np.sum(psum[:,np.where(g == j)]) + .001
B[j,i] = np.sum(psum[:,np.where(g == j)]) + .001
Theta[N_i,i] = 1
Psi = np.sum(A, axis = 0)
B_rowSums = np.sum(B, axis = 1)
B_g = np.matmul(Theta, B_rowSums)
    Psi = (Psi/B_g).reshape(n,1)
tmp_mat = Theta * Psi
pP_hat = np.matmul(tmp_mat,B)
P_hat = np.matmul(pP_hat,tmp_mat.T)
dic = {}
dic["Phat"] = P_hat
dic["B"] = B
dic["Psi"] = Psi.T
dic["g"] = g
return dic
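# Hedged usage sketch: estimate DCSBM parameters from spherical spectral clustering labels.
# Parameter values are illustrative assumptions only.
def _example_DCSBM_estimate():
    sim = BlockModelGen(lamb=15, n=300, beta=0.2, K=3, rho=0.5, simple=True)
    g = np.array(reg_SSP(sim["A"], K=3)["cluster"])
    return DCSBM_estimate(sim["A"], g)["Phat"]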
def iter_SVD_core_fast_all(A, Kmax, tol = .00001, max_iter = 100, sparse = True,
init = None, verbose = False, tau = 0, p_sample = 1):
"""
This function is used in ECV_block function
"""
#?? make sparse?
#if sparse == True:
# possibly do this
# A =
n = A.shape[0]
cap = 1 #kappa*avg.p
A = np.where(np.isnan(A), 0, A)
A = A/p_sample
#svd.new <- svd(A,nu=K,nv=K)
#print("begin SVD")
svd_new = irlb(A, Kmax, maxit = max_iter) # might be a better SVD out there
result = dict()
for K in range(Kmax):
# print(K) # not sure why this is here
if K == 0:
A_new = svd_new["S"][0] * np.matmul(np.array(svd_new["U"][:,0]).reshape(n,1), np.array(svd_new["V"][:,0]).reshape(n,1).T)
else:
A_new = A_new + svd_new["S"][K] * np.matmul(np.array(svd_new["U"][:,K]).reshape(n,1), np.array(svd_new["V"][:,K]).reshape(n,1).T)
A_new_thr = A_new
A_new_thr = np.where(A_new < (0 + tau), 0 + tau, A_new_thr)
A_new_thr = np.where(A_new > cap, cap, A_new_thr)
tmp_SVD = dict()
tmp_SVD["u"] = svd_new["U"][:,range(K+1)]
tmp_SVD["v"] = svd_new["V"][:,range(K+1)]
tmp_SVD["d"] = svd_new["S"][range(K+1)]
result[K] = {"iter": None, "SVD": tmp_SVD, "A": A_new, "err_seq": None, "A_thr": A_new_thr}
return(result)
def ECV_block(A, max_K, cv = None, B = 3, holdout_p = 0.1, tau = 0,
kappa = None):
"""
Description
----------
    Model selection by ECV for SBM and DCSBM. It can be used to select between the two models, or, given one model (either SBM or DCSBM), to select K.
Arguments
----------
A : adjacency matrix
max_K : largest possible K for number of communities
    cv : cross-validation fold. The default value is None. We recommend using the argument B instead, which does independent sampling.
B : number of replications
holdout_p : testing set proportion
tau : constant for numerical stability only. Not useful for current version.
    dc_est : estimation method for DCSBM. By default (dc_est=2), maximum likelihood is used. If dc_est=1, the method of Chen and Lei (2016) is used, which is less stable according to our observation. (Not exposed as an argument in this implementation.)
kappa : constant for numerical stability only. Not useful for current version.
Returns
-------
A dictionary with:
(variable name)["impute_err"] : average validaiton imputation error
(variable name)["l2"] : average validation L_2 loss under SBM
(variable name)["dev"] : average validation binomial deviance loss under SBM
(variable name)["auc"] : average validation AUC
(variable name)["dc_l2"] : average validation L_2 loss under DCSBM
(variable name)["dc_dev"] : average validation binomial deviance loss under DCSBM
(variable name)["sse"] : average validation SSE
(variable name)["l2_model"] : selected model by L_2 loss
(variable name)["dev_model"] : selected model by binomial deviance loss
(variable name)["l2_mat"] : cross-validation loss matrix for B replications
(variable name)["dc_l2_mat"] : cross-validation loss matrix for B replications
Author(s)
----------
<NAME>, <NAME>, <NAME>
"""
n = A.shape[0]
edge_index = np.triu_indices(n, k = 1)
edge_n = len(edge_index[0])
holdout_index_list = list()
    if cv is None:
holdout_n = math.floor(holdout_p * edge_n)
for i in range(B):
holdout_index_list.append(random.sample(range(edge_n), k = holdout_n))
    else:
        sample_index = np.array(random.sample(range(edge_n), k = edge_n))
        max_fold_num = int(np.ceil(edge_n/cv))
        # assign each (randomly permuted) edge to one of the cv folds
        fold_index = np.repeat(range(cv), max_fold_num)[0:edge_n]
        cv_index = fold_index[sample_index]
        B = cv
        for i in range(B):
            holdout_index_list.append(np.where(cv_index == i)[0].tolist())
def holdout_evaluation_fast_all(holdout_index, A = A, max_K = max_K, tau = tau,
p_sample = 1, kappa = kappa):
n = A.shape[0]
edge_index = np.triu_indices(n, k = 1)
edge_n = len(edge_index[0])
A_new = np.zeros(n**2).reshape(n,n)
A_new[np.triu_indices_from(A_new,k = 1)] = A[edge_index]
# may be better way to index here
x = A_new[np.triu_indices_from(A_new,k = 1)]
for i in range(len(holdout_index)):
            x[holdout_index[i]] = np.nan  # mark held-out edges as missing
A_new[np.triu_indices_from(A_new,k = 1)] = x
A_new = A_new + A_new.T
degrees = np.nansum(A_new, axis = 1)
        no_edge = np.sum(degrees == 0)
Omega = np.isnan(A_new)
non_miss = ~np.isnan(A_new)
#A.new[non.miss] <- A.new[non.miss] + 0.5
SVD_result = iter_SVD_core_fast_all(A_new, max_K, p_sample = p_sample)
dc_block_sq_err = [0] * max_K
dc_loglike = [0] * max_K
roc_auc = [0] * max_K
bin_dev = [0] * max_K
block_sq_err = [0] * max_K
impute_sq_err = [0] * max_K
loglike = [0] * max_K
for k in range(max_K):
tmp_est = SVD_result[k]
A_approx = tmp_est["A_thr"]
impute_sq_err[k] = np.sum((A_approx[Omega] - A[Omega])**2)
response = list()
upper_A = A[np.triu_indices_from(A, k = 1)]
for i in range(len(holdout_index)):
response.append(upper_A[holdout_index[i]])
predictors = list()
upper_A_approx = A_approx[np.triu_indices_from(A_approx, k = 1)]
for i in range(len(holdout_index)):
predictors.append(upper_A_approx[holdout_index[i]])
print("AUC calculation")
fpr, tpr, threshold = metrics.roc_curve(response, predictors, pos_label=1)
roc_auc[k] = metrics.auc(fpr, tpr)
trunc_predictors = np.array(predictors) # changing to an array to compare values
trunc_predictors[np.array(predictors) > (1-1e-6)] = 1-1e-6
trunc_predictors[np.array(predictors) < (1e-6)] = 1e-6
bin_dev[k] = np.sum((np.array(response) - trunc_predictors)**2)
if k == 0:
pb = (np.nansum(A_new) + 1)/ (np.sum(~np.isnan(A_new)) - np.sum(~np.isnan(np.diag(A_new))) + 1)
if pb < 1e-6:
pb = 1e-6
if pb > 1-1e-6:
pb = 1-1e-6
A_Omega = A[Omega]
block_sq_err[k] = np.sum((pb - A_Omega)**2)
loglike[k] = -np.sum(A_Omega * np.log(pb)) - np.sum((1 - A_Omega) * np.log(1-pb))
print("SBM calculation")
start = timeit.default_timer()
if k == 0:
U_approx = np.array(tmp_est["SVD"]["v"]).reshape(len(tmp_est["SVD"]["v"]), k+1)
else:
U_approx = tmp_est["SVD"]["v"][:,range(k+1)]
if tau > 0:
A_approx = A_approx + (tau * np.mean(np.sum(A_approx, axis = 0))/n)
d_approx = np.sum(A_approx, axis = 0)
preL_approx = np.matmul(np.diag(1/np.sqrt(d_approx)), A_approx)
L_approx = np.matmul(preL_approx, np.diag(1/np.sqrt(d_approx)))
A_approx_svd = irlb(L_approx, k+1, maxit = 1000)
U_approx = A_approx_svd["V"][:,range(k+1)]
km = KMeans(n_clusters = k + 1, max_iter = 30).fit(U_approx)
B = np.zeros((k+1,k+1))
Theta = np.zeros((n, k+1))
for i in range(k+1):
for j in range(i, k+1):
N_i = np.where(km.labels_ == i)
N_j = np.where(km.labels_ == j)
if i != j:
B[i,j] = (np.nansum(A_new[N_i[0][:, None], N_j[0][None, :]]) + 1)/ (np.sum(~np.isnan(A_new[N_i[0][:, None], N_j[0][None, :]]))+1) # i believe this is the same indexing, but was having trouble figuring it out
B[j,i] = (np.nansum(A_new[N_i[0][:, None], N_j[0][None, :]]) + 1)/ (np.sum(~np.isnan(A_new[N_i[0][:, None], N_j[0][None, :]]))+1)
else:
B[i,j] = (np.nansum(A_new[N_i[0][:, None], N_j[0][None, :]]) + 1)/(np.sum(~np.isnan(A_new[N_i[0][:, None], N_j[0][None, :]])) - np.sum(~np.isnan(np.diag(A_new[N_i[0][:, None], N_j[0][None, :]])))+1)
B[j,i] = (np.nansum(A_new[N_i[0][:, None], N_j[0][None, :]]) + 1)/(np.sum(~np.isnan(A_new[N_i[0][:, None], N_j[0][None, :]])) - np.sum(~np.isnan(np.diag(A_new[N_i[0][:, None], N_j[0][None, :]])))+1)
Theta[N_i,i] = 1
preP_hat = np.matmul(Theta, B)
P_hat = np.matmul(preP_hat, Theta.T)
np.fill_diagonal(P_hat,0)
block_sq_err[k] = np.sum((P_hat[Omega]-A[Omega])**2)
P_hat_Omega = P_hat[Omega]
A_Omega = A[Omega]
P_hat_Omega[P_hat_Omega < 1e-6] = 1e-6
P_hat_Omega[P_hat_Omega > (1-1e-6)] = 1-1e-6
loglike[k] = -np.sum(A_Omega*np.log(P_hat_Omega)) - np.sum((1-A_Omega)* np.log(1-P_hat_Omega))
stop = timeit.default_timer()
print('Time: ', stop - start)
#### Degree correct model
V = U_approx
print("DCSBM calculation")
start = timeit.default_timer()
if k == 0:
V_norms = np.abs(V.reshape(1, n * (k+1))[0])
else:
def sq_sum(x):
return(np.sqrt(np.sum(x**2))) # made this to use the apply func
V_norms = np.apply_along_axis(sq_sum, 1, V)
iso_index = np.where(V_norms == 0)
Psi = V_norms
Psi = Psi / np.max(V_norms)
inv_V_norms = 1/V_norms
inv_V_norms[iso_index] = 1 # this should work but indexing may be different here
V_normalized = np.matmul(np.diag(inv_V_norms), V)
if k == 0:
B = np.nansum(A_new) + 0.01
partial_d = np.nansum(A_new, axis = 0)
partial_gd = B
phi = [0] * n
B_g = partial_gd
phi = partial_d/B_g
B = B/p_sample
P_hat = ((np.array([B] * (n*n)).reshape(n,n) * phi).T * phi).T
np.fill_diagonal(P_hat,0)
                dc_block_sq_err[k] = np.sum((P_hat[Omega] - A[Omega])**2)  # mirror the k > 0 branch
P_hat_Omega = P_hat[Omega]
A_Omega = A[Omega]
P_hat_Omega[P_hat_Omega < 1e-6] = 1e-6
P_hat_Omega[P_hat_Omega > (1-1e-6)] = 1-1e-6
dc_loglike[k] = -np.sum(A_Omega * np.log(P_hat_Omega)) - np.sum((1 - A_Omega)*np.log(1-P_hat_Omega))
else:
km = KMeans(n_clusters = k + 1, max_iter = 30).fit(V_normalized)
B = np.zeros((k+1,k+1))
Theta = np.zeros((n, k+1))
for i in range(k+1):
for j in range(k+1):
N_i = np.where(km.labels_ == i)
N_j = np.where(km.labels_ == j)
B[i,j] = (np.nansum(A_new[N_i[0][:, None], N_j[0][None, :]]) + 0.01)
Theta[N_i,i] = 1
partial_d = np.nansum(A_new, axis = 0)
partial_gd = np.sum(B, axis = 0)
phi = [0] * n
B_g = np.matmul(Theta, partial_gd)
phi = partial_d/B_g
B = B/p_sample
tmp_int_mat = Theta * phi[:, None]
preP_hat = np.matmul(tmp_int_mat, B)
P_hat = np.matmul(preP_hat, tmp_int_mat.T)
np.fill_diagonal(P_hat,0)
dc_block_sq_err[k] = np.sum((P_hat[Omega]-A[Omega])**2)
P_hat_Omega = P_hat[Omega]
A_Omega = A[Omega]
P_hat_Omega[P_hat_Omega < 1e-6] = 1e-6
P_hat_Omega[P_hat_Omega > (1-1e-6)] = 1-1e-6
dc_loglike[k] = -np.sum(A_Omega * np.log(P_hat_Omega)) - np.sum((1 - A_Omega)*np.log(1-P_hat_Omega))
stop = timeit.default_timer()
print('Time: ', stop - start)
dic = {}
dic["impute_sq_err"] = impute_sq_err
dic["block_sq_err"] = block_sq_err
dic["loglike"] = loglike
dic["roc_auc"] = roc_auc
dic["no_edge"] = no_edge
dic["dc_block_sq_err"] = dc_block_sq_err
dic["dc_loglike"] = dc_loglike
dic["bin_dev"] = bin_dev
return dic
def my_lapply(lst):
dic = {}
j = 0
for i in lst:
dic[j] = holdout_evaluation_fast_all(i, max_K = max_K, A = A, p_sample = 1 - holdout_p)
j += 1
return dic
result = my_lapply(holdout_index_list)
dc_block_err_mat = np.zeros((B, max_K))
dc_loglike_mat = np.zeros((B, max_K))
bin_dev_mat = np.zeros((B, max_K))
roc_auc_mat = np.zeros((B, max_K))
impute_err_mat = np.zeros((B, max_K))
block_err_mat = np.zeros((B, max_K))
loglike_mat = np.zeros((B, max_K))
no_edge_seq = [0] * B
for b in range(0,B):
impute_err_mat[b,] = result[b]["impute_sq_err"]
block_err_mat[b,] = result[b]["block_sq_err"]
loglike_mat[b,] = result[b]["loglike"]
roc_auc_mat[b,] = result[b]["roc_auc"]
bin_dev_mat[b,] = result[b]["bin_dev"]
no_edge_seq[b] = result[b]["no_edge"]
dc_block_err_mat[b,] = result[b]["dc_block_sq_err"]
dc_loglike_mat[b,] = result[b]["dc_loglike"]
output = {}
output["impute_err"] = np.mean(impute_err_mat,axis = 0)
output["l2"] = np.mean(block_err_mat, axis = 0)
output["dev"] = np.sum(loglike_mat, axis = 0)
output["auc"] = np.mean(roc_auc_mat, axis = 0)
output["dc_l2"] = np.mean(dc_block_err_mat, axis = 0)
output["dc_dev"] = np.sum(dc_loglike_mat, axis = 0)
output["sse"] = np.mean(impute_err_mat, axis = 0)
output["auc_mat"] = roc_auc_mat
output["dev_mat"] = loglike_mat
output["l2_mat"] = block_err_mat
output["SSE_mat"] = impute_err_mat
output["dc_dev_mat"] = dc_loglike_mat
output["dc_l2_mat"] = dc_block_err_mat
if np.min(output["dev"]) > np.min(output["dc_dev"]):
    # note: the reported index is 0-based, i.e. the selected K equals index + 1
dev_model = "DCSBM-" + str(list(output["dc_dev"]).index(min(list(output["dc_dev"]))))
else:
dev_model = "SBM-" + str(list(output["dev"]).index(min(list(output["dev"]))))
    if np.min(output["l2"]) > np.min(output["dc_l2"]):
        l2_model = "DCSBM-" + str(list(output["dc_l2"]).index(min(list(output["dc_l2"]))))
    else:
        l2_model = "SBM-" + str(list(output["l2"]).index(min(list(output["l2"]))))
    output["dev_model"] = dev_model
    output["l2_model"] = l2_model
    return output
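# Hedged usage sketch: ECV-based model/K selection on a simulated network. This runs B
# replications of holdout imputation and can be slow; parameter values are illustrative only.
def _example_ECV_block():
    A = BlockModelGen(lamb=15, n=300, beta=0.2, K=3)["A"]
    ecv = ECV_block(A, max_K=4, B=3)
    # selected labels are 0-based, e.g. "SBM-2" corresponds to K = 3
    return ecv["l2_model"], ecv["dev_model"]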
import numpy as np
import matplotlib.pyplot as plt
import itertools
import time
import os
from numpy.fft import fft, ifft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
from IPython import display
from scipy.ndimage import uniform_filter
from concurrent.futures import ProcessPoolExecutor
from .util import *
from .optics import *
from .background_estimator import *
def intensity_mapping(img_stack):
img_stack_out = np.zeros_like(img_stack)
img_stack_out[0] = img_stack[0].copy()
img_stack_out[1] = img_stack[4].copy()
img_stack_out[2] = img_stack[3].copy()
img_stack_out[3] = img_stack[1].copy()
img_stack_out[4] = img_stack[2].copy()
return img_stack_out
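# Hedged usage sketch: reorder a 5-channel polarization stack into the channel convention
# used downstream. The random stack below is a placeholder, not real data.
def _example_intensity_mapping():
    stack = np.random.rand(5, 16, 16)
    return intensity_mapping(stack)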
def instrument_matrix_and_source_calibration(I_cali_mean, handedness = 'RCP'):
_, N_cali = I_cali_mean.shape
# Source intensity
I_tot = np.sum(I_cali_mean,axis=0)
# Calibration matrix
theta = np.r_[0:N_cali]/N_cali*2*np.pi
C_matrix = np.array([np.ones((N_cali,)), np.cos(2*theta), np.sin(2*theta)])
# offset calibration
I_cali_norm = I_cali_mean/I_tot
offset_est = np.transpose(np.linalg.pinv(C_matrix.transpose()).dot(np.transpose(I_cali_norm[0,:])))
alpha = np.arctan2(-offset_est[2], offset_est[1])/2
# Source calibration
C_matrix_offset = np.array([np.ones((N_cali,)), np.cos(2*(theta+alpha)), np.sin(2*(theta+alpha))])
S_source = np.linalg.pinv(C_matrix_offset.transpose()).dot(I_tot[:,np.newaxis])
S_source_norm = S_source/S_source[0]
Ax = np.sqrt((S_source_norm[0]+S_source_norm[1])/2)
Ay = np.sqrt((S_source_norm[0]-S_source_norm[1])/2)
del_phi = np.arccos(S_source_norm[2]/2/Ax/Ay)
if handedness == 'RCP':
E_in = np.array([Ax, Ay*np.exp(1j*del_phi)])
elif handedness == 'LCP':
E_in = np.array([Ax, Ay*np.exp(-1j*del_phi)])
else:
raise TypeError("handedness type must be 'LCP' or 'RCP'")
# Instrument matrix calibration
A_matrix = np.transpose(np.linalg.pinv(C_matrix_offset.transpose()).dot(np.transpose(I_cali_norm)))
theta_fine = np.r_[0:360]/360*2*np.pi
C_matrix_offset_fine = np.array([np.ones((360,)), np.cos(2*(theta_fine+alpha)), np.sin(2*(theta_fine+alpha))])
print('Calibrated source field:\n' + str(np.round(E_in,4)))
print('Calibrated instrument matrix:\n' + str(np.round(A_matrix,4)))
fig,ax = plt.subplots(2,2,figsize=(20,20))
ax[0,0].plot(theta/np.pi*180,np.transpose(I_cali_mean))
ax[0,0].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[0,0].set_title('Calibration curve without normalization')
ax[0,0].set_xlabel('Orientation of LP (deg)')
ax[0,0].set_ylabel('Raw intensity')
ax[0,1].plot(theta/np.pi*180,I_tot)
ax[0,1].plot(theta_fine/np.pi*180,np.transpose(C_matrix_offset_fine).dot(S_source))
ax[0,1].legend(['Mean source intensity', 'Fitted source intensity'])
ax[0,1].set_title('Source calibration curve')
ax[0,1].set_xlabel('Orientation of LP (deg)')
ax[0,1].set_ylabel('Mean intensity from 4 linear channels')
ax[1,0].plot(theta/np.pi*180,np.transpose(I_cali_mean/I_tot))
ax[1,0].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[1,0].set_title('Normalized calibration curve')
ax[1,0].set_xlabel('Orientation of LP (deg)')
ax[1,0].set_ylabel('Normalized intensity')
ax[1,1].plot(theta/np.pi*180,np.transpose(I_cali_norm))
ax[1,1].plot(theta_fine/np.pi*180,np.transpose(A_matrix.dot(C_matrix_offset_fine)))
ax[1,1].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[1,1].set_xlabel('Orientation of LP (deg)')
ax[1,1].set_ylabel('Normalized intensity')
ax[1,1].set_title('Fitted calibration curves')
return E_in, A_matrix, np.transpose(A_matrix.dot(C_matrix_offset_fine))
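# Hedged usage sketch: calibrate the source and instrument matrix from synthetic calibration
# curves of four linear polarization channels. The cosine intensity model below is an
# illustrative assumption used only to exercise the function; real data come from a
# calibration scan of the linear polarizer.
def _example_source_calibration():
    theta = np.r_[0:36] / 36 * 2 * np.pi
    offsets = np.array([0, np.pi / 4, np.pi / 2, 3 * np.pi / 4])
    I_cali_mean = 1 + 0.5 * np.cos(2 * (theta[np.newaxis, :] - offsets[:, np.newaxis]))
    return instrument_matrix_and_source_calibration(I_cali_mean, handedness='RCP')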
def instrument_matrix_calibration(I_cali_norm, I_meas):
_, N_cali = I_cali_norm.shape
theta = np.r_[0:N_cali]/N_cali*2*np.pi
S_matrix = np.array([np.ones((N_cali,)), np.cos(2*theta), np.sin(2*theta)])
A_matrix = np.transpose(np.linalg.pinv(S_matrix.transpose()).dot(np.transpose(I_cali_norm)))
if I_meas.ndim == 3:
I_mean = np.mean(I_meas,axis=(1,2))
elif I_meas.ndim == 4:
I_mean = np.mean(I_meas,axis=(1,2,3))
I_tot = np.sum(I_mean)
A_matrix_S3 = I_mean/I_tot-A_matrix[:,0]
I_corr = (I_tot/4)*(A_matrix_S3)/np.mean(A_matrix[:,0])
print('Calibrated instrument matrix:\n' + str(np.round(A_matrix,4)))
print('Last column of instrument matrix:\n' + str(np.round(A_matrix_S3.reshape((4,1)),4)))
plt.plot(np.transpose(I_cali_norm))
plt.plot(np.transpose(A_matrix.dot(S_matrix)))
plt.xlabel('Orientation of LP (deg)')
plt.ylabel('Normalized intensity')
plt.title('Fitted calibration curves')
plt.legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
return A_matrix, I_corr
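# Hedged usage sketch: recover an instrument matrix from normalized calibration curves and
# a small synthetic intensity stack. Shapes and values are illustrative assumptions only.
def _example_instrument_matrix_calibration():
    theta = np.r_[0:36] / 36 * 2 * np.pi
    offsets = np.array([0, np.pi / 4, np.pi / 2, 3 * np.pi / 4])
    I_cali_norm = 0.25 + 0.125 * np.cos(2 * (theta[np.newaxis, :] - offsets[:, np.newaxis]))
    I_meas = np.ones((4, 32, 32))  # placeholder measured stack: one frame per channel
    return instrument_matrix_calibration(I_cali_norm, I_meas)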
class waveorder_microscopy:
'''
waveorder_microscopy contains methods to compute weak object transfer function
for label-free image reconstruction with various types of dataset:
1) 2D/3D phase reconstruction with a single brightfield defocused stack (Transport of intensity, TIE)
    2) 2D/3D phase reconstruction with intensities of asymmetric illumination
(differential phase contrast, DPC)
3) 2D/3D joint phase and polarization (2D orientation) reconstruction
with brightfield-illuminated polarization-sensitive intensities (QLIPP)
4) 2D/3D joint phase and polarization (uniaxial permittivity tensor) reconstruction
with asymmetrically-illuminated polarization-sensitive intensities (uPTI)
Parameters
----------
img_dim : tuple
shape of the computed 2D space with size of (N, M)
lambda_illu : float
wavelength of the incident light
ps : float
xy pixel size of the image space
psz : float
z step size of the image space
NA_obj : float
numerical aperture of the detection objective
NA_illu : float
numerical aperture of the illumination condenser
z_defocus : numpy.ndarray
1D array of defocused z position corresponds to the intensity stack
        (the values matter for 2D reconstruction; the direction of positive z matters for 3D reconstruction)
chi : float
swing of the illumination or detection polarization state (in radian)
n_media : float
refractive index of the immersing media
cali : bool
'True' for the orientation convention of QLIPP data,
'False' for the orientation convention of uPTI data
bg_option : str
'local' for estimating background with scipy uniform filter
'local_fit' for estimating background with polynomial fit
other string for normal background subtraction with the provided background
A_matrix : numpy.ndarray
self-provided instrument matrix converting polarization-sensitive intensity images into Stokes parameters
with shape of (N_channel, N_Stokes)
        If None is provided, the instrument matrix is determined by the QLIPP convention with swing specified by chi
QLIPP_birefringence_only : bool
'True' to skip pre-processing functions for phase/uPTI reconstruction
'False' to continue with pre-processing functions for phase/uPTI reconstruction
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
        '3D' for 3D diffractive reconstruction of 3D anisotropy
phase_deconv : str
string contains the phase reconstruction dimension
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
illu_mode : str
string to set the pattern of illumination source
'BF' for brightfield illumination with source pattern specified by NA_illu
        'PH' for phase contrast illumination with the source pattern specified by NA_illu and NA_illu_in
'Arbitrary' for self-defined source pattern of dimension (N_pattern, N, M)
    NA_illu_in : float
numerical aperture of the inner circle for phase contrast ring illumination
Source : numpy.ndarray
illumination source pattern with dimension of (N_pattern, N, M)
Source_PolState : numpy.ndarray
illumination polarization states (Ex, Ey) for each illumination pattern with dimension of (N_pattern, 2)
If provided with size of (2,), a single state is used for all illumination patterns
pad_z : int
number of z-layers to pad (reflection boundary condition) for 3D deconvolution
use_gpu : bool
option to use gpu or not
gpu_id : int
        number referring to which GPU will be used
'''
def __init__(self, img_dim, lambda_illu, ps, NA_obj, NA_illu, z_defocus, chi=None,\
n_media=1, cali=False, bg_option='global',
A_matrix=None, QLIPP_birefringence_only = False, bire_in_plane_deconv=None, inc_recon=None,
phase_deconv=None, ph_deconv_layer = 5,
illu_mode='BF', NA_illu_in=None, Source=None, Source_PolState=np.array([1, 1j]),
pad_z=0, use_gpu=False, gpu_id=0):
'''
initialize the system parameters for phase and orders microscopy
'''
t0 = time.time()
# GPU/CPU
self.use_gpu = use_gpu
self.gpu_id = gpu_id
if self.use_gpu:
globals()['cp'] = __import__("cupy")
cp.cuda.Device(self.gpu_id).use()
# Basic parameter
self.N, self.M = img_dim
self.n_media = n_media
self.lambda_illu = lambda_illu/n_media
self.ps = ps
self.z_defocus = z_defocus.copy()
if len(z_defocus) >= 2:
self.psz = np.abs(z_defocus[0] - z_defocus[1])
self.G_tensor_z_upsampling = np.ceil(self.psz/(self.lambda_illu/2))
self.pad_z = pad_z
self.NA_obj = NA_obj/n_media
self.NA_illu = NA_illu/n_media
self.N_defocus = len(z_defocus)
self.N_defocus_3D = self.N_defocus + 2*self.pad_z
self.chi = chi
self.cali = cali
self.bg_option = bg_option
self.phase_deconv = phase_deconv
if QLIPP_birefringence_only == False:
            # setup microscope variables
self.xx, self.yy, self.fxx, self.fyy = gen_coordinate((self.N, self.M), ps)
self.Pupil_obj = gen_Pupil(self.fxx, self.fyy, self.NA_obj, self.lambda_illu)
self.Pupil_support = self.Pupil_obj.copy()
# illumination setup
self.illumination_setup(illu_mode, NA_illu_in, Source, Source_PolState)
# Defocus kernel initialization
self.Hz_det_setup(self.phase_deconv, ph_deconv_layer, bire_in_plane_deconv, inc_recon)
# select either 2D or 3D model for phase deconvolution
self.phase_deconv_setup(self.phase_deconv)
# instrument matrix for polarization detection
self.instrument_matrix_setup(A_matrix)
# select either 2D or 3D model for 2D birefringence deconvolution
self.bire_in_plane_deconv_setup(bire_in_plane_deconv)
# inclination reconstruction model selection
self.inclination_recon_setup(inc_recon)
else:
# instrument matrix for polarization detection
self.instrument_matrix_setup(A_matrix)
############## constructor function group ##############
def illumination_setup(self, illu_mode, NA_illu_in, Source, Source_PolState):
'''
setup illumination source function for transfer function computing
Parameters
----------
illu_mode : str
string to set the pattern of illumination source
'BF' for brightfield illumination with source pattern specified by NA_illu
'PH' for phase contrast illumination with the source pattern specify by NA_illu and NA_illu_in
'Arbitrary' for self-defined source pattern of dimension (N_pattern, N, M)
        NA_illu_in : float
numerical aperture of the inner circle for phase contrast ring illumination
Source : numpy.ndarray
illumination source pattern with dimension of (N_pattern, N, M)
Source_PolState : numpy.ndarray
illumination polarization states (Ex, Ey) for each illumination pattern with dimension of (N_pattern, 2)
'''
if illu_mode == 'BF':
self.Source = gen_Pupil(self.fxx, self.fyy, self.NA_illu, self.lambda_illu)
self.N_pattern = 1
elif illu_mode == 'PH':
            if NA_illu_in is None:
                raise ValueError('No inner rim NA specified in the PH illumination mode')
else:
self.NA_illu_in = NA_illu_in/self.n_media
inner_pupil = gen_Pupil(self.fxx, self.fyy, self.NA_illu_in/self.n_media, self.lambda_illu)
self.Source = gen_Pupil(self.fxx, self.fyy, self.NA_illu, self.lambda_illu)
self.Source -= inner_pupil
Pupil_ring_out = gen_Pupil(self.fxx, self.fyy, self.NA_illu/self.n_media, self.lambda_illu)
Pupil_ring_in = gen_Pupil(self.fxx, self.fyy, self.NA_illu_in/self.n_media, self.lambda_illu)
self.Pupil_obj = self.Pupil_obj*np.exp((Pupil_ring_out-Pupil_ring_in)*(np.log(0.7)-1j*(np.pi/2 - 0.0*np.pi)))
self.N_pattern = 1
elif illu_mode == 'Arbitrary':
self.Source = Source.copy()
if Source.ndim == 2:
self.N_pattern = 1
else:
self.N_pattern = len(Source)
self.Source_PolState = np.zeros((self.N_pattern, 2), complex)
if Source_PolState.ndim == 1:
for i in range(self.N_pattern):
self.Source_PolState[i] = Source_PolState/(np.sum(np.abs(Source_PolState)**2))**(1/2)
else:
if len(Source_PolState) != self.N_pattern:
                    raise ValueError('The length of Source_PolState needs to be either 1 or the same as N_pattern')
for i in range(self.N_pattern):
self.Source_PolState[i] = Source_PolState[i]/(np.sum(np.abs(Source_PolState[i])**2))**(1/2)
def Hz_det_setup(self, phase_deconv, ph_deconv_layer, bire_in_plane_deconv, inc_recon):
'''
setup defocus kernels for deconvolution with the corresponding dimensions
Parameters
----------
phase_deconv : str
string contains the dimension of the phase reconstruction
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-geometric' for 2D non-diffractive reconstruction of 3D anisotropy
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
'3D' for 3D for diffractive reconstruction of 3D anisotropy
'''
if phase_deconv == '2D' or bire_in_plane_deconv == '2D' or inc_recon == '2D-vec-WOTF':
# generate defocus kernel based on Pupil function and z_defocus
self.Hz_det_2D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, self.z_defocus)
if phase_deconv == 'semi-3D':
self.ph_deconv_layer = ph_deconv_layer
if self.z_defocus[0] - self.z_defocus[1] >0:
z_deconv = -(np.r_[:self.ph_deconv_layer]-self.ph_deconv_layer//2)*self.psz
else:
z_deconv = (np.r_[:self.ph_deconv_layer]-self.ph_deconv_layer//2)*self.psz
self.Hz_det_semi_3D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z_deconv)
self.G_fun_z_semi_3D = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z_deconv)
if phase_deconv == '3D' or bire_in_plane_deconv == '3D' or inc_recon == '3D':
# generate defocus kernel and Green's function
if self.z_defocus[0] - self.z_defocus[1] >0:
z = -ifftshift((np.r_[0:self.N_defocus_3D]-self.N_defocus_3D//2)*self.psz)
else:
z = ifftshift((np.r_[0:self.N_defocus_3D]-self.N_defocus_3D//2)*self.psz)
self.Hz_det_3D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
self.G_fun_z_3D = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
def phase_deconv_setup(self, phase_deconv):
'''
setup transfer functions for phase deconvolution with the corresponding dimensions
Parameters
----------
phase_deconv : str
string contains the dimension of the phase reconstruction
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
'''
if phase_deconv == '2D':
# compute 2D phase transfer function
self.gen_WOTF()
elif phase_deconv == 'semi-3D':
self.gen_semi_3D_WOTF()
elif phase_deconv == '3D':
# compute 3D phase transfer function
self.gen_3D_WOTF()
def bire_in_plane_deconv_setup(self, bire_in_plane_deconv):
'''
setup transfer functions for 2D birefringence deconvolution with the corresponding dimensions
Parameters
----------
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
'''
if bire_in_plane_deconv == '2D':
# generate 2D vectorial transfer function for 2D birefringence deconvolution in 2D space
self.gen_2D_vec_WOTF(False)
elif bire_in_plane_deconv == '3D':
# generate 3D vectorial transfer function for 2D birefringence deconvolution in 3D space
self.gen_3D_vec_WOTF(False)
def inclination_recon_setup(self, inc_recon):
'''
setup transfer functions for uPTI reconstruction
Parameters
----------
phase_deconv : str
string contains the phase reconstruction dimension
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-geometric' for 2D non-diffractive reconstruction of 3D anisotropy
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
'3D' for 3D for diffractive reconstruction of 3D anisotropy
'''
if inc_recon is not None and inc_recon != '3D':
if inc_recon == '2D-geometric':
wave_vec_norm_x = self.lambda_illu*self.fxx
wave_vec_norm_y = self.lambda_illu*self.fyy
wave_vec_norm_z = (np.maximum(0,1 - wave_vec_norm_x**2 - wave_vec_norm_y**2))**(0.5)
incident_theta = np.arctan2((wave_vec_norm_x**2 + wave_vec_norm_y**2)**(0.5), wave_vec_norm_z)
incident_phi = np.arctan2(wave_vec_norm_y,wave_vec_norm_x)
self.geometric_inc_matrix, self.geometric_inc_matrix_inv = gen_geometric_inc_matrix(incident_theta, incident_phi, self.Source)
elif inc_recon == '2D-vec-WOTF':
# generate 2D vectorial transfer function for 2D uPTI
self.gen_2D_vec_WOTF(True)
# compute the AHA matrix for later 2D inversion
self.inc_AHA_2D_vec = np.zeros((7,7,self.N,self.M),complex)
for i,j,p in itertools.product(range(7), range(7), range(self.N_Stokes)):
self.inc_AHA_2D_vec[i,j] += np.sum(np.conj(self.H_dyadic_2D_OTF[p,i])*self.H_dyadic_2D_OTF[p,j],axis=2)
elif inc_recon == '3D':
# generate 3D vectorial transfer function for 3D uPTI
self.gen_3D_vec_WOTF(True)
self.inc_AHA_3D_vec = np.zeros((7,7,self.N,self.M,self.N_defocus_3D), dtype='complex64')
# compute the AHA matrix for later 3D inversion
for i,j,p in itertools.product(range(7), range(7), range(self.N_Stokes)):
self.inc_AHA_3D_vec[i,j] += np.sum(np.conj(self.H_dyadic_OTF[p,i])*self.H_dyadic_OTF[p,j],axis=0)
def instrument_matrix_setup(self, A_matrix):
'''
setup instrument matrix
Parameters
----------
A_matrix : numpy.ndarray
self-provided instrument matrix converting polarization-sensitive intensity images into Stokes parameters
with shape of (N_channel, N_Stokes)
            If None is provided, the instrument matrix is determined by the QLIPP convention with swing specified by chi
'''
if A_matrix is None:
self.N_channel = 5
self.N_Stokes = 4
self.A_matrix = 0.5*np.array([[1,0,0,-1], \
[1, np.sin(self.chi), 0, -np.cos(self.chi)], \
[1, 0, np.sin(self.chi), -np.cos(self.chi)], \
[1, -np.sin(self.chi), 0, -np.cos(self.chi)], \
[1, 0, -np.sin(self.chi), -np.cos(self.chi)]])
else:
self.N_channel = A_matrix.shape[0]
self.N_Stokes = A_matrix.shape[1]
self.A_matrix = A_matrix.copy()
    ############## constructor assisting function group ##############
def gen_WOTF(self):
'''
generate 2D phase transfer functions
'''
self.Hu = np.zeros((self.N, self.M, self.N_defocus*self.N_pattern),complex)
self.Hp = np.zeros((self.N, self.M, self.N_defocus*self.N_pattern),complex)
if self.N_pattern == 1:
for i in range(self.N_defocus):
self.Hu[:,:,i], self.Hp[:,:,i] = WOTF_2D_compute(self.Source, self.Pupil_obj * self.Hz_det_2D[:,:,i], \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
for i,j in itertools.product(range(self.N_defocus), range(self.N_pattern)):
idx = i*self.N_pattern+j
self.Hu[:,:,idx], self.Hp[:,:,idx] = WOTF_2D_compute(self.Source[j], self.Pupil_obj * self.Hz_det_2D[:,:,i], \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
def gen_semi_3D_WOTF(self):
'''
generate semi-3D phase transfer functions
'''
self.Hu = np.zeros((self.N, self.M, self.ph_deconv_layer*self.N_pattern),complex)
self.Hp = np.zeros((self.N, self.M, self.ph_deconv_layer*self.N_pattern),complex)
for i,j in itertools.product(range(self.ph_deconv_layer), range(self.N_pattern)):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[j].copy()
idx = i*self.N_pattern+j
self.Hu[:,:,idx], self.Hp[:,:,idx] = WOTF_semi_3D_compute(Source_current, Source_current, self.Pupil_obj, self.Hz_det_semi_3D[:,:,i], \
self.G_fun_z_semi_3D[:,:,i]*4*np.pi*1j/self.lambda_illu, \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
def gen_3D_WOTF(self):
'''
generate 3D phase transfer functions
'''
self.H_re = np.zeros((self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
self.H_im = np.zeros((self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
for i in range(self.N_pattern):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[i].copy()
self.H_re[i], self.H_im[i] = WOTF_3D_compute(Source_current.astype('float32'), Source_current.astype('float32'), self.Pupil_obj.astype('complex64'), \
self.Hz_det_3D.astype('complex64'), self.G_fun_z_3D.astype('complex64'), self.psz,\
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
self.H_re = np.squeeze(self.H_re)
self.H_im = np.squeeze(self.H_im)
def gen_2D_vec_WOTF(self, inc_option):
'''
generate 2D vectorial transfer functions for 2D QUTIPP
'''
if inc_option == True:
self.H_dyadic_2D_OTF = np.zeros((self.N_Stokes, 7, self.N, self.M, self.N_defocus*self.N_pattern),dtype='complex64')
else:
self.H_dyadic_2D_OTF_in_plane = np.zeros((2, 2, self.N, self.M, self.N_defocus*self.N_pattern),dtype='complex64')
# angle-dependent electric field components due to focusing effect
fr = (self.fxx**2 + self.fyy**2)**(0.5)
cos_factor = (1-(self.lambda_illu**2)*(fr**2)*self.Pupil_support)**(0.5)*self.Pupil_support
dc_idx = (fr==0)
nondc_idx = (fr!=0)
E_field_factor = np.zeros((5, self.N, self.M))
E_field_factor[0, nondc_idx] = ((self.fxx[nondc_idx]**2)*cos_factor[nondc_idx]+ self.fyy[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[0, dc_idx] = 1
E_field_factor[1, nondc_idx] = (self.fxx[nondc_idx]*self.fyy[nondc_idx] * (cos_factor[nondc_idx]-1)) / fr[nondc_idx]**2
E_field_factor[2, nondc_idx] = ((self.fyy[nondc_idx]**2)*cos_factor[nondc_idx] + self.fxx[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[2, dc_idx] = 1
E_field_factor[3, nondc_idx] = -self.lambda_illu*self.fxx[nondc_idx]
E_field_factor[4, nondc_idx] = -self.lambda_illu*self.fyy[nondc_idx]
# generate dyadic Green's tensor
G_fun_z = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, self.z_defocus)
G_tensor_z = gen_dyadic_Greens_tensor_z(self.fxx, self.fyy, G_fun_z, self.Pupil_support, self.lambda_illu)
# compute transfer functions
OTF_compute = lambda x, y, z, w: WOTF_semi_3D_compute(x, y, self.Pupil_obj, w, \
z, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i,j in itertools.product(range(self.N_defocus), range(self.N_pattern)):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[j].copy()
idx = i*self.N_pattern+j
# focusing electric field components
Ex_field = self.Source_PolState[j,0]*E_field_factor[0] + self.Source_PolState[j,1]*E_field_factor[1]
Ey_field = self.Source_PolState[j,0]*E_field_factor[1] + self.Source_PolState[j,1]*E_field_factor[2]
Ez_field = self.Source_PolState[j,0]*E_field_factor[3] + self.Source_PolState[j,1]*E_field_factor[4]
IF_ExEx = np.abs(Ex_field)**2
IF_ExEy = Ex_field * np.conj(Ey_field)
IF_ExEz = Ex_field * np.conj(Ez_field)
IF_EyEy = np.abs(Ey_field)**2
IF_EyEz = Ey_field * np.conj(Ez_field)
Source_norm = Source_current*(IF_ExEx + IF_EyEy)
# intermediate transfer functions
ExEx_Gxx_re, ExEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gxy_re, ExEy_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gyx_re, EyEx_Gyx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gyy_re, EyEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEx_Gxy_re, ExEx_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gxx_re, ExEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gyy_re, EyEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gyx_re, EyEy_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEx_Gyy_re, ExEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gxx_re, EyEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gxx_re, EyEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gyy_re, ExEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
if inc_option == True:
ExEz_Gxz_re, ExEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyz_re, EyEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEx_Gxz_re, ExEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gxx_re, ExEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i])
EyEx_Gyz_re, EyEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyx_re, EyEz_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i])
ExEy_Gxz_re, ExEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gxy_re, ExEz_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i])
EyEy_Gyz_re, EyEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyy_re, EyEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gyz_re, ExEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gxz_re, EyEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEx_Gxz_re, EyEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gxx_re, EyEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i])
ExEy_Gyz_re, ExEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gyy_re, ExEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i])
EyEy_Gxz_re, EyEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEx_Gyz_re, ExEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
# 2D vectorial transfer functions
self.H_dyadic_2D_OTF[0,0,:,:,idx] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re + EyEx_Gyx_re + EyEy_Gyy_re + EyEz_Gyz_re
self.H_dyadic_2D_OTF[0,1,:,:,idx] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im + EyEx_Gyx_im + EyEy_Gyy_im + EyEz_Gyz_im
self.H_dyadic_2D_OTF[0,2,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re + EyEx_Gyx_re - EyEy_Gyy_re
self.H_dyadic_2D_OTF[0,3,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re + EyEx_Gyy_re + EyEy_Gyx_re
self.H_dyadic_2D_OTF[0,4,:,:,idx] = ExEx_Gxz_re + ExEz_Gxx_re + EyEx_Gyz_re + EyEz_Gyx_re
self.H_dyadic_2D_OTF[0,5,:,:,idx] = ExEy_Gxz_re + ExEz_Gxy_re + EyEy_Gyz_re + EyEz_Gyy_re
self.H_dyadic_2D_OTF[0,6,:,:,idx] = ExEz_Gxz_re + EyEz_Gyz_re
self.H_dyadic_2D_OTF[1,0,:,:,idx] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re - EyEx_Gyx_re - EyEy_Gyy_re - EyEz_Gyz_re
self.H_dyadic_2D_OTF[1,1,:,:,idx] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im - EyEx_Gyx_im - EyEy_Gyy_im - EyEz_Gyz_im
self.H_dyadic_2D_OTF[1,2,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_2D_OTF[1,3,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF[1,4,:,:,idx] = ExEx_Gxz_re + ExEz_Gxx_re - EyEx_Gyz_re - EyEz_Gyx_re
self.H_dyadic_2D_OTF[1,5,:,:,idx] = ExEy_Gxz_re + ExEz_Gxy_re - EyEy_Gyz_re - EyEz_Gyy_re
self.H_dyadic_2D_OTF[1,6,:,:,idx] = ExEz_Gxz_re - EyEz_Gyz_re
self.H_dyadic_2D_OTF[2,0,:,:,idx] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re + EyEx_Gxx_re + EyEy_Gyx_re + EyEz_Gxz_re
self.H_dyadic_2D_OTF[2,1,:,:,idx] = ExEx_Gxy_im + ExEy_Gyy_im + ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_2D_OTF[2,2,:,:,idx] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF[2,3,:,:,idx] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
self.H_dyadic_2D_OTF[2,4,:,:,idx] = ExEx_Gyz_re + ExEz_Gxy_re + EyEx_Gxz_re + EyEz_Gxx_re
self.H_dyadic_2D_OTF[2,5,:,:,idx] = ExEy_Gyz_re + ExEz_Gyy_re + EyEy_Gxz_re + EyEz_Gyx_re
self.H_dyadic_2D_OTF[2,6,:,:,idx] = ExEz_Gyz_re + EyEz_Gxz_re
# transfer functions for S3
if self.N_Stokes == 4:
self.H_dyadic_2D_OTF[3,0,:,:,idx] = -ExEx_Gxy_im - ExEy_Gyy_im - ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_2D_OTF[3,1,:,:,idx] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re - EyEx_Gxx_re - EyEy_Gyx_re - EyEz_Gxz_re
self.H_dyadic_2D_OTF[3,2,:,:,idx] = -ExEx_Gxy_im + ExEy_Gyy_im + EyEx_Gxx_im - EyEy_Gyx_im
self.H_dyadic_2D_OTF[3,3,:,:,idx] = -ExEx_Gyy_im - ExEy_Gxy_im + EyEx_Gyx_im + EyEy_Gxx_im
self.H_dyadic_2D_OTF[3,4,:,:,idx] = -ExEx_Gyz_im - ExEz_Gxy_im + EyEx_Gxz_im + EyEz_Gxx_im
self.H_dyadic_2D_OTF[3,5,:,:,idx] = -ExEy_Gyz_im - ExEz_Gyy_im + EyEy_Gxz_im + EyEz_Gyx_im
self.H_dyadic_2D_OTF[3,6,:,:,idx] = -ExEz_Gyz_im + EyEz_Gxz_im
else:
self.H_dyadic_2D_OTF_in_plane[0,0,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_2D_OTF_in_plane[0,1,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF_in_plane[1,0,:,:,idx] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF_in_plane[1,1,:,:,idx] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
def gen_3D_vec_WOTF(self, inc_option):
'''
generate 3D vectorial transfer functions for 3D QUTIPP
'''
if inc_option == True:
self.H_dyadic_OTF = np.zeros((self.N_Stokes, 7, self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
else:
self.H_dyadic_OTF_in_plane = np.zeros((2, 2, self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
# angle-dependent electric field components due to focusing effect
fr = (self.fxx**2 + self.fyy**2)**(0.5)
cos_factor = (1-(self.lambda_illu**2)*(fr**2)*self.Pupil_support)**(0.5)*self.Pupil_support
dc_idx = (fr==0)
nondc_idx = (fr!=0)
E_field_factor = np.zeros((5, self.N, self.M))
E_field_factor[0, nondc_idx] = ((self.fxx[nondc_idx]**2)*cos_factor[nondc_idx]+ self.fyy[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[0, dc_idx] = 1
E_field_factor[1, nondc_idx] = (self.fxx[nondc_idx]*self.fyy[nondc_idx] * (cos_factor[nondc_idx]-1)) / fr[nondc_idx]**2
E_field_factor[2, nondc_idx] = ((self.fyy[nondc_idx]**2)*cos_factor[nondc_idx] + self.fxx[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[2, dc_idx] = 1
E_field_factor[3, nondc_idx] = -self.lambda_illu*self.fxx[nondc_idx]
E_field_factor[4, nondc_idx] = -self.lambda_illu*self.fyy[nondc_idx]
# generate dyadic Green's tensor
N_defocus = self.G_tensor_z_upsampling*self.N_defocus_3D
psz = self.psz/self.G_tensor_z_upsampling
if self.z_defocus[0] - self.z_defocus[1] >0:
z = -ifftshift((np.r_[0:N_defocus]-N_defocus//2)*psz)
else:
z = ifftshift((np.r_[0:N_defocus]-N_defocus//2)*psz)
G_fun_z = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
G_real = fftshift(ifft2(G_fun_z, axes=(0,1))/self.ps**2)
G_tensor = gen_dyadic_Greens_tensor(G_real, self.ps, psz, self.lambda_illu, space='Fourier')
        G_tensor_z = (ifft(G_tensor, axis=4)/psz)[...,::int(self.G_tensor_z_upsampling)]
# compute transfer functions
OTF_compute = lambda x, y, z: WOTF_3D_compute(x.astype('float32'), y.astype('complex64'),
self.Pupil_obj.astype('complex64'), self.Hz_det_3D.astype('complex64'), \
z.astype('complex64'), self.psz,\
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i in range(self.N_pattern):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[i].copy()
# focusing electric field components
Ex_field = self.Source_PolState[i,0]*E_field_factor[0] + self.Source_PolState[i,1]*E_field_factor[1]
Ey_field = self.Source_PolState[i,0]*E_field_factor[1] + self.Source_PolState[i,1]*E_field_factor[2]
Ez_field = self.Source_PolState[i,0]*E_field_factor[3] + self.Source_PolState[i,1]*E_field_factor[4]
IF_ExEx = np.abs(Ex_field)**2
IF_ExEy = Ex_field * np.conj(Ey_field)
IF_ExEz = Ex_field * np.conj(Ez_field)
IF_EyEy = np.abs(Ey_field)**2
IF_EyEz = Ey_field * np.conj(Ez_field)
Source_norm = Source_current*(IF_ExEx + IF_EyEy)
# intermediate transfer functions
ExEx_Gxx_re, ExEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,0]) #
ExEy_Gxy_re, ExEy_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,1]) #
EyEx_Gyx_re, EyEx_Gyx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,1]) #
EyEy_Gyy_re, EyEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,1]) #
ExEx_Gxy_re, ExEx_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,1]) #
ExEy_Gxx_re, ExEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,0]) #
EyEx_Gyy_re, EyEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,1]) #
EyEy_Gyx_re, EyEy_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,1]) #
ExEy_Gyy_re, ExEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,1]) #
EyEx_Gxx_re, EyEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,0]) #
ExEx_Gyy_re, ExEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,1]) #
EyEy_Gxx_re, EyEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,0]) #
if inc_option == True:
ExEz_Gxz_re, ExEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,2])
EyEz_Gyz_re, EyEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,2])
ExEx_Gxz_re, ExEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,2])
ExEz_Gxx_re, ExEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,0])
EyEx_Gyz_re, EyEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,2])
EyEz_Gyx_re, EyEz_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,1])
ExEy_Gxz_re, ExEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,2])
ExEz_Gxy_re, ExEz_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,1])
EyEy_Gyz_re, EyEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,2])
EyEz_Gyy_re, EyEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,1])
ExEz_Gyz_re, ExEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,2])
EyEz_Gxz_re, EyEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,2])
EyEx_Gxz_re, EyEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,2])
EyEz_Gxx_re, EyEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,0])
ExEy_Gyz_re, ExEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,2])
ExEz_Gyy_re, ExEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,1])
EyEy_Gxz_re, EyEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,2])
ExEx_Gyz_re, ExEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,2])
# 3D vectorial transfer functions
self.H_dyadic_OTF[0,0,i] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re + EyEx_Gyx_re + EyEy_Gyy_re + EyEz_Gyz_re
self.H_dyadic_OTF[0,1,i] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im + EyEx_Gyx_im + EyEy_Gyy_im + EyEz_Gyz_im
self.H_dyadic_OTF[0,2,i] = ExEx_Gxx_re - ExEy_Gxy_re + EyEx_Gyx_re - EyEy_Gyy_re
self.H_dyadic_OTF[0,3,i] = ExEx_Gxy_re + ExEy_Gxx_re + EyEx_Gyy_re + EyEy_Gyx_re
self.H_dyadic_OTF[0,4,i] = ExEx_Gxz_re + ExEz_Gxx_re + EyEx_Gyz_re + EyEz_Gyx_re
self.H_dyadic_OTF[0,5,i] = ExEy_Gxz_re + ExEz_Gxy_re + EyEy_Gyz_re + EyEz_Gyy_re
self.H_dyadic_OTF[0,6,i] = ExEz_Gxz_re + EyEz_Gyz_re
self.H_dyadic_OTF[1,0,i] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re - EyEx_Gyx_re - EyEy_Gyy_re - EyEz_Gyz_re
self.H_dyadic_OTF[1,1,i] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im - EyEx_Gyx_im - EyEy_Gyy_im - EyEz_Gyz_im
self.H_dyadic_OTF[1,2,i] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_OTF[1,3,i] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_OTF[1,4,i] = ExEx_Gxz_re + ExEz_Gxx_re - EyEx_Gyz_re - EyEz_Gyx_re
self.H_dyadic_OTF[1,5,i] = ExEy_Gxz_re + ExEz_Gxy_re - EyEy_Gyz_re - EyEz_Gyy_re
self.H_dyadic_OTF[1,6,i] = ExEz_Gxz_re - EyEz_Gyz_re
self.H_dyadic_OTF[2,0,i] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re + EyEx_Gxx_re + EyEy_Gyx_re + EyEz_Gxz_re
self.H_dyadic_OTF[2,1,i] = ExEx_Gxy_im + ExEy_Gyy_im + ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_OTF[2,2,i] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_OTF[2,3,i] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
self.H_dyadic_OTF[2,4,i] = ExEx_Gyz_re + ExEz_Gxy_re + EyEx_Gxz_re + EyEz_Gxx_re
self.H_dyadic_OTF[2,5,i] = ExEy_Gyz_re + ExEz_Gyy_re + EyEy_Gxz_re + EyEz_Gyx_re
self.H_dyadic_OTF[2,6,i] = ExEz_Gyz_re + EyEz_Gxz_re
# transfer functions for S3
if self.N_Stokes == 4:
self.H_dyadic_OTF[3,0,i] = -ExEx_Gxy_im - ExEy_Gyy_im - ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_OTF[3,1,i] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re - EyEx_Gxx_re - EyEy_Gyx_re - EyEz_Gxz_re
self.H_dyadic_OTF[3,2,i] = -ExEx_Gxy_im + ExEy_Gyy_im + EyEx_Gxx_im - EyEy_Gyx_im
self.H_dyadic_OTF[3,3,i] = -ExEx_Gyy_im - ExEy_Gxy_im + EyEx_Gyx_im + EyEy_Gxx_im
self.H_dyadic_OTF[3,4,i] = -ExEx_Gyz_im - ExEz_Gxy_im + EyEx_Gxz_im + EyEz_Gxx_im
self.H_dyadic_OTF[3,5,i] = -ExEy_Gyz_im - ExEz_Gyy_im + EyEy_Gxz_im + EyEz_Gyx_im
self.H_dyadic_OTF[3,6,i] = -ExEz_Gyz_im + EyEz_Gxz_im
else:
self.H_dyadic_OTF_in_plane[0,0,i] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_OTF_in_plane[0,1,i] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_OTF_in_plane[1,0,i] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_OTF_in_plane[1,1,i] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
############## polarization computing function group ##############
def Stokes_recon(self, I_meas):
'''
reconstruct Stokes parameters from polarization-sensitive intensity images
Parameters
----------
I_meas : numpy.ndarray
polarization-sensitive intensity images with the size of (N_channel, ...)
Returns
-------
S_image_recon : numpy.ndarray
reconstructed Stokes parameters with the size of (N_Stokes, ...)
'''
img_shape = I_meas.shape
A_pinv = np.linalg.pinv(self.A_matrix)
S_image_recon = np.reshape(np.dot(A_pinv, I_meas.reshape((self.N_channel, -1))), (self.N_Stokes,)+img_shape[1:])
return S_image_recon
def Stokes_transform(self, S_image_recon):
'''
transform Stokes parameters into normalized Stokes parameters
Parameters
----------
S_image_recon : numpy.ndarray
reconstructed Stokes parameters with the size of (N_Stokes, ...)
Returns
-------
S_transformed : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
'''
if self.use_gpu:
S_image_recon = cp.array(S_image_recon)
if self.N_Stokes == 4:
S_transformed = cp.zeros((5,)+S_image_recon.shape[1:])
elif self.N_Stokes == 3:
S_transformed = cp.zeros((3,)+S_image_recon.shape[1:])
else:
if self.N_Stokes == 4:
S_transformed = np.zeros((5,)+S_image_recon.shape[1:])
elif self.N_Stokes == 3:
S_transformed = np.zeros((3,)+S_image_recon.shape[1:])
S_transformed[0] = S_image_recon[0]
if self.N_Stokes == 4:
S_transformed[1] = S_image_recon[1] / S_image_recon[3]
S_transformed[2] = S_image_recon[2] / S_image_recon[3]
S_transformed[3] = S_image_recon[3]
S_transformed[4] = (S_image_recon[1]**2 + S_image_recon[2]**2 + S_image_recon[3]**2)**(1/2) / S_image_recon[0] # DoP
elif self.N_Stokes == 3:
S_transformed[1] = S_image_recon[1] / S_image_recon[0]
S_transformed[2] = S_image_recon[2] / S_image_recon[0]
if self.use_gpu:
S_transformed = cp.asnumpy(S_transformed)
return S_transformed
def Polscope_bg_correction(self, S_image_tm, S_bg_tm, kernel_size=400, poly_order=2):
'''
QLIPP background correction algorithm
Parameters
----------
S_image_tm : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
S_bg_tm : numpy.ndarray
normalized background Stokes parameters
kernel_size : int
size of smoothing window for background estimation in 'local' method
poly_order : int
order of polynomial fitting for background estimation in 'local_fit' method
Returns
-------
S_image_tm : numpy.ndarray
background corrected normalized Stokes parameters with the same size as the input Stokes parameters
'''
if self.use_gpu:
S_image_tm = cp.array(S_image_tm)
S_bg_tm = cp.array(S_bg_tm)
dim = S_image_tm.ndim
if dim == 3:
S_image_tm[0] /= S_bg_tm[0]
S_image_tm[1] -= S_bg_tm[1]
S_image_tm[2] -= S_bg_tm[2]
if self.N_Stokes == 4:
S_image_tm[4] /= S_bg_tm[4]
else:
S_image_tm[0] /= S_bg_tm[0,:,:,np.newaxis]
S_image_tm[1] -= S_bg_tm[1,:,:,np.newaxis]
S_image_tm[2] -= S_bg_tm[2,:,:,np.newaxis]
if self.N_Stokes == 4:
S_image_tm[4] /= S_bg_tm[4,:,:,np.newaxis]
if self.bg_option == 'local':
if dim == 3:
S_image_tm[1] -= uniform_filter_2D(S_image_tm[1], size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S_image_tm[2] -= uniform_filter_2D(S_image_tm[2], size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
if self.use_gpu:
S1_bg = uniform_filter_2D(cp.mean(S_image_tm[1],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S2_bg = uniform_filter_2D(cp.mean(S_image_tm[2],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
S1_bg = uniform_filter_2D(np.mean(S_image_tm[1],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S2_bg = uniform_filter_2D(np.mean(S_image_tm[2],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i in range(self.N_defocus):
S_image_tm[1,:,:,i] -= S1_bg
S_image_tm[2,:,:,i] -= S2_bg
elif self.bg_option == 'local_fit':
if self.use_gpu:
bg_estimator = BackgroundEstimator2D_GPU(gpu_id=self.gpu_id)
if dim != 3:
S1_bg = bg_estimator.get_background(cp.mean(S_image_tm[1],axis=-1), order=poly_order, normalize=False)
S2_bg = bg_estimator.get_background(cp.mean(S_image_tm[2],axis=-1), order=poly_order, normalize=False)
else:
bg_estimator = BackgroundEstimator2D()
if dim != 3:
S1_bg = bg_estimator.get_background(np.mean(S_image_tm[1],axis=-1), order=poly_order, normalize=False)
S2_bg = bg_estimator.get_background(np.mean(S_image_tm[2],axis=-1), order=poly_order, normalize=False)
if dim ==3:
S_image_tm[1] -= bg_estimator.get_background(S_image_tm[1], order=poly_order, normalize=False)
S_image_tm[2] -= bg_estimator.get_background(S_image_tm[2], order=poly_order, normalize=False)
else:
for i in range(self.N_defocus):
S_image_tm[1,:,:,i] -= S1_bg
S_image_tm[2,:,:,i] -= S2_bg
if self.use_gpu:
S_image_tm = cp.asnumpy(S_image_tm)
return S_image_tm
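# Note: background correction normalizes S0 (and DoP, when available) by the background and
# subtracts the background S1/S2; the 'local' and 'local_fit' options additionally remove a
# residual, slowly varying background estimated either with a large uniform filter or with a
# 2D polynomial surface fit. Illustrative sketch of the 'local_fit' idea (helper class as used above):
#
#     bg_estimator = BackgroundEstimator2D()
#     S1_residual_bg = bg_estimator.get_background(S1_mean_image, order=2, normalize=False)
#     S1_corrected = S1_mean_image - S1_residual_bg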
def Polarization_recon(self, S_image_recon):
'''
reconstruction of polarization-related physical properties in QLIPP
Parameters
----------
S_image_recon : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
Returns
-------
Recon_para : numpy.ndarray
reconstructed polarization-related physical properties
channel 0 is retardance
channel 1 is in-plane orientation
channel 2 is brightfield
channel 3 is degree of polarization
'''
if self.use_gpu:
S_image_recon = cp.array(S_image_recon)
Recon_para = cp.zeros((self.N_Stokes,)+S_image_recon.shape[1:])
else:
Recon_para = np.zeros((self.N_Stokes,)+S_image_recon.shape[1:])
if self.use_gpu:
if self.N_Stokes == 4:
ret_wrapped = cp.arctan2((S_image_recon[1]**2 + S_image_recon[2]**2)**(1/2) * \
S_image_recon[3], S_image_recon[3]) # retardance
elif self.N_Stokes == 3:
ret_wrapped = cp.arcsin(cp.minimum((S_image_recon[1]**2 + S_image_recon[2]**2)**(0.5),1))
if self.cali == True:
sa_wrapped = 0.5*cp.arctan2(-S_image_recon[1], -S_image_recon[2]) % np.pi # slow-axis
else:
sa_wrapped = 0.5*cp.arctan2(-S_image_recon[1], S_image_recon[2]) % np.pi # slow-axis
else:
if self.N_Stokes == 4:
ret_wrapped = np.arctan2((S_image_recon[1]**2 + S_image_recon[2]**2)**(1/2) * \
S_image_recon[3], S_image_recon[3]) # retardance
elif self.N_Stokes == 3:
ret_wrapped = np.arcsin(np.minimum((S_image_recon[1]**2 + S_image_recon[2]**2)**(0.5),1))
if self.cali == True:
sa_wrapped = 0.5*np.arctan2(-S_image_recon[1], -S_image_recon[2]) % np.pi # slow-axis
else:
sa_wrapped = 0.5*np.arctan2(-S_image_recon[1], S_image_recon[2]) % np.pi # slow-axis
sa_wrapped[ret_wrapped<0] += np.pi/2
ret_wrapped[ret_wrapped<0] += np.pi
Recon_para[0] = ret_wrapped.copy()
Recon_para[1] = sa_wrapped%np.pi
Recon_para[2] = S_image_recon[0] # transmittance
if self.N_Stokes == 4:
Recon_para[3] = S_image_recon[4] # DoP
if self.use_gpu:
Recon_para = cp.asnumpy(Recon_para)
return Recon_para
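# Note: Polarization_recon maps normalized Stokes parameters to physical quantities,
# restating the expressions used above:
#     retardance ~ arctan2(sqrt(S1^2 + S2^2) * S3, S3)          (4-Stokes case)
#     retardance ~ arcsin(min(sqrt(S1^2 + S2^2), 1))            (3-Stokes case)
#     slow axis  = 0.5 * arctan2(-S1, +/-S2) mod pi             (S2 sign set by the calibration flag)
# The wrap-around corrections keep the retardance non-negative and the orientation in [0, pi).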
def Birefringence_recon(self, S1_stack, S2_stack, reg = 1e-3):
# Birefringence deconvolution with slowly varying transmission approximation
if self.use_gpu:
Hu = cp.array(self.Hu, copy=True)
Hp = cp.array(self.Hp, copy=True)
AHA = [cp.sum(cp.abs(Hu)**2 + cp.abs(Hp)**2, axis=2) + reg, \
cp.sum(Hu*cp.conj(Hp) - cp.conj(Hu)*Hp, axis=2), \
-cp.sum(Hu*cp.conj(Hp) - cp.conj(Hu)*Hp, axis=2), \
cp.sum(cp.abs(Hu)**2 + cp.abs(Hp)**2, axis=2) + reg]
S1_stack_f = cp.fft.fft2(cp.array(S1_stack), axes=(0,1))
if self.cali:
S2_stack_f = cp.fft.fft2(-cp.array(S2_stack), axes=(0,1))
else:
S2_stack_f = cp.fft.fft2(cp.array(S2_stack), axes=(0,1))
b_vec = [cp.sum(-cp.conj(Hu)*S1_stack_f + cp.conj(Hp)*S2_stack_f, axis=2), \
cp.sum(cp.conj(Hp)*S1_stack_f + cp.conj(Hu)*S2_stack_f, axis=2)]
else:
AHA = [np.sum(np.abs(self.Hu)**2 + np.abs(self.Hp)**2, axis=2) + reg, \
np.sum(self.Hu*np.conj(self.Hp) - np.conj(self.Hu)*self.Hp, axis=2), \
-np.sum(self.Hu*np.conj(self.Hp) - np.conj(self.Hu)*self.Hp, axis=2), \
np.sum(np.abs(self.Hu)**2 + np.abs(self.Hp)**2, axis=2) + reg]
S1_stack_f = fft2(S1_stack, axes=(0,1))
if self.cali:
S2_stack_f = fft2(-S2_stack, axes=(0,1))
else:
S2_stack_f = fft2(S2_stack, axes=(0,1))
b_vec = [np.sum(-np.conj(self.Hu)*S1_stack_f + np.conj(self.Hp)*S2_stack_f, axis=2), \
np.sum(np.conj(self.Hp)*S1_stack_f + np.conj(self.Hu)*S2_stack_f, axis=2)]
del_phi_s, del_phi_c = Dual_variable_Tikhonov_deconv_2D(AHA, b_vec, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
Retardance = 2*(del_phi_s**2 + del_phi_c**2)**(1/2)
slowaxis = 0.5*np.arctan2(del_phi_s, del_phi_c)%np.pi
return Retardance, slowaxis
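# Note: Birefringence_recon solves, per spatial frequency, the 2x2 Tikhonov-regularized normal
# equations whose blocks (AHA) and right-hand side (b_vec) are built from the Hu and Hp transfer
# functions, then converts the two deconvolved fields into
#     retardance = 2 * sqrt(del_phi_s^2 + del_phi_c^2)
#     slow axis  = 0.5 * arctan2(del_phi_s, del_phi_c) mod pi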
def Birefringence_recon_2D(self, S1_stack, S2_stack, method='Tikhonov', reg_br = 1,\
rho = 1e-5, lambda_br=1e-3, itr = 20, verbose=True):
'''
conduct 2D birefringence deconvolution from a defocused or asymmetrically-illuminated set of intensity images
Parameters
----------
S1_stack : numpy.ndarray
defocused or asymmetrically-illuminated set of S1 intensity images with the size of (N, M, N_pattern*N_defocus)
S2_stack : numpy.ndarray
defocused or asymmetrically-illuminated set of S2 intensity images with the size of (N, M, N_pattern*N_defocus)
method : str
denoiser for 2D birefringence deconvolution
'Tikhonov' for Tikhonov denoiser
'TV' for TV denoiser
reg_br : float
Tikhonov regularization parameter
lambda_br : float
TV regularization parameter
rho : float
augmented Lagrange multiplier for 2D ADMM algorithm
itr : int
number of iterations for 2D ADMM algorithm
verbose : bool
option to display detailed progress of computations or not
Returns
-------
retardance : numpy.ndarray
2D retardance (in the unit of rad) reconstruction with the size of (N, M)
azimuth : numpy.ndarray
2D orientation reconstruction with the size of (N, M)
'''
if self.N_defocus == 1:
S1_stack = np.reshape(S1_stack, (self.N, self.M, 1))
S2_stack = np.reshape(S2_stack, (self.N, self.M, 1))
H_1_1c = self.H_dyadic_2D_OTF_in_plane[0,0]
H_1_1s = self.H_dyadic_2D_OTF_in_plane[0,1]
H_2_1c = self.H_dyadic_2D_OTF_in_plane[1,0]
H_2_1s = self.H_dyadic_2D_OTF_in_plane[1,1]
S1_stack_f = fft2(S1_stack, axes=(0,1))
S2_stack_f = fft2(S2_stack, axes=(0,1))
cross_term = np.sum(np.conj(H_1_1c)*H_1_1s + np.conj(H_2_1c)*H_2_1s, axis=2)
AHA = [np.sum(np.abs(H_1_1c)**2 + np.abs(H_2_1c)**2, axis=2), cross_term,\
np.conj(cross_term) , np.sum(np.abs(H_1_1s)**2 + np.abs(H_2_1s)**2, axis=2)]
AHA[0] += np.mean(np.abs(AHA[0]))*reg_br
AHA[3] += np.mean(np.abs(AHA[3]))*reg_br
b_vec = [np.sum(np.conj(H_1_1c)*S1_stack_f + np.conj(H_2_1c)*S2_stack_f, axis=2), \
np.sum(np.conj(H_1_1s)*S1_stack_f + np.conj(H_2_1s)*S2_stack_f, axis=2)]
if self.use_gpu:
AHA = cp.array(AHA)
b_vec = cp.array(b_vec)
if method == 'Tikhonov':
# Deconvolution with Tikhonov regularization
g_1c, g_1s = Dual_variable_Tikhonov_deconv_2D(AHA, b_vec, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
elif method == 'TV':
# ADMM deconvolution with anisotropic TV regularization
g_1c, g_1s = Dual_variable_ADMM_TV_deconv_2D(AHA, b_vec, rho, lambda_br, lambda_br, itr, verbose, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
azimuth = (np.arctan2(-g_1s, -g_1c)/2)%np.pi
retardance = ((np.abs(g_1s)**2 + np.abs(g_1c)**2)**(1/2))/(2*np.pi/self.lambda_illu)
return retardance, azimuth
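# Illustrative usage sketch (assumed instance name `setup`; argument values are placeholders):
#
#     # S1_stack / S2_stack: background-corrected Stokes stacks of shape (N, M, N_pattern*N_defocus)
#     ret, azi = setup.Birefringence_recon_2D(S1_stack, S2_stack, method='Tikhonov', reg_br=1)
#     # or, with anisotropic-TV ADMM denoising:
#     ret, azi = setup.Birefringence_recon_2D(S1_stack, S2_stack, method='TV',
#                                             rho=1e-5, lambda_br=1e-3, itr=20, verbose=False)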
def Birefringence_recon_3D(self, S1_stack, S2_stack, method='Tikhonov', reg_br = 1,\
rho = 1e-5, lambda_br=1e-3, itr = 20, verbose=True):
'''
conduct 3D deconvolution of 2D birefringence from a defocused stack of intensity images
Parameters
----------
S1_stack : numpy.ndarray
defocused stack of S1 intensity images with the size of (N, M, N_defocus)
S2_stack : numpy.ndarray
defocused stack of S2 intensity images with the size of (N, M, N_defocus)
method : str
denoiser for 3D phase reconstruction
'Tikhonov' for Tikhonov denoiser
'TV' for TV denoiser
reg_br : float
Tikhonov regularization parameter
rho : float
augmented Lagrange multiplier for 3D ADMM algorithm
lambda_br : float
TV regularization parameter
itr : int
number of iterations for 3D ADMM algorithm
verbose : bool
option to display detailed progress of computations or not
Returns
-------
retardance : numpy.ndarray
3D reconstruction of retardance (in the unit of rad) with the size of (N, M, N_defocus)
azimuth : numpy.ndarray
3D reconstruction of 2D orientation with the size of (N, M, N_defocus)
'''
if self.pad_z != 0:
S1_pad = np.pad(S1_stack,((0,0),(0,0),(self.pad_z,self.pad_z)), mode='constant',constant_values=S1_stack.mean())
S2_pad = np.pad(S2_stack,((0,0),(0,0),(self.pad_z,self.pad_z)), mode='constant',constant_values=S2_stack.mean())
if self.pad_z < self.N_defocus:
S1_pad[:,:,:self.pad_z] = (S1_stack[:,:,:self.pad_z])[:,:,::-1]
S1_pad[:,:,-self.pad_z:] = (S1_stack[:,:,-self.pad_z:])[:,:,::-1]
S2_pad[:,:,:self.pad_z] = (S2_stack[:,:,:self.pad_z])[:,:,::-1]
S2_pad[:,:,-self.pad_z:] = (S2_stack[:,:,-self.pad_z:])[:,:,::-1]
else:
print('pad_z is larger than the number of z-slices; falling back to zero padding instead of reflection padding')
S1_stack = S1_pad.copy()
S2_stack = S2_pad.copy()
H_1_1c = self.H_dyadic_OTF_in_plane[0,0,0]
H_1_1s = self.H_dyadic_OTF_in_plane[0,1,0]
H_2_1c = self.H_dyadic_OTF_in_plane[1,0,0]
H_2_1s = self.H_dyadic_OTF_in_plane[1,1,0]
S1_stack_f = fftn(S1_stack)
S2_stack_f = fftn(S2_stack)
cross_term = np.conj(H_1_1c)*H_1_1s + np.conj(H_2_1c)*H_2_1s
AHA = [np.abs(H_1_1c)**2 + np.abs(H_2_1c)**2, cross_term,\
np.conj(cross_term) , np.abs(H_1_1s)**2 + np.abs(H_2_1s)**2]
AHA[0] += np.mean(np.abs(AHA[0]))*reg_br
AHA[3] += np.mean(np.abs(AHA[3]))*reg_br
b_vec = [np.conj(H_1_1c)*S1_stack_f + np.conj(H_2_1c)*S2_stack_f, \
np.conj(H_1_1s)*S1_stack_f + np.conj(H_2_1s)*S2_stack_f]
if self.use_gpu:
AHA = cp.array(AHA)
b_vec = cp.array(b_vec)
if method == 'Tikhonov':
# Deconvolution with Tikhonov regularization
f_1c, f_1s = Dual_variable_Tikhonov_deconv_3D(AHA, b_vec, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
elif method == 'TV':
# ADMM deconvolution with anisotropic TV regularization
f_1c, f_1s = Dual_variable_ADMM_TV_deconv_3D(AHA, b_vec, rho, lambda_br, lambda_br, itr, verbose, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
azimuth = (np.arctan2(-f_1s, -f_1c)/2)%np.pi
retardance = ((np.abs(f_1s)**2 + np.abs(f_1c)**2)**(1/2))/(2*np.pi/self.lambda_illu)*self.psz
if self.pad_z != 0:
azimuth = azimuth[:,:,self.pad_z:-(self.pad_z)]
retardance = retardance[:,:,self.pad_z:-(self.pad_z)]
return retardance, azimuth
def Inclination_recon_geometric(self, retardance, orientation, on_axis_idx, reg_ret_pr = 1e-2):
'''
estimate 2D principal retardance and 3D orientation from off-axis retardance and orientation using a geometric model
Parameters
----------
retardance : numpy.ndarray
measured retardance from different pattern illuminations with the size of (N, M, N_pattern*N_defocus)
orientation : numpy.ndarray
measured 2D orientation from different pattern illuminations with the size of (N, M, N_pattern*N_defocus)
on_axis_idx : int
index of the illumination pattern corresponding to on-axis illumination
reg_ret_pr : float
regularization for computing principal retardance
Returns
-------
inclination : numpy.ndarray
estimated inclination angle with the size of (N, M)
retardance_pr : numpy.ndarray
estimated principal retardance with the size of (N, M)
inc_coeff : numpy.ndarray
estimated inclination coefficients with the size of (6, N, M)
'''
retardance_on_axis = retardance[:,:,on_axis_idx].copy()
orientation_on_axis = orientation[:,:,on_axis_idx].copy()
retardance = np.transpose(retardance,(2,0,1))
N_meas = self.N_pattern * self.N_defocus
inc_coeff = np.reshape(self.geometric_inc_matrix_inv.dot(retardance.reshape((N_meas,self.N*self.M))), (6, self.N, self.M))
inc_coeff_sin_2theta = (inc_coeff[2]**2 + inc_coeff[3]**2)**(0.5)
inclination = np.arctan2(retardance_on_axis*2, inc_coeff_sin_2theta)
inclination = np.pi/2 - (np.pi/2-inclination)*np.sign(inc_coeff[2]*np.cos(orientation_on_axis)+inc_coeff[3]*np.sin(orientation_on_axis))
retardance_pr = retardance_on_axis*np.sin(inclination)**2 / (np.sin(inclination)**4+reg_ret_pr)
return inclination, retardance_pr, inc_coeff
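# Note: the geometric model expresses the measured off-axis retardance as a linear combination of
# six inclination-dependent coefficients (inc_coeff), so a single precomputed matrix inverse maps
# the N_pattern*N_defocus retardance measurements to those coefficients per pixel. The inclination
# is recovered from arctan2 of the on-axis retardance against the sin(2*theta)-type coefficient
# magnitude, and the principal retardance is a regularized division,
#     retardance_pr = retardance_on_axis * sin(inclination)^2 / (sin(inclination)^4 + reg_ret_pr),
# where the regularizer prevents blow-up for nearly in-plane structures.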
def scattering_potential_tensor_recon_2D_vec(self, S_image_recon, reg_inc=1e-1*np.ones((7,)), cupy_det=False):
'''
Tikhonov reconstruction of 2D scattering potential tensor components with vectorial model in QUTIPP
Parameters
----------
S_image_recon : numpy.ndarray
background corrected Stokes parameters normalized with S0's mean with the size of (3, N, M, N_pattern)
reg_inc : numpy.ndarray
Tikhonov regularization parameters for 7 scattering potential tensor components with the size of (7,)
cupy_det : bool
option to use the determinant algorithm from the cupy package (cupy v9+ computes determinants much faster than the array-based 7x7 determinant fallback)
Returns
-------
f_tensor : numpy.ndarray
2D scattering potential tensor components with the size of (7, N, M)
'''
start_time = time.time()
S_stack_f = fft2(S_image_recon, axes=(1,2))
AHA = self.inc_AHA_2D_vec.copy()
for i in range(7):
AHA[i,i] += np.mean(np.abs(AHA[i,i]))*reg_inc[i]
b_vec = np.zeros((7,self.N,self.M), complex)
for i,j in itertools.product(range(7), range(self.N_Stokes)):
b_vec[i] += np.sum(np.conj(self.H_dyadic_2D_OTF[j,i])*S_stack_f[j],axis=2)
print('Finished preprocess, elapsed time: %.2f'%(time.time()-start_time))
if self.use_gpu:
if cupy_det:
AHA = cp.transpose(cp.array(AHA), (2,3,0,1))
b_vec = cp.transpose(cp.array(b_vec), (1,2,0))
determinant = cp.linalg.det(AHA)
f_tensor = cp.zeros((7, self.N, self.M), dtype='float32')
for i in range(7):
AHA_b_vec = AHA.copy()
AHA_b_vec[:,:,:,i] = b_vec.copy()
f_tensor[i] = cp.real(cp.fft.ifftn(cp.linalg.det(AHA_b_vec) / determinant))
else:
AHA = cp.array(AHA)
b_vec = cp.array(b_vec)
determinant = array_based_7x7_det(AHA)
f_tensor = cp.zeros((7, self.N, self.M))
for i in range(7):
AHA_b_vec = AHA.copy()
AHA_b_vec[:,i] = b_vec.copy()
f_tensor[i] = cp.real(cp.fft.ifft2(array_based_7x7_det(AHA_b_vec) / determinant))
f_tensor = cp.asnumpy(f_tensor)
else:
AHA_pinv = np.linalg.pinv(np.transpose(AHA,(2,3,0,1)))
f_tensor = np.real(ifft2(np.transpose(np.squeeze(np.matmul(AHA_pinv, np.transpose(b_vec,(1,2,0))[...,np.newaxis])),(2,0,1)),axes=(1,2)))
print('Finished reconstruction, elapsed time: %.2f'%(time.time()-start_time))
return f_tensor
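# Note: the 2D vectorial reconstruction solves a 7x7 Tikhonov-regularized normal-equation system
# (AHA f = b) independently at every spatial frequency. On the GPU the solve uses Cramer's rule:
# each component f_i is det(AHA with column i replaced by b) / det(AHA), computed either with
# cupy's batched determinant (cupy_det=True) or with the array-based 7x7 determinant expansion.
# On the CPU the same per-frequency system is solved with a batched pseudo-inverse instead.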
def scattering_potential_tensor_recon_3D_vec(self, S_image_recon, reg_inc=1e-1*np.ones((7,)), cupy_det=False):
'''
Tikhonov reconstruction of 3D scattering potential tensor components with vectorial model in QUTIPP
Parameters
----------
S_image_recon : numpy.ndarray
background corrected Stokes parameters normalized with S0's mean with the size of (3, N_pattern, N, M, N_defocus)
reg_inc : numpy.ndarray
Tikhonov regularization parameters for 7 scattering potential tensor components with the size of (7,)
cupy_det : bool
option to use the determinant algorithm from the cupy package (cupy v9+ computes determinants much faster than the array-based 7x7 determinant fallback)
Returns
-------
f_tensor : numpy.ndarray
3D scattering potential tensor components with the size of (7, N, M, N_defocus)
'''
start_time = time.time()
if self.pad_z != 0:
S_pad = np.pad(S_image_recon,((0,0),(0,0),(0,0),(0,0),(self.pad_z,self.pad_z)), mode='constant',constant_values=0)
if self.pad_z < self.N_defocus:
S_pad[...,:self.pad_z] = (S_image_recon[...,:self.pad_z])[:,:,::-1]
S_pad[...,-self.pad_z:] = (S_image_recon[...,-self.pad_z:])[:,:,::-1]
else:
print('pad_z is larger than the number of z-slices; falling back to zero padding instead of reflection padding')
S_image_recon = S_pad.copy()
S_stack_f = fftn(S_image_recon,axes=(-3,-2,-1))
AHA = self.inc_AHA_3D_vec.copy()
for i in range(7):
AHA[i,i] += np.mean(np.abs(AHA[i,i]))*reg_inc[i]
b_vec = np.zeros((7,self.N,self.M,self.N_defocus_3D), dtype='complex64')
for i,j in itertools.product(range(7), range(self.N_Stokes)):
b_vec[i] += np.sum(np.conj(self.H_dyadic_OTF[j,i])*S_stack_f[j],axis=0)
print('Finished preprocess, elapsed time: %.2f'%(time.time()-start_time))
if self.use_gpu:
if cupy_det:
AHA = cp.transpose(cp.array(AHA), (2,3,4,0,1))
b_vec = cp.transpose(cp.array(b_vec), (1,2,3,0))
determinant = cp.linalg.det(AHA)
f_tensor = cp.zeros((7, self.N, self.M, self.N_defocus_3D), dtype='float32')
for i in range(7):
AHA_b_vec = AHA.copy()
AHA_b_vec[:,:,:,:,i] = b_vec.copy()
f_tensor[i] = cp.real(cp.fft.ifftn(cp.linalg.det(AHA_b_vec) / determinant))
else:
AHA = cp.array(AHA)
b_vec = cp.array(b_vec)
determinant = array_based_7x7_det(AHA)
f_tensor = cp.zeros((7, self.N, self.M, self.N_defocus_3D), dtype='float32')
for i in range(7):
AHA_b_vec = AHA.copy()
AHA_b_vec[:,i] = b_vec.copy()
f_tensor[i] = cp.real(cp.fft.ifftn(array_based_7x7_det(AHA_b_vec) / determinant))
f_tensor = cp.asnumpy(f_tensor)
else:
AHA_pinv = np.linalg.pinv(np.transpose(AHA,(2,3,4,0,1)))
f_tensor = np.real(ifftn(np.transpose(np.squeeze(np.matmul(AHA_pinv, np.transpose(b_vec,(1,2,3,0))[...,np.newaxis])),(3,0,1,2)),axes=(1,2,3)))
if self.pad_z != 0:
f_tensor = f_tensor[...,self.pad_z:-(self.pad_z)]
print('Finished reconstruction, elapsed time: %.2f'%(time.time()-start_time))
return f_tensor
def scattering_potential_tensor_to_3D_orientation(self, f_tensor, S_image_recon=None, material_type='positive', reg_ret_pr = 1e-2, itr=20, step_size=0.3,verbose=True,fast_gpu_mode=False):
'''
estimate principal retardance, 3D orientation, and optic sign from scattering potential tensor components
Parameters
----------
f_tensor : numpy.ndarray
scattering potential tensor components with the size of (7, N, M) or (7, N, M, N_defocus) for 3D
S_image_recon : numpy.ndarray
background corrected Stokes parameters normalized with S0's mean
material_type : str
'positive' for assumption of positively uniaxial material
'negative' for assumption of negatively uniaxial material
'unknown' to trigger the optic sign estimation algorithm, which returns two sets of solutions with a probability map of the material
reg_ret_pr : float
regularization parameter for principal retardance estimation
itr : int
number of iterations for the optic sign retrieval algorithm
step_size : float
scaling of the gradient step size for the optic sign retrieval algorithm
verbose : bool
option to display details of optic sign retrieval algorithm in each iteration
fast_gpu_mode : bool
option to use a faster GPU computation mode (keeps all arrays on the GPU; may consume more memory)
Returns
-------
retardance_pr : numpy.ndarray
reconstructed principal retardance with the size of (2, N, M) for 2D and (2, N, M, N_defocus) for 3D
channel 0: positively uniaxial solution (or return retardance_pr_p when 'positive' is specified for material_type)
channel 1: negatively uniaxial solution (or return retardance_pr_n when 'negative' is specified for material_type)
azimuth : numpy.ndarray
reconstructed in-plane orientation with the size of (2, N, M) for 2D and (2, N, M, N_defocus) for 3D
channel 0: positively uniaxial solution (or return azimuth_p when 'positive' is specified for material_type)
channel 1: negatively uniaxial solution (or return azimuth_n when 'negative' is specified for material_type)
theta : numpy.ndarray
reconstructed out-of-plane inclination with the size of (2, N, M) for 2D and (2, N, M, N_defocus) for 3D
channel 0: positively uniaxial solution (or return theta_p when 'positive' is specified for material_type)
channel 1: negatively uniaxial solution (or return theta_n when 'negative' is specified for material_type)
mat_map : numpy.ndarray
reconstructed material tendency with the size of (2, N, M) for 2D and (2, N, M, N_defocus) for 3D
channel 0: tendency toward the positively uniaxial solution
channel 1: tendency toward the negatively uniaxial solution
'''
if self.pad_z != 0 and material_type == 'unknown':
S_pad = np.pad(S_image_recon,((0,0),(0,0),(0,0),(0,0),(self.pad_z,self.pad_z)), mode='constant',constant_values=0)
f_tensor_pad = np.pad(f_tensor,((0,0),(0,0),(0,0),(self.pad_z,self.pad_z)), mode='constant',constant_values=0)
if self.pad_z < self.N_defocus:
S_pad[...,:self.pad_z] = (S_image_recon[...,:self.pad_z])[:,:,::-1]
S_pad[...,-self.pad_z:] = (S_image_recon[...,-self.pad_z:])[:,:,::-1]
f_tensor_pad[...,:self.pad_z] = (f_tensor[...,:self.pad_z])[:,:,::-1]
f_tensor_pad[...,-self.pad_z:] = (f_tensor[...,-self.pad_z:])[:,:,::-1]
else:
print('pad_z is larger than the number of z-slices; falling back to zero padding instead of reflection padding')
S_image_recon = S_pad.copy()
f_tensor = f_tensor_pad.copy()
if material_type == 'positive' or material_type == 'unknown':
# Positive uniaxial material
retardance_pr_p, azimuth_p, theta_p = scattering_potential_tensor_to_3D_orientation_PN(f_tensor, material_type='positive', reg_ret_pr = reg_ret_pr)
if material_type == 'positive':
return retardance_pr_p, azimuth_p, theta_p
if material_type == 'negative' or material_type == 'unknown':
# Negative uniaxial material
retardance_pr_n, azimuth_n, theta_n = scattering_potential_tensor_to_3D_orientation_PN(f_tensor, material_type='negative', reg_ret_pr = reg_ret_pr)
if material_type == 'negative':
return retardance_pr_n, azimuth_n, theta_n
if material_type == 'unknown':
if f_tensor.ndim == 4:
S_stack_f = fftn(S_image_recon,axes=(-3,-2,-1))
elif f_tensor.ndim == 3:
S_stack_f = fft2(S_image_recon,axes=(1,2))
f_tensor_p = np.zeros((5,)+f_tensor.shape[1:])
f_tensor_p[0] = -retardance_pr_p*(np.sin(theta_p)**2)*np.cos(2*azimuth_p)
f_tensor_p[1] = -retardance_pr_p*(np.sin(theta_p)**2)*np.sin(2*azimuth_p)
f_tensor_p[2] = -retardance_pr_p*(np.sin(2*theta_p))*np.cos(azimuth_p)
f_tensor_p[3] = -retardance_pr_p*(np.sin(2*theta_p))*np.sin(azimuth_p)
f_tensor_p[4] = retardance_pr_p*(np.sin(theta_p)**2 - 2*np.cos(theta_p)**2)
f_tensor_n = np.zeros((5,)+f_tensor.shape[1:])
f_tensor_n[0] = -retardance_pr_n*(np.sin(theta_n)**2)*np.cos(2*azimuth_n)
f_tensor_n[1] = -retardance_pr_n*(np.sin(theta_n)**2)*np.sin(2*azimuth_n)
f_tensor_n[2] = -retardance_pr_n*(np.sin(2*theta_n))*np.cos(azimuth_n)
f_tensor_n[3] = -retardance_pr_n*(np.sin(2*theta_n))*np.sin(azimuth_n)
f_tensor_n[4] = retardance_pr_n*(np.sin(theta_n)**2 - 2*np.cos(theta_n)**2)
f_vec = f_tensor.copy()
x_map = np.zeros(f_tensor.shape[1:])
y_map = np.zeros(f_tensor.shape[1:])
if f_tensor.ndim == 4:
f_vec_f = fftn(f_vec, axes=(1,2,3))
S_est_vec = np.zeros((self.N_Stokes, self.N_pattern, self.N, self.M, self.N_defocus_3D), complex)
for p,q in itertools.product(range(self.N_Stokes), range(2)):
S_est_vec[p] += self.H_dyadic_OTF[p,q]*f_vec_f[np.newaxis,q]
elif f_tensor.ndim == 3:
f_vec_f = fft2(f_vec, axes=(1,2))
S_est_vec = np.zeros((self.N_Stokes, self.N, self.M, self.N_defocus*self.N_pattern), complex)
for p,q in itertools.product(range(self.N_Stokes), range(2)):
S_est_vec[p] += self.H_dyadic_2D_OTF[p,q]*f_vec_f[q,:,:,np.newaxis]
if self.use_gpu:
f_tensor_p = cp.array(f_tensor_p)
f_tensor_n = cp.array(f_tensor_n)
f_vec = cp.array(f_vec)
if fast_gpu_mode:
S_stack_f = cp.array(S_stack_f)
# iterative optic sign estimation algorithm
err = np.zeros(itr+1)
tic_time = time.time()
if verbose:
print('| Iter | error | Elapsed time (sec) |')
f1,ax = plt.subplots(2,2,figsize=(20,20))
for i in range(itr):
if self.use_gpu:
x_map = cp.array(x_map)
y_map = cp.array(y_map)
for j in range(5):
f_vec[j+2] = x_map*f_tensor_p[j] + y_map*f_tensor_n[j]
S_est_vec_update = S_est_vec.copy()
if self.use_gpu:
if fast_gpu_mode:
S_est_vec_update = cp.array(S_est_vec_update)
if f_tensor.ndim == 4:
f_vec_f = cp.fft.fftn(f_vec, axes=(1,2,3))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += cp.array(self.H_dyadic_OTF[p,q+2])*f_vec_f[np.newaxis,q+2]
elif f_tensor.ndim == 3:
f_vec_f = cp.fft.fft2(f_vec, axes=(1,2))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += cp.array(self.H_dyadic_2D_OTF[p,q+2])*f_vec_f[q+2,:,:,np.newaxis]
else:
if f_tensor.ndim == 4:
f_vec_f = cp.fft.fftn(f_vec, axes=(1,2,3))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += cp.asnumpy(cp.array(self.H_dyadic_OTF[p,q+2])*f_vec_f[np.newaxis,q+2])
elif f_tensor.ndim == 3:
f_vec_f = cp.fft.fft2(f_vec, axes=(1,2))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += cp.asnumpy(cp.array(self.H_dyadic_2D_OTF[p,q+2])*f_vec_f[q+2,:,:,np.newaxis])
else:
if f_tensor.ndim == 4:
f_vec_f = fftn(f_vec, axes=(1,2,3))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += self.H_dyadic_OTF[p,q+2]*f_vec_f[np.newaxis,q+2]
elif f_tensor.ndim == 3:
f_vec_f = fft2(f_vec, axes=(1,2))
for p,q in itertools.product(range(self.N_Stokes), range(5)):
S_est_vec_update[p] += self.H_dyadic_2D_OTF[p,q+2]*f_vec_f[q+2,:,:,np.newaxis]
S_diff = S_stack_f-S_est_vec_update
if fast_gpu_mode and self.use_gpu:
err[i+1] = cp.asnumpy(cp.sum(cp.abs(S_diff)**2))
else:
err[i+1] = np.sum(np.abs(S_diff)**2)
if err[i+1]>err[i] and i>0:
if self.use_gpu:
x_map = cp.asnumpy(x_map)
y_map = cp.asnumpy(y_map)
break
if self.use_gpu:
AH_S_diff = cp.zeros((5,)+f_tensor.shape[1:], complex)
if f_tensor.ndim == 4:
for p,q in itertools.product(range(5), range(self.N_Stokes)):
if fast_gpu_mode:
AH_S_diff[p] += cp.sum(cp.conj(cp.array(self.H_dyadic_OTF[q,p+2]))*S_diff[q],axis=0)
else:
AH_S_diff[p] += cp.sum(cp.conj(cp.array(self.H_dyadic_OTF[q,p+2]))*cp.array(S_diff[q]),axis=0)
grad_x_map = -cp.real(cp.sum(f_tensor_p*cp.fft.ifftn(AH_S_diff,axes=(1,2,3)),axis=0))
grad_y_map = -cp.real(cp.sum(f_tensor_n*cp.fft.ifftn(AH_S_diff,axes=(1,2,3)),axis=0))
elif f_tensor.ndim == 3:
for p,q in itertools.product(range(5), range(self.N_Stokes)):
if fast_gpu_mode:
AH_S_diff[p] += cp.sum(cp.conj(cp.array(self.H_dyadic_2D_OTF[q,p+2]))*S_diff[q],axis=2)
else:
AH_S_diff[p] += cp.sum(cp.conj(cp.array(self.H_dyadic_2D_OTF[q,p+2]))*cp.array(S_diff[q]),axis=2)
grad_x_map = -cp.real(cp.sum(f_tensor_p*cp.fft.ifft2(AH_S_diff,axes=(1,2)),axis=0))
grad_y_map = -cp.real(cp.sum(f_tensor_n*cp.fft.ifft2(AH_S_diff,axes=(1,2)),axis=0))
x_map -= grad_x_map/cp.max(cp.abs(grad_x_map))*step_size
y_map -= grad_y_map/cp.max(cp.abs(grad_y_map))*step_size
x_map = cp.asnumpy(x_map)
y_map = cp.asnumpy(y_map)
else:
AH_S_diff = np.zeros((5,)+f_tensor.shape[1:], complex)
if f_tensor.ndim == 4:
for p,q in itertools.product(range(5), range(self.N_Stokes)):
AH_S_diff[p] += np.sum(np.conj(self.H_dyadic_OTF[q,p+2])*S_diff[q],axis=0)
grad_x_map = -np.real(np.sum(f_tensor_p*ifftn(AH_S_diff,axes=(1,2,3)),axis=0))
grad_y_map = -np.real(np.sum(f_tensor_n*ifftn(AH_S_diff,axes=(1,2,3)),axis=0))
elif f_tensor.ndim == 3:
for p,q in itertools.product(range(5), range(self.N_Stokes)):
AH_S_diff[p] += np.sum(np.conj(self.H_dyadic_2D_OTF[q,p+2])*S_diff[q],axis=2)
grad_x_map = -np.real(np.sum(f_tensor_p*ifft2(AH_S_diff,axes=(1,2)),axis=0))
grad_y_map = -np.real(np.sum(f_tensor_n*ifft2(AH_S_diff,axes=(1,2)),axis=0))
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
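# Note: each parametrized case above feeds an (object, expected) pair straight into the predicate
# under test. A minimal standalone check of the same behavior (illustrative, not exhaustive):
#
#     assert types.is_categorical_dtype(cudf.Series(dtype="category"))
#     assert not types.is_categorical_dtype(cudf.Series(dtype="int"))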
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer_dtype(obj, expect):
assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), True),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_integer(obj, expect):
assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, True),
# (object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, True),
(np.unicode_, True),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), True),
(np.unicode_(), True),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), True),
(np.dtype("unicode"), True),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
# (np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), True),
(np.array([], dtype=np.unicode_), True),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
# (np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), True),
(pd.Series(dtype="unicode"), True),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
# (pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), True),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_string_dtype(obj, expect):
assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, True),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), True),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), True),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), True),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), True),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), True),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_datetime_dtype(obj, expect):
assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, True),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), True),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
(cudf.Series([[1, 2], [3, 4, 5]]), True),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_list_dtype(obj, expect):
assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
# Evolutionary optimizer for hyperparameters and architecture. Project at https://github.com/pgfeldman/optevolver
import concurrent.futures
import copy
import datetime
import getpass
import os
import random
import re
import threading
from enum import Enum
from typing import Dict, List, Tuple, Callable
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as st
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D # <-- DON'T DELETE, and note the capitalization!
from sklearn.utils import resample
import optevolver.hyperparameter.ValueAxis as VA
import optevolver.util.ExcelUtils as eu
class EvolverTags(Enum):
"""A class containing enumerations elements for use in the argument dictionaries"""
FITNESS = "fitness"
ID = "id"
FUNCTION = "func"
FLOAT = "float"
GENERATION = "generation"
GENOME = "genome"
THREAD_NAME = "thread_str"
FILENAME = "filename"
class Genome:
"""
Class that handles the evolution of a set of ValueAxis (i.e. the chromosome)
...
Attributes
----------
chromosome_dict: Dict
fitness: float
ea_list: List
population: List
meta_info: Dict
data_list: List
generation:int
Methods
-------
reset(self):
Resets all the variables. Needed to eliminate class cross-contamination of class-global variables
equals(self, g: "Genome") -> bool:
Does a deep compare of two Genomes. returns a True if they have the same structure and value(s).
get_chromosome_value(self, key: str) -> Dict:
mutate(self, chance: float = 0.1):
create_args_from_chromo(self, chromo: dict = None) -> Dict:
create_dict_from_chromo(self, chromo: dict = None) -> Dict:
calc_fitness(self, func, id_str: str) -> float:
calc_fitness2(self, args: Dict):
calc_fitness_stats(self, resample_size: int = 100) -> float:
get_data_list(self) -> List:
get_name(self) -> str:
to_dict(self):
to_string(self, meta: bool = True, chromo: bool = True) -> str:
"""
chromosome_dict: Dict
fitness: float
ea_list: List
population: List
meta_info: Dict
data_list: List
generation = 0
def __init__(self, evolve_axis_list: List, p1: 'Genome' = None, p2: 'Genome' = None, crossover: float = 0.5,
generation=0):
"""
Parameters
----------
evolve_axis_list : List
The list of all EvolveAxis used to create this genome
p1 :
Optional parent for this Genome. Two are required to breed.
p2
Optional parent for this Genome. Two are required to breed.
crossover: float
probability that a chromosome will be selected randomly from p1
generation: int
The generation (as determined by the calling EvolutionaryOpimizer) that this genome belongs to
"""
self.reset()
self.generation = generation
self.ea_list = copy.deepcopy(evolve_axis_list)
ea: VA.EvolveAxis
if p1 is None and p2 is None:
for ea in self.ea_list:
self.chromosome_dict[ea.name] = ea.get_random_val()
else:
# for ea in self.ea_list:
for i in range(len(self.ea_list)):
ea = self.ea_list[i]
ea1 = p1.ea_list[i]
ea2 = p2.ea_list[i]
probability = random.random()
if probability < crossover:
ea.set_value(ea1)
else:
ea.set_value(ea2)
self.chromosome_dict[ea.name] = ea.get_result()
def reset(self):
"""Resets all the variables. Needed to eliminate class cross-contamination of class-global variables"""
self.ea_list = []
self.chromosome_dict = {}
self.meta_info = {}
self.fitness = 0
self.population = []
self.generation = 0
self.data_list = []
def equals(self, g: "Genome") -> bool:
"""Does a deep compare of two Genomes. returns a True if they have the same structure and value(s)
Parameters
----------
g : Genome
The genome we are testing against
"""
d1 = self.create_args_from_chromo()
d2 = g.create_args_from_chromo()
if len(d1) != len(d2):
return False
for key, val in d1.items():
if key not in d2 or d2[key] != val:
return False
return True
def get_chromosome_value(self, key: str) -> Dict:
""" Get the current value of a specified EvolveAxis
Parameters
----------
key : str
The name of the EvolveAxis
"""
return self.chromosome_dict[key]
def mutate(self, chance: float = 0.1):
""" Randomly set new values in the chromosomes that make up this genome
Parameters
----------
chance : float = 0.1
The probability that any particular chromosome will mutate. Default is 10%
"""
ea: VA.EvolveAxis
for ea in self.ea_list:
if random.random() < chance: # mutate.
# calculate a new random val
self.chromosome_dict[ea.name] = ea.get_random_val()
def create_args_from_chromo(self, chromo: dict = None) -> Dict:
""" Creates a dictionary that provides values that can be evaluated using the callback function passed to the
EvolutionaryOptimizer. An example of this is the function near the bottom of this file:
def example_evaluation_function(arguments: Dict) -> Tuple[Dict, Dict]:
The arguments:Dict parameter is created and returned by this method
Parameters
----------
chromo : dict = None
An optional chromosome. Otherwise the arguments are created by using this Genome's self.chromosome_dict
"""
if chromo is None:
chromo = self.chromosome_dict
to_return = {}
ea: VA.EvolveAxis
for ea in self.ea_list:
to_return[ea.name] = ea.get_result()
return to_return
def create_dict_from_chromo(self, chromo: dict = None) -> Dict:
""" Creates a dictionary that provides a detailed list of the parameters used by this genome. This differs from
create_args_from_chromo() by including nested parameters of each EvolveAxis
Parameters
----------
chromo : dict = None
An optional chromosome. Otherwise the arguments are created by using this Genome's self.chromosome_dict
"""
if chromo is None:
chromo = self.chromosome_dict
to_return = {}
ea: VA.EvolveAxis
for ea in self.ea_list:
history = ea.get_last_history()
for key, value in history.items():
to_return["{}".format(key)] = value
return to_return
def calc_fitness(self, func: Callable, id_str: str) -> float:
""" Depricated - Conceptually the heart of the approach. A pointer to a function is passed in, which is used to
calculate the fitness of whatever is being evaluated and returns it.
Parameters
----------
func : Callable
The function that will produce some fitness value. It returns two Dicts (d1, d2), where d1 must contain a
"fitness" value and d2, which contains data that will be recorded to the spreadsheet for post-hoc
analysis
id_str: str
The name for this evaluation. Added to the argument Dict in case it is needed, for example, as a file name
"""
args = self.create_args_from_chromo(self.chromosome_dict)
args[EvolverTags.ID.value] = id_str
d1, d2 = func(args)
self.data_list.append(d2)
self.fitness = d1[EvolverTags.FITNESS.value]
self.population.append(self.fitness)
return self.fitness
def calc_fitness2(self, args: Dict):
""" Conceptually the heart of the approach. A pointer to a function is passed in, which is used to
calculate the fitness of whatever is being evaluated.
Parameters
----------
args : Dict
Contains the arguments that will be passed to the evaluate function, and a reference to the function as
well. The function is deleted from the arguments, and the remaining Dict is passed to the function, which
is required to produce a fitness value. It returns two Dicts (d1, d2), where d1 must contain a
{EvolverTags.FITNESS.value : <some fitness value>} and d2, which contains data that will be recorded to the
spreadsheet for post-hoc analysis
"""
args.update(self.create_args_from_chromo())
func = args[EvolverTags.FUNCTION.value]
del args[EvolverTags.FUNCTION.value]
d1, d2 = func(args)
self.data_list.append(d2)
self.fitness = d1[EvolverTags.FITNESS.value]
self.population.append(self.fitness)
def calc_fitness_stats(self, resample_size: int = 100) -> float:
""" Creates a bootstrap resampling of the fitness values that have accumulated for this genome. Since the
fitness value may be stochastic, it's best to return a reasonable mean value. It returns the mean
fitness value from this population, and saves the 5%, 95%, minimum, and maximum values for post-hoc analysis
Parameters
----------
resample_size: int = 100
The size of the bootstrap population to resample into
"""
# print("calc_fitness_stats(): population = {}".format(len(self.population)))
boot = resample(self.population, replace=True, n_samples=resample_size, random_state=1)
s = pd.Series(boot)
conf = st.t.interval(0.95, len(boot) - 1, loc=s.mean(), scale=st.sem(boot))
self.meta_info = {'mean': s.mean(), '5_conf': conf[0], '95_conf': conf[1], 'max': s.max(), 'min': s.min()}
self.fitness = s.mean()
return self.fitness
def get_data_list(self) -> List:
""" Returns the list of parameters for this genome over time for export to spreadsheet. printing, etc"""
return self.data_list
def get_name(self) -> str:
""" Creates and returns a name constructed from the active key/value pairs in the active elements of the chromosomes"""
d = self.create_dict_from_chromo()
to_return = ""
for key, val in d.items():
to_return += "{}_".format(val)
return to_return.rstrip("_")
def to_dict(self) -> Dict:
""" Returns a Dict that contains all the meta information about this genome (population, generation, etc), and
the current parameters and values """
to_return = {}
to_return[EvolverTags.GENERATION.value] = self.generation
for key, val in self.meta_info.items():
to_return[key] = val
to_return.update(self.create_dict_from_chromo())
return to_return
def to_string(self, meta: bool = True, chromo: bool = True) -> str:
""" Returns a str that contains all the meta information about this genome (population, generation, etc), and
the current parameters and values """
to_return = "generation = {}, ".format(self.generation, )
if meta:
to_return += "meta: "
for key, val in self.meta_info.items():
to_return += "{}:{:.3f}, ".format(key, val)
if chromo:
to_return += "chromo: {}".format(self.create_dict_from_chromo(self.chromosome_dict))
return to_return.rstrip(",")
class EvolutionaryOpimizer:
"""
Class that manages the evolution of a population of Genomes
...
Attributes
----------
evolve_axis_list:List = []
The master list of all the EvolveAxis that make up the Genomes.
current_genome_list:List = []
The list of currently active Genomes
all_genomes_list:List = []
The list of all Genomes, including inactive ones for post-hoc analysis
best_genome_list:List = []
The list of highest-fitness Genomes, typically the top 10% - 50%
best_genome_history_list:List = []
The list of the best Genome from each of the generations
keep_percent:float = 0.1
The percent to keep in the "best_genome" population. Default is 10%
resample_size:int = 100
The bootstrap resample population. Default is 100
num_genomes:int = 10
The number of "live" Genomes in the population. Default is 10
generation:int = 0
The current generation
logfile_name:str = "defaultLog.txt"
The name of the debugging logfile. Useful for multithreading debugging
threads:int = 0
Number of threads/gpus
thread_label:str = "gpu"
The label associated with the threads. Typically this would be "gpu", "tpu", or "cpu"
last_num_regex = None
A regex to get the last number in a string. Used to determine which thread a process is running in
Methods
-------
reset(self):
Resets all the variables. Needed to eliminate class cross-contamination of class-global variables
log(self, s: str):
Opens the specified log file, writes a string, and closes it
add_axis(self, val_axis: VA.EvolveAxis):
Adds an EvolveAxis to the master axis list - self.evolve_axis_list
create_intital_genomes(self, num_genomes: int):
create the genomes of generation 0
breed_genomes(self, g1: Genome, g2: Genome, crossover_rate: float, mutation_rate: float) -> Genome:
Take two parent genomes and breed a child Genome, then mutate that child and return it
thread_function(self, args: List):
The function called by the thread pooler. All arguments are passed in in a Dict, including the function that
will do the model creation and evaluation. The number of the thread is determined and used to configure which
tensor processor (CPU:x, GPU:x, or TPU:x) this thread will utilize.
run_optimizer(self, eval_func: Callable, save_func: Callable, crossover_rate: float, mutation_rate: float) -> float:
Method that handles the evolution of a single generation of our population of Genomes, and returns an
average fitness for the Ensemble associated with the best Genome
get_ranked_chromosome(self, rank: int = 0) -> Dict:
Get the Genome of the current nth rank, and return its chromosome Dict
get_ranked_genome(self, rank: int = 0) -> Genome:
Get the Genome of the current nth rank, and return it
save_results(self, file_name: str, data_dict: Dict = None):
Save the results of this population's evolution to an Excel spreadsheet for post hoc analysis
to_string(self, meta: bool = True, chromo: bool = True) -> str:
Returns a string representation of this class
"""
evolve_axis_list: List = []
current_genome_list: List = []
all_genomes_list: List = []
best_genome_list: List = []
best_genome_history_list: List = []
keep_percent: float = 0.1
resample_size: int = 100
num_genomes: int = 10
generation: int = 0
logfile_name: str = "defaultLog.txt"
threads: int = 0
thread_label: str = "gpu"
last_num_regex = None
def __init__(self, keep_percent: float = 0.1, pop_size: int = 10, resample_size: int = 100, threads: int = 0,
logfile: str = None, thread_label: str = "gpu"):
""" Ctor - Sets up the the EvolutionaryOpimizer, but does not create the populations, since the
EvolveAxis haven't been added yet
Parameters
----------
keep_percent : float
The fraction of Genomes to keep from the previous generation. Defaults to 10%
pop_size : int
The number of Genomes in the population. Defaults to 10
resample_size : int
The bootstrap distribution size that we calculate statistics from
threads : int
The number of device-specific threads that this class will manage. Default is 0
"""
self.reset()
self.keep_percent = keep_percent
self.num_genomes = pop_size
self.resample_size = resample_size
self.threads = threads
self.thread_label = thread_label
if logfile != None:
self.logfile_name = logfile
try:
os.remove(self.logfile_name)
except OSError as e: ## if failed, report it back to the user ##
print("Error: %s - %s. Creating file." % (e.filename, e.strerror))
def reset(self):
""" Resets all the variables. Needed to eliminate class cross-contamination of class-global variables """
self.evolve_axis_list = []
self.all_genomes_list = []
self.current_genome_list = []
self.best_genome_list = []
self.best_genome_history_list = []
self.keep_percent = 0.1
self.resample_size = 100
self.num_genomes = 10
self.generation = 0
self.threads = 0
self.thread_label = "gpu"
last_num_in_str_re = r'(\d+)(?!.*\d)'
self.last_num_regex = re.compile(last_num_in_str_re)
def log(self, s: str):
""" Opens the specifies log file, writes a string, and closes it
Parameters
----------
s : str
The string to write to file
"""
with open(self.logfile_name, "a") as f:
f.write("{}\n".format(s))
def add_axis(self, val_axis: VA.EvolveAxis):
""" Adds an EvolveAxis to the master axis list - self.evolve_axis_list
Parameters
----------
val_axis : EvolveAxis
The initialized EvolveAxis
"""
self.evolve_axis_list.append(val_axis)
def create_intital_genomes(self, num_genomes: int):
""" create the genomes of generation 0
Parameters
----------
num_genomes : int
The number of Genomes to create as our evolving population
"""
self.num_genomes = num_genomes
for i in range(num_genomes):
# create a genome without parents. This genome will be a member of generation 0
g = Genome(self.evolve_axis_list, generation=self.generation)
# append to the list of currently active Genomes
self.current_genome_list.append(g)
# append to the list of all Genomes
self.all_genomes_list.append(g)
def breed_genomes(self, g1: Genome, g2: Genome, crossover_rate: float, mutation_rate: float) -> Genome:
""" Take two parent genomes and breed a child Genome, then mutate that child and return it
Parameters
----------
g1 : Genome
Parent 1
g2 : Genome
Parent 2
crossover_rate: float
probability that a chromosome will be selected randomly from p1
mutation_rate: float
The probability that each chromosome in the child Genome will mutate
"""
g = Genome(self.evolve_axis_list, g1, g2, crossover_rate, generation=self.generation)
g.mutate(mutation_rate)
return g
def thread_function(self, args: Dict):
""" The function called by the thread pooler. All arguments are passed in in a Dict, including the function that
will do the model creation and evaluation. The number of the thread is determined and used to configure which
tensor processor (CPU:x, GPU:x, or TPU:x) this thread will utilize.
Parameters
----------
args : Dict
The values that are needed to calculate and evaluate fitness. An example would be:
{EvolverTags.ID.value: "eval_{}".format(i), EvolverTags.FUNCTION.value: eval_func, EvolverTags.GENOME.value: g}
where i is the index in a list of Genomes, eval_func is a reference to the function that will
calculate and evaluate fitness, and g is the Genome that contains the parameters to be evaluated
"""
# get the last number in the thread name. This is how we figure out the id of the device we'll use
num = self.last_num_regex.search(threading.current_thread().name)
# create the tf.distribute compatible argument for the device
thread_str = "{}:{}".format(self.thread_label, int(num.group(0)))
args[EvolverTags.THREAD_NAME.value] = thread_str
# get the genome we'll evaluate and delete it from the arguments
g = args[EvolverTags.GENOME.value]
del args[EvolverTags.GENOME.value]
# print("thread_func() args = {}".format(args))
# evaluate the genome, using the eval_func from the args Dict
g.calc_fitness2(args)
def run_optimizer(self, eval_func: Callable, save_func: Callable, crossover_rate: float,
mutation_rate: float) -> float:
""" Method that handles the evolution of a single generation of our population of Genomes, and returns an
average fitness for the Ensemble associated with the best Genome
Parameters
----------
eval_func: Callable
The function that performs the construction and evaluation of the model
save_func: Callable
The function that performs the saving of the ensemble of models associated with the best Genome
crossover_rate: float
probability that a chromosome will be selected randomly from p1
mutation_rate: float
The probability that each chromosome in the child Genome will mutate
"""
# increment the current generation first. This way we can tell the difference between these generations and the
# initial, 'generation 0' Genomes
self.generation += 1
# Declare types before the loop so the IDE knows what's going on
g: Genome
best_fitness = -1000.0
# iterate over all the current Genomes
for g in self.current_genome_list:
# set up the task list (needed for threading)
task_list = []
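# each Genome is evaluated num_genomes times (one task per evaluation) so that
# calc_fitness_stats() has a population of fitness samples to bootstrap over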
for i in range(self.num_genomes):
task = {EvolverTags.ID.value: "eval_{}".format(i), EvolverTags.FUNCTION.value: eval_func,
EvolverTags.GENOME.value: g}
task_list.append(task)
# A population of 0 means that this is a new Genome. We don't have to re-calculate a Genome's fitness
if len(g.population) == 0:
if self.threads == 0:
# if there are no threads, call g.calc_fitness directly. This makes debugging MUCH easier
for t in task_list:
g.calc_fitness2(t)
else:
# if there are threads, execute using the thread pool executing the thread_function with the
# task_list as the set of parameters
with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
executor.map(self.thread_function, task_list)
# calculate the fitness statistics for the ensemble of models created for the Genome
fitness = g.calc_fitness_stats(resample_size=self.resample_size)
# if the fitness is better, save it
if fitness > best_fitness:
result = save_func(g.get_name())
self.log(result)
best_fitness = fitness
# log the new Genome, now that the values have been calculated. Note that we log all Genomes,
# so we can see how effectively we're increasing fitness
self.log(g.to_string(meta=True, chromo=True))
# sort the list in place such that the highest value of fitness is at index zero
self.current_genome_list.sort(key=lambda x: x.fitness, reverse=True)
# self.log(self.to_string())
# determine how many Genomes we're going to keep. If we use the default population size of ten, and the
# default keep_percent of 10%, then we would keep one Genome (ceil(10 * 0.1) = 1)
num_best = int(np.ceil(len(self.current_genome_list) * self.keep_percent))
self.best_genome_list = []
# build a list of the best performing Genomes by taking the top performing Genome(s) of this generation.
# This could be the same Genome as the previous generation
bg = self.current_genome_list[0]
best_fitness = bg.fitness
self.best_genome_history_list.append(bg)
print("best: {}".format(bg.to_string(meta=True, chromo=False)))
# append the best Genomes to the best_genome_list, and keep track of any new
# best_fitness (This shouldn't change from above?)
for i in range(num_best):
g = self.current_genome_list[i]
self.best_genome_list.append(g)
best_fitness = max(best_fitness, g.fitness)
# clear the current_genome_list out and repopulate
self.current_genome_list = []
# first, add the best Genome(s) back in
for g in self.best_genome_list:
self.current_genome_list.append(g)
# randomly breed new genomes with a chance of mutation. Stop when we've generated a population
# of Genomes we've never had before
while len(self.current_genome_list) < self.num_genomes:
# choose two random parents, with replacement
g1i = random.randrange(len(self.best_genome_list))
g2i = random.randrange(len(self.best_genome_list))
g1 = self.best_genome_list[g1i]
g2 = self.best_genome_list[g2i]
# create a new Genome for evaluation
g = self.breed_genomes(g1, g2, crossover_rate, mutation_rate)
# test against all previous Genomes for a match. If there is, we'll try again
match = False
for gtest in self.all_genomes_list:
if g.equals(gtest):
match = True
break
# if there is no match with a previous Genome, add it to the current_genome_list for evaluation
# and the all_genomes_list history
if not match:
self.current_genome_list.append(g)
self.all_genomes_list.append(g)
# return the highest fitness for this set of Genomes
return best_fitness
def get_ranked_chromosome(self, rank: int = 0) -> Dict:
""" Get the Genome of the current nth rank, and return its chromosome Dict
Parameters
----------
rank: int = 0
The index of the Genome
"""
self.best_genome_history_list.sort(key=lambda x: x.fitness, reverse=True)
g = self.best_genome_history_list[rank]
c = g.chromosome_dict
return c
def get_ranked_genome(self, rank: int = 0) -> Genome:
""" Get the Genome of the current nth rank, and return it
Parameters
----------
rank: int = 0
The index of the Genome
"""
self.best_genome_history_list.sort(key=lambda x: x.fitness, reverse=True)
g = self.best_genome_history_list[rank]
return g
def save_results(self, file_name: str, data_dict: Dict = None):
""" Save the results of this population's evolution to an Excel spreadsheet for post hoc analysis
Parameters
----------
file_name: str
The name of the Excel file
data_dict: Dict = None
Optional dictionary of additional information to save
"""
print("save_results({})".format(file_name))
# sort the list
self.best_genome_history_list.sort(key=lambda x: x.fitness, reverse=True)
# create the setup Dict that will contain the meta information about this run
setup = {}
setup["user"] = getpass.getuser()
setup["date"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
setup["resample_size"] = self.resample_size
setup["keep percent"] = self.keep_percent
setup["num genomes"] = self.num_genomes
if data_dict:
for key, val in data_dict.items():
setup[key] = val
# create an Excel workbook
wb = eu.ExcelUtils()
wb.to_excel(file_name)
# write the setup data to its own tab
wb.dict_to_spreadsheet("setup", setup)
# set up the list of best chromosomes. This is a sequential list of the
# best chromosome for each generation
chromosome_list = []
g: Genome
for g in self.best_genome_history_list:
chromosome_list.append(g.to_dict())
# save this list to its own tab
wb.dict_list_matrix_to_spreadsheet("Chromosomes", chromosome_list)
# write and close
wb.finish_up()
def to_string(self):
""" Returns a string representation of this class """
str = "All genomes:\n"
for g in self.current_genome_list:
str += g.to_string() + "\n"
str += "\nBest genomes:\n"
for g in self.best_genome_list:
str += g.to_string() + "\n"
return str
# The following code provides an example of how to use the EvolutionaryOptimizer class
# This is an evaluation function that is passed to the Evolutionary Optimizer. There are three parameters passed in
# using the 'arguments' Dict. X and Y are used to create a surface that can be visualized
# (and are the only values used for an exhaustive search). An additional function parameter is added if available.
# Two Dicts are returned, one with a fitness value, and one with an ID
def example_evaluation_function(arguments: Dict) -> Tuple[Dict, Dict]:
x = arguments['X'] + random.random() - 0.5
y = arguments['Y'] + random.random() - 0.5
val = np.cos(x) + x * .1 + np.sin(y) + y * .1
if 'Zfunc' in arguments:
z = arguments['Zfunc'] + random.random() - 0.5
val += z
return {EvolverTags.FITNESS.value: val}, {
EvolverTags.FILENAME.value: "{}.tf".format(arguments[EvolverTags.ID.value])}
# A stub of a save function that
def example_save_function(name: str) -> str:
return "would have new best value: {}".format(name)
# The next four functions are used as elements of vzfunc EvolveAxis
def plus_func(v1: float, v2: float) -> float:
return v1 + v2
def minus_func(v1: float, v2: float) -> float:
return v1 - v2
def mult_func(v1: float, v2: float) -> float:
return v1 * v2
def div_func(v1: float, v2: float) -> float:
if v2 > 0:
return v1 / v2
return 0
# The main entry point if used as a standalone example
if __name__ == '__main__':
# create the x and y values for our surface. For this example, these are intervals from -5 to 5, with a step of 0.25
v1 = VA.EvolveAxis("X", VA.ValueAxisType.FLOAT, min=-5, max=5, step=0.25)
v2 = VA.EvolveAxis("Y", VA.ValueAxisType.FLOAT, min=-5, max=5, step=0.25)
# create an Evolve axis that contains a List of functions, and two EvolveAxis that will be the arguments for those functions.
# First, we create a List of function references
func_array = [plus_func, minus_func, mult_func, div_func]
# Next, we create the vzfunc EvolveAxis, using the List of functions
vzfunc = VA.EvolveAxis("Zfunc", VA.ValueAxisType.FUNCTION, range_array=func_array)
# Add child EvolveAxis that can provide the arguments to the functions. The order in which they are instantiated is
# the order of the function's argument list
vzvals = VA.EvolveAxis("Zvals1", VA.ValueAxisType.FLOAT, parent=vzfunc, min=0, max=5, step=0.5)
vzvals = VA.EvolveAxis("Zvals2", VA.ValueAxisType.FLOAT, parent=vzfunc, min=0, max=5, step=0.5)
# do an exhaustive evaluation for comparison. Each time a new, better value is found, add it to the list for plotting
prev_fitness = -10
num_exhaust = 0
exhaustive_list = []
for x in range(len(v1.range_array)):
for y in range(len(v2.range_array)):
num_exhaust += 1
args = {'X': v1.range_array[x], 'Y': v2.range_array[y], EvolverTags.ID.value: "eval_[{}]_[{}]".format(x, y)}
d1, d2 = example_evaluation_function(args)
cur_fitness = d1[EvolverTags.FITNESS.value]
if (cur_fitness > prev_fitness):
prev_fitness = cur_fitness
exhaustive_list.append(cur_fitness)
# now do it using evolutionary fitness landscape evaluation
# create an instance of the EvolutionaryOpimizer that keeps the top 50% of the genomes for each generation.
# Threads can equal the number of processors. Zero is best for stepping through code in a debugger
eo = EvolutionaryOpimizer(keep_percent=.5, threads=0)
# add the EvolveAxis. Order doesn't matter here
eo.add_axis(v1)
eo.add_axis(v2)
eo.add_axis(vzfunc)
# create an initial population of 10 genomes
eo.create_intital_genomes(10)
# run for the same number of steps that it took to create the exhaustive list. Note - this is completely arbitrary
# so that some nice plots can be made. In an actual version, there should be a maximum number of iterations after
# which the run stops once fitness no longer improves
# create a List of fitness values to plot
evolve_list = []
# set the number of generations
num_generations = len(exhaustive_list) * 2
for i in range(num_generations):
# evolve a generation, providing the evaluation and save functions, and a crossover and mutation rate of 50%
fitness = eo.run_optimizer(example_evaluation_function, example_save_function, 0.5, 0.5)
evolve_list.append(fitness)
# print("best fitness = {:.3f}".format(fitness))
# print the genomes
print("xxxxxxxxxxxxxxxx\n{}".format(eo.to_string()))
best_genome = eo.get_ranked_genome(0)
best_genome_data = best_genome.get_data_list()
d: Dict
print("best genome = {}".format(best_genome.get_name()))
for i in range(len(best_genome_data)):
d = best_genome_data[i]
for key, val in d.items():
print("data [{}]: {} = {}".format(i, key, val))
# save the results to a spreadsheet for post hoc analysis
eo.save_results("evolve_test.xlsx")
# plot the exhaustive and evolve sequences. The exhaustive line is almost deterministic and will pretty much look
# the same for each run. The evolved line is stochastic, and can change significantly for each run
fig = plt.figure(1)
plt.plot(exhaustive_list)
plt.plot(evolve_list)
plt.legend(["exhaustive ({} iterations)".format(num_exhaust), "evolved ({} iterations)".format(num_generations)])
# draw a picture of our XY fitness landscape. This is the same range used to create the axis and the same equation in
# def example_evaluation_function(arguments: Dict) -> Tuple[Dict, Dict]:
fig = plt.figure(2)
ax = fig.gca(projection='3d')
# Make our 3d surface using the same equation in example_evaluation_function()
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.cos(X) + X * .1 + np.sin(Y) + Y * .1
"""
Add Cell Connectivity To Points
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example for :class:`PVGeo.filters.AddCellConnToPoints`
This filter will add **linear** cell connectivity between scattered points.
You have the option to add ``VTK_LINE`` or ``VTK_POLYLINE`` connectivity.
``VTK_LINE`` connectivity makes a straight line between the points in order
(either in the order by index or using a nearest neighbor calculation).
The ``VTK_POLYLINE`` adds polyline connectivity between all points as one
spline (either in the order by index or using a nearest neighbor calculation).
"""
###############################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import pyvista
from PVGeo import points_to_poly_data
from PVGeo.filters import AddCellConnToPoints
###############################################################################
# First, lets generate some points which we'd like to connect
def path1(y):
"""Equation: x = a(y-h)^2 + k"""
a = -110.0 / 160.0 ** 2
x = a * y ** 2 + 110.0
idxs = np.argwhere(x > 0)
return x[idxs][:, 0], y[idxs][:, 0]
x, y = path1(np.arange(0.0, 200.0, 25.0))
zo = np.linspace(9.0, 11.0, num=len(y))
coords = np.vstack((x, y, zo)).T
# Shuffle points to demonstrate value of Nearest Neighbor
np.random.shuffle(coords)
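###############################################################################
# A hedged sketch of applying the filter to these points. ``points_to_poly_data``
# and ``AddCellConnToPoints`` are imported above; the ``nearest_nbr`` keyword and
# the plotting call are assumptions for illustration.
vtkpoints = points_to_poly_data(coords)
line = AddCellConnToPoints(nearest_nbr=True).apply(vtkpoints)
line.plot(line_width=5)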
# OneZone.py: simple one-zone chemical evolution models
from functools import wraps
import numpy
from scipy import optimize
import hashlib
from astropy import units as u
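# _recalc_model: method decorator that hashes the current model and solar parameters and,
# whenever they have changed, recomputes the derived timescales, equilibrium abundances,
# and solar normalizations before running the wrapped method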
def _recalc_model(method):
@wraps(method)
def wrapper(*args,**kwargs):
new_model_hash= args[0]._model_hash()
if new_model_hash != args[0]._current_model_hash:
args[0]._update_timescales()
args[0]._calc_equilibrium()
args[0]._current_model_hash= new_model_hash
new_solar_hash= args[0]._solar_hash()
if new_solar_hash != args[0]._current_solar_hash:
args[0]._calc_solar()
args[0]._current_solar_hash= new_solar_hash
return method(*args,**kwargs)
return wrapper
_defaults= {'eta':2.5,
'tau_SFE': 1.*u.Gyr,
'tau_SFH': 6.*u.Gyr,
'tau_Ia': 1.5*u.Gyr,
'min_dt_Ia': 0.15*u.Gyr,
'sfh': 'exp',
'mCC_O': 0.015,
'mCC_Fe': 0.0012,
'mIa_O': 0.,
'mIa_Fe': 0.0017,
'r': 0.4,
'tau_Ia_2': None,
'frac_Ia_2': 0.522,
'solar_O': 8.69,
'solar_Fe': 7.47}
class OneZone(object):
"""OneZone: simple one-zone chemical evolution models"""
def __init__(self,**kwargs):
"""
NAME:
__init__
PURPOSE:
Setup a simple OneZone chemical-evolution object using the analytical formalism from Weinberg, Andrews, & Freudenburg (2017)
INPUT:
The following can be updated on-the-fly:
eta= (2.5) outflow mass loading factor as a fraction of SFR (float)
tau_SFE= (1 Gyr) star-formation efficiency time scale (Quantity with units of time)
tau_SFH= (6 Gyr) star-formation exponential decay time scale
sfh= ('exp') use an exponential ['exp'] or linear-exponential ['linexp'] star-formation history
tau_Ia= (1.5 Gyr) SNe Ia exponential decay time scale
min_dt_Ia= (150 Myr) minimum time delay for SNe Ia
mCC_O= (0.015) mass fraction of oxygen returned by core-collapse SNe (mass of O / stellar mass formed)
mCC_Fe= (0.0012) mass fraction of iron returned by core-collapse SNe (mass of Fe / stellar mass formed)
mIa_O= (0.) mass fraction of oxygen returned by SNe Ia (mass of O / stellar mass formed)
mIa_Fe= (0.0017) mass fraction of iron returned by SNe Ia (mass of Fe / stellar mass formed)
r= (0.4) mass recycling parameter (core-collapse SNe + AGB returns): amount of mass returned at abundances of star at birth
solar_O= (8.69) solar oxygen number density on the x_O = 12 + log10(X_O/H) scale
solar_Fe= (7.47) solar iron number density on the x_Fe = 12 + log10(X_Fe/H) scale
tau_Ia_2= (None) SNe Ia exponential decay time scale for second Ia component (useful when approximating t^{-1.1} delay time distribution)
frac_Ia_2= (0.522) fraction of Ias coming from the second decay time scale (useful when approximating t^{-1.1} delay time distribution)
OUTPUT:
instance
HISTORY:
2018-07-09 - Written - Bovy (UofT)
2018-11-01 - Added second Ia component for approximating t^{-1.1} decay distribution - Bovy (UofT)
"""
self._initialize_params(**kwargs)
# Setup hash for storing models
self._current_model_hash= None
self._current_solar_hash= None
return None
def _initialize_params(self,**kwargs):
self._init_params= {} # To store initial parameter values
for key in _defaults.keys():
self.__dict__[key]= kwargs.get(key,_defaults[key])
self._init_params[key]= kwargs.get(key,_defaults[key])
return None
def initial(self):
for key in _defaults.keys():
self.__dict__[key]= self._init_params[key]
return None
def default(self):
for key in _defaults.keys():
self.__dict__[key]= _defaults[key]
return None
def __str__(self):
out= ''
for key in sorted(_defaults.keys()):
out+= '{0:<10}:\t{1}\n'.format(key,self.__dict__[key])
return out[:-1]
def _calc_solar(self):
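# convert the solar number abundances (12 + log10(n_X/n_H)) to log10 mass fractions,
# anchored to log10(Z_O_sun) = -2.25 at solar_O = 8.69 and log10(Z_Fe_sun) = -2.93 at solar_Fe = 7.47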
self._logZO_solar= -2.25+self.solar_O-8.69
self._logZFe_solar= -2.93+self.solar_Fe-7.47
return None
# Equilibrium and model parameters
def _update_timescales(self):
# Update all relevant timescales for the model based on the current
# model parameters
self._tau_dep= self.tau_SFE/(1.+self.eta-self.r)
self._tau_dep_SFH= 1./(1./self._tau_dep-1./self.tau_SFH)
self._tau_dep_Ia= 1./(1./self._tau_dep-1./self.tau_Ia)
self._tau_Ia_SFH= 1./(1./self.tau_Ia-1./self.tau_SFH)
if not self.tau_Ia_2 is None:
self._tau_dep_Ia_2= 1./(1./self._tau_dep-1./self.tau_Ia_2)
self._tau_Ia_SFH_2= 1./(1./self.tau_Ia_2-1./self.tau_SFH)
return None
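# equilibrium abundances: each equilibrium mass fraction is the yield scaled by tau_dep_SFH / tau_SFE;
# the Ia terms pick up an extra tau_Ia_SFH / tau_Ia factor and exp(min_dt_Ia / tau_SFH) from the
# minimum Ia delay time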
def _calc_equilibrium(self):
self._ZO_CC_eq= self.mCC_O*self._tau_dep_SFH/self.tau_SFE
self._ZO_Ia_eq= self.mIa_O*self._tau_dep_SFH/self.tau_SFE\
*self._tau_Ia_SFH/self.tau_Ia\
*numpy.exp(self.min_dt_Ia/self.tau_SFH)
self._ZFe_CC_eq= self.mCC_Fe*self._tau_dep_SFH/self.tau_SFE
self._ZFe_Ia_eq= self.mIa_Fe*self._tau_dep_SFH/self.tau_SFE\
*self._tau_Ia_SFH/self.tau_Ia\
*numpy.exp(self.min_dt_Ia/self.tau_SFH)
if not self.tau_Ia_2 is None:
self._ZO_Ia_eq*= (1.-self.frac_Ia_2)
self._ZFe_Ia_eq*= (1.-self.frac_Ia_2)
self._ZO_Ia_eq_2= self.frac_Ia_2*self.mIa_O\
*self._tau_dep_SFH/self.tau_SFE\
*self._tau_Ia_SFH_2/self.tau_Ia_2\
*numpy.exp(self.min_dt_Ia/self.tau_SFH)
self._ZFe_Ia_eq_2= self.frac_Ia_2*self.mIa_Fe\
*self._tau_dep_SFH/self.tau_SFE\
*self._tau_Ia_SFH_2/self.tau_Ia_2\
*numpy.exp(self.min_dt_Ia/self.tau_SFH)
return None
# Time evolution equations
def _evol_CC(self,t):
if self.sfh.lower() == 'exp':
return (1.-numpy.exp(-t/self._tau_dep_SFH))
else:
return (1.-self._tau_dep_SFH/t
*(1.-numpy.exp(-t/self._tau_dep_SFH)))
def _evol_Ia(self,t,tau_dep_Ia,tau_Ia_SFH):
# Ia contribution
dt= t-self.min_dt_Ia
idx= dt > 0.
out= numpy.zeros(t.shape)
if self.sfh.lower() == 'exp':
out[idx]+= \
(1.-numpy.exp(-dt[idx]/self._tau_dep_SFH))
import numpy as np
from baselines import util
import os
import copy
import nltk
#import crf
import scipy.special
import sklearn
class HMM:
"""
Hidden Markov Model
"""
def __init__(self, n, m):
"""
fix n, m
:param n: number of states
:param m: number of observations
"""
self.n = n
self.m = m
self.t = np.zeros((n, n))
self.e = np.zeros((n, m))
self.start = np.asarray([1.0 / n] * n)
def pr_obs(self, i, list_features, t=None):
"""
:param i: state
:param list_features:
:param t: time, not used here
:return: probability of observing the features in state i
"""
res = 1
for f in list_features:
res *= self.e[i, f]
return res
def decode(self, a, include_crowd_obs=False):
"""
Viterbi decoding
:param a: seq of observations, each observation is a list of features
:return:
"""
l = len(a)
if l == 0:
return []
# c[t][i] = prob of best path time t, at state i
c = np.zeros((l, self.n))
c[0] = np.copy(self.start) # * self.e[:, a[0]]
# print self.n, c.shape
for i in range(self.n):
c[0][i] *= self.pr_obs(i, a[0])
# b[t][i] = backpointer
b = np.zeros((l, self.n))
for t in range(1, l, 1): # time
ob = a[t]
for i in range(self.n): # current state
for j in range(self.n): # previous state
# todo: change to log scale
p = c[t - 1][j] * self.t[j, i] * self.pr_obs(i, ob)
if include_crowd_obs:
p *= self.pr_crowd_labs(t, i, self.current_list_cl)
# print t, i, j, p
if p > c[t][i]:
c[t][i] = p
b[t][i] = j
# normalise otherwise p ends up as zeros with long sequences
c_t_total = 0
for i in range(self.n):
c_t_total += c[t][i]
for i in range(self.n):
c[t][i] /= c_t_total
res = np.zeros((l,))
# trace
p = 0
for i in range(self.n):
if c[l - 1][i] > p:
p = c[l - 1][i]
res[l - 1] = i
seq_prob = p
for t in range(l - 2, -1, -1):
res[t] = b[int(t + 1), int(res[t + 1])]
# print c
# print b
return res, seq_prob
def learn(self, sentences, smooth=0.001):
"""
learn parameters from labeled data
:param sentences: list of sentence, which is list of instance
:return:
"""
# counting
self.t = smooth * np.ones((self.n, self.n))
self.e = smooth * np.ones((self.n, self.m))
self.start = smooth * np.ones((self.n,))
for sentence in sentences:
if len(sentence) > 0:
i = sentence[0]
self.start[i.label] += 1
prev = -1 # previous state
for i in sentence:
state = i.label
if prev != -1:
self.t[prev][state] += 1
for f in i.features:
self.e[state][int(f)] += 1
prev = state
# save count for e
self.count_e = copy.deepcopy(self.e)
# normalizing
self.start = self.start * 1.0 / np.sum(self.start)
for i in range(self.n):
self.t[i] = self.t[i] * 1.0 / np.sum(self.t[i])
self.e[i] = self.e[i] * 1.0 / np.sum(self.e[i])
def decode_all(self, sentences):
self.res = []
self.res_prob = []
for s in sentences:
mls, mls_prob = self.decode(util.get_obs(s))
self.res.append(mls)
self.res_prob.append(mls_prob)
##########################################################################
##########################################################################
##########################################################################
##########################################################################
class WorkerModel:
"""
model of workers
"""
def __init__(self, n_workers = 47, n_class = 10, smooth = 0.001, ne = 9, rep = 'cv'):
"""
:param n_workers:
:param n_class:
:param smooth:
:param ne:
:param rep: representation: 'cv2' = two accuracies (non-entity / entity), 'cv' = per-class accuracy vector, 'cm_sage' = SAGE-smoothed confusion matrix, otherwise a full confusion matrix
"""
self.n_workers = n_workers
self.n = n_class
self.smooth = smooth
self.ne = ne
self.rep = rep
def learn_from_pos(self, data, pos):
"""
:param data: crowd_data
:param pos: sentence posterior
:return:
"""
count = self.smooth * np.ones( (self.n_workers, self.n, self.n))
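# soft counts: each worker response l at token j of sentence i is spread over the possible
# true labels k, weighted by the token posterior pos[i][j][k]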
for i, sentence in enumerate(data.sentences):
for j in range(len(sentence)):
for l, w in data.get_lw(i, j):
for k in range(self.n): # 'true' label = k
count[w][k][l] += pos[i][j][k]
self.learn_from_count(count)
def learn_from_count(self, count):
"""
:return:
"""
#save the count for debug
self.count = count
if self.rep == 'cv2':
ne = self.ne
self.cv = np.zeros((self.n_workers, 2))
for w in range(self.n_workers):
self.cv[w][0] = count[w][ne][ne] * 1.0 / np.sum(count[w][ne]) # accuracy for ne class
cc = self.smooth; cw = self.smooth # count for correct and wrong for non ne classes
for i in range(self.n):
if i != ne:
cc += count[w][i][i]
cw += np.sum(count[w][i]) - count[w][i][i]
self.cv[w][1] = cc * 1.0 / (cc + cw)
elif self.rep == 'cv':
self.cv = np.zeros((self.n_workers, self.n))
for w in range(self.n_workers):
if np.mod(w, 100) == 0:
print('M-step, processing worker counts %i of %i' % (w, self.n_workers))
for i in range(self.n):
self.cv[w][i] = count[w][i][i] * 1.0 / np.sum(count[w][i]) # accuracy for ne class
elif self.rep == 'cm_sage':
self.cm = np.zeros((self.n_workers, self.n, self.n))
# background dist
m = np.sum(count, axis=0)
for i in range(self.n): m[i] = m[i] * 1.0 / np.sum(m[i])
m = np.log(m)
for w in range(self.n_workers):
for i in range(self.n):
temp = additive.estimate(count[w][i], m[i])
temp = np.reshape(temp, (self.n,) )
self.cm[w][i] = np.exp(temp + m[i])
self.cm[w][i] = self.cm[w][i] * 1.0 / np.sum(self.cm[w][i])
else:
self.cm = np.zeros((self.n_workers, self.n, self.n))
for w in range(self.n_workers):
for k in range(self.n):
self.cm[w][k] = count[w][k] * 1.0 / np.sum(count[w][k])
def get_prob(self, w, true_lab, lab):
"""
:param w: worker
:param true_lab:
:param lab:
:return: probability of response lab given true label
"""
#return 1.0
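# 'cv2' keeps one accuracy for the non-entity class and one shared accuracy for all entity classes,
# spreading incorrect responses uniformly over the remaining n-1 labels; 'cv' uses a per-class
# accuracy; 'cm_sage' and the default use a full confusion matrix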
if self.rep == 'cv2':
if self.ne == true_lab:
if true_lab == lab:
return self.cv[w][0]
else:
return (1 - self.cv[w][0]) / float(self.n - 1)
else:
if true_lab == lab:
return self.cv[w][1]
else:
return (1 - self.cv[w][1]) / float(self.n - 1)
elif self.rep == 'cv':
if true_lab == lab:
return self.cv[w][true_lab]
else:
return (1 - self.cv[w][true_lab]) / float(self.n - 1)
elif self.rep == 'cm_sage':
return self.cm[w][true_lab][lab]
else:
return self.cm[w][true_lab][lab]
class HMM_crowd(HMM):
def __init__(self, n, m, data, features, labels, n_workers=47, init_w=0.9, smooth=0.001, smooth_w=10, ne = 9, vb = None):
"""
:param data: util.crowd_data with crowd label
:return:
"""
HMM.__init__(self, n, m)
self.data = data
self.smooth = smooth
self.n_workers = n_workers
self.ep = 1e-300
self.features = features
self.labels = labels
self.init_w = init_w
self.ne = ne
#self.wsen = np.zeros((n_workers,))
#self.wspe = np.zeros((n_workers,))
self.wca = np.zeros((n, n_workers))
#self.ne = labels['O'] # id of 'non entity' label
self.ne = ne
self.smooth_w = smooth_w
self.n_sens = len(data.sentences)
self.vb = vb
def pr_crowd_labs(self, t, i, list_cl):
"""
:param t: time
:param i: the state
:param list_cl: list of util.crowddlab
:return: probability of observing crowd labels at state i
"""
res = 1# * self.prior[i]
for cl in list_cl:
wid = cl.wid
sen = cl.sen
lab = sen[t] # crowd label
# if i == self.ne:
# res *= self.wspe[wid] if lab == i else 1 - self.wspe[wid] # specificity
# else:
# res *= self.wsen[wid] if lab == i else 1 - self.wsen[wid] #
# sensitivity
#res *= self.wca[i, wid] if lab == i else 1 - self.wca[i, wid]
#res *= self.wa[wid][i][lab]
res *= self.wm.get_prob(wid, i, lab)
return res
def inference(self, sentence, list_cl, return_ab=False):
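# forward-backward over one sentence: the crowd labels at each position act as extra
# emissions whose probabilities come from the worker model (pr_crowd_labs)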
T = len(sentence) # number of timesteps
alpha = np.zeros((T, self.n)) # T * states
beta = np.zeros((T, self.n))
# alpha (forward):
for i in range(self.n):
alpha[0][i] = self.pr_obs(
i, sentence[0].features) * self.pr_crowd_labs(0, i, list_cl) * self.start[i]
for t in range(1, T, 1):
ins = sentence[t]
alpha_t_sum = 0
for i in range(self.n): # current state
alpha[t][i] = 0
for j in range(self.n): # previous state
alpha[t][i] += self.pr_obs(i, ins.features) * self.t[j][i] * alpha[t - 1][j] \
* self.pr_crowd_labs(t, i, list_cl)
alpha_t_sum += alpha[t][i]
# normalise
for i in range(self.n):
alpha[t][i] /= alpha_t_sum
# beta (backward):
for i in range(self.n):
beta[T - 1][i] = self.pr_obs(i, sentence[T - 1].features) * \
self.pr_crowd_labs(T - 1, i, list_cl)
for t in range(T - 2, -1, -1):
ins = sentence[t + 1]
beta_t_sum = 0
for i in range(self.n): # current state
beta[t][i] = 0
for j in range(self.n): # next state
beta[t][i] += self.pr_obs(j, ins.features) * self.t[i][j] * beta[t + 1][j] \
* self.pr_crowd_labs(t + 1, j, list_cl)#\
#* (self.start[i] if t == 0 else 1)
beta_t_sum += beta[t][i]
for i in range(self.n):
beta[t][i] /= beta_t_sum
if return_ab:
return (alpha, beta)
sen_posterior = []
# update counts
p = np.zeros((self.n,))
for t in range(T):
for i in range(self.n):
p[i] = self.ep + alpha[t][i] * beta[t][i]
p = p * 1.0 / np.sum(p) # normalilze
#save the posterior
sen_posterior.append(p.copy())
if t == 0: # update start counts
self.count_start += p
# update prior count
#self.count_prior += p
# update emission counts
ins = sentence[t]
for i in range(self.n):
for f in ins.features:
self.count_e[i][f] += p[i]
# update crowd params counts
for i in range(self.n): # state
for cl in list_cl:
wid = cl.wid
# worker ans
lab = cl.sen[t]
# if i == self.ne:
# if lab == self.ne:
# self.count_spe[wid][0] += p[i]
# else:
# self.count_spe[wid][1] += p[i]
# else:
# if lab == self.ne:
# self.count_sen[wid][0] += p[i]
# else:
# self.count_sen[wid][1] += p[i]
#if lab == i:
# self.count_wa[i, wid][1] += p[i]
#else:
# self.count_wa[i, wid][0] += p[i]
self.count_wa[wid][i][lab] += p[i]
trans_pos = []
# update transition counts
for t in range(T - 1):
p = np.zeros((self.n, self.n))
ins = sentence[t+1]
for i in range(self.n): # state at time t
for j in range(self.n): # state at time t+1
p[i][j] = self.ep + alpha[t][i] * self.t[i][j] * self.pr_obs(j, ins.features) \
* self.pr_crowd_labs(t + 1, j, list_cl) * beta[t + 1][j]
# update transition counts
p = p * 1.0 / np.sum(p)
for i in range(self.n):
#p[i] = p[i] * 1.0 / np.sum(p[i])
self.count_t[i] += p[i]
trans_pos.append(p.copy())
# log likelihood
ll = np.log( np.sum(alpha[t-1]) )
return (sen_posterior, trans_pos, ll)
def e_step(self):
"""
do alpha-beta passes
:return:
"""
# setup counting
self.count_t = self.smooth * np.ones((self.n, self.n))
self.count_e = self.smooth * np.ones((self.n, self.m))
self.count_start = self.smooth * np.ones((self.n,))
#self.count_sen = self.smooth * np.ones((self.n_workers,2))
#self.count_spe = self.smooth * np.ones((self.n_workers, 2))
#self.count_wca = self.smooth_w * np.ones((self.n, self.n_workers, 2))
#self.count_prior = self.smooth * np.ones( (self.n,) )
self.count_wa = self.smooth * np.ones( (self.n_workers, self.n, self.n) )
self.sen_posterior = []
self.trans_posterior = []
sum_ll = 0
for i, sentence in enumerate(self.data.sentences):
if np.mod(i, 100) == 0:
print('E-step, processing sentence %i of %i' % (i, len(self.data.sentences)))
if len(sentence) > 0:
sen_pos, trans_pos, ll = self.inference(sentence, self.data.crowdlabs[i])
sum_ll += ll
else:
sen_pos, trans_pos = ([], [])
self.sen_posterior.append (sen_pos)
self.trans_posterior.append(trans_pos)
# save sum of log-likelihood
self.sum_ll = sum_ll
def m_step(self):
if self.vb != None:
self.m_step_vb()
return
# normalize all the counts
self.start = self.count_start * 1.0 / np.sum(self.count_start)
#self.prior = self.count_prior * 1.0 / np.sum(self.count_prior)
for i in range(self.n):
if np.mod(i, 100) == 0:
print('M-step, processing class counts %i of %i' % (i, self.n))
self.t[i] = self.count_t[i] * 1.0 / np.sum(self.count_t[i])
self.e[i] = self.count_e[i] * 1.0 / np.sum(self.count_e[i])
self.wm.learn_from_count(self.count_wa)
#for w in range(self.n_workers):
#self.wsen[w] = self.count_sen[w][1] * 1.0 / (self.count_sen[w][0] + self.count_sen[w][1])
#self.wspe[w] = self.count_spe[w][0] * 1.0 / (self.count_spe[w][0] + self.count_spe[w][1])
#ne = self.ne
#self.wca[ne, w] = self.count_wca[ne, w][1] * 1.0 / \
# (self.count_wca[ne, w][0] + self.count_wca[ne, w][1])
# sum over pos class (not include non-entity)
#sum_pos_1 = np.sum(
# self.count_wca[:, w, 1]) - self.count_wca[ne, w, 1]
#sum_pos_0 = np.sum(
# self.count_wca[:, w, 0]) - self.count_wca[ne, w, 0]
#for i in range(self.n):
# if i != ne:
#self.wca[i, w] = self.count_wca[i, w][1] * 1.0 / (self.count_wca[i, w][0] + self.count_wca[i, w][1])
# self.wca[i, w] = sum_pos_1 * 1.0 / (sum_pos_1 + sum_pos_0)
#for i in range(self.n):
# self.wa[w][i] = self.count_wa[w][i] * 1.0 / np.sum(self.count_wa[w][i])
#self.wm.learn_from_pos(self.data, self.sen_posterior)
def m_step_vb(self):
"""
use Variational Bayes
"""
self.start = self.count_start * 1.0 / np.sum(self.count_start)
f = lambda x: np.exp( scipy.special.digamma(x))
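# mean-field (VB) update for Dirichlet-distributed rows: the smoothing pseudo-counts are swapped
# for the VB priors below, then point estimates are exp(digamma(count)) / exp(digamma(sum of counts))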
for i in range(self.n):
if np.mod(i, 100) == 0:
print('VB M-step, processing class counts %i of %i' % (i, self.n))
self.count_t[i] = self.count_t[i] - self.smooth + self.vb[0]
self.count_e[i] = self.count_e[i] - self.smooth + self.vb[1]
self.t[i] = f(self.count_t[i] * 1.0) / f(np.sum(self.count_t[i]))
self.e[i] = f(self.count_e[i] * 1.0) / f(np.sum(self.count_e[i]))
self.wm.learn_from_count(self.count_wa)
def init_te_from_pos(self, pos):
"""
init transition and emission from posterior
"""
self.t = self.smooth * np.ones((self.n, self.n))
self.e = self.smooth * np.ones((self.n, self.m))
self.start = self.smooth * np.ones((self.n,))
for sentence, p in zip(self.data.sentences, pos):
if len(sentence) > 0:
self.start += p[0]
for t, ins in enumerate(sentence):
for i in range(self.n): #current state
for f in ins.features:
self.e[i][f] += p[t][i]
if t > 0:
for j in range(self.n): #previous state
self.t[j][i] += p[t-1][j] * p[t][i]
# normalizing
self.start = self.start * 1.0 / np.sum(self.start)
for i in range(self.n):
self.t[i] = self.t[i] * 1.0 / np.sum(self.t[i])
self.e[i] = self.e[i] * 1.0 / np.sum(self.e[i])
def init(self, init_type='mv', sen_a=1, sen_b=1, spe_a=1, spe_b=1, wm_rep =
'cv', save_count_e = False, dw_em = 5, wm_smooth = 0.001):
"""
:param init_type:
:param sen_a: :param sen_b: :param spe_a: :param spe_b: priors for sen, spe
expect MV to over-estimate worker
:return:
"""
if init_type == 'mv':
h = HMM(self.n, self.m)
mv_sen = util.mv_cd(self.data, self.labels)
h.learn(mv_sen, smooth=self.smooth)
for i in range(self.n):
self.start[i] = h.start[i]
#(correct_0, correct_1, wrong_0, wrong_1) = util.cal_workers_true_acc(self.data, util.get_all_lab(mv_sen), ne = self.ne)
self.wca = util.cal_workers_true_acc(
self.data, util.get_all_lab(mv_sen), ne=self.ne, return_ss=True)
# for i in range(self.n_workers):
#self.wsen[i] = (correct_1[i] + sen_a) * 1.0 / (wrong_1[i] + correct_1[i] + sen_a + sen_b)
#self.wsen[i] = 0.5
#self.wspe[i] = (correct_0[i] + spe_a) * 1.0 / (wrong_0[i] + correct_0[i] + spe_a + spe_b)
for s in range(self.n):
for s2 in range(self.n):
self.t[s][s2] = h.t[s][s2]
# for s in range(self.n):
for o in range(self.m):
self.e[s][o] = h.e[s][o]
#save the count of e for hmm_sage
if save_count_e:
self.count_e = h.count_e
#self.init2()
elif init_type == 'dw':
d = dw(self.n, self.m, self.data, self.features, self.labels,
self.n_workers, self.init_w, self.smooth)
d.init()
d.em(dw_em)
d.mls()
self.d = d
h = HMM(self.n, self.m)
sen = copy.deepcopy(self.data.sentences)
util.make_sen(sen, d.res)
h.learn(sen, smooth = self.smooth)
#self.wm = WorkerModel()
#self.wm.learn_from_pos(self.data, d.pos)
self.wm = WorkerModel(n_workers = self.n_workers, n_class = self.n,
rep=wm_rep, ne = self.ne, smooth = wm_smooth)
self.wm.learn_from_pos(self.data, d.pos)
self.start = h.start
for s in range(self.n):
for s2 in range(self.n):
self.t[s][s2] = h.t[s][s2]
# for s in range(self.n):
for o in range(self.m):
self.e[s][o] = h.e[s][o]
#self.init_te_from_pos(d.pos)
#save the count of e for sage
if save_count_e:
self.count_e = h.count_e
self.h = h
else:
# init params (uniform)
for i in range(self.n):
self.start[i] = 1.0 / self.n
self.wa = [0.9] * self.n_workers
for s in range(self.n):
for s2 in range(self.n):
self.t[s][s2] = 1.0 / self.n
for s in range(self.n):
for o in range(self.m):
self.e[s][o] = 1.0 / self.m
for w in range(self.n_workers):
#self.wsen[i] = 0.9
#self.wspe[i] = 0.6
for i in range(self.n):
self.wca[i, w] = 0.8
def init2(self):
"""
init
"""
pos = []
self.prior = np.zeros( (self.n,) )
for i, sentence in enumerate(self.data.sentences):
pos.append( self.smooth * np.ones((len(sentence), self.n)) )
for j in range(len(sentence)):
for l in self.data.get_labs(i, j): # labels for sen i, pos j
pos[i][j][l] += 1
pos[i][j] = pos[i][j] * 1.0 / np.sum(pos[i][j])
self.prior += pos[i][j]
self.prior = self.prior * 1.0 / np.sum(self.prior)
def learn(self, num=4):
"""
learn by EM
:return:
"""
# init params (uniform)
# for i in range(self.n):
# self.start[i] = 1.0/self.n
# self.wa = [0.9] * self.n_workers
# for s in range(self.n):
# for s2 in range(self.n):
# self.t[s][s2] = 1.0/self.n
#
# for s in range(self.n):
# for o in range(self.m):
# self.e[s][o] = 1.0/self.m
self.init()
self.em(num)
def em(self, num=4):
# run EM
for it in range(num):
print('HMM-crowd running e-step %i of %i' % (it, num))
self.e_step()
print('HMM-crowd running m-step %i of %i' % (it, num))
self.m_step()
def mls(self):
"""
compute the most likely states seq for all sentences
:return:
"""
self.res = []
self.res_prob = []
for s, sentence in enumerate(self.data.sentences):
if len(sentence) > 0:
self.current_list_cl = self.data.crowdlabs[s]
ml_states, ml_probs = self.decode(util.get_obs(
sentence), include_crowd_obs=True)
self.res.append(ml_states)
self.res_prob.append(ml_probs)
else:
self.res.append([])
self.res_prob.append(1)
def marginal_decode(self, th):
"""
decode by marginal prob
"""
self.res = []
for i in range(len(self.data.sentences)):
temp = []
for j in range(len(self.sen_posterior[i])):
temp.append ( np.argmax(self.sen_posterior[i][j]) )
self.res.append(temp)
def decode_sen_no(self, s):
self.current_list_cl = self.data.crowdlabs[s]
sentence = self.data.sentences[s]
ml_states, ml_probs = self.decode(util.get_obs(
sentence), include_crowd_obs=True)
return ml_states
def threshold(self, thresh = 0.9):
self.flag = np.zeros((self.n_sens,), dtype=bool)
for i, r in enumerate(self.res):
for j, l in enumerate(r):
if self.posterior[i][j][int(l)] < thresh:
self.flag[i] = True
def pos_decode(pos, th, ne = 9):
"""
decode by posterior:
res = argmax if prob > th
else res = ne
"""
res = []
for i in range(len(pos)):
temp = []
for j in range(len(pos[i])):
#p = copy.copy(pos[i][j])
#p[ne] = -1
k = np.argmax(pos[i][j])
if pos[i][j][k] > th:
temp.append(k)
else:
temp.append(ne)
res.append(temp)
return res
##########################################################################
##########################################################################
class HMM_sage(HMM_crowd):
def __init__(self, n, m, data, features, labels, n_workers=47, init_w=0.9, smooth=0.001, smooth_w=10):
HMM_crowd.__init__(self, n, m, data, features, labels,
n_workers, init_w, smooth)
HMM.eta = np.zeros((self.m, self.n))
def init(self, init_type = 'dw', wm_rep = 'cm'):
HMM_crowd.init(self, init_type=init_type, wm_rep=wm_rep, save_count_e = True)
self.estimate_sage()
def estimate_sage(self, mult = 2.0):
# dont do sage for non-entity
self.count_e[self.ne, :] = np.zeros((self.m,))
eq_m = np.sum(self.count_e, axis=0) / np.sum(self.count_e)
#eq_m = 1.0 / self.m * np.ones((self.m))
eq_m = np.log(eq_m)
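# SAGE-style additive model: additive.estimate fits per-class log-deviations (eta) from the
# log background distribution eq_m; each emission row is then the normalized exp(eta + eq_m)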
eta = additive.estimate(mult*self.count_e.T, eq_m)
for i in range(self.n):
if i != self.ne:
self.e[i] = np.exp(eta[:, i] + eq_m) * 1.0 / \
np.sum(np.exp(eta[:, i] + eq_m))
# save eq_m and eta
self.eq_m = eq_m
self.eta = eta
def m_step(self):
HMM_crowd.m_step(self)
self.estimate_sage()
class dw(HMM_crowd):
"""
"""
def __init__(self, n, m, data, features, labels, n_workers=47, init_w=0.9, smooth=0.001, smooth_w=10):
"""
n: number of states
:param data: util.crowd_data with crowd label
:return:
"""
HMM_crowd.__init__(self, n, m, data, features, labels,
n_workers, init_w, smooth)
def init(self):
self.pos = []
self.prior = np.zeros( (self.n,) )
for i, sentence in enumerate(self.data.sentences):
self.pos.append( self.smooth * np.ones((len(sentence), self.n)) )
for j in range(len(sentence)):
for l in self.data.get_labs(i, j): # labels for sen i, pos j
self.pos[i][j][l] += 1
self.pos[i][j] = self.pos[i][j] * 1.0 / np.sum(self.pos[i][j])
self.prior += self.pos[i][j]
self.prior = self.prior * 1.0 / np.sum(self.prior)
def e_step(self):
for i, sentence in enumerate(self.data.sentences):
if np.mod(i, 100) == 0:
print('dw e-step, sentence %i of %i' % (i, len(self.data.sentences)))
self.pos[i] = np.ones( (len(sentence), self.n) )
for j in range(len(sentence)):
self.pos[i][j] = self.prior.copy()
for l, w in self.data.get_lw(i, j): # labels for sen i, pos j
self.pos[i][j] *= self.wa[w][:,l]
self.pos[i][j] = self.pos[i][j] * 1.0 / np.sum(self.pos[i][j])
def m_step(self):
count = self.smooth * np.ones ( (self.n_workers, self.n, self.n) )
count_prior = self.smooth * np.ones_like(self.prior)
#get-another-label heuristic: 0.9 to diagonal, uniform to elsewhere
print('dw m-step')
for w in range(self.n_workers):
#print('dw m-step, worker %i of %i' % (w, self.n_workers))
for i in range(self.n):
for j in range(self.n):
count[w][i][j] = 0.9 if i == j else 0.1 / (self.n-1)
for i, sentence in enumerate(self.data.sentences):
#print('dw w-step, sentence %i of %i' % (i, len(self.data.sentences)))
for j in range(len(sentence)):
count_prior += self.pos[i][j]
for l, w in self.data.get_lw(i,j):
for k in range(self.n): # 'true' label = k
count[w][k][l] += self.pos[i][j][k]
self.prior = count_prior * 1.0 / np.sum(count_prior)
self.wa = np.zeros( (self.n_workers, self.n, self.n) )
for w in range(self.n_workers):
#print('dw m-step part 3, worker %i of %i' % (w, self.n_workers))
for k in range(self.n):
self.wa[w][k] = count[w][k] * 1.0 / np.sum(count[w][k])
def em(self, iterations = 3):
self.init()
self.m_step()
for it in range(iterations):
self.e_step()
self.m_step()
def mls(self):
self.res = []
for i, sentence in enumerate(self.data.sentences):
self.res.append([0] * len(sentence))
for j in range(len(sentence)):
self.res[i][j] = np.argmax(self.pos[i][j])
##########################################################################
##########################################################################
data_atis = 'atis3_features.txt'
def run_test(test, h):
cnt = 0
correct = 0
for s in test:
x = util.get_obs(s)
g = util.get_lab(s)
p = h.decode(x)
for i, j in zip(g, p):
cnt += 1
if i == j:
correct += 1
print(correct * 1.0 / cnt)
def run(smooth, filename=data_atis):
#train, test, features, labels = util.load(filename)
#train, test, features, labels = util.load('data/WSJ_all.txt', 0.49)
train, test, features, labels = util.load('atis3_features.txt')
n = len(labels) + 1
m = len(features) + 1
h = HMM(n, m)
h.learn(train, smooth)
run_test(test, h)
return h
def run_crowd(filename='atis3_features.txt'):
train, test, features, labels = util.load(filename)
n = len(labels) + 1
m = len(features) + 1
s = util.simulator(train, features, labels, 1)
s.simulate()
hc = HMM_crowd(n, m, s.cd, features, labels)
return hc
def split_rod(all_sen, cd, features, labels):
"""
split rod data into validation/test
"""
n = len(all_sen) // 2
sen_val = all_sen[:n]
sen_test = all_sen[n:]
cd_val = util.crowd_data(sen_val, cd.crowdlabs[:n])
cd_test = util.crowd_data(sen_test, cd.crowdlabs[n:])
return (sen_val, sen_test, cd_val, cd_test)
def run_rod(smooth = 0.001, dirname = 'task1/val/'):
"""
use_set: validation or test
"""
#if use_set == 'val':
# dirname = 'task1/val/'
#elif use_set == 'test':
# dirname = 'task1/test/'
#else:
# dirname = ''
# read ground truth
all_sen, features, labels, docs = util.read_rod(dirname = dirname +
'ground_truth')
# read crowd labels
cd = util.read_workers_rod(all_sen, features, labels, docs, dirname =
dirname + 'mturk_train_data')
n = len(labels) + 1
m = len(features) + 1
hc = HMM_crowd(n, m, cd, features, labels, smooth = smooth)
hs = HMM_sage(n, m, cd, features, labels, smooth = smooth)
return hs, hc, all_sen, features, labels
def list_entities(sen, st, inside):
"""
list the occurrences of an entity
"""
n = len(sen)
res = []
i = 0
while i < n:
if sen[i] == st:
x = i
i += 1
while i < n and sen[i] == inside:
i += 1
res.append((x, i - 1))
else:
i += 1
return res
def eval_ner(gold, sen, labels):
"""
evaluate NER
"""
if len(gold) != len(sen):
print(len(gold), len(sen))
raise "lenghts not equal"
tp = 0
#tn = 0
fp = 0
fn = 0
list_en = ["LOC", "MISC", "ORG", "PER"]
for en in list_en:
g = list_entities(gold, labels["B-" + en], labels["I-" + en])
s = list_entities(sen, labels["B-" + en], labels["I-" + en])
for loc in g:
if loc in s:
tp += 1
else:
fn += 1
for loc in s:
if loc not in g:
fp += 1
return (tp, fp, fn)
def eval_hc_train(hc, labels, print_err = False):
"""
evaluate in the train set
:param hc:
:param labels:
:return:
"""
n = len(hc.res)
tp = 0
fp = 0
fn = 0
for i in range(n):
if len(hc.data.sentences[i]) > 0:
(x, y, z) = eval_ner(util.get_lab(
hc.data.sentences[i]), hc.res[i], labels)
tp += x
fp += y
fn += z
try:
pre = tp * 1.0 / (tp + fp)
rec = tp * 1.0 / (tp + fn)
f = 2.0 * pre * rec / (pre + rec)
print(pre, rec, f)
except ZeroDivisionError:
print("DIV BY 0 ", tp, fp, fn)
def has_oov(sen):
for i in sen:
for j in i.features:
if j == 0:
return True
return False
def get_tag_hc(hc, sen):
return hc.decode(util.get_obs(sen))
def get_tag_t(tagger, sen, features):
words = []
for i in sen:
words.append(i.word)
x = nltk.pos_tag(words)
x = [crf.word2features(x, i) for i in range(len(x))]
tags = tagger.tag(x)
return list(map(int, tags))
def get_tag(f, sen, features, decoder):
if decoder == 'hc':
return get_tag_hc(f, sen)
else:
return get_tag_t(f, sen, features)
def eval_hc_test(hc, features, labels, print_err=False, decoder='hc'):
"""
evaluate on the test set
:param hc:
:param labels:
:return:
"""
tp = 0
fp = 0
fn = 0
dirname = "testa"
input = []
for file in os.listdir(dirname):
# print file
if file.endswith(".txt"):
f = open(os.path.join(dirname, file))
l = list(f)
input.extend(l)
f.close()
# return input
sentences = util.extract(input, features, labels, keep_word = True)
# return sentences
for sen in sentences:
if True:
# if not has_oov(sen):
#predicted = hc.decode(util.get_obs(sen))
predicted = get_tag(hc, sen, features, decoder)
(x, y, z) = eval_ner(util.get_lab(sen), predicted, labels)
tp += x
fp += y
fn += z
if print_err:
if y + z > 0:
print("sen: ", util.get_words(sen, features) + " OOV = " + str(has_oov(sen)))
print("true labels: ", util.get_lab_name(util.get_lab(sen), labels))
print("predicted: ", util.get_lab_name(predicted, labels))
try:
pre = tp * 1.0 / (tp + fp)
rec = tp * 1.0 / (tp + fn)
f = 2.0 * pre * rec / (pre + rec)
print(pre, rec, f)
except ZeroDivisionError:
print("DIV BY 0 ", tp, fp, fn)
def eval_seq_train(gold, pre, labels, hc = None, features = None):
"""
evaluate a sequence labeler
"""
n = len(gold)
tp = 0
fp = 0
fn = 0
for i in range(n):
(x, y, z) = eval_ner(gold[i], pre[i], labels)
tp += x
fp += y
fn += z
if hc != None:
if y + z > 0:
sen = hc.sentences[i]
print("sen: ", util.get_words(sen, features) + " OOV = " + str(has_oov(sen)))
print("true labels: ", util.get_lab_name(gold[i], labels))
print("predicted: ", util.get_lab_name(pre[i], labels))
try:
pre = tp * 1.0 / (tp + fp)
rec = tp * 1.0 / (tp + fn)
f = 2.0 * pre * rec / (pre + rec)
print(pre, rec, f)
except ZeroDivisionError:
print("DIV BY 0 ", tp, fp, fn)
return (pre, rec, f)
def eval_pico_word_bs(gold, res, n = 100, l = 0, r = 950, seed = 33):
"""
use bootstrap re-sample
"""
rs = np.random.RandomState()
rs.seed(seed)
a = rs.permutation(len(gold))
b = [] # list of indices to use
for index in a[l:r]:
b.append(index)
list_p = []; list_r = []; list_f = []
for bs in range(n):
c = sklearn.utils.resample(b, random_state = bs)
cm = np.zeros( (2,2) )
for index in c:
g = gold[index]
i = g[0]
t = g[1]
for x, y in zip(t, res[i]):
cm[x][y] += 1
p = cm[1][1] * 1.0 / (cm[1][1] + cm[0][1])
r = cm[1][1] * 1.0 / (cm[1][1] + cm[1][0])
f = 2 * p * r / (p + r)
list_p.append(p); list_r.append(r); list_f.append(f)
    print(np.mean(list_p))
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sawyer environment for pushing objects."""
import metaworld.envs.mujoco.cameras as camera_configs
from metaworld.google import glfw
import mujoco_py
import numpy as np
from collections import OrderedDict
from gym.spaces import Dict, Box
from metaworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from metaworld.envs.mujoco.utils.rotation import euler2quat
from metaworld.envs.mujoco.sawyer_xyz.base import OBS_TYPE
sideview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0.2, 0.75, 0.4),
distance=0.8,
elevation=-55,
azimuth=180,
trackbodyid=-1,
)
topview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0., 1.0, 0.5),
distance=0.6,
elevation=-45,
azimuth=270,
trackbodyid=-1,
)
# list of changes
# object position has been changed to have lower variance
# the constant for pushing reward has been changed from 1000 -> 10
# added reset_goal function
# the observation "with_goal" has been changed
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
def __init__(
self,
random_init=False,
task_types=['pick_place', 'reach', 'push'],
task_type='pick_place',
obs_type='plain',
goal_low=(-0.1, 0.8, 0.05),
goal_high=(0.1, 0.9, 0.3),
liftThresh=0.04,
sampleMode='equal',
rotMode='fixed', #'fixed',
**kwargs):
self.quick_init(locals())
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.02, 0.58, 0.02)
obj_high = (0.02, 0.62, 0.02)
SawyerXYZEnv.__init__(
self,
frame_skip=5,
action_scale=1. / 100,
hand_low=hand_low,
hand_high=hand_high,
model_name=self.model_name,
**kwargs)
self.task_type = task_type
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.02]),
'hand_init_pos': np.array([0, .6, .2]),
}
# we only do one task from [pick_place, reach, push]
# per instance of SawyerReachPushPickPlaceEnv.
# Please only set task_type from constructor.
if self.task_type == 'pick_place':
self.goal = np.array([0.1, 0.8, 0.2])
elif self.task_type == 'reach':
self.goal = np.array([-0.1, 0.8, 0.2])
elif self.task_type == 'push':
self.goal = np.array([0.1, 0.8, 0.02])
else:
raise NotImplementedError
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
assert obs_type in OBS_TYPE
self.obs_type = obs_type
if goal_low is None:
goal_low = self.hand_low
if goal_high is None:
goal_high = self.hand_high
self.random_init = random_init
self.liftThresh = liftThresh
self.max_path_length = 150
self.rotMode = rotMode
self.sampleMode = sampleMode
self.task_types = task_types
if rotMode == 'fixed':
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
)
elif rotMode == 'rotz':
self.action_rot_scale = 1. / 50
self.action_space = Box(
np.array([-1, -1, -1, -np.pi, -1]),
np.array([1, 1, 1, np.pi, 1]),
)
elif rotMode == 'quat':
self.action_space = Box(
np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
np.array([1, 1, 1, 2 * np.pi, 1, 1, 1, 1]),
)
else:
self.action_space = Box(
np.array([-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1]),
np.array([1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1]),
)
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
if self.obs_type == 'plain':
self.observation_space = Box(
np.hstack((
self.hand_low,
obj_low,
)),
np.hstack((
self.hand_high,
obj_high,
)),
)
elif self.obs_type == 'with_goal':
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, goal_high)),
)
else:
raise NotImplementedError('If you want to use an observation\
with_obs_idx, please discretize the goal space after instantiate an environment.'
)
self.num_resets = 0
self.reset()
def get_goal(self):
return {
'state_desired_goal': self._state_goal,
}
@property
def model_name(self):
return get_asset_full_path(
'sawyer_xyz/sawyer_reach_push_pick_and_place.xml')
def step(self, action):
if self.rotMode == 'euler':
action_ = np.zeros(7)
action_[:3] = action[:3]
action_[3:] = euler2quat(action[3:6])
self.set_xyz_action_rot(action_)
elif self.rotMode == 'fixed':
self.set_xyz_action(action[:3])
elif self.rotMode == 'rotz':
self.set_xyz_action_rotz(action[:4])
else:
self.set_xyz_action_rot(action[:7])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward, reachRew, reachDist, pushRew, pushDist, pickRew, placeRew, placingDist = self.compute_reward(
action, obs_dict, mode=self.rewMode, task_type=self.task_type)
self.curr_path_length += 1
#info = self._get_info()
if self.curr_path_length == self.max_path_length:
done = True
else:
done = False
goal_dist = placingDist if self.task_type == 'pick_place' else pushDist
if self.task_type == 'reach':
success = float(reachDist <= 0.05)
else:
success = float(goal_dist <= 0.07)
info = {
'reachDist': reachDist,
'pickRew': pickRew,
'epRew': reward,
'goalDist': goal_dist,
'success': success
}
info['goal'] = self._state_goal
return ob, reward, done, info
def _get_obs(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate(
(hand, objPos - hand)) # delta position from the hand
if self.obs_type == 'with_goal_and_id':
return np.concatenate([flat_obs, self._state_goal, self._state_goal_idx])
elif self.obs_type == 'with_goal':
return np.concatenate([flat_obs, self._state_goal - objPos
]) # delta position of the goal from the object
elif self.obs_type == 'plain':
return np.concatenate([
flat_obs,
]) # TODO ZP do we need the concat?
else:
return np.concatenate([flat_obs, self._state_goal_idx])
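    # Observation layout (see the "list of changes" above): 'plain' is 6-D
    # [hand xyz, obj - hand]; 'with_goal' appends the 3-D (goal - obj) delta,
    # so the policy only sees relative quantities rather than absolute goal positions.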
def _get_obs_dict(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate((hand, objPos))
return dict(
state_observation=flat_obs,
state_desired_goal=self._state_goal,
state_achieved_goal=objPos,
)
def _get_info(self):
pass
def _set_goal_marker(self, goal):
"""
    This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
self.data.site_xpos[self.model.site_name2id('goal_{}'.format(
self.task_type))] = (
goal[:3])
for task_type in self.task_types:
if task_type != self.task_type:
self.data.site_xpos[self.model.site_name2id(
'goal_{}'.format(task_type))] = (
np.array([10.0, 10.0, 10.0]))
def _set_objCOM_marker(self):
"""
    This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
objPos = self.data.get_geom_xpos('objGeom')
self.data.site_xpos[self.model.site_name2id('objSite')] = (objPos)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def sample_goals(self, batch_size):
# Required by HER-TD3
goals = self.sample_goals_(batch_size)
if self.discrete_goal_space is not None:
goals = [self.discrete_goals[g].copy() for g in goals]
return {
'state_desired_goal': goals,
}
def sample_task(self):
idx = self.sample_goals_(1)
return self.discrete_goals[idx]
def adjust_initObjPos(self, orig_init_pos):
    # This accounts for the geom and object meshes not being perfectly aligned;
    # if uncorrected, the object could be initialized in an extreme position
diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
adjustedPos = orig_init_pos[:2] + diff
#The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
return [
adjustedPos[0], adjustedPos[1],
self.data.get_geom_xpos('objGeom')[-1]
]
def reset_goal(self, goal=None):
# choose one of the discrete ends for a goal if none is given
if goal is None:
discrete_goal_list = np.array([[0.0, 0.9, 0.02], [0.0, 0.3, 0.02],
[-0.3, 0.6, 0.02], [0.3, 0.6, 0.02]])
goal_idx = [
0, 2, 3
][np.random.choice(3)] # skip the back goal as it is not reachable
goal = discrete_goal_list[goal_idx]
solve_reverse_task = np.random.choice(
1) # goal-object reversal to simulate potential reset problems
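    # NOTE: np.random.choice(1) always returns 0, so the goal/object swap below
    # never triggers as written; use np.random.choice(2) if a 50/50 swap is intended.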
if solve_reverse_task:
goal = np.concatenate([goal[:2], [self.obj_init_pos[-1]]])
self.obj_init_pos, goal = goal, self.obj_init_pos
self._set_obj_xyz(self.obj_init_pos)
# update the chosen goal in environment
self._state_goal = np.concatenate((goal[:2], [self.obj_init_pos[-1]]))
self._set_goal_marker(self._state_goal)
# update quantities for reward calculation
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger) / 2
self.maxReachDist = np.linalg.norm(self.init_fingerCOM -
np.array(self._state_goal))
self.maxPushDist = np.linalg.norm(self.obj_init_pos[:2] -
np.array(self._state_goal)[:2])
self.maxPlacingDist = np.linalg.norm(
np.array(
[self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) -
np.array(self._state_goal)) + self.heightTarget
self.target_rewards = [
1000 * self.maxPlacingDist + 1000 * 2,
1000 * self.maxReachDist + 1000 * 2, 10 * self.maxPushDist + 10 * 2
]
if self.task_type == 'reach':
idx = 1
elif self.task_type == 'push':
idx = 2
else:
idx = 0
self.target_reward = self.target_rewards[idx]
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = np.random.uniform(
self.obj_and_goal_space.low,
self.obj_and_goal_space.high,
size=(self.obj_and_goal_space.low.size),
)
if self.task_type == 'push':
self.obj_init_pos = np.concatenate(
(goal_pos[:2], [self.obj_init_pos[-1]]))
else:
self.obj_init_pos = goal_pos[:3]
self._set_obj_xyz(self.obj_init_pos)
self.reset_goal() # segregate the call to goal resetting for reuse later
#self._set_obj_xyz_quat(self.obj_init_pos, self.obj_init_angle)
self.curr_path_length = 0
self.num_resets += 1
return self._get_obs()
def reset_model_to_idx(self, idx):
raise NotImplementedError('This API is deprecated! Please explicitly\
call `set_goal_` then reset the environment.')
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1, 1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger) / 2
self.pickCompleted = False
def get_site_pos(self, siteName):
_id = self.model.site_names.index(siteName)
return self.data.site_xpos[_id].copy()
def compute_rewards(self, actions, obsBatch):
#Required by HER-TD3
assert isinstance(obsBatch, dict) == True
obsList = obsBatch['state_observation']
rewards = [
self.compute_reward(action, obs, task_type=self.task_type)[0]
for action, obs in zip(actions, obsList)
]
return np.array(rewards)
def compute_reward(self, actions, obs, mode='general', task_type='reach'):
if isinstance(obs, dict):
obs = obs['state_observation']
objPos = obs[3:6]
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger) / 2
heightTarget = self.heightTarget
goal = self._state_goal
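    # All three shaped rewards below follow the same pattern (a rough sketch):
    #   reward ~ c1 * (max_dist - dist) + c1 * (exp(-dist**2 / c2) + exp(-dist**2 / c3))
    # i.e. a linear progress term plus two exponential bonuses that sharpen the
    # gradient near the goal, clamped to be non-negative; c1 differs per task
    # (10 for push, 1000 otherwise, per the "list of changes" above).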
def compute_reward_reach(actions, obs, mode):
c1 = 1000
c2 = 0.01
c3 = 0.001
reachDist = np.linalg.norm(fingerCOM - goal)
# reachRew = -reachDist
# if reachDist < 0.1:
# reachNearRew = 1000*(self.maxReachDist - reachDist) + c1*(np.exp(-(reachDist**2)/c2) + np.exp(-(reachDist**2)/c3))
# else:
# reachNearRew = 0.
reachRew = c1 * (self.maxReachDist - reachDist) + c1 * (
np.exp(-(reachDist**2) / c2) + np.exp(-(reachDist**2) / c3))
reachRew = max(reachRew, 0)
# reachNearRew = max(reachNearRew,0)
# reachRew = -reachDist
reward = reachRew # + reachNearRew
return [reward, reachRew, reachDist, None, None, None, None, None]
def compute_reward_push(actions, obs, mode):
c1 = 10
c2 = 0.01
c3 = 0.001
assert np.all(goal == self.get_site_pos('goal_push'))
reachDist = np.linalg.norm(fingerCOM - objPos)
pushDist = np.linalg.norm(objPos[:2] - goal[:2])
reachRew = -reachDist
if reachDist < 0.05:
# pushRew = -pushDist
pushRew = c1 * (self.maxPushDist - pushDist) + c1 * (
np.exp(-(pushDist**2) / c2) + np.exp(-(pushDist**2) / c3))
pushRew = max(pushRew, 0)
else:
pushRew = 0
reward = self.reach_reward_scale * reachRew + pushRew
return [reward, reachRew, reachDist, pushRew, pushDist, None, None, None]
def compute_reward_pick_place(actions, obs, mode):
reachDist = np.linalg.norm(objPos - fingerCOM)
placingDist = np.linalg.norm(objPos - goal)
assert np.all(goal == self.get_site_pos('goal_pick_place'))
def reachReward():
reachRew = -reachDist # + min(actions[-1], -1)/50
reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
if reachDistxy < 0.05: #0.02
reachRew = -reachDist
else:
reachRew = -reachDistxy - 2 * zRew
#incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1], 0) / 50
return reachRew, reachDist
def pickCompletionCriteria():
tolerance = 0.01
if objPos[2] >= (heightTarget - tolerance):
return True
else:
return False
if pickCompletionCriteria():
self.pickCompleted = True
def objDropped():
return (objPos[2] <
(self.objHeight + 0.005)) and (placingDist >
0.02) and (reachDist > 0.02)
# Object on the ground, far away from the goal, and from the gripper
#Can tweak the margin limits
def objGrasped(thresh=0):
sensorData = self.data.sensordata
return (sensorData[0] > thresh) and (sensorData[1] > thresh)
def orig_pickReward():
# hScale = 50
hScale = 100
# hScale = 1000
if self.pickCompleted and not (objDropped()):
return hScale * heightTarget
# elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
elif (reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005)):
return hScale * min(heightTarget, objPos[2])
else:
return 0
def general_pickReward():
hScale = 50
if self.pickCompleted and objGrasped():
return hScale * heightTarget
elif objGrasped() and (objPos[2] > (self.objHeight + 0.005)):
return hScale * min(heightTarget, objPos[2])
else:
return 0
def placeReward():
# c1 = 1000 ; c2 = 0.03 ; c3 = 0.003
c1 = 1000
c2 = 0.01
c3 = 0.001
if mode == 'general':
cond = self.pickCompleted and objGrasped()
else:
cond = self.pickCompleted and (reachDist < 0.1) and not (objDropped())
if cond:
placeRew = 1000 * (self.maxPlacingDist - placingDist) + c1 * (
            np.exp(-(placingDist**2) / c2) + np.exp(-(placingDist**2) / c3))
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from model_ddpg import Actor, Critic
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
BUFFER_SIZE = int(1e6) # replay buffer size
START_SIZE = int(1e3) # when to start training
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 0.05 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 3e-4 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
TRAIN_EVERY = 5 # how often to train a batch
TRAIN_STEPS = 10 # how many training steps when a batch is trained
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):
"""Initialize an Agent object.
Params
======
num_agents (int): number of agents
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
use_per (bool): whether to use prioritized replay buffer
"""
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.use_per = use_per
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
if use_per:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)
# Initialize time step
self.t_step = 0
def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):
# Get max predicted Q values (for next states) from target model
if is_train:
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
else:
self.actor_local.eval()
self.actor_target.eval()
self.critic_local.eval()
self.critic_target.eval()
with torch.no_grad():
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
self.actor_local.train()
self.actor_target.train()
self.critic_local.train()
self.critic_target.train()
return Q_expected, Q_targets
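    # get_critic_Q computes the standard DDPG TD target
    #   y = r + gamma * (1 - done) * Q_target(s', mu_target(s'))
    # using the target actor/critic; in eval mode (is_train=False) everything
    # runs under no_grad with the networks temporarily switched to eval().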
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
if self.use_per:
# Convert numpy array to torch tensor
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)
# Get max predicted Q values (for next states) from target model
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)
# Convert torch tensor to numpy array
states = states.cpu().data.numpy()
actions = actions.cpu().data.numpy()
rewards = rewards.cpu().data.numpy().squeeze(1).tolist()
next_states = next_states.cpu().data.numpy()
dones = dones.cpu().data.numpy().squeeze(1).astype(np.bool).tolist()
# Calculate error
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze(1)
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])
else:
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])
# Update time step
self.t_step += 1
# If enough samples are available in memory,
if len(self.memory) >= START_SIZE:
# Get random subset and learn every TRAIN_EVERY time steps,
if self.t_step % TRAIN_EVERY == 0:
for _ in range(TRAIN_STEPS):
if self.use_per:
experiences, idx_tree, is_weight = self.memory.sample()
self.learn(experiences, GAMMA, idx_tree, is_weight)
else:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, states, add_noise=True):
"""Returns epsilon-greedy actions for given state as per current policy."""
states = torch.from_numpy(states).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(states).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)
        return np.clip(actions, -1, 1)
from DartDeepRNN.rnn.RNNController import RNNController
from DartDeepRNN.util.Pose2d import Pose2d
from DartDeepRNN.util.Util import *
from OpenGL.GL import *
from OpenGL.GLU import *
from fltk import *
from PyCommon.modules.GUI.hpSimpleViewer import hpSimpleViewer as SimpleViewer
from PyCommon.modules.Renderer import ysRenderer as yr
from PyCommon.modules.Math import mmMath as mm
from PyCommon.modules.Motion import ysMotion as ym
from PyCommon.modules.Resource import ysMotionLoader as yf
from PyCommon.modules.dart.dart_ik import DartIk
import pydart2 as pydart
import numpy as np
MOTION_SCALE = .01
joint_point_list = [None, "Head_End", "LeftHand", "LeftFoot", "LeftToeBase", "RightHand", "RightFoot", "RightToeBase", "LeftArm",
"RightArm", "LeftForeArm", "LeftLeg", "RightForeArm", "RightLeg", "Spine", "LeftHandIndex1", "RightHandIndex1", "Neck1", "LeftUpLeg", "RightUpLeg"]
joint_list = ["Head", "Hips", "LHipJoint", "LeftArm", "LeftFoot", "LeftForeArm", "LeftHand",
"LeftLeg", "LeftShoulder", "LeftToeBase", "LeftUpLeg", "LowerBack", "Neck", "Neck1",
"RHipJoint", "RightArm", "RightFoot","RightForeArm","RightHand", "RightLeg","RightShoulder","RightToeBase","RightUpLeg",
"Spine","Spine1"]
class ModelViewer(object):
def __init__(self, folder):
pydart.init()
self.world = pydart.World(1./1200., "../data/cmu_with_ground.xml")
self.model = self.world.skeletons[1]
self.ik = DartIk(self.model)
self.controller = RNNController(folder)
self.all_angles = [[] for i in range(93)]
viewer = SimpleViewer(rect=[0, 0, 1280+300, 720+1+55], viewForceWnd=False)
self.viewer = viewer
# viewer.record(True)
viewer.record(False)
viewer.setMaxFrame(10000)
self.isFirst = True
self.lines = None
viewer.motionViewWnd.glWindow.set_mouse_pick(True)
def callback_btn(ptr):
self.controller.reset()
viewer.objectInfoWnd.addBtn('reset', callback_btn)
self.rc = yr.RenderContext()
self.rd_target_position = [None]
self.rd_frames = [None]
viewer.doc.addRenderer('contact', yr.PointsRenderer(self.rd_target_position, (0, 255, 0), save_state=False))
viewer.doc.addRenderer('MotionModel', yr.DartRenderer(self.world, (150,150,255), yr.POLYGON_FILL, save_state=False))
viewer.doc.addRenderer('rd_frames', yr.FramesRenderer(self.rd_frames))
def extraDrawCallback():
self.rd_target_position[0] = self.viewer.motionViewWnd.glWindow.pickPoint
self.step_model()
del self.rd_frames[:]
self.rd_frames.append(self.model.body(0).world_transform())
# for i in range(3):
# print(self.model.body(0).world_transform()[:3, i])
viewer.setExtraDrawCallback(extraDrawCallback)
viewer.startTimer(1. / 30.)
viewer.show()
# viewer.play()
Fl.run()
def get_target(self):
p = self.viewer.motionViewWnd.glWindow.pickPoint
target = Pose2d([p[0]/MOTION_SCALE, -p[2]/MOTION_SCALE])
target = self.controller.pose.relativePose(target)
target = target.p
t_len = v_len(target)
if t_len > 80:
ratio = 80/t_len
target[0] *= ratio
target[1] *= ratio
return target
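    # The picked 3D point is converted into the controller's 2D local frame
    # (scaled by 1 / MOTION_SCALE) and its magnitude is clamped to 80, presumably
    # to keep the target within the range the RNN controller was trained on.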
def step_model(self):
contacts, points, angles, orientations, root_orientation = self.controller.step(self.get_target())
# pairs = [[0,11,3,4],
# [0,8,10,2],
# [0,13,6,7],
# [0,9,12,5],
# [0,1]]
pairs = [[0,18,11,3,4],
[0,14,8,10,2],
[0,19,13,6,7],
[0,14,9,12,5],
[0,14,17,1]]
self.lines = []
for pair in pairs:
for i in range(len(pair)-1):
self.lines.append([points[pair[i]], points[pair[i+1]]])
# print(len(orientations))
for i in range(len(angles)):
self.all_angles[i].append(angles[i])
for j in range(len(self.model.joints)):
if j == 0:
joint = self.model.joints[j] # type: pydart.FreeJoint
joint_idx = joint_list.index(joint.name)
hip_angles = mm.logSO3(np.dot(root_orientation, orientations[joint_idx]))
# hip_angles = mm.logSO3(root_orientation)
joint.set_position(np.array([hip_angles[0], hip_angles[1], hip_angles[2], points[0][0], points[0][1], points[0][2]]))
continue
joint = self.model.joints[j] # type: pydart.BallJoint
joint_idx = joint_list.index(joint.name)
joint.set_position(angles[joint_idx*3:joint_idx*3+3])
self.ik.clean_constraints()
self.ik.add_joint_pos_const('LeftForeArm', np.asarray(points[10]))
self.ik.add_joint_pos_const('LeftHand', np.asarray(points[2]))
self.ik.add_joint_pos_const('LeftLeg', np.asarray(points[11]))
self.ik.add_joint_pos_const('LeftFoot', np.asarray(points[3]))
if contacts[0] > 0.8 and False:
body_transform = self.model.body('LeftFoot').transform()[:3, :3]
angle = math.acos(body_transform[1, 1])
body_ori = np.dot(body_transform, mm.rotX(-angle))
self.ik.add_orientation_const('LeftFoot', body_ori)
self.ik.add_joint_pos_const('RightForeArm', np.asarray(points[12]))
self.ik.add_joint_pos_const('RightHand', np.asarray(points[5]))
self.ik.add_joint_pos_const('RightLeg', np.asarray(points[13]))
self.ik.add_joint_pos_const('RightFoot', np.asarray(points[6]))
self.ik.solve()
foot_joint_ori = mm.exp(self.model.joint('LeftFoot').position())
self.model.joint('LeftFoot').set_position(mm.logSO3(np.dot(foot_joint_ori, np.dot(mm.rotX(-.6), mm.rotZ(.4)))))
foot_joint_ori = mm.exp(self.model.joint('RightFoot').position())
self.model.joint('RightFoot').set_position(mm.logSO3(np.dot(foot_joint_ori, np.dot(mm.rotX(-.6), mm.rotZ(-.4)))))
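        # If both the toe side and the heel side of a foot sole penetrate the
        # ground plane (y < 0), pin the four sole corner points to y = 0 with
        # position constraints and re-solve IK, flattening the foot onto the floor.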
left_foot = self.model.body('LeftFoot')
if (left_foot.to_world([0.05, -0.045, 0.1125])[1] < 0. or left_foot.to_world([-0.05, -0.045, 0.1125])[1] < 0.) \
and (left_foot.to_world([0.05, -0.045, -0.1125])[1] < 0. or left_foot.to_world([-0.05, -0.045, -0.1125])[1] < 0.):
left_toe_pos1 = left_foot.to_world([0.05, -0.045, +0.1125])
left_toe_pos1[1] = 0.
left_toe_pos2 = left_foot.to_world([-0.05, -0.045, +0.1125])
left_toe_pos2[1] = 0.
left_heel_pos1 = left_foot.to_world([0.05, -0.045, -0.1125])
left_heel_pos1[1] = 0.
left_heel_pos2 = left_foot.to_world([-0.05, -0.045, -0.1125])
left_heel_pos2[1] = 0.
self.ik.clean_constraints()
self.ik.add_position_const('LeftFoot', left_toe_pos1, np.array([0.05, -0.045, +0.1125]))
self.ik.add_position_const('LeftFoot', left_toe_pos2, np.array([-0.05, -0.045, +0.1125]))
self.ik.add_position_const('LeftFoot', left_heel_pos1, np.array([0.05, -0.045, -0.1125]))
self.ik.add_position_const('LeftFoot', left_heel_pos2, np.array([-0.05, -0.045, -0.1125]))
self.ik.solve()
right_foot = self.model.body('RightFoot')
if (right_foot.to_world([0.05, -0.045, 0.1125])[1] < 0. or right_foot.to_world([-0.05, -0.045, 0.1125])[1] < 0.) \
and (right_foot.to_world([0.05, -0.045, -0.1125])[1] < 0. or right_foot.to_world([-0.05, -0.045, -0.1125])[1] < 0.):
right_toe_pos1 = right_foot.to_world([0.05, -0.045, +0.1125])
right_toe_pos1[1] = 0.
right_toe_pos2 = right_foot.to_world([-0.05, -0.045, +0.1125])
right_toe_pos2[1] = 0.
right_heel_pos1 = right_foot.to_world([0.05, -0.045, -0.1125])
right_heel_pos1[1] = 0.
right_heel_pos2 = right_foot.to_world([-0.05, -0.045, -0.1125])
right_heel_pos2[1] = 0.
self.ik.clean_constraints()
self.ik.add_position_const('RightFoot', right_toe_pos1, np.array([0.05, -0.045, +0.1125]))
self.ik.add_position_const('RightFoot', right_toe_pos2, np.array([-0.05, -0.045, +0.1125]))
self.ik.add_position_const('RightFoot', right_heel_pos1, np.array([0.05, -0.045, -0.1125]))
            self.ik.add_position_const('RightFoot', right_heel_pos2, np.array([-0.05, -0.045, -0.1125]))
import os
import json
import pickle
import codecs
import random
import shutil
import hashlib
import argparse
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import tensorflow as tf
from nets import *
import load_policy
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Humanoid-v2')
parser.add_argument('--optimizer', type=str, default='Adam', help='Optimizer to use for training (case-sensitive)')
parser.add_argument('--nonlinearity', type=str, default='elu', help='Type of nonlinearity used in neural network (tanh, relu, etc.)')
parser.add_argument('--net-size', type=float, default=4.0, help='Multiplier for the size of hidden layers')
parser.add_argument('--grad-clip', type=float, default=0.5, help='Maximum gradient norm')
parser.add_argument('--dropout-prob', type=float, default=0, help='Probability of dropping nodes in Dropout')
parser.add_argument('--weight-decay', type=float, default=3e-4, help='L2-regularizer strength')
parser.add_argument('--learning-rate', type=float, default=1e-3, help='Learning rate used in optimization')
parser.add_argument('--aggregation-rate', type=float, default=0.2, help='Rate of dataset aggregation')
parser.add_argument('--seed', type=int, default=1, help='Manual PRNG seed for reproducibility')
parser.add_argument('--only-mean', type=int, default=1, help='Only optimize mean of distribution for this many epochs (-1 means always)')
parser.add_argument('--num-demos', type=int, default=20, help='Number of expert demonstrations to learn from')
parser.add_argument('--num-layers', type=int, default=3, help='Number of layers in neural network')
parser.add_argument('--batch-size', type=int, default=128, help='Batch size used for training and evaluation')
parser.add_argument('--num-epochs', type=int, default=200, help='Number of training epochs')
parser.add_argument('--cpu', action='store_false', dest='gpu')
parser.add_argument('--render', action='store_true')
parser.add_argument('--record', action='store_true')
ARGS = parser.parse_args()
ARGS_STR = ''
for key, val in sorted(vars(ARGS).items()):
if key not in ['seed', 'gpu', 'render', 'record', 'env']:
ARGS_STR += str(val)
ARGS_HASH = hashlib.md5(str.encode(ARGS_STR)).hexdigest()[-8:]
SAVE_PATH = './output_dagger/%s/%s/seed_%d' % (ARGS_HASH, ARGS.env, ARGS.seed)
shutil.rmtree(SAVE_PATH, ignore_errors=True)
os.makedirs(SAVE_PATH, exist_ok=True)
def log(s, disp=True, write=False, filename='log.txt', **kwargs):
if disp:
print(s, **kwargs)
if write:
with codecs.open(os.path.join(SAVE_PATH, filename), 'a', 'utf-8') as f:
print(s, file=f, **kwargs)
def log_tabular(vals, keys=None, formats=None):
log(','.join([str(x) for x in vals]), disp=False, write=True, filename='log.csv')
if formats is not None:
assert len(formats) == len(vals)
vals = [x[0] % x[1] for x in zip(formats, vals)]
if keys is not None:
assert len(keys) == len(vals)
log(' | '.join(['%s: %s' % (x[0], str(x[1])) for x in zip(keys, vals)]), write=True)
ARGS.nonlinearity = ARGS.nonlinearity.lower()
ARGS_JSON = json.dumps(vars(ARGS), sort_keys=True, indent=4)
log('ARGS = %s' % ARGS_JSON)
ARGS.optimizer = eval('optim.%s' % ARGS.optimizer)
ARGS.nonlinearity = eval('F.%s' % ARGS.nonlinearity)
with open(os.path.join(SAVE_PATH, 'args.json'), 'w') as f:
f.write(ARGS_JSON)
# reproducibility
random.seed(ARGS.seed, version=2)
torch.manual_seed(random.randint(0, 2**32 - 1))
np.random.seed(random.randint(0, 2**32 - 1))
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
DEMO_PATH = 'expert_data_%d/%s.pkl' % (ARGS.num_demos, ARGS.env)
with open(DEMO_PATH, 'rb') as f:
demo = pickle.load(f)
DEMO_SIZE = demo['observations'].shape[0]
assert demo['actions'].shape[0] == DEMO_SIZE
demo['observations'] = np.reshape(demo['observations'], (DEMO_SIZE, -1))
demo['actions'] = np.reshape(demo['actions'], (DEMO_SIZE, -1))
OBS_DIM = demo['observations'].shape[1]
ACT_DIM = demo['actions'].shape[1]
# observation stats used for normalization
obs_mean = np.mean(demo['observations'], axis=0)
obs_mean_sqr = np.mean(demo['observations'] ** 2, axis=0)
obs_std = lambda m1, m2: np.maximum(m2 - m1 ** 2, 0) ** 0.5
count = DEMO_SIZE
log('Number of time-steps in demonstrations: %d' % DEMO_SIZE)
log('Dimensionality of observation-space: %d' % OBS_DIM)
log('Dimensionality of action-space: %d' % ACT_DIM)
# 90% / 10% split of dataset into training-set / evaluation-set
EVAL_SIZE = DEMO_SIZE // 10
TRAIN_SIZE = DEMO_SIZE - EVAL_SIZE
shuffle_idx = np.arange(DEMO_SIZE)
np.random.shuffle(shuffle_idx)
demo_eval = {
'observations': demo['observations'][shuffle_idx[:EVAL_SIZE]],
'actions': demo['actions'][shuffle_idx[:EVAL_SIZE]]
}
demo_train = {
'observations': demo['observations'][shuffle_idx[EVAL_SIZE:]],
'actions': demo['actions'][shuffle_idx[EVAL_SIZE:]]
}
DEVICE = torch.device('cuda' if ARGS.gpu and torch.cuda.is_available() else 'cpu')
log('DEVICE = %s' % str(DEVICE))
WIDTH = int(np.sqrt(OBS_DIM * ACT_DIM) * ARGS.net_size + 16)
log('Width of hidden layers: %d' % WIDTH, write=True)
hidden_layers = [WIDTH] * (ARGS.num_layers - 1)
net = ControlNet(widths=[OBS_DIM] + hidden_layers + [ACT_DIM], act_fn=ARGS.nonlinearity, dropout_prob=ARGS.dropout_prob)
net.set_obs_stats(obs_mean=obs_mean, obs_std=obs_std(obs_mean, obs_mean_sqr))
print(net)
net.to(DEVICE)
opt = ARGS.optimizer(net.parameters(), lr=ARGS.learning_rate, weight_decay=ARGS.weight_decay)
expert_policy = load_policy.load_policy('./experts/%s.pkl' % ARGS.env)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
env = gym.make(ARGS.env)
env.seed(ARGS.seed)
if ARGS.record:
env = gym.wrappers.Monitor(env, directory=SAVE_PATH, video_callable=lambda x: True)
def update_stats(observations, obs_mean, obs_mean_sqr, count, net):
n = observations.shape[0]
obs_mean = (obs_mean * count + n * np.mean(observations, axis=0)) / (count + n)
obs_mean_sqr = (obs_mean_sqr * count + n * np.mean(observations ** 2, axis=0)) / (count + n)
count += n
net.set_obs_stats(obs_mean=obs_mean, obs_std=obs_std(obs_mean, obs_mean_sqr))
return obs_mean, obs_mean_sqr, count
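# update_stats keeps running estimates of E[obs] and E[obs^2] over all data seen
# so far; the network then normalizes with std = sqrt(max(E[x^2] - E[x]^2, 0)),
# where the clamp guards against tiny negative values from floating-point error.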
def aggregate_dataset(demo_train, demo_eval, observations, expert_policy, sess):
with sess.as_default():
actions = expert_policy(observations)
eval_size = observations.shape[0] // 10
new_train = {
'observations': observations[eval_size:],
'actions': actions[eval_size:]
}
new_eval = {
'observations': observations[:eval_size],
'actions': actions[:eval_size]
}
for key in demo_train:
demo_train[key] = np.concatenate((demo_train[key], new_train[key]), axis=0)
for key in demo_eval:
demo_eval[key] = np.concatenate((demo_eval[key], new_eval[key]), axis=0)
return demo_train, demo_eval
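# aggregate_dataset implements the DAgger step: states visited by the current
# policy are relabeled with the expert's actions and appended to the train/eval
# sets (the first 10% of each new batch goes to evaluation), so later epochs
# train on the learner's own state distribution.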
def train_batch(net, opt, obs_batch, act_batch, only_mean=False):
net.train()
mu, log_var = net.forward(obs_batch)
if only_mean:
log_var.zero_()
loss = net.loss(mu, log_var, act_batch)
opt.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), ARGS.grad_clip)
opt.step()
loss = loss.item()
return loss
def eval_batch(net, obs_batch, act_batch):
net.eval()
with torch.no_grad():
mu, log_var = net.forward(obs_batch)
loss = net.loss(mu, log_var, act_batch)
loss = loss.item()
return loss
def run_epoch(net, opt, demo, train, num_samples=None, **kwargs):
dataset_size = demo['observations'].shape[0]
assert demo['actions'].shape[0] == dataset_size
if train:
shuffle_idx = np.arange(dataset_size)
np.random.shuffle(shuffle_idx)
demo['observations'] = demo['observations'][shuffle_idx]
demo['actions'] = demo['actions'][shuffle_idx]
i = 0
total_loss = 0
if num_samples is None:
num_samples = dataset_size
while i < num_samples:
obs_batch = demo['observations'][i: min(i + ARGS.batch_size, num_samples)]
act_batch = demo['actions'][i: min(i + ARGS.batch_size, num_samples)]
cur_batch_size = obs_batch.shape[0]
i += ARGS.batch_size
obs_batch = torch.tensor(obs_batch, dtype=torch.float, device=DEVICE)
act_batch = torch.tensor(act_batch, dtype=torch.float, device=DEVICE)
if train:
loss = train_batch(net, opt, obs_batch, act_batch, **kwargs)
else:
loss = eval_batch(net, obs_batch, act_batch, **kwargs)
total_loss += cur_batch_size * loss
total_loss /= num_samples
return total_loss
def run_policy(net, env, render=False):
net.eval()
done = False
total_reward = 0
obs_list = []
obs = env.reset()
if render:
env.render()
while not done:
obs_list.append(obs)
with torch.no_grad():
obs = torch.tensor([obs], dtype=torch.float, device=DEVICE)
mu, log_var = net.forward(obs)
mu = mu.cpu().numpy()[0]
log_var = log_var.cpu().numpy()[0]
sigma = np.exp(log_var * 0.5)
        act = np.random.normal(loc=mu, scale=sigma)
from PIL import Image
import numpy as np
import cv2
class DeHaze():
def __init__(self,omega=0.8, k_size=9, model='origin'):
self.omega = omega
self.ksize = k_size
self.model = model
return
    def atmosphere(self, img: Image):  # estimate the atmospheric light from the brightest dark-channel pixels
r, g, b = img.split()
r, g, b = np.array(r).flatten(), np.array(g).flatten(), np.array(b).flatten()
img_np = np.asarray(img, dtype='float')
dark_c = np.min(img_np, axis=-1)
first_100 = dark_c.argsort(axis=None)[-100:]
first_100r, first_100g, first_100b = r[first_100], g[first_100], b[first_100]
sum_rgb = first_100r + first_100g + first_100b
choice = np.argmax(sum_rgb)
return first_100r[choice], first_100g[choice], first_100b[choice] #float
def dark_channel(self, img_np:np.ndarray):
dark_c = np.min(img_np, axis=-1)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (self.ksize, self.ksize))
dark = cv2.erode(dark_c, kernel)
#Image.fromarray(dark).show()
return dark #np.ndarray
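    # dark_channel computes the dark channel prior: a per-pixel minimum over the
    # RGB channels followed by a ksize x ksize minimum filter (cv2.erode with a
    # rectangular kernel); for haze-free outdoor patches this tends towards zero.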
def transmission(self, img:Image):
a = self.atmosphere(img)
img_np = np.asarray(img, dtype='float')
img_temp = np.zeros_like(img_np)
img_temp[:, :, 0] = img_np[:, :, 0] / a[0]
img_temp[:, :, 1] = img_np[:, :, 1] / a[1]
img_temp[:, :, 2] = img_np[:, :, 2] / a[2]
#print(img_temp)
trans = 1 - self.omega * self.dark_channel(img_temp)
#Image.fromarray(np.uint8(trans * 255)).show()
#print(trans)
return trans #np.ndarray 3d
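    # transmission estimates t(x) = 1 - omega * dark(I(x) / A) from the dark
    # channel of the atmosphere-normalized image, as in the dark channel prior
    # method; omega < 1 deliberately keeps a small amount of haze for depth cues.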
def cal_p(self, trans: np.ndarray):
p = np.zeros_like(trans)
p[trans > 0.5] = 1
p[trans < 0.5] = trans[trans < 0.5] * 2
return p
def run(self, img: Image):
img_np = np.asarray(img, dtype='float')
R = 255 - img_np
rev_img = Image.fromarray(np.uint8(R))
a = self.atmosphere(rev_img)
h, w, _ = img_np.shape
        ar, ag, ab = np.ones((h, w)) * a[0], np.ones((h, w)) * a[1], np.ones((h, w)) * a[2]
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
import torch
import torch.nn
import torch.onnx
# GRU enums
GRU_DIR_FORWARD = 'forward'
GRU_DIR_REVERSE = 'reverse'
GRU_DIR_BIDIRECTIONAL = 'bidirectional'
GRU_DIRS = [GRU_DIR_FORWARD, GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(name=name, data_type=type, dims=tensor.shape, vals=tensor.reshape(tensor.size).tolist())
# Function to generate GRU ONNX test model
def gen_gru_onnx_test_model(model_path, seq_length, batch_size, hidden_size, input_size, direction, has_bias,
has_sequence_lens, has_initial_h, linear_before_reset=False):
# Validate parameters
assert direction in GRU_DIRS, 'ONNX GRU direction invalid!'
assert not has_sequence_lens, 'ONNX GRU Variable sequence length not supported'
# Get number of directions
num_directions = 2 if (direction == GRU_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 3 * hidden_size, input_size]
R_shape = [num_directions, 3 * hidden_size, hidden_size]
B_shape = [num_directions, 6 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
# Generate random inputs (weights are assumed concatenated in ONNX format: z,r,h)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
    B = np.random.randn(*B_shape)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to preprocess the raw annotation output to NP/object annotation files
import os
import sys
import json
import argparse
import numpy as np
from collections import Counter, defaultdict
from stanfordcorenlp import StanfordCoreNLP
def define_split(database):
with open(args.train_cap_file) as f:
train_ids = json.load(f).keys()
with open(args.val_cap_file) as f:
valtest_ids = json.load(f).keys()
val_split = np.random.rand(len(valtest_ids))>=0.5 # split a half as the test split
val_ids = [valtest_ids[i] for i,j in enumerate(val_split) if j]
test_ids = [valtest_ids[i] for i,j in enumerate(val_split) if ~j]
vid_ids = set(database.keys())
train_ann_ids = vid_ids.intersection(set(train_ids))
val_ann_ids = vid_ids.intersection(set(val_ids))
test_ann_ids = vid_ids.intersection(set(test_ids))
print('All data - total: {}, train split: {}, val split: {}, test split: {}'.format(len(train_ids+val_ids+test_ids), len(train_ids), len(val_ids), len(test_ids)))
print('Annotated data - total: {}, train split: {}, val split: {}, and test split: {}'.format(
len(vid_ids), len(train_ann_ids), len(val_ann_ids), len(test_ann_ids)))
return [train_ids, val_ids, test_ids]
def extract_attr(database, splits):
split_dict = {}
for split in splits:
split_dict.update({s:s for s in split})
print('Object classes defined on {} videos, freq threshold is {}'.format(len(split_dict), args.freq_thresh))
attr_all = [] # all the attributes
for vid_id, vid in database.items():
if split_dict.get(vid_id, -1) != -1:
for seg_id, seg in vid['segments'].items():
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
for box_id, box in obj['frame_ind'].items():
tmp = []
attr_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
if len(tmp) == 0:
tmp.append(attr[1].lower()) # convert to lowercase
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr[1].lower())
else:
attr_lst.append(tmp)
tmp = [attr[1].lower()]
if len(tmp) > 0: # the last one
attr_lst.append(tmp)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
attr_all.extend([' '.join(i) for i in attr_lst])
return attr_all
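# extract_attr walks every annotated box and groups its word-level "attributes"
# into runs of consecutive sentence indices; each run is joined into one phrase,
# and the pooled phrases from the train/val splits are what the object class
# vocabulary is later mined from (see freq_obj_list).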
def prep_all(database, database_cap, obj_cls_lst, w2l, nlp):
w2d = {}
for ind, obj in enumerate(obj_cls_lst):
w2d[obj] = ind
avg_box = [] # number of boxes per segment
avg_attr = [] # number of attributes per box
attr_all = [] # all the attributes
crowd_all = [] # all the crowd labels
attr_dict = defaultdict(list)
with open(args.attr_to_video_file) as f:
for line in f.readlines():
line_split = line.split(',')
attr_id = line_split[0]
vid_name = line_split[-1]
attr = ','.join(line_split[1:-1])
vid_id, seg_id = vid_name.strip().split('_segment_')
attr_dict[(vid_id, str(int(seg_id)))].append([int(attr_id), attr])
print('Number of segments with attributes: {}'.format(len(attr_dict)))
vid_seg_dict = {}
for vid_id, vid in database.items():
for seg_id, _ in vid['segments'].items():
vid_seg_dict[(vid_id, seg_id)] = vid_seg_dict.get((vid_id, seg_id), 0) + 1
new_database = {}
new_database_np = {}
seg_counter = 0
for vid_id, cap in database_cap.items():
new_database_np[vid_id] = {'segments':{}}
new_seg = {}
for cap_id in range(len(cap['sentences'])):
new_obj_lst = defaultdict(list)
seg_id = str(cap_id)
new_database_np[vid_id]['segments'][seg_id] = {'objects':[]}
if vid_seg_dict.get((vid_id, seg_id), 0) == 0:
new_obj_lst['tokens'] = nlp.word_tokenize(cap['sentences'][cap_id].encode('utf-8')) # sentences not in ANet-BB
else:
vid = database[vid_id]
seg = vid['segments'][seg_id]
# preprocess attributes
attr_sent = sorted(attr_dict[(vid_id, seg_id)], key=lambda x:x[0])
start_ind = attr_sent[0][0]
# legacy token issues from our annotation tool
for ind, tup in enumerate(attr_sent):
if attr_sent[ind][1] == '\\,':
attr_sent[ind][1] = ','
new_obj_lst['tokens'] = [i[1] for i in attr_sent] # all the word tokens
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
np_ann = {}
box_id = obj['frame_ind'].keys()[0]
box = obj['frame_ind'].values()[0]
np_ann['frame_ind'] = int(box_id)
np_ann.update(box)
if len(box['attributes']) > 0: # just in case the attribute is empty, though it should not be
tmp = []
tmp_ind = []
tmp_obj = []
attr_lst = []
attr_ind_lst = []
tmp_np_ind = []
np_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
sorted_attr = [(x[0]-start_ind, x[1]) for x in sorted_attr] # index relative to the sent
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
attr_w = attr[1].lower()
if len(tmp) == 0:
tmp.append(attr_w) # convert to lowercase
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr_w)
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
tmp = [attr_w]
tmp_np_ind = [attr[0]]
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj = [attr_l]
tmp_ind = [attr[0]]
else:
tmp_obj = []
tmp_ind = []
else:
tmp_obj = []
tmp_ind = []
if len(tmp) > 0: # the last one
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
assert(len(np_lst) > 0)
np_ann['noun_phrases'] = np_lst
np_ann.pop('attributes', None)
new_database_np[vid_id]['segments'][seg_id]['objects'].append(np_ann)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
new_obj_lst['process_bnd_box'].append([box['xtl'], box['ytl'], box['xbr'], box['ybr']])
new_obj_lst['frame_ind'].append(int(box_id))
new_obj_lst['crowds'].append(box['crowds'])
new_obj_lst['process_clss'].append(attr_lst)
new_obj_lst['process_idx'].append(attr_ind_lst)
avg_attr.append(len(attr_lst))
attr_all.extend([' '.join(i) for i in attr_lst])
crowd_all.append(box['crowds'])
avg_box.append(len(new_obj_lst['frame_ind'])) # cound be 0
if len(new_obj_lst['frame_ind']) == 0:
new_obj_lst['process_bnd_box'] = []
new_obj_lst['frame_ind'] = [] # all empty
new_obj_lst['crowds'] = []
new_obj_lst['process_clss'] = []
new_obj_lst['process_idx'] = []
seg_counter += 1
new_seg[seg_id] = new_obj_lst
new_database_np[vid_id]['segments'][seg_id]['tokens'] = new_obj_lst['tokens']
new_database[vid_id] = {'segments':new_seg}
# quick stats
print('Number of videos: {} (including empty ones)'.format(len(new_database)))
print('Number of segments: {}'.format(seg_counter))
print('Average number of valid segments per video: {}'.format(np.mean([len(vid['segments']) for vid_id, vid in new_database.items()])))
print('Average number of box per segment: {} and frequency: {}'.format(np.mean(avg_box), Counter(avg_box)))
print('Average number of attributes per box: {} and frequency: {} (for valid box only)'.format(np.mean(avg_attr), Counter(avg_attr)))
crowd_freq = Counter(crowd_all)
print('Percentage of crowds: {} (for valid box only)'.format(crowd_freq[1]*1./(crowd_freq[1]+crowd_freq[0])))
return new_database, new_database_np
def freq_obj_list(attr_all, nlp, props):
# generate a list of object classes
num_nn_per_attr = []
anet_obj_cls = []
nn_wo_noun = [] # noun phrases that contain no nouns
w2lemma = defaultdict(list)
for i, v in enumerate(attr_all):
if i%10000 == 0:
print(i)
out = json.loads(nlp.annotate(v.encode('utf-8'), properties=props))
        assert(len(out['sentences']) > 0)
counter = 0
for token in out['sentences'][0]['tokens']:
if ('NN' in token['pos']) or ('PRP' in token['pos']):
lemma_w = token['lemma']
anet_obj_cls.append(lemma_w)
w2lemma[token['word']].append(lemma_w)
counter += 1
num_nn_per_attr.append(counter)
if counter == 0:
nn_wo_noun.append(v)
top_nn_wo_noun = Counter(nn_wo_noun)
print('Frequency of NPs w/o nouns:')
print(top_nn_wo_noun.most_common(10))
print('Frequency of number of nouns per attribute:')
print(Counter(num_nn_per_attr))
top_obj_cls = Counter(anet_obj_cls)
print('Top 10 objects:', top_obj_cls.most_common(20))
obj_cls_lst = []
for w,freq in top_obj_cls.items():
if freq >= args.freq_thresh:
obj_cls_lst.append(w.encode('ascii'))
w2l = {}
for w, l in w2lemma.items():
# manually correct some machine lemmatization mistakes
spec_w2l = {'outfits':'outfit', 'mariachi':'mariachi', 'barrios':'barrio', 'mans':'man', 'bags':'bag', 'aerobics':'aerobic', 'motobikes':'motobike', 'graffiti':'graffiti', 'semi':'semi', 'los':'los', 'tutus':'tutu'}
if spec_w2l.get(w, -1) != -1: # one special case...
w2l[w] = spec_w2l[w]
print('Ambiguous lemma for: {}'.format(w))
else:
assert(len(set(l)) == 1)
w2l[w] = list(set(l))[0]
print('Number of words derived from lemma visual words {}'.format(len(w2l)))
return obj_cls_lst, w2l
def main(args):
nlp = StanfordCoreNLP(args.corenlp_path)
props={'annotators': 'ssplit, tokenize, lemma','pipelineLanguage':'en', 'outputFormat':'json'}
# load anet captions
with open(args.train_cap_file) as f:
database_cap = json.load(f)
with open(args.val_cap_file) as f:
database_cap.update(json.load(f))
print('Number of videos in ActivityNet Captions (train+val): {}'.format(len(database_cap)))
# load raw annotation output anet bb
with open(args.src_file) as f:
database = json.load(f)['database']
print('Number of videos in ActivityNet-BB (train+val): {}'.format(len(database)))
if os.path.isfile(args.split_file):
with open(args.split_file) as f:
all_splits = json.load(f)
splits = [all_splits['training'], all_splits['validation'], all_splits['testing']]
else:
        raise Exception('[WARNING] Cannot find the split file! Uncomment this if you want to create a new split.')
splits = define_split(database)
all_splits = {'training':splits[0], 'validation':splits[1], 'testing':splits[2]}
with open(args.split_file, 'w') as f:
json.dump(all_splits, f)
attr_all = extract_attr(database, splits[:2]) # define object classes on train/val data
obj_cls_lst, w2l = freq_obj_list(attr_all, nlp, props)
new_database, new_database_np = prep_all(database, database_cap, obj_cls_lst, w2l, nlp)
# write raw annotation file
new_database_np = {'database':new_database_np}
with open(args.target_np_file, 'w') as f:
json.dump(new_database_np, f)
# write pre-processed annotation file
new_database = {'vocab':obj_cls_lst, 'annotations':new_database}
with open(args.target_file, 'w') as f:
json.dump(new_database, f)
nlp.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='ActivityNet-Entities dataset preprocessing script.')
parser.add_argument('--dataset_root', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/', help='dataset root directory')
parser.add_argument('--corenlp_path', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttn/tools/stanford-corenlp-full-2018-02-27', help='path to stanford core nlp toolkit')
parser.add_argument('--freq_thresh', type=int, default=50, help='frequency threshold for determining object classes')
parser.add_argument('--train_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/train.json')
parser.add_argument('--val_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/val_1.json')
args = parser.parse_args()
args.src_file = args.dataset_root+'anet_bb.json' # the raw annotation file
args.target_np_file = args.dataset_root+'anet_entities.json' # output np file
args.target_file = args.dataset_root+'anet_entities_cleaned_class_thresh'+str(args.freq_thresh)+'.json' # output object file
args.attr_to_video_file = args.dataset_root+'attr_to_video.txt' # from annotation tool
args.split_file = args.dataset_root+'split_ids_anet_entities.json' # split file
    np.random.seed(123)
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for processing depth images.
"""
from argparse import Namespace
import itertools
import numpy as np
import torch
import envs.utils.rotation_utils as ru
def get_camera_matrix(width, height, fov):
"""Returns a camera matrix from image size and fov."""
xc = (width - 1.) / 2.
zc = (height - 1.) / 2.
f = (width / 2.) / np.tan(np.deg2rad(fov / 2.))
camera_matrix = {'xc': xc, 'zc': zc, 'f': f}
camera_matrix = Namespace(**camera_matrix)
return camera_matrix
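# A pinhole camera model is assumed: the principal point sits at the image center
# and the focal length in pixels is f = (width / 2) / tan(fov / 2) for a horizontal fov.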
def get_point_cloud_from_z(Y, camera_matrix, scale=1):
"""Projects the depth image Y into a 3D point cloud.
Inputs:
Y is ...xHxW
camera_matrix
Outputs:
X is positive going right
Y is positive into the image
Z is positive up in the image
XYZ is ...xHxWx3
"""
x, z = np.meshgrid(np.arange(Y.shape[-1]),
np.arange(Y.shape[-2] - 1, -1, -1))
for _ in range(Y.ndim - 2):
x = np.expand_dims(x, axis=0)
z = np.expand_dims(z, axis=0)
X = (x[::scale, ::scale] - camera_matrix.xc) * \
Y[::scale, ::scale] / camera_matrix.f
Z = (z[::scale, ::scale] - camera_matrix.zc) * \
Y[::scale, ::scale] / camera_matrix.f
XYZ = np.concatenate((X[..., np.newaxis],
Y[::scale, ::scale][..., np.newaxis],
Z[..., np.newaxis]), axis=X.ndim)
return XYZ
def transform_camera_view(XYZ, sensor_height, camera_elevation_degree):
"""
Transforms the point cloud into geocentric frame to account for
camera elevation and angle
Input:
XYZ : ...x3
sensor_height : height of the sensor
camera_elevation_degree : camera elevation to rectify.
Output:
XYZ : ...x3
"""
R = ru.get_r_matrix(
[1., 0., 0.], angle=np.deg2rad(camera_elevation_degree))
XYZ = np.matmul(XYZ.reshape(-1, 3), R.T).reshape(XYZ.shape)
XYZ[..., 2] = XYZ[..., 2] + sensor_height
return XYZ
def transform_pose(XYZ, current_pose):
"""
Transforms the point cloud into geocentric frame to account for
camera position
Input:
XYZ : ...x3
current_pose : camera position (x, y, theta (radians))
Output:
XYZ : ...x3
"""
R = ru.get_r_matrix([0., 0., 1.], angle=current_pose[2] - np.pi / 2.)
XYZ = np.matmul(XYZ.reshape(-1, 3), R.T).reshape(XYZ.shape)
XYZ[:, :, 0] = XYZ[:, :, 0] + current_pose[0]
XYZ[:, :, 1] = XYZ[:, :, 1] + current_pose[1]
return XYZ
def bin_points(XYZ_cms, map_size, z_bins, xy_resolution):
"""Bins points into xy-z bins
XYZ_cms is ... x H x W x3
Outputs is ... x map_size x map_size x (len(z_bins)+1)
"""
sh = XYZ_cms.shape
XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]])
n_z_bins = len(z_bins) + 1
counts = []
for XYZ_cm in XYZ_cms:
isnotnan = np.logical_not(np.isnan(XYZ_cm[:, :, 0]))
        X_bin = np.round(XYZ_cm[:, :, 0] / xy_resolution)
import numba.cuda as cuda
import numpy as np
import math
@cuda.jit
def mat_morph_mul_max_plus_impl(a, b, c, stmp, w, q, h):
row, col = cuda.grid(2)
tmp = stmp
if row < h and col < w:
for i in range(q):
val = a[row, i] + b[i, col]
tmp = max(tmp, val)
c[row, col] = tmp
@cuda.jit
def mat_morph_mul_max_minus_impl(a, b, c, stmp, w, q, h):
row, col = cuda.grid(2)
tmp = stmp
if row < h and col < w:
for i in range(q):
val = a[row, i] - b[i, col]
tmp = max(tmp, val)
c[row, col] = tmp
@cuda.jit
def mat_morph_mul_min_plus_impl(a, b, c, stmp, w, q, h):
row, col = cuda.grid(2)
tmp = stmp
if row < h and col < w:
for i in range(q):
val = a[row, i] + b[i, col]
tmp = min(tmp, val)
c[row, col] = tmp
@cuda.jit
def mat_morph_mul_min_minus_impl(a, b, c, stmp, w, q, h):
row, col = cuda.grid(2)
tmp = stmp
if row < h and col < w:
for i in range(q):
val = a[row, i] - b[i, col]
tmp = min(tmp, val)
c[row, col] = tmp
def mat_dot(fn, stmp, a, b, c, stream=0):
"""
m
---------
| |
q | b |
| |
q ---------
--------- ---------
| | | |
n | a | | c | n
| | | |
--------- ---------
m
"""
n = a.shape[0]
m = b.shape[1]
assert c.shape == (n, m)
assert a.shape[1] == b.shape[0]
q = a.shape[1]
if n*m > 1024:
threads_per_block = [32, 32]
bpg_n = int(math.ceil(n / threads_per_block[0]))
bpg_m = int(math.ceil(m / threads_per_block[1]))
blocks_per_grid = [bpg_n, bpg_m]
else:
threads_per_block = [n, m]
blocks_per_grid = [1, 1]
fn[blocks_per_grid, threads_per_block, stream](a, b, c, stmp, m, q, n)
def mat_morph_mul_max_plus(a, b, c, stream=0):
mat_dot(mat_morph_mul_max_plus_impl, float('-inf'), a, b, c, stream)
def mat_morph_mul_max_minus(a, b, c, stream=0):
mat_dot(mat_morph_mul_max_minus_impl, float('-inf'), a, b, c, stream)
def mat_morph_mul_min_plus(a, b, c, stream=0):
mat_dot(mat_morph_mul_min_plus_impl, float('+inf'), a, b, c, stream)
def mat_morph_mul_min_minus(a, b, c, stream=0):
mat_dot(mat_morph_mul_min_minus_impl, float('+inf'), a, b, c, stream)
################################################################################
import unittest
@cuda.jit
def _test_mat_mul_impl(a, b, c, stmp, w, q, h):
row, col = cuda.grid(2)
tmp = stmp
if row < h and col < w:
for i in range(q):
tmp += a[row, i] * b[i, col]
c[row, col] = tmp
def _test_mat_mul(a, b, c):
mat_dot(_test_mat_mul_impl, 0, a, b, c)
class TestMnnPackage(unittest.TestCase):
def setUp(self):
self.mat_a = np.matrix(
[[10, 2, -1, 0],
[4, -12, 4, 2]])
self.mat_b = np.matrix(
[[-1, 7],
[9, 12],
[0, 0],
[3, -5]])
def test_mat_dot_simple(self):
'''Smoke test'''
actual = np.zeros([2, 2])
expected = np.matrix([[ 8, 94],
[-106, -126]])
_test_mat_mul(self.mat_a, self.mat_b, actual)
np.testing.assert_array_equal(expected, actual)
def test_cuda_mat_dot(self):
'''Check whether CUDA kernel is invoked correctly for big matrices'''
n = 1024
m = 512
        a = np.random.randint(0, 10, size=n*m)
import json
import numpy as np
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from . import commons as bc
# Directory where the database files are stored
database_directory = os.path.dirname(os.path.abspath(__file__)) + "/database/"
def perfCURVE(fcgs_e_rated,fcgs_e_75,fcgs_e_50,fcgs_hr_rated,fcgs_hr_75,fcgs_hr_50):
"""
    Input
        fcgs_e_rated  : rated power generation efficiency of the CGS (lower heating value basis)
        fcgs_e_75     : power generation efficiency of the CGS at a load factor of 0.75 (lower heating value basis)
        fcgs_e_50     : power generation efficiency of the CGS at a load factor of 0.50 (lower heating value basis)
        fcgs_hr_rated : rated exhaust heat recovery efficiency of the CGS (lower heating value basis)
        fcgs_hr_75    : exhaust heat recovery efficiency of the CGS at a load factor of 0.75 (lower heating value basis)
        fcgs_hr_50    : exhaust heat recovery efficiency of the CGS at a load factor of 0.50 (lower heating value basis)
    Output
        fe2  : coefficient of the quadratic term of the CGS power generation efficiency curve
        fe1  : coefficient of the linear term of the CGS power generation efficiency curve
        fe0  : constant term of the CGS power generation efficiency curve
        fhr2 : coefficient of the quadratic term of the CGS exhaust heat recovery efficiency curve
        fhr1 : coefficient of the linear term of the CGS exhaust heat recovery efficiency curve
        fhr0 : constant term of the CGS exhaust heat recovery efficiency curve
"""
fe2 = 8 * ( fcgs_e_rated - 2*fcgs_e_75 +fcgs_e_50 )
fe1 = -2 * (5*fcgs_e_rated - 12*fcgs_e_75 + 7*fcgs_e_50 )
fe0 = 3 * fcgs_e_rated - 8*fcgs_e_75 + 6*fcgs_e_50
fhr2 = 8 * (fcgs_hr_rated - 2*fcgs_hr_75 + fcgs_hr_50 )
fhr1 = -2 * ( 5*fcgs_hr_rated - 12*fcgs_hr_75 + 7*fcgs_hr_50 )
fhr0 = 3 * fcgs_hr_rated - 8*fcgs_hr_75 + 6*fcgs_hr_50
return fe2,fe1,fe0,fhr2,fhr1,fhr0
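# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The efficiency figures below are made-up; the point of the check is
# that the returned coefficients define a quadratic in the load factor x that
# reproduces the three given operating points (x = 1.0, 0.75 and 0.5).
def _example_perfCURVE():
    fe2, fe1, fe0, fhr2, fhr1, fhr0 = perfCURVE(0.40, 0.38, 0.35, 0.30, 0.32, 0.34)
    gen_eff = lambda x: fe2 * x**2 + fe1 * x + fe0
    assert abs(gen_eff(1.00) - 0.40) < 1e-9
    assert abs(gen_eff(0.75) - 0.38) < 1e-9
    assert abs(gen_eff(0.50) - 0.35) < 1e-9
    return fe2, fe1, fe0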
def calc_energy(inputdata, resultJson_for_CGS, DEBUG = False):
resultJson = {}
    # Get the name of the CGS system
cgs_name = None
if len(inputdata["CogenerationSystems"]) != 1:
raise Exception("計算可能なCGS系統は1系統のみです。")
for cgs_name_list in inputdata["CogenerationSystems"]:
cgs_name = cgs_name_list
    # Rated generator capacity of the CGS [kW]
Ecgs_rated = inputdata["CogenerationSystems"][cgs_name]["RatedCapacity"]
    # Number of CGS units installed [units]
Ncgs = inputdata["CogenerationSystems"][cgs_name]["Number"]
    # Rated power generation efficiency of the CGS (lower heating value basis), dimensionless
fcgs_e_rated = inputdata["CogenerationSystems"][cgs_name]["PowerGenerationEfficiency_100"]
    # Power generation efficiency of the CGS at a load factor of 0.75 (lower heating value basis), dimensionless
fcgs_e_75 = inputdata["CogenerationSystems"][cgs_name]["PowerGenerationEfficiency_75"]
    # Power generation efficiency of the CGS at a load factor of 0.50 (lower heating value basis), dimensionless
fcgs_e_50 = inputdata["CogenerationSystems"][cgs_name]["PowerGenerationEfficiency_50"]
    # Rated exhaust heat recovery efficiency of the CGS (lower heating value basis), dimensionless
fcgs_hr_rated = inputdata["CogenerationSystems"][cgs_name]["HeatGenerationEfficiency_100"]
    # Exhaust heat recovery efficiency of the CGS at a load factor of 0.75 (lower heating value basis), dimensionless
fcgs_hr_75 = inputdata["CogenerationSystems"][cgs_name]["HeatGenerationEfficiency_75"]
    # Exhaust heat recovery efficiency of the CGS at a load factor of 0.50 (lower heating value basis), dimensionless
fcgs_hr_50 = inputdata["CogenerationSystems"][cgs_name]["HeatGenerationEfficiency_50"]
    # Priority of exhaust heat use (cooling heat source) (*1), dimensionless
npri_hr_c = inputdata["CogenerationSystems"][cgs_name]["HeatRecoveryPriorityCooling"]
    # Priority of exhaust heat use (heating heat source) (*1), dimensionless
npri_hr_h = inputdata["CogenerationSystems"][cgs_name]["HeatRecoveryPriorityHeating"]
    # Priority of exhaust heat use (hot water supply) (*1), dimensionless
npri_hr_w = inputdata["CogenerationSystems"][cgs_name]["HeatRecoveryPriorityHotWater"]
    # Whether the CGS operates 24 hours a day (*2)
C24ope = inputdata["CogenerationSystems"][cgs_name]["24hourOperation"]
##----------------------------------------------------------------------------------
    ## Technical commentary, Appendix G.10
##----------------------------------------------------------------------------------
    # Electric power consumption of the air conditioning system on date d [MWh/day]
if resultJson_for_CGS["AC"]:
EAC_total_d = np.array(resultJson_for_CGS["AC"]["electric_power_comsumption"])
else:
EAC_total_d = np.zeros(365)
    # Primary energy consumption on date d of the main units of the exhaust-heat-input absorption chiller-heaters (systems) that can use CGS exhaust heat, operating as cooling heat sources [MJ/day]
if resultJson_for_CGS["AC"]:
EAC_ref_c_d = np.array(resultJson_for_CGS["AC"]["E_ref_cgsC_ABS_day"])
else:
EAC_ref_c_d = np.zeros(365)
    # Load factor on date d, as cooling heat sources, of the exhaust-heat-input absorption chiller-heaters (systems) that can use CGS exhaust heat, dimensionless
if resultJson_for_CGS["AC"]:
mxLAC_ref_c_d = np.array(resultJson_for_CGS["AC"]["Lt_ref_cgsC_day"])
else:
mxLAC_ref_c_d = np.zeros(365)
    # Primary energy consumption on date d of the main units of the heating heat source groups that can use CGS exhaust heat [MJ/day]
if resultJson_for_CGS["AC"]:
EAC_ref_h_hr_d = np.array(resultJson_for_CGS["AC"]["E_ref_cgsH_day"])
else:
EAC_ref_h_hr_d = np.zeros(365)
    # Heat source load on date d of the heating heat source groups that can use CGS exhaust heat [MJ/day]
if resultJson_for_CGS["AC"]:
qAC_ref_h_hr_d = np.array(resultJson_for_CGS["AC"]["Q_ref_cgsH_day"])
else:
qAC_ref_h_hr_d = np.zeros(365)
    # Electric power consumption of the mechanical ventilation system on date d [MWh/day]
if resultJson_for_CGS["V"]:
EV_total_d = np.array(resultJson_for_CGS["V"]["Edesign_MWh_day"])
else:
EV_total_d = np.zeros(365)
    # Electric power consumption of the lighting system on date d [MWh/day]
if resultJson_for_CGS["L"]:
EL_total_d = np.array(resultJson_for_CGS["L"]["Edesign_MWh_day"])
else:
EL_total_d = np.zeros(365)
    # Electric power consumption of the hot water supply system on date d [MWh/day]
if resultJson_for_CGS["HW"]:
EW_total_d = np.array(resultJson_for_CGS["HW"]["Edesign_MWh_Ele_day"])
else:
EW_total_d = np.zeros(365)
    # Primary energy consumption on date d of the hot water heaters (systems) that can use CGS exhaust heat [MJ/day]
if resultJson_for_CGS["HW"]:
EW_hr_d = np.array(resultJson_for_CGS["HW"]["Edesign_MJ_CGS_day"])
else:
EW_hr_d = np.zeros(365)
    # Hot water load on date d of the hot water heaters (systems) that can use CGS exhaust heat [MJ/day]
if resultJson_for_CGS["HW"]:
qW_hr_d = np.array(resultJson_for_CGS["HW"]["Q_eqp_CGS_day"])
else:
qW_hr_d = np.zeros(365)
    # Electric power consumption of the elevators on date d [MWh/day]
if resultJson_for_CGS["EV"]:
EEV_total_d = np.array(resultJson_for_CGS["EV"]["Edesign_MWh_day"])
else:
EEV_total_d = np.zeros(365)
    # Power generated on date d by the efficiency improvement equipment (photovoltaics) [MWh/day]
if resultJson_for_CGS["PV"]:
EPV_total_d = np.array(resultJson_for_CGS["PV"]["Edesign_MWh_day"])
else:
EPV_total_d = np.zeros(365)
    # Other electric power consumption on date d [MWh/day]
if resultJson_for_CGS["OT"]:
EM_total_d = np.array(resultJson_for_CGS["OT"]["Edesign_MWh_day"])
else:
EM_total_d = np.zeros(365)
    # Operating hours on date d of the exhaust-heat-input absorption chiller-heaters (systems) that can use CGS exhaust heat [h/day]
if resultJson_for_CGS["AC"]:
        TAC_c_d = np.array(resultJson_for_CGS["AC"]["T_ref_cgsC_day"])
# FT_connect_functions
from __future__ import print_function
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import color, feature, filters, io, measure, morphology, segmentation, img_as_ubyte, transform, registration
import warnings
import math
import pandas as pd
import argparse
import subprocess
import re
import glob
from skimage.segmentation import clear_border
from ortools.graph import pywrapgraph
import time
from shutil import copyfile
from scipy.spatial import distance
from FT_connect_config import *
def genDisplacement(filename_t0,filename_t1):
global pathToSegs
temp1 = np.asarray(np.load(filename_t0,allow_pickle=True)).item()
imfilename1 = temp1['filename'].split('/')[-1]
img1 = io.imread(pathToSegs+imfilename1);
temp2 = np.asarray(np.load(filename_t1,allow_pickle=True)).item()
imfilename2 = temp2['filename'].split('/')[-1]
img2 = io.imread(pathToSegs+imfilename2);
shift_vector = registration.phase_cross_correlation(img1, img2)
return(shift_vector[0])
def buildFeatureFrame(filename_t0,timepoint,pathtoimage="./"):
temp = np.asarray(np.load(filename_t0,allow_pickle=True)).item()
imfilename = temp['filename'].split('/')[-1]
img = io.imread(pathtoimage+imfilename);
masks = clear_border(temp['masks'])
image_props = measure.regionprops_table(masks,
intensity_image=img,
properties=('label','area', 'centroid', 'bbox','mean_intensity'))
im_df = pd.DataFrame(image_props)
im_df['time'] = timepoint
return(im_df)
def expandBoundBox(FeatureFrame, expansion = 2):
hf_row = np.ceil((FeatureFrame['bbox-3']-FeatureFrame['bbox-1'])/2)
hf_col = np.ceil((FeatureFrame['bbox-2']-FeatureFrame['bbox-0'])/2)
maxes = expansion*np.amax(np.vstack((hf_row,hf_col)).T,axis=1).astype(int)
FeatureFrame['ebox-0'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['bbox-0']-maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-1'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['bbox-1']-maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-2'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['bbox-2']),FeatureFrame['bbox-2']+maxes)).T,axis=1).astype(int)
FeatureFrame['ebox-3'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['bbox-3']),FeatureFrame['bbox-3']+maxes)).T,axis=1).astype(int)
return(FeatureFrame)
def futureBoundBox(FeatureFrame,shiftVector):
FeatureFrame['fbox-0'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['ebox-0'] - shiftVector[1] )).T,axis=1).astype(int)
FeatureFrame['fbox-1'] = np.amax(np.vstack((np.zeros(FeatureFrame.shape[0]),FeatureFrame['ebox-1'] - shiftVector[0] )).T,axis=1).astype(int)
FeatureFrame['fbox-2'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['ebox-2']),FeatureFrame['ebox-2'] - shiftVector[1] )).T,axis=1).astype(int)
FeatureFrame['fbox-3'] = np.amin(np.vstack((np.zeros(FeatureFrame.shape[0])+np.max(FeatureFrame['ebox-3']),FeatureFrame['ebox-3'] - shiftVector[0] )).T,axis=1).astype(int)
return(FeatureFrame)
def expectedLocation(FeatureFrame,shiftVector):
FeatureFrame['fcentroid-0'] = FeatureFrame['centroid-0'] - shiftVector[1]
FeatureFrame['fcentroid-1'] = FeatureFrame['centroid-1'] - shiftVector[0]
return(FeatureFrame)
def genCandidateNodes(FeatureFrame_t0, FeatureFrame_t1):
candidates = (((np.asarray(FeatureFrame_t1['centroid-0'])[:,None]>=np.asarray(FeatureFrame_t0['fbox-0']))&
(np.asarray(FeatureFrame_t1['centroid-0'])[:,None]<np.asarray(FeatureFrame_t0['fbox-2']))&
(np.asarray(FeatureFrame_t1['centroid-1'])[:,None]>=np.asarray(FeatureFrame_t0['fbox-1']))&
(np.asarray(FeatureFrame_t1['centroid-1'])[:,None]<np.asarray(FeatureFrame_t0['fbox-3']))))
return(candidates)
def getDifference(FeatureFrame_t0, FeatureFrame_t1, feature="position",normed=True):
if (feature == "position"):
delta0 = (np.asarray(FeatureFrame_t1['centroid-0'])[:,None]-np.asarray(FeatureFrame_t0['fcentroid-0']))
delta1 = (np.asarray(FeatureFrame_t1['centroid-1'])[:,None]-np.asarray(FeatureFrame_t0['fcentroid-1']))
result = np.sqrt(delta0**2 + delta1**2 )
else :
result = np.abs(np.asarray(FeatureFrame_t1[feature])[:,None]-np.asarray(FeatureFrame_t0[feature]))
if normed:
result = result/10**np.floor(np.log10(np.max(result)))
return(result)
def DivSizeScore(mom_area, sis_area_1, sis_area_2):
global DivSizeDiffThreshold,DivSizeScoreReturn,DivSizeRatio_Min,DivSizeRatio_Max
areaRatio = (mom_area / (sis_area_1 + sis_area_2))
diffArea = np.abs(sis_area_1 - sis_area_2)/np.sqrt(sis_area_1 *sis_area_2)
if((areaRatio >= DivSizeRatio_Min)&(areaRatio < DivSizeRatio_Max)&(diffArea<DivSizeDiffThreshold)):
return(0.0)
else:
return(DivSizeScoreReturn)
def DivIntScore(sis_int_1, sis_int_2):
global DivIntensityDiffThreshold,DivIntensityScoreReturn
diffInt = np.abs(sis_int_1 - sis_int_2)/np.sqrt(sis_int_1*sis_int_2)
if((diffInt<DivIntensityDiffThreshold)):
return(0.0)
else:
return(DivIntensityScoreReturn)
def DivScore(FeatureFrame_t0, FeatureFrame_t1, index_mom, index_sis_1, index_sis_2):
global DivMoveScoreReturn, mitosis_RangeMultiplier
momFF_select = FeatureFrame_t0.loc[index_mom]
sis1FF_select = FeatureFrame_t1.loc[index_sis_1]
sis2FF_select = FeatureFrame_t1.loc[index_sis_2]
mom_loc = [momFF_select['centroid-0'],momFF_select['centroid-1']]
mom_corner_1 = [momFF_select['bbox-0'],momFF_select['bbox-1']]
mom_corner_2 = [momFF_select['bbox-2'],momFF_select['bbox-3']]
mom_area = (momFF_select['area'])
mom_range = distance.euclidean(mom_corner_1,mom_corner_2)
sis1_loc = [sis1FF_select['centroid-0'],sis1FF_select['centroid-1']]
sis1_area = (sis1FF_select['area'])
sis1_int = (sis1FF_select['mean_intensity'])
sis2_loc = [sis2FF_select['centroid-0'],sis2FF_select['centroid-1']]
sis2_area = (sis2FF_select['area'])
sis2_int = (sis2FF_select['mean_intensity'])
mom_s1_dist = distance.euclidean(sis1_loc,mom_loc)
mom_s2_dist = distance.euclidean(sis2_loc,mom_loc)
sis_middle_loc = (np.array(sis1_loc)+np.array(sis2_loc))/2
cost1 = distance.euclidean(sis_middle_loc,mom_loc)
cost2 = np.abs(mom_s1_dist-mom_s2_dist)
cost3 = distance.euclidean(sis1_loc,sis2_loc)
if(cost3 < (mitosis_RangeMultiplier*mom_range)):
MoveCost = cost1 + cost2/2
else:
MoveCost = DivMoveScoreReturn
SizeCost = DivSizeScore(mom_area=mom_area, sis_area_1=sis1_area, sis_area_2=sis2_area)
IntCost = DivIntScore(sis_int_1=sis1_int, sis_int_2=sis2_int)
finalScore = np.round((MoveCost+SizeCost+IntCost),1)
return([index_mom,index_sis_1,index_sis_2,finalScore])
def GenMitosisPairs(CandidateFrame, motherIndex):
#returns array of daughter index-pairs in candidate frame
DaughtersPossible = np.where(CandidateFrame[:,motherIndex])[0]
if(len(DaughtersPossible)>1):
DaughtersPairs = np.array(np.meshgrid(DaughtersPossible, DaughtersPossible)).T.reshape(-1,2)
        Sisters = np.unique(np.sort(DaughtersPairs), axis=0)
import math as mt
import numpy as np
import byxtal.find_csl_dsc as fcd
import byxtal.integer_manipulations as iman
import byxtal.bp_basis as bpb
import byxtal.pick_fz_bpl as pfb
import numpy.linalg as nla
import ovito.data as ovd
from ovito.pipeline import StaticSource, Pipeline
import ovito.modifiers as ovm
from ovito.data import CutoffNeighborFinder
def find_int_solns(a_vec, b_vec):
"""
Given two basis vectors (a_vec and b_vec) in the primitive basis,
find the third basis vector (c_vec) such that the matrix
[a_vec, b_vec, c_vec] is a valid basis.
All the components of the vectors are integers and
the determinant of the matrix must be equal to **1**.
Parameters
-----------------
a_vec: numpy.array
The first basis vector. Must be an integer array.
b_vec: numpy.array
The second basis vector. Must be an integer array.
Returns
------------
l_p2_p1: numpy.array, (3X3, must be an integer array)
A 3x3 numpy array of integers that forms the new basis for the lattice.
"""
a1 = a_vec[0]
a2 = a_vec[1]
a3 = a_vec[2]
b1 = b_vec[0]
b2 = b_vec[1]
b3 = b_vec[2]
a = a2*b3 - a3*b2
b = -(a1*b3 - a3*b1)
c = a1*b2 - a2*b1
d = 1
a = int(a)
b = int(b)
c = int(c)
d = int(d)
p = mt.gcd(a, b)
if p == 0:
if c == 1:
y1 = 0
y2 = 0
y3 = 1
# l_p2_p1 = (np.hstack((a_vec, b_vec, np.array([[y1],[y2],[y3]]))))
l_p2_p1 = np.dstack((a_vec, b_vec, np.array([y1, y2, y3]))).squeeze()
det1 = nla.det(l_p2_p1)
if ((np.abs(det1)-1) > 1e-10):
raise Exception('Error with Diophantine solution')
else:
if det1 == -1:
l_p2_p1[:, 2] = -l_p2_p1[:, 2]
else:
raise Exception('Error with boundary-plane indices')
else:
a1 = int(a/p)
b1 = int(b/p)
# Let u0 and v0 any solution of a'u + b'v = c
int_soln1 = bpb.lbi_dioph_soln(a1, b1, c)
u0 = int(int_soln1[0])
v0 = int(int_soln1[1])
# z0, t0 any solution of cz + pt = d
int_soln2 = bpb.lbi_dioph_soln(c, p, d)
z0 = int(int_soln2[0])
t0 = int(int_soln2[1])
# x0, y0 any solution of a'x + b'y = t0
int_soln3 = bpb.lbi_dioph_soln(a1, b1, t0)
x0 = int(int_soln3[0])
y0 = int(int_soln3[1])
# The general solution of ax + by + cz = d is :
# x = x0 + b'k - u0m
# y = y0 - a'k - v0m
# z = z0 + pm with k and m any integer in \mathbb{Z}
tn1 = 10
ival = np.arange(-(tn1), tn1+1)
k1, m1 = np.meshgrid(ival, ival)
k1 = k1.flatten()
m1 = m1.flatten()
x = x0 + b1*k1 - u0*m1
y = y0 - a1*k1 - v0*m1
z = z0 + p*m1
l2_val = x**2 + y**2 + z**2
ind1 = np.where(l2_val == np.min(l2_val))[0][0]
y1 = x[ind1]
y2 = y[ind1]
y3 = z[ind1]
l_p2_p1 = (np.vstack((a_vec, b_vec, np.array([y1, y2, y3])))).transpose()
det1 = nla.det(l_p2_p1)
if (np.abs(det1-1) > (1e-10*np.max(np.abs(l_p2_p1)))):
raise Exception('Error with Diophantine solution')
else:
if det1 == -1:
l_p2_p1[:, 2] = -l_p2_p1[:, 2]
return (l_p2_p1).astype(int)
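# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module): completing two integer basis vectors to a unimodular basis. The
# input vectors are arbitrary small examples.
def _example_find_int_solns():
    a_vec = np.array([1, 0, 0])
    b_vec = np.array([0, 1, 0])
    l_p2_p1 = find_int_solns(a_vec, b_vec)
    # The completed basis must have determinant +1.
    assert abs(nla.det(l_p2_p1) - 1.0) < 1e-10
    return l_p2_p1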
def compute_rCut(l2d_bp_po):
"""
Given two vectors in the interface plane, compute the
maximum of the norms of the two vectors.
Parameters
-----------------
l2d_bpb_po: numpy.array
The two vectors, expressed in the **po** reference frame,
that define the two-dimensional box vectors of the interface.
Returns
------------
rCut: float
The cut-off radius for replicating the lattice basis.
"""
bv1 = l2d_bp_po[:, 0]
bv2 = l2d_bp_po[:, 1]
l1 = nla.norm(bv1)
l2 = nla.norm(bv2)
l3 = nla.norm((bv1+bv2))
    rCut = np.max([l1, l2, l3])
    return rCut
import numpy as np
# version 03.10.2019
# solveODE(f, y0, Ttrans, Teval, dt, outSteps, method) solves ODEs for their solution sol
#
# arguments:
# f : right hand side of ode, function object, returns float or numpy array
# y0 : initial values,
# for onedimensional odes int or float
# for n-dimensional odes list or numpy array
# Ttrans : transient time, float
# Teval : evaluation time, float
# dt : integration time step, float
# outSteps : store every outSteps-th step in sol, integer
# method : method for numerical integration, string
class ODE_Solver:
# constructor
def __init__ (self):
pass
# numerical method functions perform integration step for given right-hand side f of ode
# performs explicit Euler step
# convergence order 1
def explicitEuler (self, f, y, t, dt):
return (y + f(y, t) * dt)
# performs implicit Euler step with fixed point iteration
# convergence order 1
def implicitEulerFPI (self, f, y, t, dt, tol = 1e-10):
x = y
x_prev = x + 2.0 * tol
j = 0
        while np.linalg.norm(x - x_prev) >= tol and j < 15:  # capped at 15 iterations; ideally raise an error on non-convergence
j += 1
x_prev = x
x = y + f(x, t) * dt
return (x)
# performs explicit midpoint step
# convergence order 2
def explicitMidpoint (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt / 2.0, t + dt / 2.0)
return (y + k2 * dt)
# performs explicit Runge-Kutta step of stage 2
# convergence order 2
def explicitHeun (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt, t + dt)
return (y + (k1 + k2) * dt / 2.0)
# performs explicit Runge-Kutta step of stage 4
# convergence order 4
def explicitRungeKutta4 (self, f, y, t, dt):
k1 = f(y, t)
k2 = f(y + k1 * dt / 2.0, t + dt / 2.0)
k3 = f(y + k2 * dt / 2.0, t + dt / 2.0)
k4 = f(y + k3 * dt, t + dt)
return (y + (k1 + 2.0 * (k2 + k3) + k4) * dt / 6.0)
def RungeKutta54_coefficients ():
# stage
s = 7
        c = np.array([0.0, 1.0 / 5.0, 3.0 / 10.0, 4.0 / 5.0, 8.0 / 9.0, 1.0, 1.0])
A = np.matrix([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0 / 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[3.0 / 40.0, 9.0 / 40.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[44.0 / 45.0, -56.0 / 15.0, 32.0 / 9.0, 0.0, 0.0, 0.0, 0.0],
[19372.0 / 6561.0, -25360.0 / 2187.0, 64448.0 / 6561.0, -212.0 / 729.0, 0.0, 0.0, 0.0],
[9017.0 / 3168.0, -355.0 / 33.0, 46732.0 / 5247.0, 49.0 / 176.0, -5103.0 / 18656.0, 0.0, 0.0],
[35.0 / 384.0, 0.0, 500.0 / 1113.0, 125.0 / 192.0, -2187.0 / 6784.0, 11.0 / 84.0, 0.0]])
        # lower-order (embedded) solution weights
        b1 = np.array([5179.0 / 57600.0, 0.0, 7571.0 / 16695.0, 393.0 / 640.0, -92097.0 / 339200.0, 187.0 / 2100.0, 1.0 / 40.0])
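# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module): integrating dy/dt = -y with the classic Runge-Kutta 4 step. After
# 100 steps of dt = 0.01 the result should be very close to exp(-1).
def _example_rk4_decay():
    solver = ODE_Solver()
    rhs = lambda y, t: -y
    y, t, dt = 1.0, 0.0, 0.01
    for _ in range(100):
        y = solver.explicitRungeKutta4(rhs, y, t, dt)
        t += dt
    assert abs(y - np.exp(-1.0)) < 1e-8
    return y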
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import ParameterTuple, Parameter
context.set_context(device_target='CPU')
class LstmNet(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(LstmNet, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = P.LSTM(input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
input_np = np.array([[[0.6755, -1.6607, 0.1367], [0.4276, -0.7850, -0.3758]],
[[-0.6424, -0.6095, 0.6639], [0.7918, 0.4147, -0.5089]],
[[-1.5612, 0.0120, -0.7289], [-0.6656, -0.6626, -0.5883]],
[[-0.9667, -0.6296, -0.7310], [0.1026, -0.6821, -0.4387]],
[[-0.4710, 0.6558, -0.3144], [-0.8449, -0.2184, -0.1806]]
]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
self.h = Parameter(initializer(
Tensor(
np.array([0.1, 0.1, 0.1, 0.1]).reshape((num_layers * num_directions, batch_size, hidden_size)).astype(
np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='h')
self.c = Parameter(initializer(
Tensor(
np.array([0.2, 0.2, 0.2, 0.2]).reshape((num_layers * num_directions, batch_size, hidden_size)).astype(
np.float32)),
[num_layers * num_directions, batch_size, hidden_size]), name='c')
wih = np.array([[3.4021e-01, -4.6622e-01, 4.5117e-01],
[-6.4257e-02, -2.4807e-01, 1.3550e-02], # i
[-3.2140e-01, 5.5578e-01, 6.3589e-01],
[1.6547e-01, -7.9030e-02, -2.0045e-01],
[-6.9863e-01, 5.9773e-01, -3.9062e-01],
[-3.0253e-01, -1.9464e-01, 7.0591e-01],
[-4.0835e-01, 3.6751e-01, 4.7989e-01],
[-5.6894e-01, -5.0359e-01, 4.7491e-01]]).astype(np.float32) # .reshape([1,-1])
whh = np.array([[-0.4820, -0.2350],
[-0.1195, 0.0519],
[0.2162, -0.1178],
[0.6237, 0.0711],
[0.4511, -0.3961],
[-0.5962, 0.0906],
[0.1867, -0.1225],
[0.1831, 0.0850]]).astype(np.float32) # .reshape([1,-1])
wih = wih.transpose((1, 0))
whh = whh.transpose((1, 0))
bih = np.zeros((1, 8)).astype(np.float32)
w_np = np.concatenate((wih, whh, bih), axis=0).reshape([-1, 1, 1])
self.w = Parameter(initializer(Tensor(w_np), w_np.shape), name='w')
@ms_function
def construct(self):
return self.lstm(self.x, self.h, self.c, self.w)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lstm():
seq_len = 5
batch_size = 2
input_size = 3
hidden_size = 2
num_layers = 1
has_bias = True
bidirectional = False
dropout = 0.0
num_directions = 1
if bidirectional:
num_directions = 2
net = LstmNet(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)
y, h, c, _, _ = net()
print(y)
print(c)
print(h)
expect_y = np.array([[[-0.16709016, 0.13125697],
[-0.08438572, -0.01969833]],
[[-0.2746155, 0.32764038],
[-0.06504016, -0.07770399]],
[[-0.00140004, 0.17706314],
[0.03244496, -0.10135599]],
[[0.08328028, 0.06437367],
[-0.04133911, -0.11072896]],
[[0.19004421, -0.02852732],
[0.09138509, -0.00344161]]]
)
error = np.ones([num_layers, batch_size, hidden_size]) * 1.0e-4
diff = y.asnumpy() - expect_y
assert np.all(diff < error)
assert np.all(-diff < error)
#
expect_h = np.array([[[0.19004421, -0.02852732],
[0.09138509, -0.00344161]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = h.asnumpy() - expect_h
assert np.all(diff < error)
assert np.all(-diff < error)
#
expect_c = np.array([[[0.34533143, -0.06313794],
[0.169008, -0.00555446]]])
error = np.ones((num_layers * num_directions, batch_size, hidden_size)) * 1.0e-4
diff = c.asnumpy() - expect_c
assert np.all(diff < error)
assert np.all(-diff < error)
class MultiLayerBiLstmNet(nn.Cell):
def __init__(self, seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):
super(MultiLayerBiLstmNet, self).__init__()
num_directions = 1
if bidirectional:
num_directions = 2
self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, has_bias=has_bias,
bidirectional=bidirectional, dropout=dropout)
input_np = np.array([[[-0.1887, -0.4144, -0.0235, 0.7489, 0.7522, 0.5969, 0.3342, 1.2198, 0.6786, -0.9404],
[-0.8643, -1.6835, -2.4965, 2.8093, 0.1741, 0.2707, 0.7387, -0.0939, -1.7990, 0.4765]],
[[-0.5963, -1.2598, -0.7226, 1.1365, -1.7320, -0.7302, 0.1221, -0.2111, -1.6173, -0.0706],
[0.8964, 0.1737, -1.0077, -0.1389, 0.4889, 0.4391, 0.7911, 0.3614, -1.9533, -0.9936]],
[[0.3260, -1.3312, 0.0601, 1.0726, -1.6010, -1.8733, -1.5775, 1.1579, -0.8801, -0.5742],
[-2.2998, -0.6344, -0.5409, -0.9221, -0.6500, 0.1206, 1.5215, 0.7517, 1.3691, 2.0021]],
[[-0.1245, -0.3690, 2.1193, 1.3852, -0.1841, -0.8899, -0.3646, -0.8575, -0.3131, 0.2026],
[1.0218, -1.4331, 0.1744, 0.5442, -0.7808, 0.2527, 0.1566, 1.1484, -0.7766, -0.6747]],
[[-0.6752, 0.9906, -0.4973, 0.3471, -0.1202, -0.4213, 2.0213, 0.0441, 0.9016, 1.0365],
[1.2223, -1.3248, 0.1207, -0.8256, 0.1816, 0.7057, -0.3105, 0.5713, 0.2804,
-1.0685]]]).astype(np.float32)
self.x = Parameter(initializer(Tensor(input_np), [seq_len, batch_size, input_size]), name='x')
        self.h0 = Parameter(initializer(
            Tensor(np.ones((num_directions, batch_size, hidden_size)).astype(np.float32)),
            [num_directions, batch_size, hidden_size]), name='h0')
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Affiliation: TU Delft and Deltares, Delft, The Netherlands
Pre-processing for Gibbs sampler to:
1. Extract seasonal shape
2. Produce time shifts for the new scenarios
"""
#==============================================================================
#STEP 0 - Import data
import matplotlib.pyplot as plt
import numpy as np
from lmfit.models import LinearModel
from scipy.signal import argrelextrema
from scipy import stats
from scipy.stats import rankdata
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
import random
from scipy import interpolate
#==============================================================================
#Define functions
def seasonal_mean(x, freq):
"""
Return means for each period in x. freq is an int that gives the
number of periods per cycle. E.g., 12 for monthly. NaNs are ignored
in the mean.
"""
return np.array([np.nanmean(x[i::freq], axis=0) for i in range(freq)])
def seasonal_component(x, freq):
"""
Tiles seasonal means (periodical avereages) into a time seris as long as the original.
"""
nobs=len(x)
period_averages=seasonal_mean(x, freq)
period_averages -= np.mean(period_averages, axis=0)
return np.tile(period_averages.T, nobs // freq + 1).T[:nobs]
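# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# script): extracting the seasonal shape of a synthetic series with a known
# period of 12 samples.
def _example_seasonal_component():
    t = np.arange(240)  # e.g. 20 "years" of monthly values
    series = np.sin(2 * np.pi * t / 12) + 0.1 * np.random.randn(240)
    seasonal = seasonal_component(series, freq=12)
    assert seasonal.shape == series.shape  # tiled back to the input length
    return seasonal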
#==============================================================================
#Load and initialize data
data=np.load(r'tensor_daily_mean_5D.npy')
#Replace Nan with 0
data[np.where(np.isnan(data) == True)]=0
#Reshape
data=np.append(data[:,:,:,:,0],data[:,:,:,:,1],axis=3)
#Data view 3D
data_slice=data[:,3,0,:]
#CalendarYear
calendarYear=365.00
#==============================================================================
#Initialize empty vectors
#cosine_yearly_fitted=np.zeros((400, 90,np.shape(data)[3]))
x_data_ideal_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_ideal_cont_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_slice_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_smooth_matrix=np.zeros((420, 90,np.shape(data)[3]))
y_data_slice_smooth_365_nearest_matrix=np.zeros((420, 90,np.shape(data)[3]))
x_data_ideal_1D_matrix=np.zeros((np.shape(data)[0], np.shape(data)[3]))
y_data_365_nearest_matrix=np.zeros((np.shape(data)[0], np.shape(data)[3]))
deviation_matrix=np.zeros((90,np.shape(data)[3]))
line_intercept=np.zeros((1,np.shape(data)[3]))
line_slope=np.zeros((1,np.shape(data)[3]))
residual_pattern_sqr_matrix=np.zeros((int(calendarYear), np.shape(data)[3]))
#==============================================================================
#Zero to Nan
x_data_slice_matrix[x_data_slice_matrix == 0] = np.nan
x_data_ideal_matrix[x_data_ideal_matrix == 0] = np.nan
x_data_ideal_cont_matrix[x_data_ideal_cont_matrix == 0] = np.nan
y_data_slice_matrix[y_data_slice_matrix == 0] = np.nan
y_data_slice_smooth_matrix[y_data_slice_smooth_matrix == 0] = np.nan
y_data_slice_smooth_365_nearest_matrix[y_data_slice_smooth_365_nearest_matrix == 0] = np.nan
x_data_ideal_1D_matrix[x_data_ideal_1D_matrix == 0] = np.nan
y_data_365_nearest_matrix[y_data_365_nearest_matrix == 0] = np.nan
residual_pattern_sqr_matrix[residual_pattern_sqr_matrix == 0] = np.nan
#==============================================================================
#Choose time interval by the number of timesteps
datalimit1=0
datalimit2=32872
#Initialize empty matrices
y_data_detrended_matrix=np.zeros((datalimit2, np.shape(data)[3]))
trend=np.zeros((datalimit2,np.shape(data)[3]))
residual=np.zeros((datalimit2, np.shape(data)[3]))
# License is MIT: see LICENSE.md.
"""Nestle: nested sampling routines to evaluate Bayesian evidence."""
import sys
import warnings
import math
import numpy as np
try:
from scipy.cluster.vq import kmeans2
HAVE_KMEANS = True
except ImportError: # pragma: no cover
HAVE_KMEANS = False
__all__ = ["sample", "print_progress", "mean_and_cov", "resample_equal",
"Result"]
__version__ = "0.2.0"
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
# -----------------------------------------------------------------------------
# Helpers
def vol_prefactor(n):
"""Volume constant for an n-dimensional sphere:
for n even: (2pi)^(n /2) / (2 * 4 * ... * n)
for n odd : 2 * (2pi)^((n-1)/2) / (1 * 3 * ... * n)
"""
if n % 2 == 0:
f = 1.
i = 2
while i <= n:
f *= (2. / i * math.pi)
i += 2
else:
f = 2.
i = 3
while i <= n:
f *= (2. / i * math.pi)
i += 2
return f
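# Small consistency check (added for clarity; not part of the original module):
# vol_prefactor(n) is the volume of the unit n-sphere, so it can be compared
# with the familiar closed forms pi (n=2) and 4*pi/3 (n=3).
def _example_vol_prefactor():
    assert abs(vol_prefactor(2) - math.pi) < 1e-12
    assert abs(vol_prefactor(3) - 4.0 * math.pi / 3.0) < 1e-12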
def randsphere(n, rstate=np.random):
"""Draw a random point within an n-dimensional unit sphere"""
z = rstate.randn(n)
return z * rstate.rand()**(1./n) / np.sqrt(np.sum(z**2))
def random_choice(a, p, rstate=np.random):
"""replacement for numpy.random.choice (only in numpy 1.7+)"""
if abs(np.sum(p) - 1.) > SQRTEPS: # same tol as in np.random.choice.
raise ValueError("probabilities do not sum to 1")
r = rstate.rand()
i = 0
t = p[i]
while t < r:
i += 1
t += p[i]
return i
def resample_equal(samples, weights, rstate=None):
"""Resample the samples so that the final samples all have equal weight.
Each input sample appears in the output array either
`floor(weights[i] * N)` or `ceil(weights[i] * N)` times, with
`floor` or `ceil` randomly selected (weighted by proximity).
Parameters
----------
samples : `~numpy.ndarray`
Unequally weight samples returned by the nested sampling algorithm.
Shape is (N, ...), with N the number of samples.
weights : `~numpy.ndarray`
Weight of each sample. Shape is (N,).
Returns
-------
equal_weight_samples : `~numpy.ndarray`
Samples with equal weights, same shape as input samples.
Examples
--------
>>> x = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
>>> w = np.array([0.6, 0.2, 0.15, 0.05])
>>> nestle.resample_equal(x, w)
array([[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 3., 3.]])
Notes
-----
Implements the systematic resampling method described in
`this PDF <http://people.isy.liu.se/rt/schon/Publications/HolSG2006.pdf>`_.
Another way to sample according to weights would be::
N = len(weights)
new_samples = samples[np.random.choice(N, size=N, p=weights)]
However, the method used in this function is less "noisy".
"""
if abs(np.sum(weights) - 1.) > SQRTEPS: # same tol as in np.random.choice.
raise ValueError("weights do not sum to 1")
if rstate is None:
rstate = np.random
N = len(weights)
# make N subdivisions, and choose positions with a consistent random offset
positions = (rstate.random() + np.arange(N)) / N
idx = np.zeros(N, dtype=np.int)
cumulative_sum = np.cumsum(weights)
i, j = 0, 0
while i < N:
if positions[i] < cumulative_sum[j]:
idx[i] = j
i += 1
else:
j += 1
return samples[idx]
class Result(dict):
"""Represents a sampling result.
Since this class is essentially a subclass of dict with attribute
accessors, one can see which attributes are available using the
`keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if list(self.keys()):
m = max(list(map(len, list(self.keys())))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in list(self.items())])
else:
return self.__class__.__name__ + "()"
def summary(self):
"""Return a nicely formatted string giving summary."""
return ("niter: {:d}\n"
"ncall: {:d}\n"
"nsamples: {:d}\n"
"logz: {:6.3f} +/- {:6.3f}\n"
"h: {:6.3f}"
.format(self.niter, self.ncall, len(self.samples),
self.logz, self.logzerr, self.h))
def mean_and_cov(x, weights):
"""Compute weighted sample mean and covariance.
Parameters
----------
x : `~numpy.ndarray`
2-D array containing data samples. Shape is (M, N) where N is the
number of variables and M is the number of samples or observations.
This is ordering is equivalent to using ``rowvar=0`` in numpy.cov.
weights : `~numpy.ndarray`
1-D array of sample weights. Shape is (M,).
Returns
-------
mean : `~numpy.ndarray`
Weighted average of samples, with shape (N,).
cov : `~numpy.ndarray`
The covariance matrix of the variables with shape (N, N).
Notes
-----
Implements formula described here:
https://en.wikipedia.org/wiki/Sample_mean_and_sample_covariance
(see "weighted samples" section)
"""
mean = np.average(x, weights=weights, axis=0)
dx = x - mean
wsum = np.sum(weights)
w2sum = np.sum(weights**2)
cov = wsum / (wsum**2 - w2sum) * np.einsum('i,ij,ik', weights, dx, dx)
return mean, cov
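# Small consistency check (added for clarity; not part of the original module):
# with uniform weights the weighted estimators reduce to the ordinary sample
# mean and the unbiased sample covariance.
def _example_mean_and_cov():
    x = np.array([[1.0, 2.0], [2.0, 1.0], [4.0, 3.0]])
    w = np.ones(3) / 3.0
    mean, cov = mean_and_cov(x, w)
    assert np.allclose(mean, x.mean(axis=0))
    assert np.allclose(cov, np.cov(x, rowvar=0))
    return mean, cov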
def print_progress(info):
"""Callback function that prints a running total on a single line.
Parameters
----------
info : dict
Dictionary containing keys ``'it'`` and ``'logz'``.
"""
print("\r\033[Kit={:6d} logz={:8f}".format(info['it'], info['logz']),
end='')
sys.stdout.flush() # because flush keyword not in print() in py2.7
# -----------------------------------------------------------------------------
# Ellipsoid
class Ellipsoid(object):
"""An N-ellipsoid.
Defined by::
(x - v)^T A (x - v) = 1
where the vector ``v`` is the center of the ellipse and ``A`` is an N x N
matrix. Assumes that ``A`` is symmetric positive definite.
Parameters
----------
ctr : `~numpy.ndarray` with shape ``(N,)``
Coordinates of ellipse center. Note that the array is *not* copied.
This array is never modified internally.
a : `~numpy.ndarray` with shape ``(N, N)``
Matrix describing the axes. Watch out! This array is *not* copied.
but may be modified internally!
"""
def __init__(self, ctr, a):
self.n = len(ctr)
self.ctr = ctr # center coordinates
self.a = a # ~ inverse of covariance of points contained
self.vol = vol_prefactor(self.n) / np.sqrt(np.linalg.det(a))
# eigenvalues (l) are a^-2, b^-2, ... (lengths of principle axes)
# eigenvectors (v) are normalized principle axes
l, v = np.linalg.eigh(a)
self.axlens = 1. / np.sqrt(l)
# Scaled eigenvectors are the axes: axes[:,i] is the i-th
# axis. Multiplying this matrix by a vector will transform a
# point in the unit n-sphere into a point in the ellipsoid.
self.axes = np.dot(v, np.diag(self.axlens))
def scale_to_vol(self, vol):
"""Scale ellipoid to satisfy a target volume."""
f = (vol / self.vol) ** (1.0 / self.n) # linear factor
self.a *= f**-2
self.axlens *= f
self.axes *= f
self.vol = vol
def major_axis_endpoints(self):
"""Return the endpoints of the major axis"""
i = np.argmax(self.axlens) # which is the major axis?
v = self.axes[:, i] # vector to end of major axis
return self.ctr - v, self.ctr + v
def contains(self, x):
"""Does the ellipse contain the point?"""
d = x - self.ctr
return np.dot(np.dot(d, self.a), d) <= 1.0
def randoffset(self, rstate=np.random):
"""Return an offset from ellipsoid center, randomly distributed
within ellipsoid."""
return np.dot(self.axes, randsphere(self.n, rstate=rstate))
def sample(self, rstate=np.random):
"""Chose a sample randomly distributed within the ellipsoid.
Returns
-------
x : 1-d array
A single point within the ellipsoid.
"""
return self.ctr + self.randoffset(rstate=rstate)
def samples(self, nsamples, rstate=np.random):
"""Chose a sample randomly distributed within the ellipsoid.
Returns
-------
x : (nsamples, ndim) array
Coordinates within the ellipsoid.
"""
x = np.empty((nsamples, self.n), dtype=np.float)
for i in range(nsamples):
x[i, :] = self.sample(rstate=rstate)
return x
def __repr__(self):
return "Ellipsoid(ctr={})".format(self.ctr)
# -----------------------------------------------------------------------------
# Functions for determining the ellipsoid or set of ellipsoids bounding a
# set of points.
def make_eigvals_positive(a, targetprod):
"""For the symmetric square matrix ``a``, increase any zero eigenvalues
to fulfill the given target product of eigenvalues.
Returns a (possibly) new matrix."""
w, v = np.linalg.eigh(a) # Use eigh because we assume a is symmetric.
mask = w < 1.e-10
if np.any(mask):
nzprod = np.product(w[~mask]) # product of nonzero eigenvalues
nzeros = mask.sum() # number of zero eigenvalues
w[mask] = (targetprod / nzprod) ** (1./nzeros) # adjust zero eigvals
a = np.dot(np.dot(v, np.diag(w)), np.linalg.inv(v)) # re-form cov
return a
def bounding_ellipsoid(x, pointvol=0., minvol=False):
"""Calculate bounding ellipsoid containing a set of points x.
Parameters
----------
x : (npoints, ndim) ndarray
Coordinates of points.
pointvol : float, optional
Used to set a minimum bound on the ellipsoid volume when
minvol is True.
minvol : bool, optional
If True, ensure that ellipsoid volume is at least len(x) * pointvol.
Returns
-------
ellipsoid : Ellipsoid
"""
npoints, ndim = x.shape
# If there is only a single point, return an N-sphere with volume `pointvol`
# centered at the point.
if npoints == 1:
r = (pointvol / vol_prefactor(ndim))**(1./ndim)
return Ellipsoid(x[0], (1. / r**2) * np.identity(ndim))
# Calculate covariance of points
ctr = np.mean(x, axis=0)
delta = x - ctr
cov = np.cov(delta, rowvar=0)
# when ndim = 1, np.cov returns a 0-d array. Make it a 1x1 2-d array.
if ndim == 1:
cov = np.atleast_2d(cov)
# For a ball of uniformly distributed points, the covariance will be
# smaller than r^2 by a factor of 1/(n+2) [see, e.g.,
# http://mathoverflow.net/questions/35276/
# covariance-of-points-distributed-in-a-n-ball]. In nested sampling,
# we are supposing the points are uniformly distributed within
# an ellipse, so the same factor holds. Expand `cov`
# to compensate for that when defining the ellipse matrix:
cov *= (ndim + 2)
# Ensure that ``cov`` is nonsingular.
# It can be singular when the ellipsoid has zero volume, which happens
# when npoints <= ndim or when enough points are linear combinations
# of other points. (e.g., npoints = ndim+1 but one point is a linear
# combination of others). When this happens, we expand the ellipse
# in the zero dimensions to fulfill the volume expected from
# ``pointvol``.
targetprod = (npoints * pointvol / vol_prefactor(ndim))**2
cov = make_eigvals_positive(cov, targetprod)
# The matrix defining the ellipsoid.
a = np.linalg.inv(cov)
# Calculate expansion factor necessary to bound each point.
# Points should obey x^T A x <= 1, so we calculate x^T A x for
# each point and then scale A up or down to make the
# "outermost" point obey x^T A x = 1.
#
# fast way to compute delta[i] @ A @ delta[i] for all i.
f = np.einsum('...i, ...i', np.tensordot(delta, a, axes=1), delta)
fmax = np.max(f)
# Due to round-off errors, we actually scale the ellipse so the outermost
# point obeys x^T A x < 1 - (a bit), so that all the points will
# *definitely* obey x^T A x < 1.
one_minus_a_bit = 1. - SQRTEPS
if fmax > one_minus_a_bit:
a *= one_minus_a_bit / fmax
ell = Ellipsoid(ctr, a)
if minvol:
v = len(x) * pointvol
if ell.vol < v:
ell.scale_to_vol(v)
return ell
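# Small consistency check (added for clarity; not part of the original module):
# the bounding ellipsoid of a point set must contain every point used to
# construct it.
def _example_bounding_ellipsoid():
    rstate = np.random.RandomState(0)
    x = rstate.rand(100, 2)  # 100 points in the unit square
    ell = bounding_ellipsoid(x, pointvol=1e-4)
    assert all(ell.contains(p) for p in x)
    return ell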
def _bounding_ellipsoids(x, ell, pointvol=0.):
"""Internal bounding ellipsoids method for when a bounding ellipsoid for
the entire set has already been calculated.
Parameters
----------
x : (npoints, ndim) ndarray
Coordinates of points.
ell : Ellipsoid, optional
If known, the bounding ellipsoid of the points `x`. If not supplied,
it will be calculated. This option is used when the function calls
itself recursively.
pointvol : float, optional
Volume represented by a single point. Used when number of points
per ellipsoid is less than number of dimensions in order to make
volume non-zero.
Returns
-------
ells : list of Ellipsoid
Ellipsoids.
"""
npoints, ndim = x.shape
# starting cluster centers for kmeans (k=2)
p1, p2 = ell.major_axis_endpoints() # returns two 1-d arrays
start_ctrs = np.vstack((p1, p2)) # shape is (k, N) = (2, N)
# Split points into two clusters using k-means clustering with k=2
# centroid = (2, ndim) ; label = (npoints,)
# [Each entry in `label` is 0 or 1, corresponding to cluster number]
centroid, label = kmeans2(x, k=start_ctrs, iter=10, minit='matrix')
# Get points in each cluster.
xs = [x[label == k, :] for k in (0, 1)] # points in each cluster
# If either cluster has less than ndim+1 points, the bounding ellipsoid
# will be ill-constrained, so we reject the split and simply return the
# ellipsoid bounding all the points.
if xs[0].shape[0] < 2 * ndim or xs[1].shape[0] < 2 * ndim:
return [ell]
# Bounding ellipsoid for each cluster, enlarging to minimum volume.
ells = [bounding_ellipsoid(xi, pointvol=pointvol, minvol=True)
for xi in xs]
# If the total volume decreased by a significant amount,
# then we will accept the split into subsets and try to perform the
# algorithm on each subset.
if ells[0].vol + ells[1].vol < 0.5 * ell.vol:
return (_bounding_ellipsoids(xs[0], ells[0], pointvol=pointvol) +
_bounding_ellipsoids(xs[1], ells[1], pointvol=pointvol))
# Otherwise, see if the total ellipse volume is significantly greater
# than expected. If it is, this indicates that there may be more than 2
# clusters and we should try to subdivide further.
if ell.vol > 2. * npoints * pointvol:
out = (_bounding_ellipsoids(xs[0], ells[0], pointvol=pointvol) +
_bounding_ellipsoids(xs[1], ells[1], pointvol=pointvol))
# only accept split if volume decreased significantly
if sum(e.vol for e in out) < 0.5 * ell.vol:
return out
# Otherwise, we are happy with the single bounding ellipse.
return [ell]
def bounding_ellipsoids(x, pointvol=0.):
"""Calculate a set of ellipses that bound the points.
Parameters
----------
x : (npoints, ndim) ndarray
Coordinates of points.
pointvol : float, optional
Volume represented by a single point. Used when number of points
per ellipsoid is less than number of dimensions in order to make
volume non-zero.
Returns
-------
ells : list of Ellipsoid
Ellipsoids.
"""
# Calculate a single bounding ellipsoid for the points, and enlarge it
# so that it has at least the minimum volume.
ell = bounding_ellipsoid(x, pointvol=pointvol, minvol=True)
return _bounding_ellipsoids(x, ell, pointvol=pointvol)
def sample_ellipsoids(ells, rstate=np.random):
"""Chose sample(s) randomly distributed within a set of
(possibly overlapping) ellipsoids.
Parameters
----------
ells : list of Ellipsoid
Returns
-------
x : 1-d ndarray
Coordinates within the ellipsoids.
"""
nells = len(ells)
if nells == 1:
return ells[0].sample(rstate=rstate)
# Select an ellipsoid at random, according to volumes
vols = np.array([ell.vol for ell in ells])
i = random_choice(nells, vols / vols.sum(), rstate=rstate)
# Select a point from the ellipsoid
x = ells[i].sample(rstate=rstate)
# How many ellipsoids is the sample in?
n = 1
for j in range(nells):
if j == i:
continue
n += ells[j].contains(x)
# Only accept the point with probability 1/n
# (If rejected, sample again).
if n == 1 or rstate.rand() < 1.0 / n:
return x
else:
return sample_ellipsoids(ells, rstate=rstate)
# -----------------------------------------------------------------------------
# Classes for dealing with non-parallel calls
class FakePool(object):
"""A fake Pool for serial function evaluations."""
def __init__(self):
pass
def submit(self, fn, *args, **kwargs):
return FakeFuture(fn, *args, **kwargs)
def map(self, func, *iterables):
return list(map(func, *iterables))
def shutdown(self):
pass
class FakeFuture(object):
"""A fake Future to mimic function calls."""
def __init__(self, fn, *args, **kwargs):
self.fn = fn
self.args = args
self.kwargs = kwargs
def result(self):
return self.fn(*self.args, **self.kwargs)
def cancel(self):
return True
# -----------------------------------------------------------------------------
# Sampler classes
class Sampler:
"""A sampler simply selects a new point obeying the likelihood bound,
given some existing set of points."""
def __init__(self, loglikelihood, prior_transform, points, rstate,
options, queue_size, pool):
self.loglikelihood = loglikelihood
self.prior_transform = prior_transform
self.points = points
self.rstate = rstate
self.set_options(options)
self.queue_size = queue_size
self.pool = pool
self.queue = []
self.submitted = 0
self.cancelled = 0
self.unused = 0
self.used = 0
def empty_queue(self):
"""Dump all operations on the queue."""
while self.queue:
x, v, f = self.queue.pop()
if f.cancel():
self.cancelled += 1
else:
self.unused += 1
def fill_queue(self):
"""Fill up the queue with operations."""
while len(self.queue)<self.queue_size:
x = self.propose_point()
v = self.prior_transform(x)
self.queue.append((x, v, self.pool.submit(self.loglikelihood, v)))
self.submitted += 1
def get_point_value(self):
""" Get evaluation sequentially from the queue. If we should
update our proposal distribution, do not refill the queue."""
if not self.queue:
self.fill_queue()
x, v, f = self.queue.pop(0)
r = f.result()
self.fill_queue()
self.used += 1
return x, v, r
class ClassicSampler(Sampler):
"""Picks an active point at random and evolves it with a
Metropolis-Hastings style MCMC with fixed number of iterations."""
def set_options(self, options):
self.steps = options.get('steps', 20)
def update(self, pointvol):
"""Calculate an ellipsoid to get the rough shape of the point
distribution correct, but then scale it down to the volume
corresponding to a single point."""
self.ell = bounding_ellipsoid(self.points, pointvol=pointvol)
self.ell.scale_to_vol(pointvol)
def propose_point(self, u, scale):
while True:
new_u = u + scale * self.ell.randoffset(rstate=self.rstate)
if np.all(new_u > 0.) and np.all(new_u < 1.):
break
return new_u
def new_point(self, loglstar):
# choose a point at random and copy it
i = self.rstate.randint(len(self.points))
u = self.points[i, :]
# evolve it.
scale = 1.
accept = 0
reject = 0
ncall = 0
while ncall < self.steps or accept == 0:
new_u = self.propose_point(u, scale)
new_v = self.prior_transform(new_u)
new_logl = self.loglikelihood(new_v)
if new_logl >= loglstar:
u = new_u
v = new_v
logl = new_logl
accept += 1
else:
reject += 1
# adjust scale, aiming for acceptance ratio of 0.5.
if accept > reject:
scale *= math.exp(1. / accept)
if accept < reject:
scale /= math.exp(1. / reject)
ncall += 1
return u, v, logl, ncall
class SingleEllipsoidSampler(Sampler):
"""Bounds active points in a single ellipsoid and samples randomly
from within that ellipsoid."""
def set_options(self, options):
self.enlarge = options.get('enlarge', 1.2)
def update(self, pointvol):
self.empty_queue()
self.ell = bounding_ellipsoid(self.points, pointvol=pointvol,
minvol=True)
self.ell.scale_to_vol(self.ell.vol * self.enlarge)
self.fill_queue()
def propose_point(self):
while True:
u = self.ell.sample(rstate=self.rstate)
if np.all(u > 0.) and np.all(u < 1.):
break
return u
def new_point(self, loglstar):
ncall = 0
while True:
u, v, logl = self.get_point_value()
ncall += 1
if logl >= loglstar:
break
return u, v, logl, ncall
class MultiEllipsoidSampler(Sampler):
"""Bounds active points in multiple ellipsoids and samples randomly
from within joint distribution."""
def set_options(self, options):
self.enlarge = options.get('enlarge', 1.2)
def update(self, pointvol):
self.empty_queue()
self.ells = bounding_ellipsoids(self.points, pointvol=pointvol)
for ell in self.ells:
ell.scale_to_vol(ell.vol * self.enlarge)
self.fill_queue()
def propose_point(self):
while True:
u = sample_ellipsoids(self.ells, rstate=self.rstate)
if np.all(u > 0.) and np.all (u < 1.):
break
return u
def new_point(self, loglstar):
ncall = 0
while True:
u, v, logl = self.get_point_value()
ncall += 1
if logl >= loglstar:
break
return u, v, logl, ncall
# -----------------------------------------------------------------------------
# Main entry point
_SAMPLERS = {'classic': ClassicSampler,
'single': SingleEllipsoidSampler,
'multi': MultiEllipsoidSampler}
def sample(loglikelihood, prior_transform, ndim, npoints=100,
method='single', update_interval=None, npdim=None,
maxiter=None, maxcall=None, dlogz=None, decline_factor=None,
rstate=None, callback=None, queue_size=None, pool=None, **options):
"""Perform nested sampling to evaluate Bayesian evidence.
Parameters
----------
loglikelihood : function
Function returning log(likelihood) given parameters as a 1-d numpy
array of length *ndim*.
prior_transform : function
Function translating a unit cube to the parameter space according to
the prior. The input is a 1-d numpy array with length *ndim*, where
each value is in the range [0, 1). The return value should also be a
1-d numpy array with length *ndim*, where each value is a parameter.
The return value is passed to the loglikelihood function. For example,
for a 2 parameter model with flat priors in the range [0, 2), the
function would be::
def prior_transform(u):
return 2.0 * u
ndim : int
Number of parameters returned by prior and accepted by loglikelihood.
npoints : int, optional
Number of active points. Larger numbers result in a more finely
sampled posterior (more accurate evidence), but also a larger
number of iterations required to converge. Default is 100.
method : {'classic', 'single', 'multi'}, optional
Method used to select new points. Choices are 'classic',
single-ellipsoidal ('single'), multi-ellipsoidal ('multi'). Default
is 'single'.
update_interval : int, optional
Only update the new point selector every ``update_interval``-th
likelihood call. Update intervals larger than 1 can be more efficient
when the likelihood function is very fast, particularly when
using the multi-ellipsoid method. Default is round(0.6 * npoints).
npdim : int, optional
Number of parameters accepted by prior. This might differ from *ndim*
in the case where a parameter of loglikelihood is dependent upon
multiple independently distributed parameters, some of which may be
nuisance parameters.
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if
termination condition is reached. Default is no limit.
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop earlier
if termination condition is reached. Default is no limit.
dlogz : float, optional
If supplied, iteration will stop when the estimated contribution
of the remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
``log(z + z_est) - log(z) < dlogz`` where *z* is the current evidence
from all saved samples, and *z_est* is the estimated contribution
from the remaining volume. This option and decline_factor are
mutually exclusive. If neither is specified, the default is
``dlogz=0.5``.
decline_factor : float, optional
If supplied, iteration will stop when the weight
(likelihood times prior volume) of newly saved samples has been
declining for ``decline_factor * nsamples`` consecutive samples.
A value of 1.0 seems to work pretty well. This option and dlogz
are mutually exclusive.
rstate : `~numpy.random.RandomState`, optional
RandomState instance. If not given, the global random state of the
``numpy.random`` module will be used.
callback : function, optional
Callback function to be called at each iteration. A single argument,
a dictionary, is passed to the callback. The keys include ``'it'``,
the current iteration number, and ``'logz'``, the current total
log evidence of all saved points. To simply print these at each
iteration, use the convience function
``callback=nestle.print_progress``.
queue_size: int, optional
Carry out evaluation in parallel by queueing up new active point
proposals using at most this many threads. Each thread independently
proposes new live points until one is selected.
Default is no parallelism (queue_size=1).
pool: ThreadPoolExecutor
Use this pool of workers to propose live points in parallel. If
queue_size>1 and `pool` is not specified, an Exception will be thrown.
Other Parameters
----------------
steps : int, optional
For the 'classic' method, the number of steps to take when selecting
a new point. Default is 20.
enlarge : float, optional
For the 'single' and 'multi' methods, enlarge the ellipsoid(s) by
this fraction in volume. Default is 1.2.
Returns
-------
result : `Result`
A dictionary-like object with attribute access: Attributes can be
accessed with, for example, either ``result['niter']`` or
``result.niter``. Attributes:
niter *(int)*
Number of iterations.
ncall *(int)*
Number of likelihood calls.
logz *(float)*
Natural logarithm of evidence (integral of posterior).
logzerr *(float)*
Estimated numerical (sampling) error on *logz*.
h *(float)*
Information. This is a measure of the "peakiness" of the
likelihood function. A constant likelihood has zero information.
samples *(ndarray)*
Parameter values of each sample. Shape is *(nsamples, ndim)*.
logvol *(ndarray)*
Natural log of prior volume of corresponding to each sample.
Shape is *(nsamples,)*.
logl *(ndarray)*
Natural log of the likelihood for each sample, as returned by
user-supplied *logl* function. Shape is *(nsamples,)*.
weights *(ndarray)*
Weight corresponding to each sample, normalized to unity.
These are proportional to ``exp(logvol + logl)``. Shape is
*(nsamples,)*.
"""
if npdim is None:
npdim = ndim
if maxiter is None:
maxiter = sys.maxsize
if maxcall is None:
maxcall = sys.maxsize
if method == 'multi' and not HAVE_KMEANS:
raise ValueError("scipy.cluster.vq.kmeans2 is required for the "
"'multi' method.") # pragma: no cover
if method not in _SAMPLERS:
raise ValueError("Unknown method: {:r}".format(method))
if npoints < 2 * ndim:
warnings.warn("You really want to make npoints >= 2 * ndim!")
if rstate is None:
rstate = np.random
# Stopping criterion.
if dlogz is not None and decline_factor is not None:
raise ValueError("Cannot specify two separate stopping criteria: "
"decline_factor and dlogz")
elif dlogz is None and decline_factor is None:
dlogz = 0.5
if update_interval is None:
update_interval = max(1, round(0.6 * npoints))
else:
update_interval = round(update_interval)
if update_interval < 1:
raise ValueError("update_interval must be >= 1")
# Parallel evaluation.
if queue_size is None or queue_size == 1:
queue_size = 1
pool = FakePool()
else:
if pool is None:
raise ValueError("Missing pool. Please provide a Pool object.")
# Initialize active points and calculate likelihoods
active_u = rstate.rand(npoints, npdim) # position in unit cube
    active_v = np.empty((npoints, ndim), dtype=np.float64)
"""Define the COOmatrix class."""
from __future__ import division, print_function
import numpy as np
from numpy import ndarray
from scipy.sparse import coo_matrix
from six import iteritems
from six.moves import range
from collections import OrderedDict
from openmdao.matrices.matrix import Matrix, _compute_index_map, sparse_types
class COOMatrix(Matrix):
"""
Sparse matrix in Coordinate list format.
Attributes
----------
_mat_range_cache : dict
Dictionary of cached CSC matrices needed for solving on a sub-range of the
parent CSC matrix.
"""
def __init__(self, comm):
"""
Initialize all attributes.
Parameters
----------
comm : MPI.Comm or <FakeComm>
communicator of the top-level system that owns the <Jacobian>.
"""
super(COOMatrix, self).__init__(comm)
self._mat_range_cache = {}
def _build_sparse(self, num_rows, num_cols):
"""
Allocate the data, rows, and cols for the sparse matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
Returns
-------
(ndarray, ndarray, ndarray)
data, rows, cols that can be used to construct a sparse matrix.
"""
counter = 0
submats = self._submats
metadata = self._metadata
pre_metadata = self._key_ranges = OrderedDict()
locations = {}
for key, (info, loc, src_indices, shape, factor) in iteritems(submats):
val = info['value']
rows = info['rows']
dense = (rows is None and (val is None or isinstance(val, ndarray)))
full_size = np.prod(shape)
if dense:
if src_indices is None:
delta = full_size
else:
delta = shape[0] * len(src_indices)
elif rows is None: # sparse matrix
delta = val.data.size
else: # list sparse format
delta = len(rows)
if loc in locations:
ind1, ind2, otherkey = locations[loc]
if not (src_indices is None and (ind2 - ind1) == delta == full_size):
raise RuntimeError("Keys %s map to the same sub-jacobian of a CSC or "
"CSR partial jacobian and at least one of them is either "
"not dense or uses src_indices. This can occur when "
"multiple inputs on the same "
"component are connected to the same output." %
sorted((key, otherkey)))
else:
ind1 = counter
counter += delta
ind2 = counter
locations[loc] = (ind1, ind2, key)
pre_metadata[key] = (ind1, ind2, dense, rows)
data = np.zeros(counter)
rows = np.empty(counter, dtype=int)
cols = np.empty(counter, dtype=int)
for key, (ind1, ind2, dense, jrows) in iteritems(pre_metadata):
info, loc, src_indices, shape, factor = submats[key]
irow, icol = loc
val = info['value']
idxs = None
if dense:
jac_type = ndarray
if src_indices is None:
colrange = np.arange(shape[1], dtype=int)
else:
colrange = src_indices
ncols = colrange.size
subrows = rows[ind1:ind2]
subcols = cols[ind1:ind2]
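                # Expand the dense block into COO triplets: each row i of the
                # sub-jacobian contributes `ncols` entries, all with local row
                # index i and the column indices in `colrange`; both are then
                # offset by the block's (irow, icol) location below.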
for i in range(shape[0]):
subrows[i * ncols: (i + 1) * ncols] = i
subcols[i * ncols: (i + 1) * ncols] = colrange
subrows += irow
subcols += icol
else: # sparse
if jrows is None:
jac_type = type(val)
jac = val.tocoo()
jrows = jac.row
jcols = jac.col
else:
jac_type = list
jcols = info['cols']
if src_indices is None:
rows[ind1:ind2] = jrows + irow
cols[ind1:ind2] = jcols + icol
else:
irows, icols, idxs = _compute_index_map(jrows, jcols,
irow, icol,
src_indices)
rows[ind1:ind2] = irows
cols[ind1:ind2] = icols
metadata[key] = (ind1, ind2, idxs, jac_type, factor)
return data, rows, cols
def _build(self, num_rows, num_cols):
"""
Allocate the matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
"""
data, rows, cols = self._build_sparse(num_rows, num_cols)
metadata = self._metadata
for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
if idxs is None:
metadata[key] = (slice(ind1, ind2), jac_type, factor)
else:
# store reverse indices to avoid copying subjac data during
# update_submat.
metadata[key] = (np.argsort(idxs) + ind1, jac_type, factor)
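        # COO ("ijv"/triplet) format: entry k of `data` is placed at
        # (rows[k], cols[k]); e.g. data=[1., 2.], rows=[0, 3], cols=[2, 1]
        # puts 1. at (0, 2) and 2. at (3, 1) in the assembled matrix.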
self._matrix = coo_matrix((data, (rows, cols)),
shape=(num_rows, num_cols))
def _update_submat(self, key, jac):
"""
Update the values of a sub-jacobian.
Parameters
----------
key : (str, str)
the global output and input variable names.
jac : ndarray or scipy.sparse or tuple
the sub-jacobian, the same format with which it was declared.
"""
idxs, jac_type, factor = self._metadata[key]
if not isinstance(jac, jac_type) and (jac_type is list and not isinstance(jac, ndarray)):
raise TypeError("Jacobian entry for %s is of different type (%s) than "
"the type (%s) used at init time." % (key,
type(jac).__name__,
jac_type.__name__))
if isinstance(jac, ndarray):
self._matrix.data[idxs] = jac.flat
else: # sparse
self._matrix.data[idxs] = jac.data
if factor is not None:
self._matrix.data[idxs] *= factor
def _update_add_submat(self, key, jac):
"""
Add the subjac values to an existing sub-jacobian.
Parameters
----------
key : (str, str)
the global output and input variable names.
jac : ndarray or scipy.sparse or tuple
the sub-jacobian, the same format with which it was declared.
"""
idxs, jac_type, factor = self._metadata[key]
if not isinstance(jac, jac_type) and (jac_type is list and not isinstance(jac, ndarray)):
raise TypeError("Jacobian entry for %s is of different type (%s) than "
"the type (%s) used at init time." % (key,
type(jac).__name__,
jac_type.__name__))
if isinstance(jac, ndarray):
val = jac.flatten()
else: # sparse
val = jac.data
if factor is not None:
self._matrix.data[idxs] += val * factor
else:
self._matrix.data[idxs] += val
def _prod(self, in_vec, mode, ranges, mask=None):
"""
Perform a matrix vector product.
Parameters
----------
in_vec : ndarray[:]
incoming vector to multiply.
mode : str
'fwd' or 'rev'.
ranges : (int, int, int, int)
Min row, max row, min col, max col for the current system.
mask : ndarray of type bool, or None
Array used to zero out part of the matrix data.
Returns
-------
ndarray[:]
vector resulting from the product.
"""
# when we have a derivative based solver at a level below the
# group that owns the AssembledJacobian, we need to use only
# the part of the matrix that is relevant to the lower level
# system.
mat = self._matrix
if ranges is not None:
rstart, rend, cstart, cend = ranges
if rstart != 0 or cstart != 0 or rend != mat.shape[0] or cend != mat.shape[1]:
if ranges in self._mat_range_cache:
mat, idxs = self._mat_range_cache[ranges]
# update the data array of our smaller cached matrix with current data from
# self._matrix
mat.data[:] = self._matrix.data[idxs]
else:
rstart, rend, cstart, cend = ranges
rmat = mat.tocoo()
# find all row and col indices that are within the desired range
ridxs = np.nonzero(np.logical_and(rmat.row >= rstart, rmat.row < rend))[0]
                    cidxs = np.nonzero(np.logical_and(rmat.col >= cstart, rmat.col < cend))[0]
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
import numpy as np
class RefineHistory(object):
"""
A history tracking class for refinements
"""
INITIAL_ITERATION_INDEX = -1
LAST_ITERATION_INDEX = -1
ITERATION_INDEX = 0
RESIDUAL_INDEX = -1
SOLUTION_SELECTOR = np.s_[ITERATION_INDEX+1:RESIDUAL_INDEX]
PLOT_SAMPLE_SELECTOR = np.s_[ITERATION_INDEX+1:]
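    # Each sample row is laid out as [iteration, solution..., residual], so
    # ITERATION_INDEX and RESIDUAL_INDEX pick the first and last columns and
    # SOLUTION_SELECTOR slices out the solution values in between.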
samples = None
_closed = False
@property
def best_entry(self):
if self._closed:
samples = self.samples
else:
samples = np.asanyarray(self.samples)
residuals = samples[:,self.RESIDUAL_INDEX]
return samples[np.where(residuals == residuals.min()),:][-1].flatten()
@property
def initial_entry(self):
if self._closed:
samples = self.samples
else:
samples = np.asanyarray(self.samples)
iterations = samples[:,self.ITERATION_INDEX]
return samples[np.where(iterations == self.INITIAL_ITERATION_INDEX),:][-1].flatten()
@property
def last_entry(self):
if self._closed:
samples = self.samples
else:
samples = np.asanyarray(self.samples)
iterations = samples[:,self.ITERATION_INDEX]
return samples[np.where(iterations == self.LAST_ITERATION_INDEX),:][-1].flatten()
@property
def best_solution(self):
return self.best_entry[self.SOLUTION_SELECTOR].flatten()
@property
def initial_solution(self):
return self.initial_entry[self.SOLUTION_SELECTOR]
@property
def last_solution(self):
return self.last_entry[self.SOLUTION_SELECTOR]
@property
def initial_residual(self):
return float(self.initial_entry[self.RESIDUAL_INDEX])
@property
def best_residual(self):
return float(self.best_entry[self.RESIDUAL_INDEX])
@property
def last_residual(self):
return float(self.last_entry[self.RESIDUAL_INDEX])
# ------------------------------------------------------------
# Initialization and other internals
# ------------------------------------------------------------
def __init__(self):
self.samples = []
def _sort_solutions_by_iteration(self):
self.samples.sort(key=lambda s: s[0])
# ------------------------------------------------------------
# ContextManager implementation
# ------------------------------------------------------------
def close(self):
self._closed = True
self._sort_solutions_by_iteration()
        self.samples = np.asanyarray(self.samples)
from functools import wraps
from typing import Callable, TypeVar, cast
import numpy as np
from numba import njit
from numfun.barycentric import barycentric_interpolation
F = TypeVar('F', bound=Callable)
def complexify(g: F) -> F:
"""Decorator to apply g on real and imaginary parts and the return the sum.
:param g: A linear operator such that g(a+ib) = g(a) + i g(b)
:return: a function which adds g(real(input)) + 1j * g(imag(input))
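    Example (illustrative only): for a linear operator such as ``np.cumsum``,
    the wrapped function returns ``np.cumsum(c.real) + 1j * np.cumsum(c.imag)``
    for a complex input ``c``.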
"""
@wraps(g)
def wrapper(coefficients: np.ndarray) -> np.ndarray:
"""coefficients is a complex input array."""
# Make sure c is a numpy array
coefficients = 1.0 * np.array(coefficients)
if np.all(np.isreal(coefficients)):
return g(coefficients.real)
if np.all(np.isreal(1j * coefficients)):
return 1j * g(coefficients.imag)
u = g(coefficients.real)
v = g(coefficients.imag)
return u + 1j * v
return cast(F, wrapper)
@complexify
@njit
def chebyshev_coefficients_of_integral(coefficients: np.array) -> np.array:
"""Indefinite integral of a Function with given Chebyshev coefficients
such that f(-1) = 0.
NOTE: The algorithm works for complex coefficients, but for jit to
work, we have to wrap this in the @complexify decorator
###########################################################################
If the underlying function is represented as a n-vector c[k]:
\sum_{k=0}^{n-1} c_k T_k(x)
its integral is represented as a vector of length n+1 given by:
\sum_{k=0}^{n} b_k T_k(x)
where b_0 is determined from the constant of integration as
b_0 = \sum_{k=1}^{n} (-1)^(k+1) b_k
and other coefficients are given by
b_1 = c_0 - c_2/2,
    b_k = (c_{k-1} - c_{k+1})/(2k) if 1 < k \leq n.
with c_{n+1} = c_{n+2} = 0.
Pages 32-33 of Mason & Handscomb,
"Chebyshev Polynomials". Chapman & Hall/CRC (2003).
###########################################################################
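    Example (a quick check against the formulas above): for f = T_1, i.e.
    coefficients [0, 1], the integral with f(-1) = 0 is x**2/2 - 1/2, which
    equals -T_0/4 + T_2/4:
    >>> chebyshev_coefficients_of_integral(np.array([0.0, 1.0]))
    array([-0.25,  0.  ,  0.25])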
"""
# Handle the empty case:
n = len(coefficients)
if n == 0:
return np.zeros((0,))
# Make room in c[k] with zeros
c = np.zeros((n + 2,))
c[:n] = coefficients
# Initialize vector b for the integral
b = np.zeros((n + 1,))
# values of b_(2) ... b_(n+1):
b[2:] = (c[1:n] - c[3:n + 2]) / (2.0 * np.arange(2, n + 1))
# value of b_1
b[1] = c[0] - c[2] / 2.0
v = np.ones((n,))
v[1::2] = -1.0
# value of b_0 such that f(-1) = 0
b[0] = np.dot(v, b[1:])
return b
@complexify
@njit
def chebyshev_definite_integral(coefficients: np.array) -> float:
"""Definite integral of a function on the interval [-1, 1]."""
n = len(coefficients)
# Get the length of the coefficients:
if n == 0: # Trivial cases:
return np.nan
if n == 1: # Constant Function
return 2.0 * coefficients[0]
# General case
    c = np.zeros((n,))
import topogenesis as tg
import numpy as np
import pandas as pd
# Environment class
class environment():
def __init__(self, avail_lattice : tg.lattice, lattices : dict, agents_dict : dict, stencils: dict):
""" Creates an environment and agents based on given initial available lattice, lattices, agent dictionary and stencils. If all the lattices are of the
same shape and the values are normalized, a valid environment will be created.
Args:
avail_lattice (tg.lattice): a lattice initiating available voxels of the base envelope
lattices (dict): a dictionary of lattices of environmental values for each voxel
agents_dict (dict): a dictionary of agents, including attributes, preferences and behaviors of the agent
            stencils (dict): a dictionary of named stencils used to compute the voxel neighbourhood matrices
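        Example (a hedged sketch; how the topogenesis lattices and stencils are
        built is assumed and depends on the surrounding project):
            stencil = tg.create_stencil("von_neumann", 1, 1)
            env = environment(avail_lattice,
                              {"sun_access": sun_lat, "noise": noise_lat},
                              agents_dict,
                              {"von_neumann": stencil})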
"""
self.lattices = lattices
        self.lattice_names = list(lattices.keys())
self.avail_lattice = avail_lattice
self.occ_lattice = avail_lattice * 0 - 1
self.bounds = avail_lattice.bounds
self.shape = avail_lattice.shape
self.stencils = stencils
self.initialization(agents_dict)
def all_lattice_update(self):
"""updates all the dynamic lattices
"""
# TODO: run lattice update on all dynamic lattices
pass
def lattice_update(self, lattice_key: str):
# TODO: recompute the specified lattice
pass
def all_agents_action(self):
"""perform the 'action' method on all agents
"""
# TODO: run the action method of all agents
pass
# Bahar: Let's talk about this one
def agent_action(self, agent_key: str):
""" a method which enables an agent to act based on its preferences to occupy a new voxel or negotiate to exchange another
"""
# TODO: run the action method of specified agents
agn = self.agents[agent_key]
        while (agn.area <= agn.behaviors['char']['area'] or agn.behaviors['char']['area'] == 0.0):
neigh_lat = self.avail_lattice * 0.0
for s, w in zip(
agn.behaviors["neighbourhood"]["stencils"],
agn.behaviors["neighbourhood"]["weights"]):
# find and aggregate neighs
neigh_lat += agn.bhv_find_neighbour(s, self, rectilinear_exp=10.0) * w
# extract neigh values for agent
neigh_eval_lat = neigh_lat * agn.eval_lat * self.avail_lattice
# Occupation
##########
agn.bhv_occupy(self, np.unravel_index(neigh_eval_lat.argmax(), neigh_eval_lat.shape))
break
pass
def all_agents_evaluation(self):
""" evaluates all the agents based on their preferences for each environment lattice
"""
# TODO: run the evaluation method of all agents
pass
def agent_evaluation(self, agent_key: str):
"""evaluates a given agent based on its preference for each environment lattice
Args:
agent_key (str): specifies the agent with the given key
"""
# TODO: run the evaluation method of specified agents
pass
def all_neighbours_evaluation(self):
""" runs the evaluation on all stencils of all agents
"""
# TODO: run the evaluation method of all neighbours
for agn in self.agents:
for lat in self.lattices:
pass
pass
def all_agents_initialization(self, agents_dict : dict):
"""creating all of the agents in the environment with the given dictionary
Args:
agents_dict (dict): a reference dictionary of agent's names, ids, preferences, attributes and behaviors and so on
"""
env_agents = {}
for name, a in agents_dict.items():
# TODO: Check if the agents preferences are matching with the environment lattices
agent_preferences = a["preferences"].keys()
agent_pref_name_check = agent_preferences == self.lattices.keys()
if agent_pref_name_check:
# TODO: Check if the specified stencils by agent is available in the environment stencils
# TODO: Run agent initialization method
env_agents[name] = agent(name, self, a["attributes"], a["preferences"], a["behaviors"])
else:
pass
self.agents = env_agents
def initialization(self, agents_dict: dict):
"""if all the requirements are met, creates an environment with agents and neighbouring graph
Args:
agents_dict (dict): a reference dictionary of agent's names, ids, preferences, attributes and behaviors and so on
"""
if self.lattice_check():
self.all_agents_initialization(agents_dict)
self.init_neighbour_matrices()
else:
return
def lattice_check(self) -> bool:
"""chackes the prerequisites of initializing an environment which is lattices of the same shape and bound, and normalized values
Returns:
            bool: True if all the prerequisites are met; a valid environment is created and a message reporting its shape is printed.
                False otherwise, in which case the accumulated error message is printed and initialization is aborted.
"""
shapes, bounds, mins, maxs = [],[],[],[]
# iterate over all lattices
for l in self.lattices.values():
shapes.append(l.shape)
bounds.append(l.bounds.flatten())
mins.append(l.min())
maxs.append(l.max())
# access lattice names in self.lattice_names --> Bahar: what is this?
shape_check = np.all(np.array(shapes) == self.shape)
bound_check = np.all(np.array(bounds) == self.bounds.flatten())
mins_check = np.array(mins).astype(float).min() >= 0.0
maxs_check = np.array(maxs).astype(float).max() <= 1.0
if (shape_check and bound_check and mins_check and maxs_check):
print("Valid environment created, shape =" + str(self.shape))
return True
else:
error = "Couldn't initialize the environment. Error: "
if not shape_check:
error += "lattice shapes doesn't match, "
if not bound_check:
error += "lattice bounds' doesn't match, "
if not mins_check:
error += "negative value found in a lattice, "
if not maxs_check:
error += "maximum value in a lattice is more than one."
print(error)
return False
def init_neighbour_matrices(self):
"""initializes the dictionary of neighbouring values of each voxels based on each stencil, that is a square matrice with rows and columns
equal to the number of voxels will be creaated, specifying each voxel's neighbours
"""
neigh_matrices = {}
for stencil_name, stencil in self.stencils.items():
neigh_matrices[stencil_name] = self.avail_lattice.find_neighbours(stencil)
self.neigh_matrices = neigh_matrices
# Agent class
class agent():
def __init__(self, name: str, env: environment, attributes: dict, preferences: dict, behaviors: list, origin: list = None):
"""initializes an agent with a name as a key to be defined by, the environment it exists in, the agent's prefernces of each environment lattice,
its behaviors and attributes, and the option of the initial origin.
Args:
name (str): agent's name
env (environment): the context
attributes (dict): define agent's attributes, i.e. id, area and so on
            preferences (dict): define the agent's environment lattice preferences. It should be a dictionary
                with the same keys as the environment lattices, each holding a preference value to be used in evaluation
behaviors (list): a list of behaviors with valued weight
origin (list, optional): optional list of initial origin of the agent, if none, a random origin will be generated. Defaults to None.
"""
self.name = name
self.aid = attributes["identity"]["aid"]
self.attributes = attributes
self.preferences = preferences
self.behaviors = behaviors
if origin:
self.origin = origin
else:
self.origin = agent.find_seed(self, env)
        self.occ_lattice = (env.occ_lattice * 0).astype(bool)
self.occ_lattice[tuple(self.origin.flatten())] = True
self.update_env_lattices(env)
self.evaluation(env)
# TODO: initialize the agent's available neighbour lattice per stencil
# self.neighbours = {}
# self.update_neighbor(env)
def find_seed(self, env: environment):
"""randomly generates an origin for the gent
Args:
env (environment): agent's environment context
Returns:
ndarray: index array of the selected origin
"""
# TODO: run the initial seed finding
avail_voxels = np.argwhere(env.avail_lattice == 1)
select_id = np.random.choice(len(avail_voxels), 1)
return avail_voxels[select_id]
def update_env_lattices(self, env:environment):
"""updates environment occupation and available lattice based on agent's current state
Args:
env (environment): agent's environment context
"""
env.occ_lattice[self.occ_lattice] = self.aid
env.avail_lattice[self.occ_lattice] = 0
def evaluation(self, env:environment):
"""evaluates all of the environment's lattices with concerning preference by multiplying for the agent and stores it in its eval_lat
Args:
env (environment): agent's environment context
"""
        eval_lattice = tg.to_lattice(np.ones(env.avail_lattice.shape), env.avail_lattice)
import numpy as np
import random,sys
import scipy
from scipy.spatial.distance import pdist,squareform,cdist
#from scipy.spatial import distance_matrix
import matplotlib.pyplot as plt
import scipy
### "for loop" version
### faster than "matrix version"
### because only need to consider points within h_k
### for loop version
### run this cell to overwrite the previous matrix version
### because this version is faster
def adaptive_cluster(data, gap_par = 0.5, n0=None,debug=False,assign_outliers = 'nearest_cluster'):
'''
    data: a numeric numpy array
    gap_par: the lambda parameter used to test the gap
    n0: the initial number of neighbors for each data point
    debug: if True, run and print extra consistency checks
    assign_outliers: 'nearest_cluster' assigns outliers to the nearest cluster;
        'new_cluster' assigns outliers to a new cluster
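    Example (a hedged sketch; per-point cluster labels are assumed to be
    returned):
        data = np.random.normal(size=(200, 2))
        labels = adaptive_cluster(data, gap_par=0.5)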
'''
weight_matrix_history = []
(n_points,n_features) = data.shape
#distance_matrix = scipy.spatial.distance_matrix(data,data)
## faster version
distance_matrix = scipy.spatial.distance.cdist(data, data, 'euclidean')
#print('distance_matrix.shape',distance_matrix.shape)
weight_matrix = np.zeros(shape=(n_points,n_points))
weight_matrix_history.append((0,weight_matrix))
#print('weight_matrix.shape',weight_matrix.shape)
#plot_weight_matrix(weight_matrix)
### sort the distance matrix
sorted_distance_idx_matrix = np.argsort(distance_matrix,axis=1)
sorted_distance_matrix = np.sort(distance_matrix,axis=1)
#print('sorted_distance_matrix.shape',sorted_distance_matrix.shape)
#print('sorted_distance_idx_matrix.shape',sorted_distance_idx_matrix.shape)
### number of neighbors
if n0 is None:
n0 = 2*n_features+2
### h0 is the the radius such that the point has n0 neighbors
h0 = sorted_distance_matrix[:,n0]
#print('h0.shape',h0.shape)
### max(h0(Xi),h0(Xj))
#max_h0 = np.reshape([np.maximum(h0[i],h0[j]) for i in range(n_points) for j in range(n_points)],newshape=(n_points,n_points))
#print('max_h0.shape',max_h0.shape)
### weight_matrix
#weight_matrix = (distance_matrix <= max_h0).astype('int')
### faster version
h0_matrix = np.tile(h0, (n_points, 1))
h0_matrix_T = h0_matrix.T
h0_matrix_max = np.maximum(h0_matrix,h0_matrix_T)
weight_matrix = (distance_matrix<=h0_matrix_max).astype('int')
#print('weight_matrix.shape',weight_matrix.shape)
#plot_weight_matrix(weight_matrix)
#################################################################
### find h sequence
a = 1.4142135623730951
b = 1.95
#gap_par = -1
max_distance = np.max(sorted_distance_matrix)
### h0 is a vector, each data point has n0 neighbors
### max(h0) makes sure that each data point has at least n0 neighbors
h_array = np.array([np.max(h0)])
#n_matrix = np.repeat(n0, n_points)
#n_matrix = n_matrix[:,np.newaxis]
k = 0
weight_matrix_history.append((h_array[k],weight_matrix.copy()))
while h_array[k] <= max_distance:
### upper bound of n(Xi,h_k+1)
### given radius h_array[k], how many neighbors for each data point
### -1 removes its self from counting
n_upper = a * np.array([np.sum(sorted_distance_matrix[i,:]<=h_array[k])-1 for i in np.arange(n_points)])
n_upper = (np.floor(n_upper)).astype('int')
### when h is big, the n_upper may be > n_points
n_upper = np.clip(n_upper, a_min=None,a_max=(n_points-1))
#print(n_upper)
### n_upper can decide the h_upper
h_upper_by_n_upper = np.min(np.array([sorted_distance_matrix[i,n_upper[i]] for i in np.arange(n_points)]))
### upper bound of h_k+1
h_upper = b*h_array[k]
### must satisfy both conditions
min_h_upper = np.minimum(h_upper_by_n_upper,h_upper)
#print(k,min_h_upper)
### append to the h_array
### just make sure h is not > max_distance
if min_h_upper <= max_distance:
if min_h_upper <= h_array[k]: break
#print(k,'h',min_h_upper)
h_array = np.append(h_array,min_h_upper)
k = k + 1
#################################################################
### check if those h satisfy the conditions
if debug:
for k in range(1,len(h_array)):
if h_array[k] <= b*h_array[k-1]:
continue
print('k',k,h_array[k],h_array[k-1],b*h_array[k-1],end=',')
print(h_array[k]/h_array[k-1])
else:
print('h error')
for k in range(1,len(h_array)):
for i in range(n_points):
n1 = np.sum(sorted_distance_matrix[i,:]<=h_array[k-1])-1
n2 = np.sum(sorted_distance_matrix[i,:]<=h_array[k])-1
if n2<=a*n1 and n1>=n0 and n2>=n0:
continue
print('n',k,n1,n2,a*n1,end=',')
print(n2/n1)
else:
print('n error')
#################################################################
beta_a = (n_features+1.0)/2.0
beta_b = 0.5
beta_function = scipy.special.beta(beta_a,beta_b)
np.seterr(divide='ignore', invalid='ignore')
print('h_k',h_array[0])
for k in range(1,len(h_array)):
print('h_k',h_array[k])
#t_matrix = distance_matrix/h_array[k-1]
#beta_x_matrix = 1.0-(t_matrix**2)/4.0
#incomplete_beta_function_matrix = scipy.special.betainc(beta_a,beta_b,beta_x_matrix)
#q_matrix = incomplete_beta_function_matrix / (2*beta_function-incomplete_beta_function_matrix)
for i in range(n_points):
weight_matrix[i,i] = 1
for j in range(i,n_points):
#if weight_matrix[i,j] == 1:
# continue
#if i == j:
# weight_matrix[i,j] = 1
# continue
#if i > j:
# weight_matrix[i,j] = weight_matrix[j,i]
# continue
if distance_matrix[i,j] <= h_array[k] and h_array[k-1] >= h0[i] and h_array[k-1] >= h0[j]:
#### caclulate overlap
N_overlap = np.dot(weight_matrix[i,:],weight_matrix[j,:])
#### caclulate complement
#N_complement = np.zeros(shape=(n_points,n_points))
if k>1:
ind1 = (distance_matrix[j,:] > h_array[k-1]) + 0.0
ind2 = (distance_matrix[i,:] > h_array[k-1]) + 0.0
else:
ind1 = (distance_matrix[j,:] > h0_matrix_max[i,j]) + 0.0
ind2 = (distance_matrix[i,:] > h0_matrix_max[i,j]) + 0.0
N_complement = np.dot(weight_matrix[i,:],ind1) + np.dot(weight_matrix[j,:],ind2)
#### caclulate union
N_union = N_overlap + N_complement
#### theta
theta = N_overlap / N_union
#### q
t = distance_matrix[i,j]/h_array[k-1]
beta_x = 1.0-(t**2)/4.0
incomplete_beta_function = scipy.special.betainc(beta_a,beta_b,beta_x)
q = incomplete_beta_function / (2*beta_function-incomplete_beta_function)
#q = q_matrix[i,j]
T1 = N_union
#### this may raise warnings about log(0) or log(nan)
#### this is fine, since I used the whole matrix here
#### some of the points are out of the h(k) radius
#### we will mask those points in the later step
T2 = theta*np.log(theta/q)+(1.0-theta)*np.log((1.0-theta)/(1.0-q))
#### when N_overlap is 0, theta is 0, this leands to T is nan
#### replace those nan with 0 in T
#T2 = np.where(theta==0.0,0.0,T2)
#T2 = np.where(theta==1.0,0.0,T2)
#T3 = ((theta<=q).astype('int')-(theta>q).astype('int'))
### faster version
if theta<=q:
T = T1 * T2
else:
T = - (T1 * T2)
#T = T1 * T2 * T3
####
####
#weight_matrix[i,j] = (distance_matrix[i,j]<=h_array[k]) * (T<=gap_par) + 0.0
weight_matrix[i,j] = (T<=gap_par) + 0.0
#### be careful with those boundary points
#### theta=0 means no overlap at all
#### theta=1 means completely overlap
#### needs special treatment for them
if theta==0: weight_matrix[i,j] = 0
if theta==1: weight_matrix[i,j] = 1
####
weight_matrix[j,i] = weight_matrix[i,j]
weight_matrix_history.append((h_array[k],weight_matrix.copy()))
### reset to default
np.seterr(divide='warn', invalid='warn')
### calculate S
S = np.sum(weight_matrix)
### extract clusters from weight matrix
labels = (np.zeros(shape=weight_matrix.shape[0]))
labels.fill(np.nan)
cluster_ind = 0
for i in range(len(labels)):
for j in range(len(labels)):
if i == j:continue
if weight_matrix[i,j] == 1:
                if np.isnan(labels[i]) and np.isnan(labels[j]):
"""Functions defining expected losses from the instruments."""
from __future__ import annotations
from pathlib import Path
import numpy as np
from edges_cal import reflection_coefficient as rc
from ..config import config
from scipy import integrate
def balun_and_connector_loss(
band: str,
freq,
gamma_ant,
monte_carlo_flags=(False, False, False, False, False, False, False, False),
):
"""
Compute balun and connector losses.
Parameters
----------
    band : str {'low', 'mid'}
Parameters of the loss are different for each antenna.
freq : array-like
Frequency in MHz
gamma_ant: float
Reflection coefficient of antenna at the reference plane, the LNA input.
monte_carlo_flags : tuple of bool
Which parameters to add a random offset to, in order:
* tube_inner_radius
* tube_outer_radius
* tube_length
* connector_inner_radius
* connector_outer_radius
* connector_length
* metal_conductivity
* teflon_permittivity
Returns
-------
Gb : float or array-like
The balun loss
Gc : float or array-like
The connector loss
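    Examples
    --------
    A hedged sketch (frequencies in MHz; a perfectly matched antenna,
    ``gamma_ant = 0``, is assumed purely for illustration):
    >>> freq = np.linspace(50, 100, 6)
    >>> gamma_ant = np.zeros_like(freq, dtype=complex)
    >>> Gb, Gc = balun_and_connector_loss("low", freq, gamma_ant)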
"""
# Angular frequency
w = 2 * np.pi * freq * 1e6
# Inch-to-meters conversion
inch2m = 1 / 39.370
# Conductivity of copper
# Pozar 3rd edition. Alan uses a different number. What
sigma_copper0 = 5.96 * 10 ** 7
# Metal conductivity
sigma_copper = 1 * sigma_copper0
sigma_brass = 0.24 * sigma_copper0
sigma_xx_inner = 0.24 * sigma_copper0
sigma_xx_outer = 0.024 * sigma_copper0
# Permeability
u0 = (
4 * np.pi * 10 ** (-7)
    ) # permeability of free space (same for copper, brass, etc., all nonmagnetic)
ur_air = 1 # relative permeability of air
u_air = u0 * ur_air
# Permittivity
c = 299792458 # speed of light
e0 = 1 / (u0 * c ** 2) # permittivity of free space
parameters = {
"low": {
"balun_length": 43.6 * inch2m,
"connector_length": 0.8 * inch2m,
"er_air": 1.07,
"ric_b": ((5 / 16) * inch2m) / 2,
"roc_b": ((3 / 4) * inch2m) / 2,
"roc_c": (0.16 * inch2m) / 2,
},
"mid": {
"balun_length": 35 * inch2m,
"connector_length": 0.03,
"er_air": 1.2,
"ric_b": ((16 / 32) * inch2m) / 2,
"roc_b": ((1.25) * inch2m) / 2,
"roc_c": (0.161 * inch2m) / 2,
},
}
ep_air = e0 * parameters[band]["er_air"]
tan_delta_air = 0
epp_air = ep_air * tan_delta_air
er_teflon = 2.05 # why Alan????
ep_teflon = e0 * er_teflon
# http://www.kayelaby.npl.co.uk/general_physics/2_6/2_6_5.html
tan_delta_teflon = 0.0002
epp_teflon = ep_teflon * tan_delta_teflon
ur_teflon = 1 # relative permeability of teflon
u_teflon = u0 * ur_teflon
ric_b = parameters[band]["ric_b"]
if monte_carlo_flags[0]:
# 1-sigma of 3%
ric_b *= 1 + 0.03 * np.random.normal()
roc_b = parameters[band]["roc_b"]
if monte_carlo_flags[1]:
# 1-sigma of 3%
roc_b *= 1 + 0.03 * np.random.normal()
l_b = parameters[band]["balun_length"] # length in meters
if monte_carlo_flags[2]:
l_b += 0.001 * np.random.normal() # 1-sigma of 1 mm
# Connector dimensions
ric_c = (0.05 * inch2m) / 2 # radius of outer wall of inner conductor
if monte_carlo_flags[3]:
# 1-sigma of 3%, about < 0.04 mm
ric_c *= 1 + 0.03 * np.random.normal()
roc_c = parameters[band]["roc_c"]
if monte_carlo_flags[4]:
# 1-sigma of 3%
roc_c *= 1 + 0.03 * np.random.normal()
l_c = parameters[band]["connector_length"]
if monte_carlo_flags[5]:
l_c += 0.0001 * np.random.normal()
if monte_carlo_flags[6]:
sigma_copper *= 1 + 0.01 * np.random.normal()
sigma_brass *= 1 + 0.01 * np.random.normal()
sigma_xx_inner *= 1 + 0.01 * np.random.normal()
sigma_xx_outer *= 1 + 0.01 * np.random.normal()
if monte_carlo_flags[7] == 1:
# 1-sigma of 1%
epp_teflon *= 1 + 0.01 * np.random.normal()
# Skin Depth
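    # (classical good-conductor skin depth: delta = sqrt(2 / (w * u * sigma)))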
skin_depth_copper = np.sqrt(2 / (w * u0 * sigma_copper))
skin_depth_brass = np.sqrt(2 / (w * u0 * sigma_brass))
skin_depth_xx_inner = np.sqrt(2 / (w * u0 * sigma_xx_inner))
skin_depth_xx_outer = np.sqrt(2 / (w * u0 * sigma_xx_outer))
# Surface resistance
Rs_copper = 1 / (sigma_copper * skin_depth_copper)
Rs_brass = 1 / (sigma_brass * skin_depth_brass)
Rs_xx_inner = 1 / (sigma_xx_inner * skin_depth_xx_inner)
Rs_xx_outer = 1 / (sigma_xx_outer * skin_depth_xx_outer)
def get_induc_cap_res_cond_prop(
ric, roc, skin_depth_inner, skin_depth_outer, rs_inner, rs_outer, u, ep, epp
):
L_inner = u0 * skin_depth_inner / (4 * np.pi * ric)
L_dielec = (u / (2 * np.pi)) * np.log(roc / ric)
L_outer = u0 * skin_depth_outer / (4 * np.pi * roc)
L = L_inner + L_dielec + L_outer
C = 2 * np.pi * ep / np.log(roc / ric)
R = (rs_inner / (2 * np.pi * ric)) + (rs_outer / (2 * np.pi * roc))
G = 2 * np.pi * w * epp / np.log(roc / ric)
return (
np.sqrt((R + 1j * w * L) * (G + 1j * w * C)),
np.sqrt((R + 1j * w * L) / (G + 1j * w * C)),
)
# Inductance per unit length
gamma_b, Zchar_b = get_induc_cap_res_cond_prop(
ric_b,
roc_b,
skin_depth_copper,
skin_depth_brass,
Rs_copper,
Rs_brass,
u_air,
ep_air,
epp_air,
)
gamma_c, Zchar_c = get_induc_cap_res_cond_prop(
ric_c,
roc_c,
skin_depth_xx_inner,
skin_depth_xx_outer,
Rs_xx_inner,
Rs_xx_outer,
u_teflon,
ep_teflon,
epp_teflon,
)
# Impedance of Agilent terminations
Zref = 50
Ropen, Rshort, Rmatch = rc.agilent_85033E(freq * 1e6, Zref, 1)
def get_gamma(r):
Z = rc.gamma2impedance(r, Zref)
Zin_b = rc.input_impedance_transmission_line(Zchar_b, gamma_b, l_b, Z)
Zin_c = rc.input_impedance_transmission_line(Zchar_c, gamma_c, l_c, Z)
Rin_b = rc.impedance2gamma(Zin_b, Zref)
Rin_c = rc.impedance2gamma(Zin_c, Zref)
return Rin_b, Rin_c
Rin_b_open, Rin_c_open = get_gamma(Ropen)
Rin_b_short, Rin_c_short = get_gamma(Rshort)
Rin_b_match, Rin_c_match = get_gamma(Rmatch)
# S-parameters (it has to be done in this order, first the Connector+Bend, then the
# Balun)
ra_c, S11c, S12S21c, S22c = rc.de_embed(
Ropen, Rshort, Rmatch, Rin_c_open, Rin_c_short, Rin_c_match, gamma_ant
)
# Reflection of antenna only, at the input of bend+connector
ra_b, S11b, S12S21b, S22b = rc.de_embed(
Ropen, Rshort, Rmatch, Rin_b_open, Rin_b_short, Rin_b_match, ra_c
)
def get_g(S11_rev, S12S21, ra_x, ra_y):
return (
np.abs(S12S21)
* (1 - np.abs(ra_x) ** 2)
            / ((np.abs(1 - S11_rev * ra_x)) ** 2 * (1 - np.abs(ra_y) ** 2))
        )
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI
from numpy.testing import assert_allclose
def test_pat():
x = np.asarray([[1, np.nan, 3], [np.nan, 2, np.nan], [3, np.nan, 0],
[np.nan, 1, np.nan], [3, 2, 1]])
bm = BayesGaussMI(x)
assert_allclose(bm.patterns[0], np.r_[0, 2])
assert_allclose(bm.patterns[1], np.r_[1, 3])
def test_2x2():
# Generate correlated data with mean and variance
np.random.seed(3434)
x = np.random.normal(size=(1000, 2))
r = 0.5
    x[:, 1] = r*x[:, 0] + np.sqrt(1-r**2)*x[:, 1]
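    # mixing two independent standard normals this way gives corr(x0, x1) = r
    # while keeping unit marginal variances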